xref: /spdk/module/sock/posix/posix.c (revision 723dd06eb869d6cfdc895dc29bcf439c1e41f20c)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2018 Intel Corporation. All rights reserved.
3  *   Copyright (c) 2020, 2021 Mellanox Technologies LTD. All rights reserved.
4  *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #if defined(__FreeBSD__)
10 #include <sys/event.h>
11 #define SPDK_KEVENT
12 #else
13 #include <sys/epoll.h>
14 #define SPDK_EPOLL
15 #endif
16 
17 #if defined(__linux__)
18 #include <linux/errqueue.h>
19 #endif
20 
21 #include "spdk/env.h"
22 #include "spdk/log.h"
23 #include "spdk/pipe.h"
24 #include "spdk/sock.h"
25 #include "spdk/util.h"
26 #include "spdk/string.h"
27 #include "spdk_internal/sock.h"
28 #include "../sock_kernel.h"
29 
30 #include "openssl/crypto.h"
31 #include "openssl/err.h"
32 #include "openssl/ssl.h"
33 
34 #define MAX_TMPBUF 1024
35 #define PORTNUMLEN 32
36 
37 #if defined(SO_ZEROCOPY) && defined(MSG_ZEROCOPY)
38 #define SPDK_ZEROCOPY
39 #endif
40 
41 struct spdk_posix_sock {
42 	struct spdk_sock	base;
43 	int			fd;
44 
45 	uint32_t		sendmsg_idx;
46 
47 	struct spdk_pipe	*recv_pipe;
48 	void			*recv_buf;
49 	int			recv_buf_sz;
50 	bool			pipe_has_data;
51 	bool			socket_has_data;
52 	bool			zcopy;
53 
54 	int			placement_id;
55 
56 	SSL_CTX			*ctx;
57 	SSL			*ssl;
58 
59 	TAILQ_ENTRY(spdk_posix_sock)	link;
60 };
61 
62 TAILQ_HEAD(spdk_has_data_list, spdk_posix_sock);
63 
64 struct spdk_posix_sock_group_impl {
65 	struct spdk_sock_group_impl	base;
66 	int				fd;
67 	struct spdk_has_data_list	socks_with_data;
68 	int				placement_id;
69 };
70 
71 static struct spdk_sock_impl_opts g_spdk_posix_sock_impl_opts = {
72 	.recv_buf_size = MIN_SO_RCVBUF_SIZE,
73 	.send_buf_size = MIN_SO_SNDBUF_SIZE,
74 	.enable_recv_pipe = true,
75 	.enable_quickack = false,
76 	.enable_placement_id = PLACEMENT_NONE,
77 	.enable_zerocopy_send_server = true,
78 	.enable_zerocopy_send_client = false,
79 	.zerocopy_threshold = 0,
80 	.tls_version = 0,
81 	.enable_ktls = false,
82 	.psk_key = NULL,
83 	.psk_identity = NULL
84 };
85 
86 static struct spdk_sock_map g_map = {
87 	.entries = STAILQ_HEAD_INITIALIZER(g_map.entries),
88 	.mtx = PTHREAD_MUTEX_INITIALIZER
89 };
90 
91 __attribute__((destructor)) static void
92 posix_sock_map_cleanup(void)
93 {
94 	spdk_sock_map_cleanup(&g_map);
95 }
96 
97 #define __posix_sock(sock) (struct spdk_posix_sock *)sock
98 #define __posix_group_impl(group) (struct spdk_posix_sock_group_impl *)group
99 
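/*
 * Copy impl_opts field by field, but only the fields that fit entirely within
 * the caller-provided length.  This lets callers built against an older,
 * smaller struct spdk_sock_impl_opts pass it in safely.  As a rough sketch,
 * SET_FIELD(recv_buf_size) expands to:
 *
 *	if (offsetof(struct spdk_sock_impl_opts, recv_buf_size) +
 *	    sizeof(src->recv_buf_size) <= len) {
 *		dest->recv_buf_size = src->recv_buf_size;
 *	}
 */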
100 static void
101 posix_sock_copy_impl_opts(struct spdk_sock_impl_opts *dest, const struct spdk_sock_impl_opts *src,
102 			  size_t len)
103 {
104 #define FIELD_OK(field) \
105 	offsetof(struct spdk_sock_impl_opts, field) + sizeof(src->field) <= len
106 
107 #define SET_FIELD(field) \
108 	if (FIELD_OK(field)) { \
109 		dest->field = src->field; \
110 	}
111 
112 	SET_FIELD(recv_buf_size);
113 	SET_FIELD(send_buf_size);
114 	SET_FIELD(enable_recv_pipe);
115 	SET_FIELD(enable_zerocopy_send);
116 	SET_FIELD(enable_quickack);
117 	SET_FIELD(enable_placement_id);
118 	SET_FIELD(enable_zerocopy_send_server);
119 	SET_FIELD(enable_zerocopy_send_client);
120 	SET_FIELD(zerocopy_threshold);
121 	SET_FIELD(tls_version);
122 	SET_FIELD(enable_ktls);
123 	SET_FIELD(psk_key);
124 	SET_FIELD(psk_identity);
125 
126 #undef SET_FIELD
127 #undef FIELD_OK
128 }
129 
130 static int
131 posix_sock_impl_get_opts(struct spdk_sock_impl_opts *opts, size_t *len)
132 {
133 	if (!opts || !len) {
134 		errno = EINVAL;
135 		return -1;
136 	}
137 
138 	assert(sizeof(*opts) >= *len);
139 	memset(opts, 0, *len);
140 
141 	posix_sock_copy_impl_opts(opts, &g_spdk_posix_sock_impl_opts, *len);
142 	*len = spdk_min(*len, sizeof(g_spdk_posix_sock_impl_opts));
143 
144 	return 0;
145 }
146 
147 static int
148 posix_sock_impl_set_opts(const struct spdk_sock_impl_opts *opts, size_t len)
149 {
150 	if (!opts) {
151 		errno = EINVAL;
152 		return -1;
153 	}
154 
155 	assert(sizeof(*opts) >= len);
156 	posix_sock_copy_impl_opts(&g_spdk_posix_sock_impl_opts, opts, len);
157 
158 	return 0;
159 }
160 
161 static void
162 posix_opts_get_impl_opts(const struct spdk_sock_opts *opts, struct spdk_sock_impl_opts *dest)
163 {
164 	/* Copy the default impl_opts first to cover cases when user's impl_opts is smaller */
165 	memcpy(dest, &g_spdk_posix_sock_impl_opts, sizeof(*dest));
166 
167 	if (opts->impl_opts != NULL) {
168 		assert(sizeof(*dest) >= opts->impl_opts_size);
169 		posix_sock_copy_impl_opts(dest, opts->impl_opts, opts->impl_opts_size);
170 	}
171 }
172 
173 static int
174 posix_sock_getaddr(struct spdk_sock *_sock, char *saddr, int slen, uint16_t *sport,
175 		   char *caddr, int clen, uint16_t *cport)
176 {
177 	struct spdk_posix_sock *sock = __posix_sock(_sock);
178 	struct sockaddr_storage sa;
179 	socklen_t salen;
180 	int rc;
181 
182 	assert(sock != NULL);
183 
184 	memset(&sa, 0, sizeof sa);
185 	salen = sizeof sa;
186 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
187 	if (rc != 0) {
188 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
189 		return -1;
190 	}
191 
192 	switch (sa.ss_family) {
193 	case AF_UNIX:
194 		/* Acceptable connection types that don't have IPs */
195 		return 0;
196 	case AF_INET:
197 	case AF_INET6:
198 		/* Code below will get IP addresses */
199 		break;
200 	default:
201 		/* Unsupported socket family */
202 		return -1;
203 	}
204 
205 	rc = get_addr_str((struct sockaddr *)&sa, saddr, slen);
206 	if (rc != 0) {
207 		SPDK_ERRLOG("getnameinfo() failed (errno=%d)\n", errno);
208 		return -1;
209 	}
210 
211 	if (sport) {
212 		if (sa.ss_family == AF_INET) {
213 			*sport = ntohs(((struct sockaddr_in *) &sa)->sin_port);
214 		} else if (sa.ss_family == AF_INET6) {
215 			*sport = ntohs(((struct sockaddr_in6 *) &sa)->sin6_port);
216 		}
217 	}
218 
219 	memset(&sa, 0, sizeof sa);
220 	salen = sizeof sa;
221 	rc = getpeername(sock->fd, (struct sockaddr *) &sa, &salen);
222 	if (rc != 0) {
223 		SPDK_ERRLOG("getpeername() failed (errno=%d)\n", errno);
224 		return -1;
225 	}
226 
227 	rc = get_addr_str((struct sockaddr *)&sa, caddr, clen);
228 	if (rc != 0) {
229 		SPDK_ERRLOG("getnameinfo() failed (errno=%d)\n", errno);
230 		return -1;
231 	}
232 
233 	if (cport) {
234 		if (sa.ss_family == AF_INET) {
235 			*cport = ntohs(((struct sockaddr_in *) &sa)->sin_port);
236 		} else if (sa.ss_family == AF_INET6) {
237 			*cport = ntohs(((struct sockaddr_in6 *) &sa)->sin6_port);
238 		}
239 	}
240 
241 	return 0;
242 }
243 
244 enum posix_sock_create_type {
245 	SPDK_SOCK_CREATE_LISTEN,
246 	SPDK_SOCK_CREATE_CONNECT,
247 };
248 
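/*
 * (Re)size the socket's receive pipe to sz bytes.  Any data already buffered
 * in the old pipe is migrated to the new one; if it does not fit, the resize
 * fails with -EINVAL.  A size of 0 tears the pipe down entirely.
 */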
249 static int
250 posix_sock_alloc_pipe(struct spdk_posix_sock *sock, int sz)
251 {
252 	uint8_t *new_buf;
253 	struct spdk_pipe *new_pipe;
254 	struct iovec siov[2];
255 	struct iovec diov[2];
256 	int sbytes;
257 	ssize_t bytes;
258 
259 	if (sock->recv_buf_sz == sz) {
260 		return 0;
261 	}
262 
263 	/* If the new size is 0, just free the pipe */
264 	if (sz == 0) {
265 		spdk_pipe_destroy(sock->recv_pipe);
266 		free(sock->recv_buf);
267 		sock->recv_pipe = NULL;
268 		sock->recv_buf = NULL;
269 		return 0;
270 	} else if (sz < MIN_SOCK_PIPE_SIZE) {
271 		SPDK_ERRLOG("The pipe size must be at least %d bytes\n", MIN_SOCK_PIPE_SIZE);
272 		return -1;
273 	}
274 
275 	/* Round up to next 64 byte multiple */
276 	new_buf = calloc(SPDK_ALIGN_CEIL(sz + 1, 64), sizeof(uint8_t));
277 	if (!new_buf) {
278 		SPDK_ERRLOG("socket recv buf allocation failed\n");
279 		return -ENOMEM;
280 	}
281 
282 	new_pipe = spdk_pipe_create(new_buf, sz + 1);
283 	if (new_pipe == NULL) {
284 		SPDK_ERRLOG("socket pipe allocation failed\n");
285 		free(new_buf);
286 		return -ENOMEM;
287 	}
288 
289 	if (sock->recv_pipe != NULL) {
290 		/* Pull all of the data out of the old pipe */
291 		sbytes = spdk_pipe_reader_get_buffer(sock->recv_pipe, sock->recv_buf_sz, siov);
292 		if (sbytes > sz) {
293 			/* Too much data to fit into the new pipe size */
294 			spdk_pipe_destroy(new_pipe);
295 			free(new_buf);
296 			return -EINVAL;
297 		}
298 
299 		sbytes = spdk_pipe_writer_get_buffer(new_pipe, sz, diov);
300 		assert(sbytes == sz);
301 
302 		bytes = spdk_iovcpy(siov, 2, diov, 2);
303 		spdk_pipe_writer_advance(new_pipe, bytes);
304 
305 		spdk_pipe_destroy(sock->recv_pipe);
306 		free(sock->recv_buf);
307 	}
308 
309 	sock->recv_buf_sz = sz;
310 	sock->recv_buf = new_buf;
311 	sock->recv_pipe = new_pipe;
312 
313 	return 0;
314 }
315 
316 static int
317 posix_sock_set_recvbuf(struct spdk_sock *_sock, int sz)
318 {
319 	struct spdk_posix_sock *sock = __posix_sock(_sock);
320 	int min_size;
321 	int rc;
322 
323 	assert(sock != NULL);
324 
325 	if (_sock->impl_opts.enable_recv_pipe) {
326 		rc = posix_sock_alloc_pipe(sock, sz);
327 		if (rc) {
328 			return rc;
329 		}
330 	}
331 
332 	/* Set the kernel receive buffer size to at least the larger of MIN_SO_RCVBUF_SIZE
333 	 * and g_spdk_posix_sock_impl_opts.recv_buf_size. */
334 	min_size = spdk_max(MIN_SO_RCVBUF_SIZE, g_spdk_posix_sock_impl_opts.recv_buf_size);
335 
336 	if (sz < min_size) {
337 		sz = min_size;
338 	}
339 
340 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
341 	if (rc < 0) {
342 		return rc;
343 	}
344 
345 	_sock->impl_opts.recv_buf_size = sz;
346 
347 	return 0;
348 }
349 
350 static int
351 posix_sock_set_sendbuf(struct spdk_sock *_sock, int sz)
352 {
353 	struct spdk_posix_sock *sock = __posix_sock(_sock);
354 	int min_size;
355 	int rc;
356 
357 	assert(sock != NULL);
358 
359 	/* Set the kernel send buffer size to at least the larger of MIN_SO_SNDBUF_SIZE
360 	 * and g_spdk_posix_sock_impl_opts.send_buf_size. */
361 	min_size = spdk_max(MIN_SO_SNDBUF_SIZE, g_spdk_posix_sock_impl_opts.send_buf_size);
362 
363 	if (sz < min_size) {
364 		sz = min_size;
365 	}
366 
367 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
368 	if (rc < 0) {
369 		return rc;
370 	}
371 
372 	_sock->impl_opts.send_buf_size = sz;
373 
374 	return 0;
375 }
376 
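/*
 * Per-socket tuning applied right after the fd is wrapped: opportunistically
 * enable zero-copy sends (SO_ZEROCOPY), optionally enable TCP_QUICKACK, and
 * look up/record the placement id used for poll-group scheduling.
 */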
377 static void
378 posix_sock_init(struct spdk_posix_sock *sock, bool enable_zero_copy)
379 {
380 #if defined(SPDK_ZEROCOPY) || defined(__linux__)
381 	int flag;
382 	int rc;
383 #endif
384 
385 #if defined(SPDK_ZEROCOPY)
386 	flag = 1;
387 
388 	if (enable_zero_copy) {
389 		/* Try to turn on zero copy sends */
390 		rc = setsockopt(sock->fd, SOL_SOCKET, SO_ZEROCOPY, &flag, sizeof(flag));
391 		if (rc == 0) {
392 			sock->zcopy = true;
393 		}
394 	}
395 #endif
396 
397 #if defined(__linux__)
398 	flag = 1;
399 
400 	if (sock->base.impl_opts.enable_quickack) {
401 		rc = setsockopt(sock->fd, IPPROTO_TCP, TCP_QUICKACK, &flag, sizeof(flag));
402 		if (rc != 0) {
403 			SPDK_ERRLOG("Failed to set TCP_QUICKACK\n");
404 		}
405 	}
406 
407 	spdk_sock_get_placement_id(sock->fd, sock->base.impl_opts.enable_placement_id,
408 				   &sock->placement_id);
409 
410 	if (sock->base.impl_opts.enable_placement_id == PLACEMENT_MARK) {
411 		/* Save placement_id */
412 		spdk_sock_map_insert(&g_map, sock->placement_id, NULL);
413 	}
414 #endif
415 }
416 
417 static struct spdk_posix_sock *
418 posix_sock_alloc(int fd, struct spdk_sock_impl_opts *impl_opts, bool enable_zero_copy)
419 {
420 	struct spdk_posix_sock *sock;
421 
422 	sock = calloc(1, sizeof(*sock));
423 	if (sock == NULL) {
424 		SPDK_ERRLOG("sock allocation failed\n");
425 		return NULL;
426 	}
427 
428 	sock->fd = fd;
429 	memcpy(&sock->base.impl_opts, impl_opts, sizeof(*impl_opts));
430 	posix_sock_init(sock, enable_zero_copy);
431 
432 	return sock;
433 }
434 
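/*
 * Create and configure a socket fd for one addrinfo entry: best-effort
 * SO_RCVBUF/SO_SNDBUF sizing, then SO_REUSEADDR, TCP_NODELAY, optional
 * SO_PRIORITY, IPV6_V6ONLY for AF_INET6, and (on Linux) TCP_USER_TIMEOUT
 * when an ack timeout was requested.  Returns the fd, or -1 on failure.
 */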
435 static int
436 posix_fd_create(struct addrinfo *res, struct spdk_sock_opts *opts,
437 		struct spdk_sock_impl_opts *impl_opts)
438 {
439 	int fd;
440 	int val = 1;
441 	int rc, sz;
442 #if defined(__linux__)
443 	int to;
444 #endif
445 
446 	fd = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
447 	if (fd < 0) {
448 		/* error */
449 		return -1;
450 	}
451 
452 	sz = impl_opts->recv_buf_size;
453 	rc = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
454 	if (rc) {
455 		/* Not fatal */
456 	}
457 
458 	sz = impl_opts->send_buf_size;
459 	rc = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sz, sizeof(sz));
460 	if (rc) {
461 		/* Not fatal */
462 	}
463 
464 	rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof val);
465 	if (rc != 0) {
466 		close(fd);
467 		/* error */
468 		return -1;
469 	}
470 	rc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, sizeof val);
471 	if (rc != 0) {
472 		close(fd);
473 		/* error */
474 		return -1;
475 	}
476 
477 #if defined(SO_PRIORITY)
478 	if (opts->priority) {
479 		rc = setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &opts->priority, sizeof val);
480 		if (rc != 0) {
481 			close(fd);
482 			/* error */
483 			return -1;
484 		}
485 	}
486 #endif
487 
488 	if (res->ai_family == AF_INET6) {
489 		rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &val, sizeof val);
490 		if (rc != 0) {
491 			close(fd);
492 			/* error */
493 			return -1;
494 		}
495 	}
496 
497 	if (opts->ack_timeout) {
498 #if defined(__linux__)
499 		to = opts->ack_timeout;
500 		rc = setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &to, sizeof(to));
501 		if (rc != 0) {
502 			close(fd);
503 			/* error */
504 			return -1;
505 		}
506 #else
507 		SPDK_WARNLOG("TCP_USER_TIMEOUT is not supported.\n");
508 #endif
509 	}
510 
511 	return fd;
512 }
513 
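/*
 * OpenSSL PSK server callback: verify that the identity presented by the
 * client matches the configured psk_identity, then hex-decode psk_key into
 * the buffer provided by OpenSSL.  Returns the key length in bytes, or 0 to
 * abort the handshake.
 */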
514 static unsigned int
515 posix_sock_tls_psk_server_cb(SSL *ssl,
516 			     const char *id,
517 			     unsigned char *psk,
518 			     unsigned int max_psk_len)
519 {
520 	long key_len;
521 	unsigned char *default_psk;
522 	struct spdk_sock_impl_opts *impl_opts;
523 
524 	impl_opts = SSL_get_app_data(ssl);
525 
526 	if (impl_opts->psk_key == NULL) {
527 		SPDK_ERRLOG("PSK is not set\n");
528 		goto err;
529 	}
530 	SPDK_DEBUGLOG(sock_posix, "Length of Client's PSK ID %lu\n", strlen(impl_opts->psk_identity));
531 	if (id == NULL) {
532 		SPDK_ERRLOG("Received empty PSK ID\n");
533 		goto err;
534 	}
535 	SPDK_DEBUGLOG(sock_posix, "Received PSK ID '%s'\n", id);
536 	if (strcmp(impl_opts->psk_identity, id) != 0) {
537 		SPDK_ERRLOG("Unknown Client's PSK ID\n");
538 		goto err;
539 	}
540 
541 	SPDK_DEBUGLOG(sock_posix, "Length of Client's PSK KEY %u\n", max_psk_len);
542 	default_psk = OPENSSL_hexstr2buf(impl_opts->psk_key, &key_len);
543 	if (default_psk == NULL) {
544 		SPDK_ERRLOG("Could not unhexlify PSK\n");
545 		goto err;
546 	}
547 	if (key_len > max_psk_len) {
548 		SPDK_ERRLOG("Insufficient buffer size to copy PSK\n");
549 		OPENSSL_free(default_psk);
550 		goto err;
551 	}
552 
553 	memcpy(psk, default_psk, key_len);
554 	OPENSSL_free(default_psk);
555 
556 	return key_len;
557 
558 err:
559 	return 0;
560 }
561 
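/*
 * OpenSSL PSK client callback: send the configured psk_identity and the
 * hex-decoded psk_key.  Returns the key length in bytes, or 0 to abort the
 * handshake.
 */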
562 static unsigned int
563 posix_sock_tls_psk_client_cb(SSL *ssl, const char *hint,
564 			     char *identity,
565 			     unsigned int max_identity_len,
566 			     unsigned char *psk,
567 			     unsigned int max_psk_len)
568 {
569 	long key_len;
570 	unsigned char *default_psk;
571 	struct spdk_sock_impl_opts *impl_opts;
572 
573 	impl_opts = SSL_get_app_data(ssl);
574 
575 	if (hint) {
576 		SPDK_DEBUGLOG(sock_posix, "Received PSK identity hint '%s'\n", hint);
577 	}
578 
579 	if (impl_opts->psk_key == NULL) {
580 		SPDK_ERRLOG("PSK is not set\n");
581 		goto err;
582 	}
583 	default_psk = OPENSSL_hexstr2buf(impl_opts->psk_key, &key_len);
584 	if (default_psk == NULL) {
585 		SPDK_ERRLOG("Could not unhexlify PSK\n");
586 		goto err;
587 	}
588 	if ((strlen(impl_opts->psk_identity) + 1 > max_identity_len)
589 	    || (key_len > max_psk_len)) {
590 		OPENSSL_free(default_psk);
591 		SPDK_ERRLOG("PSK ID or Key buffer is not sufficient\n");
592 		goto err;
593 	}
594 	spdk_strcpy_pad(identity, impl_opts->psk_identity, strlen(impl_opts->psk_identity), 0);
595 	SPDK_DEBUGLOG(sock_posix, "Sending PSK identity '%s'\n", identity);
596 
597 	memcpy(psk, default_psk, key_len);
598 	SPDK_DEBUGLOG(sock_posix, "Provided out-of-band (OOB) PSK for TLS1.3 client\n");
599 	OPENSSL_free(default_psk);
600 
601 	return key_len;
602 
603 err:
604 	return 0;
605 }
606 
607 static SSL_CTX *
608 posix_sock_create_ssl_context(const SSL_METHOD *method, struct spdk_sock_opts *opts,
609 			      struct spdk_sock_impl_opts *impl_opts)
610 {
611 	SSL_CTX *ctx;
612 	int tls_version = 0;
613 	bool ktls_enabled = false;
614 #ifdef SSL_OP_ENABLE_KTLS
615 	long options;
616 #endif
617 
618 	SSL_library_init();
619 	OpenSSL_add_all_algorithms();
620 	SSL_load_error_strings();
621 	/* Create an SSL context from the given method; allowed TLS versions are constrained below. */
622 	ctx = SSL_CTX_new(method);
623 	if (!ctx) {
624 		SPDK_ERRLOG("SSL_CTX_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
625 		return NULL;
626 	}
627 	SPDK_DEBUGLOG(sock_posix, "SSL context created\n");
628 
629 	switch (impl_opts->tls_version) {
630 	case 0:
631 		/* auto-negotiation */
632 		break;
633 	case SPDK_TLS_VERSION_1_1:
634 		tls_version = TLS1_1_VERSION;
635 		break;
636 	case SPDK_TLS_VERSION_1_2:
637 		tls_version = TLS1_2_VERSION;
638 		break;
639 	case SPDK_TLS_VERSION_1_3:
640 		tls_version = TLS1_3_VERSION;
641 		break;
642 	default:
643 		SPDK_ERRLOG("Incorrect TLS version provided: %d\n", impl_opts->tls_version);
644 		goto err;
645 	}
646 
647 	if (tls_version) {
648 		SPDK_DEBUGLOG(sock_posix, "Hardening TLS version to '%d'='0x%X'\n", impl_opts->tls_version,
649 			      tls_version);
650 		if (!SSL_CTX_set_min_proto_version(ctx, tls_version)) {
651 			SPDK_ERRLOG("Unable to set Min TLS version to '%d'='0x%X'\n", impl_opts->tls_version, tls_version);
652 			goto err;
653 		}
654 		if (!SSL_CTX_set_max_proto_version(ctx, tls_version)) {
655 			SPDK_ERRLOG("Unable to set Max TLS version to '%d'='0x%X'\n", impl_opts->tls_version, tls_version);
656 			goto err;
657 		}
658 	}
659 	if (impl_opts->enable_ktls) {
660 		SPDK_DEBUGLOG(sock_posix, "Enabling kTLS offload\n");
661 #ifdef SSL_OP_ENABLE_KTLS
662 		options = SSL_CTX_set_options(ctx, SSL_OP_ENABLE_KTLS);
663 		ktls_enabled = options & SSL_OP_ENABLE_KTLS;
664 #else
665 		ktls_enabled = false;
666 #endif
667 		if (!ktls_enabled) {
668 			SPDK_ERRLOG("Unable to set kTLS offload via SSL_CTX_set_options(). Configure openssl with 'enable-ktls'\n");
669 			goto err;
670 		}
671 	}
672 
673 	return ctx;
674 
675 err:
676 	SSL_CTX_free(ctx);
677 	return NULL;
678 }
679 
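/*
 * Drive the TLS handshake on an already-connected fd.  SSL_connect() is
 * retried while OpenSSL reports WANT_READ/WANT_WRITE; the fd is still in
 * blocking mode here and is only switched to non-blocking after the
 * handshake completes in posix_sock_create().
 */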
680 static SSL *
681 ssl_sock_connect_loop(SSL_CTX *ctx, int fd, struct spdk_sock_impl_opts *impl_opts)
682 {
683 	int rc;
684 	SSL *ssl;
685 	int ssl_get_error;
686 
687 	ssl = SSL_new(ctx);
688 	if (!ssl) {
689 		SPDK_ERRLOG("SSL_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
690 		return NULL;
691 	}
692 	SSL_set_fd(ssl, fd);
693 	SSL_set_app_data(ssl, impl_opts);
694 	SSL_set_psk_client_callback(ssl, posix_sock_tls_psk_client_cb);
695 	SPDK_DEBUGLOG(sock_posix, "SSL object creation finished: %p\n", ssl);
696 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
697 	while ((rc = SSL_connect(ssl)) != 1) {
698 		SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
699 		ssl_get_error = SSL_get_error(ssl, rc);
700 		SPDK_DEBUGLOG(sock_posix, "SSL_connect failed %d = SSL_connect(%p), %d = SSL_get_error(%p, %d)\n",
701 			      rc, ssl, ssl_get_error, ssl, rc);
702 		switch (ssl_get_error) {
703 		case SSL_ERROR_WANT_READ:
704 		case SSL_ERROR_WANT_WRITE:
705 			continue;
706 		default:
707 			break;
708 		}
709 		SPDK_ERRLOG("SSL_connect() failed, errno = %d\n", errno);
710 		SSL_free(ssl);
711 		return NULL;
712 	}
713 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
714 	SPDK_DEBUGLOG(sock_posix, "Negotiated Cipher suite:%s\n",
715 		      SSL_CIPHER_get_name(SSL_get_current_cipher(ssl)));
716 	return ssl;
717 }
718 
719 static SSL *
720 ssl_sock_accept_loop(SSL_CTX *ctx, int fd, struct spdk_sock_impl_opts *impl_opts)
721 {
722 	int rc;
723 	SSL *ssl;
724 	int ssl_get_error;
725 
726 	ssl = SSL_new(ctx);
727 	if (!ssl) {
728 		SPDK_ERRLOG("SSL_new() failed, msg = %s\n", ERR_error_string(ERR_peek_last_error(), NULL));
729 		return NULL;
730 	}
731 	SSL_set_fd(ssl, fd);
732 	SSL_set_app_data(ssl, impl_opts);
733 	SSL_set_psk_server_callback(ssl, posix_sock_tls_psk_server_cb);
734 	SPDK_DEBUGLOG(sock_posix, "SSL object creation finished: %p\n", ssl);
735 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
736 	while ((rc = SSL_accept(ssl)) != 1) {
737 		SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
738 		ssl_get_error = SSL_get_error(ssl, rc);
739 		SPDK_DEBUGLOG(sock_posix, "SSL_accept failed %d = SSL_accept(%p), %d = SSL_get_error(%p, %d)\n", rc,
740 			      ssl, ssl_get_error, ssl, rc);
741 		switch (ssl_get_error) {
742 		case SSL_ERROR_WANT_READ:
743 		case SSL_ERROR_WANT_WRITE:
744 			continue;
745 		default:
746 			break;
747 		}
748 		SPDK_ERRLOG("SSL_accept() failed, errno = %d\n", errno);
749 		SSL_free(ssl);
750 		return NULL;
751 	}
752 	SPDK_DEBUGLOG(sock_posix, "%s = SSL_state_string_long(%p)\n", SSL_state_string_long(ssl), ssl);
753 	SPDK_DEBUGLOG(sock_posix, "Negotiated Cipher suite:%s\n",
754 		      SSL_CIPHER_get_name(SSL_get_current_cipher(ssl)));
755 	return ssl;
756 }
757 
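/*
 * readv()/writev()-style wrappers around SSL_read()/SSL_write().  The iovecs
 * are processed one element at a time; when a transfer comes up short or
 * fails, the SSL error is translated into an errno (EAGAIN for retryable
 * conditions, ENOTCONN otherwise) so callers can treat these like the plain
 * socket calls they replace.
 */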
758 static ssize_t
759 SSL_readv(SSL *ssl, const struct iovec *iov, int iovcnt)
760 {
761 	int i, rc = 0;
762 	ssize_t total = 0;
763 
764 	for (i = 0; i < iovcnt; i++) {
765 		rc = SSL_read(ssl, iov[i].iov_base, iov[i].iov_len);
766 
767 		if (rc > 0) {
768 			total += rc;
769 		}
770 		if (rc != (int)iov[i].iov_len) {
771 			break;
772 		}
773 	}
774 	if (total > 0) {
775 		errno = 0;
776 		return total;
777 	}
778 	switch (SSL_get_error(ssl, rc)) {
779 	case SSL_ERROR_ZERO_RETURN:
780 		errno = ENOTCONN;
781 		return 0;
782 	case SSL_ERROR_WANT_READ:
783 	case SSL_ERROR_WANT_WRITE:
784 	case SSL_ERROR_WANT_CONNECT:
785 	case SSL_ERROR_WANT_ACCEPT:
786 	case SSL_ERROR_WANT_X509_LOOKUP:
787 	case SSL_ERROR_WANT_ASYNC:
788 	case SSL_ERROR_WANT_ASYNC_JOB:
789 	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
790 		errno = EAGAIN;
791 		return -1;
792 	case SSL_ERROR_SYSCALL:
793 	case SSL_ERROR_SSL:
794 		errno = ENOTCONN;
795 		return -1;
796 	default:
797 		errno = ENOTCONN;
798 		return -1;
799 	}
800 }
801 
802 static ssize_t
803 SSL_writev(SSL *ssl, struct iovec *iov, int iovcnt)
804 {
805 	int i, rc = 0;
806 	ssize_t total = 0;
807 
808 	for (i = 0; i < iovcnt; i++) {
809 		rc = SSL_write(ssl, iov[i].iov_base, iov[i].iov_len);
810 
811 		if (rc > 0) {
812 			total += rc;
813 		}
814 		if (rc != (int)iov[i].iov_len) {
815 			break;
816 		}
817 	}
818 	if (total > 0) {
819 		errno = 0;
820 		return total;
821 	}
822 	switch (SSL_get_error(ssl, rc)) {
823 	case SSL_ERROR_ZERO_RETURN:
824 		errno = ENOTCONN;
825 		return 0;
826 	case SSL_ERROR_WANT_READ:
827 	case SSL_ERROR_WANT_WRITE:
828 	case SSL_ERROR_WANT_CONNECT:
829 	case SSL_ERROR_WANT_ACCEPT:
830 	case SSL_ERROR_WANT_X509_LOOKUP:
831 	case SSL_ERROR_WANT_ASYNC:
832 	case SSL_ERROR_WANT_ASYNC_JOB:
833 	case SSL_ERROR_WANT_CLIENT_HELLO_CB:
834 		errno = EAGAIN;
835 		return -1;
836 	case SSL_ERROR_SYSCALL:
837 	case SSL_ERROR_SSL:
838 		errno = ENOTCONN;
839 		return -1;
840 	default:
841 		errno = ENOTCONN;
842 		return -1;
843 	}
844 }
845 
846 static struct spdk_sock *
847 posix_sock_create(const char *ip, int port,
848 		  enum posix_sock_create_type type,
849 		  struct spdk_sock_opts *opts,
850 		  bool enable_ssl)
851 {
852 	struct spdk_posix_sock *sock;
853 	struct spdk_sock_impl_opts impl_opts;
854 	char buf[MAX_TMPBUF];
855 	char portnum[PORTNUMLEN];
856 	char *p;
857 	struct addrinfo hints, *res, *res0;
858 	int fd, flag;
859 	int rc;
860 	bool enable_zcopy_user_opts = true;
861 	bool enable_zcopy_impl_opts = true;
862 	SSL_CTX *ctx = NULL;
863 	SSL *ssl = NULL;
864 
865 	assert(opts != NULL);
866 	posix_opts_get_impl_opts(opts, &impl_opts);
867 
868 	if (ip == NULL) {
869 		return NULL;
870 	}
871 	if (ip[0] == '[') {
872 		snprintf(buf, sizeof(buf), "%s", ip + 1);
873 		p = strchr(buf, ']');
874 		if (p != NULL) {
875 			*p = '\0';
876 		}
877 		ip = (const char *) &buf[0];
878 	}
879 
880 	snprintf(portnum, sizeof portnum, "%d", port);
881 	memset(&hints, 0, sizeof hints);
882 	hints.ai_family = PF_UNSPEC;
883 	hints.ai_socktype = SOCK_STREAM;
884 	hints.ai_flags = AI_NUMERICSERV;
885 	hints.ai_flags |= AI_PASSIVE;
886 	hints.ai_flags |= AI_NUMERICHOST;
887 	rc = getaddrinfo(ip, portnum, &hints, &res0);
888 	if (rc != 0) {
889 		SPDK_ERRLOG("getaddrinfo() failed %s (%d)\n", gai_strerror(rc), rc);
890 		return NULL;
891 	}
892 
893 	/* Try each resolved address until a socket can be created and bound/connected */
894 	fd = -1;
895 	for (res = res0; res != NULL; res = res->ai_next) {
896 retry:
897 		fd = posix_fd_create(res, opts, &impl_opts);
898 		if (fd < 0) {
899 			continue;
900 		}
901 		if (type == SPDK_SOCK_CREATE_LISTEN) {
902 			rc = bind(fd, res->ai_addr, res->ai_addrlen);
903 			if (rc != 0) {
904 				SPDK_ERRLOG("bind() failed at port %d, errno = %d\n", port, errno);
905 				switch (errno) {
906 				case EINTR:
907 					/* interrupted? */
908 					close(fd);
909 					goto retry;
910 				case EADDRNOTAVAIL:
911 					SPDK_ERRLOG("IP address %s not available. "
912 						    "Verify IP address in config file "
913 						    "and make sure setup script is "
914 						    "run before starting spdk app.\n", ip);
915 				/* FALLTHROUGH */
916 				default:
917 					/* try next family */
918 					close(fd);
919 					fd = -1;
920 					continue;
921 				}
922 			}
923 			/* bind OK */
924 			rc = listen(fd, 512);
925 			if (rc != 0) {
926 				SPDK_ERRLOG("listen() failed, errno = %d\n", errno);
927 				close(fd);
928 				fd = -1;
929 				break;
930 			}
931 			enable_zcopy_impl_opts = impl_opts.enable_zerocopy_send_server;
932 		} else if (type == SPDK_SOCK_CREATE_CONNECT) {
933 			rc = connect(fd, res->ai_addr, res->ai_addrlen);
934 			if (rc != 0) {
935 				SPDK_ERRLOG("connect() failed, errno = %d\n", errno);
936 				/* try next family */
937 				close(fd);
938 				fd = -1;
939 				continue;
940 			}
941 			enable_zcopy_impl_opts = impl_opts.enable_zerocopy_send_client;
942 			if (enable_ssl) {
943 				ctx = posix_sock_create_ssl_context(TLS_client_method(), opts, &impl_opts);
944 				if (!ctx) {
945 					SPDK_ERRLOG("posix_sock_create_ssl_context() failed, errno = %d\n", errno);
946 					close(fd);
947 					fd = -1;
948 					break;
949 				}
950 				ssl = ssl_sock_connect_loop(ctx, fd, &impl_opts);
951 				if (!ssl) {
952 					SPDK_ERRLOG("ssl_sock_connect_loop() failed, errno = %d\n", errno);
953 					close(fd);
954 					fd = -1;
955 					SSL_CTX_free(ctx);
956 					break;
957 				}
958 			}
959 		}
960 
961 		flag = fcntl(fd, F_GETFL);
962 		if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
963 			SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%d)\n", fd, errno);
964 			SSL_free(ssl);
965 			SSL_CTX_free(ctx);
966 			close(fd);
967 			fd = -1;
968 			break;
969 		}
970 		break;
971 	}
972 	freeaddrinfo(res0);
973 
974 	if (fd < 0) {
975 		return NULL;
976 	}
977 
978 	/* Only enable zero copy for non-loopback and non-ssl sockets. */
979 	enable_zcopy_user_opts = opts->zcopy && !sock_is_loopback(fd) && !enable_ssl;
980 
981 	sock = posix_sock_alloc(fd, &impl_opts, enable_zcopy_user_opts && enable_zcopy_impl_opts);
982 	if (sock == NULL) {
983 		SPDK_ERRLOG("sock allocation failed\n");
984 		SSL_free(ssl);
985 		SSL_CTX_free(ctx);
986 		close(fd);
987 		return NULL;
988 	}
989 
990 	if (ctx) {
991 		sock->ctx = ctx;
992 	}
993 
994 	if (ssl) {
995 		sock->ssl = ssl;
996 	}
997 
998 	return &sock->base;
999 }
1000 
1001 static struct spdk_sock *
1002 posix_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
1003 {
1004 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_LISTEN, opts, false);
1005 }
1006 
1007 static struct spdk_sock *
1008 posix_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
1009 {
1010 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_CONNECT, opts, false);
1011 }
1012 
1013 static struct spdk_sock *
1014 _posix_sock_accept(struct spdk_sock *_sock, bool enable_ssl)
1015 {
1016 	struct spdk_posix_sock		*sock = __posix_sock(_sock);
1017 	struct sockaddr_storage		sa;
1018 	socklen_t			salen;
1019 	int				rc, fd;
1020 	struct spdk_posix_sock		*new_sock;
1021 	int				flag;
1022 	SSL_CTX *ctx = NULL;
1023 	SSL *ssl = NULL;
1024 
1025 	memset(&sa, 0, sizeof(sa));
1026 	salen = sizeof(sa);
1027 
1028 	assert(sock != NULL);
1029 
1030 	rc = accept(sock->fd, (struct sockaddr *)&sa, &salen);
1031 
1032 	if (rc == -1) {
1033 		return NULL;
1034 	}
1035 
1036 	fd = rc;
1037 
1038 	flag = fcntl(fd, F_GETFL);
1039 	if ((!(flag & O_NONBLOCK)) && (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0)) {
1040 		SPDK_ERRLOG("fcntl can't set nonblocking mode for socket, fd: %d (%d)\n", fd, errno);
1041 		close(fd);
1042 		return NULL;
1043 	}
1044 
1045 #if defined(SO_PRIORITY)
1046 	/* The priority is not inherited by the accepted socket, so set it again here */
1047 	if (sock->base.opts.priority) {
1048 		rc = setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &sock->base.opts.priority, sizeof(int));
1049 		if (rc != 0) {
1050 			close(fd);
1051 			return NULL;
1052 		}
1053 	}
1054 #endif
1055 
1056 	/* Establish SSL connection */
1057 	if (enable_ssl) {
1058 		ctx = posix_sock_create_ssl_context(TLS_server_method(), &sock->base.opts, &sock->base.impl_opts);
1059 		if (!ctx) {
1060 			SPDK_ERRLOG("posix_sock_create_ssl_context() failed, errno = %d\n", errno);
1061 			close(fd);
1062 			return NULL;
1063 		}
1064 		ssl = ssl_sock_accept_loop(ctx, fd, &sock->base.impl_opts);
1065 		if (!ssl) {
1066 			SPDK_ERRLOG("ssl_sock_accept_loop() failed, errno = %d\n", errno);
1067 			close(fd);
1068 			SSL_CTX_free(ctx);
1069 			return NULL;
1070 		}
1071 	}
1072 
1073 	/* Inherit the zero copy feature from the listen socket */
1074 	new_sock = posix_sock_alloc(fd, &sock->base.impl_opts, sock->zcopy);
1075 	if (new_sock == NULL) {
1076 		close(fd);
1077 		SSL_free(ssl);
1078 		SSL_CTX_free(ctx);
1079 		return NULL;
1080 	}
1081 
1082 	if (ctx) {
1083 		new_sock->ctx = ctx;
1084 	}
1085 
1086 	if (ssl) {
1087 		new_sock->ssl = ssl;
1088 	}
1089 
1090 	return &new_sock->base;
1091 }
1092 
1093 static struct spdk_sock *
1094 posix_sock_accept(struct spdk_sock *_sock)
1095 {
1096 	return _posix_sock_accept(_sock, false);
1097 }
1098 
1099 static int
1100 posix_sock_close(struct spdk_sock *_sock)
1101 {
1102 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1103 
1104 	assert(TAILQ_EMPTY(&_sock->pending_reqs));
1105 
1106 	/* If the socket fails to close, the best choice is to
1107 	 * leak the fd but continue to free the rest of the sock
1108 	 * memory. */
1109 	close(sock->fd);
1110 
1111 	SSL_free(sock->ssl);
1112 	SSL_CTX_free(sock->ctx);
1113 
1114 	spdk_pipe_destroy(sock->recv_pipe);
1115 	free(sock->recv_buf);
1116 	free(sock);
1117 
1118 	return 0;
1119 }
1120 
1121 #ifdef SPDK_ZEROCOPY
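/*
 * Reap MSG_ZEROCOPY completions from the socket error queue.  Each
 * notification covers a range of sendmsg indices (ee_info..ee_data); the
 * matching requests on the pending list are completed in order.
 */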
1122 static int
1123 _sock_check_zcopy(struct spdk_sock *sock)
1124 {
1125 	struct spdk_posix_sock *psock = __posix_sock(sock);
1126 	struct msghdr msgh = {};
1127 	uint8_t buf[sizeof(struct cmsghdr) + sizeof(struct sock_extended_err)];
1128 	ssize_t rc;
1129 	struct sock_extended_err *serr;
1130 	struct cmsghdr *cm;
1131 	uint32_t idx;
1132 	struct spdk_sock_request *req, *treq;
1133 	bool found;
1134 
1135 	msgh.msg_control = buf;
1136 	msgh.msg_controllen = sizeof(buf);
1137 
1138 	while (true) {
1139 		rc = recvmsg(psock->fd, &msgh, MSG_ERRQUEUE);
1140 
1141 		if (rc < 0) {
1142 			if (errno == EWOULDBLOCK || errno == EAGAIN) {
1143 				return 0;
1144 			}
1145 
1146 			if (!TAILQ_EMPTY(&sock->pending_reqs)) {
1147 				SPDK_ERRLOG("Attempting to receive from ERRQUEUE yielded an error, but the pending list still has orphaned entries\n");
1148 			} else {
1149 				SPDK_WARNLOG("Recvmsg yielded an error!\n");
1150 			}
1151 			return 0;
1152 		}
1153 
1154 		cm = CMSG_FIRSTHDR(&msgh);
1155 		if (!(cm &&
1156 		      ((cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) ||
1157 		       (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR)))) {
1158 			SPDK_WARNLOG("Unexpected cmsg level or type!\n");
1159 			return 0;
1160 		}
1161 
1162 		serr = (struct sock_extended_err *)CMSG_DATA(cm);
1163 		if (serr->ee_errno != 0 || serr->ee_origin != SO_EE_ORIGIN_ZEROCOPY) {
1164 			SPDK_WARNLOG("Unexpected extended error origin\n");
1165 			return 0;
1166 		}
1167 
1168 		/* Most of the time, the pending_reqs array is in the exact
1169 		 * order we need such that all of the requests to complete are
1170 		 * in order, in the front. It is guaranteed that all requests
1171 		 * belonging to the same sendmsg call are sequential, so once
1172 		 * we encounter one match we can stop looping as soon as a
1173 		 * non-match is found.
1174 		 */
1175 		for (idx = serr->ee_info; idx <= serr->ee_data; idx++) {
1176 			found = false;
1177 			TAILQ_FOREACH_SAFE(req, &sock->pending_reqs, internal.link, treq) {
1178 				if (!req->internal.is_zcopy) {
1179 					/* This wasn't a zcopy request. It was just waiting in line to complete */
1180 					rc = spdk_sock_request_put(sock, req, 0);
1181 					if (rc < 0) {
1182 						return rc;
1183 					}
1184 				} else if (req->internal.offset == idx) {
1185 					found = true;
1186 					rc = spdk_sock_request_put(sock, req, 0);
1187 					if (rc < 0) {
1188 						return rc;
1189 					}
1190 				} else if (found) {
1191 					break;
1192 				}
1193 			}
1194 		}
1195 	}
1196 
1197 	return 0;
1198 }
1199 #endif
1200 
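/*
 * Flush queued asynchronous writes with a single vectored send (sendmsg() or
 * SSL_writev()), then walk the queued requests and account for the bytes that
 * actually went out.  Fully written requests move to the pending list;
 * zero-copy requests remain there until the kernel reports completion via the
 * error queue.
 */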
1201 static int
1202 _sock_flush(struct spdk_sock *sock)
1203 {
1204 	struct spdk_posix_sock *psock = __posix_sock(sock);
1205 	struct msghdr msg = {};
1206 	int flags;
1207 	struct iovec iovs[IOV_BATCH_SIZE];
1208 	int iovcnt;
1209 	int retval;
1210 	struct spdk_sock_request *req;
1211 	int i;
1212 	ssize_t rc;
1213 	unsigned int offset;
1214 	size_t len;
1215 	bool is_zcopy = false;
1216 
1217 	/* Can't flush from within a callback or we end up with recursive calls */
1218 	if (sock->cb_cnt > 0) {
1219 		return 0;
1220 	}
1221 
1222 #ifdef SPDK_ZEROCOPY
1223 	if (psock->zcopy) {
1224 		flags = MSG_ZEROCOPY | MSG_NOSIGNAL;
1225 	} else
1226 #endif
1227 	{
1228 		flags = MSG_NOSIGNAL;
1229 	}
1230 
1231 	iovcnt = spdk_sock_prep_reqs(sock, iovs, 0, NULL, &flags);
1232 	if (iovcnt == 0) {
1233 		return 0;
1234 	}
1235 
1236 #ifdef SPDK_ZEROCOPY
1237 	is_zcopy = flags & MSG_ZEROCOPY;
1238 #endif
1239 
1240 	/* Perform the vectored write */
1241 	msg.msg_iov = iovs;
1242 	msg.msg_iovlen = iovcnt;
1243 
1244 	if (psock->ssl) {
1245 		rc = SSL_writev(psock->ssl, iovs, iovcnt);
1246 	} else {
1247 		rc = sendmsg(psock->fd, &msg, flags);
1248 	}
1249 	if (rc <= 0) {
1250 		if (errno == EAGAIN || errno == EWOULDBLOCK || (errno == ENOBUFS && psock->zcopy)) {
1251 			return 0;
1252 		}
1253 		return rc;
1254 	}
1255 
1256 	if (is_zcopy) {
1257 		/* Handle wrap-around: psock->sendmsg_idx - 1 is stored in req->internal.offset,
1258 		 * so sendmsg_idx must never be zero. */
1259 		if (spdk_unlikely(psock->sendmsg_idx == UINT32_MAX)) {
1260 			psock->sendmsg_idx = 1;
1261 		} else {
1262 			psock->sendmsg_idx++;
1263 		}
1264 	}
1265 
1266 	/* Consume the requests that were actually written */
1267 	req = TAILQ_FIRST(&sock->queued_reqs);
1268 	while (req) {
1269 		offset = req->internal.offset;
1270 
1271 		/* req->internal.is_zcopy is true when the whole req or part of it is sent with zerocopy */
1272 		req->internal.is_zcopy = is_zcopy;
1273 
1274 		for (i = 0; i < req->iovcnt; i++) {
1275 			/* Advance by the offset first */
1276 			if (offset >= SPDK_SOCK_REQUEST_IOV(req, i)->iov_len) {
1277 				offset -= SPDK_SOCK_REQUEST_IOV(req, i)->iov_len;
1278 				continue;
1279 			}
1280 
1281 			/* Calculate the remaining length of this element */
1282 			len = SPDK_SOCK_REQUEST_IOV(req, i)->iov_len - offset;
1283 
1284 			if (len > (size_t)rc) {
1285 				/* This element was partially sent. */
1286 				req->internal.offset += rc;
1287 				return 0;
1288 			}
1289 
1290 			offset = 0;
1291 			req->internal.offset += len;
1292 			rc -= len;
1293 		}
1294 
1295 		/* Handled a full request. */
1296 		spdk_sock_request_pend(sock, req);
1297 
1298 		if (!req->internal.is_zcopy && req == TAILQ_FIRST(&sock->pending_reqs)) {
1299 			/* The sendmsg syscall above isn't currently asynchronous,
1300 			 * so it's already done. */
1301 			retval = spdk_sock_request_put(sock, req, 0);
1302 			if (retval) {
1303 				break;
1304 			}
1305 		} else {
1306 			/* Re-use the offset field to hold the sendmsg call index. The
1307 			 * index is 0 based, so subtract one here because we've already
1308 			 * incremented above. */
1309 			req->internal.offset = psock->sendmsg_idx - 1;
1310 		}
1311 
1312 		if (rc == 0) {
1313 			break;
1314 		}
1315 
1316 		req = TAILQ_FIRST(&sock->queued_reqs);
1317 	}
1318 
1319 	return 0;
1320 }
1321 
1322 static int
1323 posix_sock_flush(struct spdk_sock *sock)
1324 {
1325 #ifdef SPDK_ZEROCOPY
1326 	struct spdk_posix_sock *psock = __posix_sock(sock);
1327 
1328 	if (psock->zcopy && !TAILQ_EMPTY(&sock->pending_reqs)) {
1329 		_sock_check_zcopy(sock);
1330 	}
1331 #endif
1332 
1333 	return _sock_flush(sock);
1334 }
1335 
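/*
 * Copy out data previously buffered in the receive pipe and update the
 * "has data" bookkeeping used by the poll group.
 */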
1336 static ssize_t
1337 posix_sock_recv_from_pipe(struct spdk_posix_sock *sock, struct iovec *diov, int diovcnt)
1338 {
1339 	struct iovec siov[2];
1340 	int sbytes;
1341 	ssize_t bytes;
1342 	struct spdk_posix_sock_group_impl *group;
1343 
1344 	sbytes = spdk_pipe_reader_get_buffer(sock->recv_pipe, sock->recv_buf_sz, siov);
1345 	if (sbytes < 0) {
1346 		errno = EINVAL;
1347 		return -1;
1348 	} else if (sbytes == 0) {
1349 		errno = EAGAIN;
1350 		return -1;
1351 	}
1352 
1353 	bytes = spdk_iovcpy(siov, 2, diov, diovcnt);
1354 
1355 	if (bytes == 0) {
1356 		/* The only way this happens is if diov is 0 length */
1357 		errno = EINVAL;
1358 		return -1;
1359 	}
1360 
1361 	spdk_pipe_reader_advance(sock->recv_pipe, bytes);
1362 
1363 	/* If we drained the pipe, mark it appropriately */
1364 	if (spdk_pipe_reader_bytes_available(sock->recv_pipe) == 0) {
1365 		assert(sock->pipe_has_data == true);
1366 
1367 		group = __posix_group_impl(sock->base.group_impl);
1368 		if (group && !sock->socket_has_data) {
1369 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1370 		}
1371 
1372 		sock->pipe_has_data = false;
1373 	}
1374 
1375 	return bytes;
1376 }
1377 
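/*
 * Refill the receive pipe with a single readv()/SSL_readv() from the socket.
 * A read shorter than the available pipe space means the kernel socket was
 * drained.
 */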
1378 static inline ssize_t
1379 posix_sock_read(struct spdk_posix_sock *sock)
1380 {
1381 	struct iovec iov[2];
1382 	int bytes_avail, bytes_recvd;
1383 	struct spdk_posix_sock_group_impl *group;
1384 
1385 	bytes_avail = spdk_pipe_writer_get_buffer(sock->recv_pipe, sock->recv_buf_sz, iov);
1386 
1387 	if (bytes_avail <= 0) {
1388 		return bytes_avail;
1389 	}
1390 
1391 	if (sock->ssl) {
1392 		bytes_recvd = SSL_readv(sock->ssl, iov, 2);
1393 	} else {
1394 		bytes_recvd = readv(sock->fd, iov, 2);
1395 	}
1396 
1397 	assert(sock->pipe_has_data == false);
1398 
1399 	if (bytes_recvd <= 0) {
1400 		/* Errors count as draining the socket data */
1401 		if (sock->base.group_impl && sock->socket_has_data) {
1402 			group = __posix_group_impl(sock->base.group_impl);
1403 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1404 		}
1405 
1406 		sock->socket_has_data = false;
1407 
1408 		return bytes_recvd;
1409 	}
1410 
1411 	spdk_pipe_writer_advance(sock->recv_pipe, bytes_recvd);
1412 
1413 #if DEBUG
1414 	if (sock->base.group_impl) {
1415 		assert(sock->socket_has_data == true);
1416 	}
1417 #endif
1418 
1419 	sock->pipe_has_data = true;
1420 	if (bytes_recvd < bytes_avail) {
1421 		/* We drained the kernel socket entirely. */
1422 		sock->socket_has_data = false;
1423 	}
1424 
1425 	return bytes_recvd;
1426 }
1427 
1428 static ssize_t
1429 posix_sock_readv(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
1430 {
1431 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1432 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(sock->base.group_impl);
1433 	int rc, i;
1434 	size_t len;
1435 
1436 	if (sock->recv_pipe == NULL) {
1437 		assert(sock->pipe_has_data == false);
1438 		if (group && sock->socket_has_data) {
1439 			sock->socket_has_data = false;
1440 			TAILQ_REMOVE(&group->socks_with_data, sock, link);
1441 		}
1442 		if (sock->ssl) {
1443 			return SSL_readv(sock->ssl, iov, iovcnt);
1444 		} else {
1445 			return readv(sock->fd, iov, iovcnt);
1446 		}
1447 	}
1448 
1449 	/* If the socket is not in a group, we must assume it always has
1450 	 * data waiting for us because it is not epolled */
1451 	if (!sock->pipe_has_data && (group == NULL || sock->socket_has_data)) {
1452 		/* If the user is receiving a sufficiently large amount of data,
1453 		 * receive directly to their buffers. */
1454 		len = 0;
1455 		for (i = 0; i < iovcnt; i++) {
1456 			len += iov[i].iov_len;
1457 		}
1458 
1459 		if (len >= MIN_SOCK_PIPE_SIZE) {
1460 			/* TODO: Should this detect if kernel socket is drained? */
1461 			if (sock->ssl) {
1462 				return SSL_readv(sock->ssl, iov, iovcnt);
1463 			} else {
1464 				return readv(sock->fd, iov, iovcnt);
1465 			}
1466 		}
1467 
1468 		/* Otherwise, do a big read into our pipe */
1469 		rc = posix_sock_read(sock);
1470 		if (rc <= 0) {
1471 			return rc;
1472 		}
1473 	}
1474 
1475 	return posix_sock_recv_from_pipe(sock, iov, iovcnt);
1476 }
1477 
1478 static ssize_t
1479 posix_sock_recv(struct spdk_sock *sock, void *buf, size_t len)
1480 {
1481 	struct iovec iov[1];
1482 
1483 	iov[0].iov_base = buf;
1484 	iov[0].iov_len = len;
1485 
1486 	return posix_sock_readv(sock, iov, 1);
1487 }
1488 
1489 static void
1490 posix_sock_readv_async(struct spdk_sock *sock, struct spdk_sock_request *req)
1491 {
1492 	req->cb_fn(req->cb_arg, -ENOTSUP);
1493 }
1494 
1495 static ssize_t
1496 posix_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
1497 {
1498 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1499 	int rc;
1500 
1501 	/* In order to process a writev, we need to flush any asynchronous writes
1502 	 * first. */
1503 	rc = _sock_flush(_sock);
1504 	if (rc < 0) {
1505 		return rc;
1506 	}
1507 
1508 	if (!TAILQ_EMPTY(&_sock->queued_reqs)) {
1509 		/* We weren't able to flush all requests */
1510 		errno = EAGAIN;
1511 		return -1;
1512 	}
1513 
1514 	if (sock->ssl) {
1515 		return SSL_writev(sock->ssl, iov, iovcnt);
1516 	} else {
1517 		return writev(sock->fd, iov, iovcnt);
1518 	}
1519 }
1520 
1521 static void
1522 posix_sock_writev_async(struct spdk_sock *sock, struct spdk_sock_request *req)
1523 {
1524 	int rc;
1525 
1526 	spdk_sock_request_queue(sock, req);
1527 
1528 	/* If there are a sufficient number queued, just flush them out immediately. */
1529 	if (sock->queued_iovcnt >= IOV_BATCH_SIZE) {
1530 		rc = _sock_flush(sock);
1531 		if (rc) {
1532 			spdk_sock_abort_requests(sock);
1533 		}
1534 	}
1535 }
1536 
1537 static int
1538 posix_sock_set_recvlowat(struct spdk_sock *_sock, int nbytes)
1539 {
1540 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1541 	int val;
1542 	int rc;
1543 
1544 	assert(sock != NULL);
1545 
1546 	val = nbytes;
1547 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_RCVLOWAT, &val, sizeof val);
1548 	if (rc != 0) {
1549 		return -1;
1550 	}
1551 	return 0;
1552 }
1553 
1554 static bool
1555 posix_sock_is_ipv6(struct spdk_sock *_sock)
1556 {
1557 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1558 	struct sockaddr_storage sa;
1559 	socklen_t salen;
1560 	int rc;
1561 
1562 	assert(sock != NULL);
1563 
1564 	memset(&sa, 0, sizeof sa);
1565 	salen = sizeof sa;
1566 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
1567 	if (rc != 0) {
1568 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
1569 		return false;
1570 	}
1571 
1572 	return (sa.ss_family == AF_INET6);
1573 }
1574 
1575 static bool
1576 posix_sock_is_ipv4(struct spdk_sock *_sock)
1577 {
1578 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1579 	struct sockaddr_storage sa;
1580 	socklen_t salen;
1581 	int rc;
1582 
1583 	assert(sock != NULL);
1584 
1585 	memset(&sa, 0, sizeof sa);
1586 	salen = sizeof sa;
1587 	rc = getsockname(sock->fd, (struct sockaddr *) &sa, &salen);
1588 	if (rc != 0) {
1589 		SPDK_ERRLOG("getsockname() failed (errno=%d)\n", errno);
1590 		return false;
1591 	}
1592 
1593 	return (sa.ss_family == AF_INET);
1594 }
1595 
1596 static bool
1597 posix_sock_is_connected(struct spdk_sock *_sock)
1598 {
1599 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1600 	uint8_t byte;
1601 	int rc;
1602 
1603 	rc = recv(sock->fd, &byte, 1, MSG_PEEK);
1604 	if (rc == 0) {
1605 		return false;
1606 	}
1607 
1608 	if (rc < 0) {
1609 		if (errno == EAGAIN || errno == EWOULDBLOCK) {
1610 			return true;
1611 		}
1612 
1613 		return false;
1614 	}
1615 
1616 	return true;
1617 }
1618 
1619 static struct spdk_sock_group_impl *
1620 posix_sock_group_impl_get_optimal(struct spdk_sock *_sock, struct spdk_sock_group_impl *hint)
1621 {
1622 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1623 	struct spdk_sock_group_impl *group_impl;
1624 
1625 	if (sock->placement_id != -1) {
1626 		spdk_sock_map_lookup(&g_map, sock->placement_id, &group_impl, hint);
1627 		return group_impl;
1628 	}
1629 
1630 	return NULL;
1631 }
1632 
1633 static struct spdk_sock_group_impl *
1634 posix_sock_group_impl_create(void)
1635 {
1636 	struct spdk_posix_sock_group_impl *group_impl;
1637 	int fd;
1638 
1639 #if defined(SPDK_EPOLL)
1640 	fd = epoll_create1(0);
1641 #elif defined(SPDK_KEVENT)
1642 	fd = kqueue();
1643 #endif
1644 	if (fd == -1) {
1645 		return NULL;
1646 	}
1647 
1648 	group_impl = calloc(1, sizeof(*group_impl));
1649 	if (group_impl == NULL) {
1650 		SPDK_ERRLOG("group_impl allocation failed\n");
1651 		close(fd);
1652 		return NULL;
1653 	}
1654 
1655 	group_impl->fd = fd;
1656 	TAILQ_INIT(&group_impl->socks_with_data);
1657 	group_impl->placement_id = -1;
1658 
1659 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_CPU) {
1660 		spdk_sock_map_insert(&g_map, spdk_env_get_current_core(), &group_impl->base);
1661 		group_impl->placement_id = spdk_env_get_current_core();
1662 	}
1663 
1664 	return &group_impl->base;
1665 }
1666 
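/*
 * Assign the poll group's placement id to a socket via SO_MARK and record the
 * association in the global sock map so future lookups map the mark back to
 * this group.
 */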
1667 static void
1668 posix_sock_mark(struct spdk_posix_sock_group_impl *group, struct spdk_posix_sock *sock,
1669 		int placement_id)
1670 {
1671 #if defined(SO_MARK)
1672 	int rc;
1673 
1674 	rc = setsockopt(sock->fd, SOL_SOCKET, SO_MARK,
1675 			&placement_id, sizeof(placement_id));
1676 	if (rc != 0) {
1677 		/* Not fatal */
1678 		SPDK_ERRLOG("Error setting SO_MARK\n");
1679 		return;
1680 	}
1681 
1682 	rc = spdk_sock_map_insert(&g_map, placement_id, &group->base);
1683 	if (rc != 0) {
1684 		/* Not fatal */
1685 		SPDK_ERRLOG("Failed to insert sock group into map: %d\n", rc);
1686 		return;
1687 	}
1688 
1689 	sock->placement_id = placement_id;
1690 #endif
1691 }
1692 
1693 static void
1694 posix_sock_update_mark(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1695 {
1696 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1697 
1698 	if (group->placement_id == -1) {
1699 		group->placement_id = spdk_sock_map_find_free(&g_map);
1700 
1701 		/* If a free placement id is found, update existing sockets in this group */
1702 		if (group->placement_id != -1) {
1703 			struct spdk_sock  *sock, *tmp;
1704 
1705 			TAILQ_FOREACH_SAFE(sock, &_group->socks, link, tmp) {
1706 				posix_sock_mark(group, __posix_sock(sock), group->placement_id);
1707 			}
1708 		}
1709 	}
1710 
1711 	if (group->placement_id != -1) {
1712 		/*
1713 		 * group placement id is already determined for this poll group.
1714 		 * Mark socket with group's placement id.
1715 		 */
1716 		posix_sock_mark(group, __posix_sock(_sock), group->placement_id);
1717 	}
1718 }
1719 
1720 static int
1721 posix_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1722 {
1723 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1724 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1725 	int rc;
1726 
1727 #if defined(SPDK_EPOLL)
1728 	struct epoll_event event;
1729 
1730 	memset(&event, 0, sizeof(event));
1731 	/* EPOLLERR is always on even if we don't set it, but be explicit for clarity */
1732 	event.events = EPOLLIN | EPOLLERR;
1733 	event.data.ptr = sock;
1734 
1735 	rc = epoll_ctl(group->fd, EPOLL_CTL_ADD, sock->fd, &event);
1736 #elif defined(SPDK_KEVENT)
1737 	struct kevent event;
1738 	struct timespec ts = {0};
1739 
1740 	EV_SET(&event, sock->fd, EVFILT_READ, EV_ADD, 0, 0, sock);
1741 
1742 	rc = kevent(group->fd, &event, 1, NULL, 0, &ts);
1743 #endif
1744 
1745 	if (rc != 0) {
1746 		return rc;
1747 	}
1748 
1749 	/* switched from another polling group due to scheduling */
1750 	if (spdk_unlikely(sock->recv_pipe != NULL &&
1751 			  (spdk_pipe_reader_bytes_available(sock->recv_pipe) > 0))) {
1752 		sock->pipe_has_data = true;
1753 		sock->socket_has_data = false;
1754 		TAILQ_INSERT_TAIL(&group->socks_with_data, sock, link);
1755 	}
1756 
1757 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_MARK) {
1758 		posix_sock_update_mark(_group, _sock);
1759 	} else if (sock->placement_id != -1) {
1760 		rc = spdk_sock_map_insert(&g_map, sock->placement_id, &group->base);
1761 		if (rc != 0) {
1762 			SPDK_ERRLOG("Failed to insert sock group into map: %d\n", rc);
1763 			/* Do not treat this as an error. The system will continue running. */
1764 		}
1765 	}
1766 
1767 	return rc;
1768 }
1769 
1770 static int
1771 posix_sock_group_impl_remove_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
1772 {
1773 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1774 	struct spdk_posix_sock *sock = __posix_sock(_sock);
1775 	int rc;
1776 
1777 	if (sock->pipe_has_data || sock->socket_has_data) {
1778 		TAILQ_REMOVE(&group->socks_with_data, sock, link);
1779 		sock->pipe_has_data = false;
1780 		sock->socket_has_data = false;
1781 	}
1782 
1783 	if (sock->placement_id != -1) {
1784 		spdk_sock_map_release(&g_map, sock->placement_id);
1785 	}
1786 
1787 #if defined(SPDK_EPOLL)
1788 	struct epoll_event event;
1789 
1790 	/* The event parameter is ignored, but some old kernel versions still require it. */
1791 	rc = epoll_ctl(group->fd, EPOLL_CTL_DEL, sock->fd, &event);
1792 #elif defined(SPDK_KEVENT)
1793 	struct kevent event;
1794 	struct timespec ts = {0};
1795 
1796 	EV_SET(&event, sock->fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
1797 
1798 	rc = kevent(group->fd, &event, 1, NULL, 0, &ts);
1799 	if (rc == 0 && event.flags & EV_ERROR) {
1800 		rc = -1;
1801 		errno = event.data;
1802 	}
1803 #endif
1804 
1805 	spdk_sock_abort_requests(_sock);
1806 
1807 	return rc;
1808 }
1809 
1810 static int
1811 posix_sock_group_impl_poll(struct spdk_sock_group_impl *_group, int max_events,
1812 			   struct spdk_sock **socks)
1813 {
1814 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1815 	struct spdk_sock *sock, *tmp;
1816 	int num_events, i, rc;
1817 	struct spdk_posix_sock *psock, *ptmp;
1818 #if defined(SPDK_EPOLL)
1819 	struct epoll_event events[MAX_EVENTS_PER_POLL];
1820 #elif defined(SPDK_KEVENT)
1821 	struct kevent events[MAX_EVENTS_PER_POLL];
1822 	struct timespec ts = {0};
1823 #endif
1824 
1825 #ifdef SPDK_ZEROCOPY
1826 	/* When all of the following conditions are met
1827 	 * - non-blocking socket
1828 	 * - zero copy is enabled
1829 	 * - interrupts suppressed (i.e. busy polling)
1830 	 * - the NIC tx queue is full at the time sendmsg() is called
1831 	 * - epoll_wait determines there is an EPOLLIN event for the socket
1832 	 * then we can get into a situation where data we've sent is queued
1833 	 * up in the kernel network stack, but interrupts have been suppressed
1834 	 * because other traffic is flowing so the kernel misses the signal
1835 	 * to flush the software tx queue. If there wasn't incoming data
1836 	 * pending on the socket, then epoll_wait would have been sufficient
1837 	 * to kick off the send operation, but since there is a pending event
1838 	 * epoll_wait does not trigger the necessary operation.
1839 	 *
1840 	 * We deal with this by checking for all of the above conditions and
1841 	 * additionally looking for EPOLLIN events that were not consumed from
1842 	 * the last poll loop. We take this to mean that the upper layer is
1843 	 * unable to consume them because it is blocked waiting for resources
1844 	 * to free up, and those resources are most likely freed in response
1845 	 * to a pending asynchronous write completing.
1846 	 *
1847 	 * Additionally, sockets that have the same placement_id actually share
1848 	 * an underlying hardware queue. That means polling one of them is
1849 	 * equivalent to polling all of them. As a quick mechanism to avoid
1850 	 * making extra poll() calls, stash the last placement_id during the loop
1851 	 * and only poll if it's not the same. The overwhelmingly common case
1852 	 * is that all sockets in this list have the same placement_id because
1853 	 * SPDK is intentionally grouping sockets by that value, so even
1854 	 * though this won't stop all extra calls to poll(), it's very fast
1855 	 * and will catch all of them in practice.
1856 	 */
1857 	int last_placement_id = -1;
1858 
1859 	TAILQ_FOREACH(psock, &group->socks_with_data, link) {
1860 		if (psock->zcopy && psock->placement_id >= 0 &&
1861 		    psock->placement_id != last_placement_id) {
1862 			struct pollfd pfd = {psock->fd, POLLIN | POLLERR, 0};
1863 
1864 			poll(&pfd, 1, 0);
1865 			last_placement_id = psock->placement_id;
1866 		}
1867 	}
1868 #endif
1869 
1870 	/* This must be a TAILQ_FOREACH_SAFE because while flushing,
1871 	 * a completion callback could remove the sock from the
1872 	 * group. */
1873 	TAILQ_FOREACH_SAFE(sock, &_group->socks, link, tmp) {
1874 		rc = _sock_flush(sock);
1875 		if (rc) {
1876 			spdk_sock_abort_requests(sock);
1877 		}
1878 	}
1879 
1880 	assert(max_events > 0);
1881 
1882 #if defined(SPDK_EPOLL)
1883 	num_events = epoll_wait(group->fd, events, max_events, 0);
1884 #elif defined(SPDK_KEVENT)
1885 	num_events = kevent(group->fd, NULL, 0, events, max_events, &ts);
1886 #endif
1887 
1888 	if (num_events == -1) {
1889 		return -1;
1890 	} else if (num_events == 0 && !TAILQ_EMPTY(&_group->socks)) {
1891 		sock = TAILQ_FIRST(&_group->socks);
1892 		psock = __posix_sock(sock);
1893 		/* poll() is called here to busy poll the queue associated with
1894 		 * the first socket in the list and potentially reap incoming data.
1895 		 */
1896 		if (sock->opts.priority) {
1897 			struct pollfd pfd = {0, 0, 0};
1898 
1899 			pfd.fd = psock->fd;
1900 			pfd.events = POLLIN | POLLERR;
1901 			poll(&pfd, 1, 0);
1902 		}
1903 	}
1904 
1905 	for (i = 0; i < num_events; i++) {
1906 #if defined(SPDK_EPOLL)
1907 		sock = events[i].data.ptr;
1908 		psock = __posix_sock(sock);
1909 
1910 #ifdef SPDK_ZEROCOPY
1911 		if (events[i].events & EPOLLERR) {
1912 			rc = _sock_check_zcopy(sock);
1913 			/* If the socket was closed or removed from
1914 			 * the group in response to a send ack, don't
1915 			 * add it to the array here. */
1916 			if (rc || sock->cb_fn == NULL) {
1917 				continue;
1918 			}
1919 		}
1920 #endif
1921 		if ((events[i].events & EPOLLIN) == 0) {
1922 			continue;
1923 		}
1924 
1925 #elif defined(SPDK_KEVENT)
1926 		sock = events[i].udata;
1927 		psock = __posix_sock(sock);
1928 #endif
1929 
1930 		/* If the socket is not already in the list, add it now */
1931 		if (!psock->socket_has_data && !psock->pipe_has_data) {
1932 			TAILQ_INSERT_TAIL(&group->socks_with_data, psock, link);
1933 		}
1934 		psock->socket_has_data = true;
1935 	}
1936 
1937 	num_events = 0;
1938 
1939 	TAILQ_FOREACH_SAFE(psock, &group->socks_with_data, link, ptmp) {
1940 		if (num_events == max_events) {
1941 			break;
1942 		}
1943 
1944 		/* If the socket's cb_fn is NULL, just remove it from the
1945 		 * list and do not add it to socks array */
1946 		if (spdk_unlikely(psock->base.cb_fn == NULL)) {
1947 			psock->socket_has_data = false;
1948 			psock->pipe_has_data = false;
1949 			TAILQ_REMOVE(&group->socks_with_data, psock, link);
1950 			continue;
1951 		}
1952 
1953 		socks[num_events++] = &psock->base;
1954 	}
1955 
1956 	/* Cycle the has_data list so that each time we poll things aren't
1957 	 * in the same order. Say we have 6 sockets in the list, named as follows:
1958 	 * A B C D E F
1959 	 * And all 6 sockets had epoll events, but max_events is only 3. That means
1960 	 * psock currently points at D. We want to rearrange the list to the following:
1961 	 * D E F A B C
1962 	 *
1963 	 * The variables below are named according to this example to make it easier to
1964 	 * follow the swaps.
1965 	 */
1966 	if (psock != NULL) {
1967 		struct spdk_posix_sock *pa, *pc, *pd, *pf;
1968 
1969 		/* Capture pointers to the elements we need */
1970 		pd = psock;
1971 		pc = TAILQ_PREV(pd, spdk_has_data_list, link);
1972 		pa = TAILQ_FIRST(&group->socks_with_data);
1973 		pf = TAILQ_LAST(&group->socks_with_data, spdk_has_data_list);
1974 
1975 		/* Break the link between C and D */
1976 		pc->link.tqe_next = NULL;
1977 
1978 		/* Connect F to A */
1979 		pf->link.tqe_next = pa;
1980 		pa->link.tqe_prev = &pf->link.tqe_next;
1981 
1982 		/* Fix up the list first/last pointers */
1983 		group->socks_with_data.tqh_first = pd;
1984 		group->socks_with_data.tqh_last = &pc->link.tqe_next;
1985 
1986 		/* D is in front of the list, make tqe prev pointer point to the head of list */
1987 		/* D is now at the front of the list; point its tqe_prev at the head of the list */
1988 	}
1989 
1990 	return num_events;
1991 }
1992 
1993 static int
1994 posix_sock_group_impl_close(struct spdk_sock_group_impl *_group)
1995 {
1996 	struct spdk_posix_sock_group_impl *group = __posix_group_impl(_group);
1997 	int rc;
1998 
1999 	if (g_spdk_posix_sock_impl_opts.enable_placement_id == PLACEMENT_CPU) {
2000 		spdk_sock_map_release(&g_map, spdk_env_get_current_core());
2001 	}
2002 
2003 	rc = close(group->fd);
2004 	free(group);
2005 	return rc;
2006 }
2007 
2008 static struct spdk_net_impl g_posix_net_impl = {
2009 	.name		= "posix",
2010 	.getaddr	= posix_sock_getaddr,
2011 	.connect	= posix_sock_connect,
2012 	.listen		= posix_sock_listen,
2013 	.accept		= posix_sock_accept,
2014 	.close		= posix_sock_close,
2015 	.recv		= posix_sock_recv,
2016 	.readv		= posix_sock_readv,
2017 	.readv_async	= posix_sock_readv_async,
2018 	.writev		= posix_sock_writev,
2019 	.writev_async	= posix_sock_writev_async,
2020 	.flush		= posix_sock_flush,
2021 	.set_recvlowat	= posix_sock_set_recvlowat,
2022 	.set_recvbuf	= posix_sock_set_recvbuf,
2023 	.set_sendbuf	= posix_sock_set_sendbuf,
2024 	.is_ipv6	= posix_sock_is_ipv6,
2025 	.is_ipv4	= posix_sock_is_ipv4,
2026 	.is_connected	= posix_sock_is_connected,
2027 	.group_impl_get_optimal	= posix_sock_group_impl_get_optimal,
2028 	.group_impl_create	= posix_sock_group_impl_create,
2029 	.group_impl_add_sock	= posix_sock_group_impl_add_sock,
2030 	.group_impl_remove_sock = posix_sock_group_impl_remove_sock,
2031 	.group_impl_poll	= posix_sock_group_impl_poll,
2032 	.group_impl_close	= posix_sock_group_impl_close,
2033 	.get_opts	= posix_sock_impl_get_opts,
2034 	.set_opts	= posix_sock_impl_set_opts,
2035 };
2036 
2037 SPDK_NET_IMPL_REGISTER(posix, &g_posix_net_impl, DEFAULT_SOCK_PRIORITY + 1);
2038 
2039 static struct spdk_sock *
2040 ssl_sock_listen(const char *ip, int port, struct spdk_sock_opts *opts)
2041 {
2042 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_LISTEN, opts, true);
2043 }
2044 
2045 static struct spdk_sock *
2046 ssl_sock_connect(const char *ip, int port, struct spdk_sock_opts *opts)
2047 {
2048 	return posix_sock_create(ip, port, SPDK_SOCK_CREATE_CONNECT, opts, true);
2049 }
2050 
2051 static struct spdk_sock *
2052 ssl_sock_accept(struct spdk_sock *_sock)
2053 {
2054 	return _posix_sock_accept(_sock, true);
2055 }
2056 
2057 static struct spdk_net_impl g_ssl_net_impl = {
2058 	.name		= "ssl",
2059 	.getaddr	= posix_sock_getaddr,
2060 	.connect	= ssl_sock_connect,
2061 	.listen		= ssl_sock_listen,
2062 	.accept		= ssl_sock_accept,
2063 	.close		= posix_sock_close,
2064 	.recv		= posix_sock_recv,
2065 	.readv		= posix_sock_readv,
2066 	.writev		= posix_sock_writev,
2067 	.writev_async	= posix_sock_writev_async,
2068 	.flush		= posix_sock_flush,
2069 	.set_recvlowat	= posix_sock_set_recvlowat,
2070 	.set_recvbuf	= posix_sock_set_recvbuf,
2071 	.set_sendbuf	= posix_sock_set_sendbuf,
2072 	.is_ipv6	= posix_sock_is_ipv6,
2073 	.is_ipv4	= posix_sock_is_ipv4,
2074 	.is_connected	= posix_sock_is_connected,
2075 	.group_impl_get_optimal	= posix_sock_group_impl_get_optimal,
2076 	.group_impl_create	= posix_sock_group_impl_create,
2077 	.group_impl_add_sock	= posix_sock_group_impl_add_sock,
2078 	.group_impl_remove_sock = posix_sock_group_impl_remove_sock,
2079 	.group_impl_poll	= posix_sock_group_impl_poll,
2080 	.group_impl_close	= posix_sock_group_impl_close,
2081 	.get_opts	= posix_sock_impl_get_opts,
2082 	.set_opts	= posix_sock_impl_set_opts,
2083 };
2084 
2085 SPDK_NET_IMPL_REGISTER(ssl, &g_ssl_net_impl, DEFAULT_SOCK_PRIORITY);
2086 SPDK_LOG_REGISTER_COMPONENT(sock_posix)
2087