1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 2 * 3 * Permission is hereby granted, free of charge, to any person obtaining a copy 4 * of this software and associated documentation files (the "Software"), to 5 * deal in the Software without restriction, including without limitation the 6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 * sell copies of the Software, and to permit persons to whom the Software is 8 * furnished to do so, subject to the following conditions: 9 * 10 * The above copyright notice and this permission notice shall be included in 11 * all copies or substantial portions of the Software. 12 * 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 19 * IN THE SOFTWARE. 20 */ 21 22 #include <assert.h> 23 #include <stdlib.h> 24 25 #include "uv.h" 26 #include "internal.h" 27 #include "handle-inl.h" 28 #include "stream-inl.h" 29 #include "req-inl.h" 30 31 32 /* 33 * Threshold of active udp streams for which to preallocate udp read buffers. 
 */
const unsigned int uv_active_udp_streams_threshold = 0;

/* A zero-size buffer for use by uv_udp_read */
static char uv_zero_[] = "";

/* Fetch the peer address of a connected UDP handle. */
int uv_udp_getpeername(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getpeername,
                             name,
                             namelen,
                             0);
}


/* Fetch the local address the UDP handle is bound to. */
int uv_udp_getsockname(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getsockname,
                             name,
                             namelen,
                             0);
}


/* Take ownership of `socket`: make it nonblocking and non-inheritable,
 * associate it with the loop's completion port and record it on the handle.
 * Returns 0 or a Windows (not libuv) error code. Fails with UV_EBUSY if the
 * handle already owns a socket. */
static int uv_udp_set_socket(uv_loop_t* loop, uv_udp_t* handle, SOCKET socket,
    int family) {
  DWORD yes = 1;
  WSAPROTOCOL_INFOW info;
  int opt_len;

  if (handle->socket != INVALID_SOCKET)
    return UV_EBUSY;

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  /* Make the socket non-inheritable */
  if (!SetHandleInformation((HANDLE)socket, HANDLE_FLAG_INHERIT, 0)) {
    return GetLastError();
  }

  /* Associate it with the I/O completion port. Use uv_handle_t pointer as
   * completion key. */
  if (CreateIoCompletionPort((HANDLE)socket,
                             loop->iocp,
                             (ULONG_PTR)socket,
                             0) == NULL) {
    return GetLastError();
  }

  /* All known Windows that support SetFileCompletionNotificationModes have a
   * bug that makes it impossible to use this function in conjunction with
   * datagram sockets. We can work around that but only if the user is using
   * the default UDP driver (AFD) and has no other LSPs stacked on top. Here
   * we check whether that is the case. */
  opt_len = (int) sizeof info;
  if (getsockopt(
          socket, SOL_SOCKET, SO_PROTOCOL_INFOW, (char*) &info, &opt_len) ==
      SOCKET_ERROR) {
    return GetLastError();
  }

  if (info.ProtocolChain.ChainLen == 1) {
    if (SetFileCompletionNotificationModes(
            (HANDLE) socket,
            FILE_SKIP_SET_EVENT_ON_HANDLE |
                FILE_SKIP_COMPLETION_PORT_ON_SUCCESS)) {
      /* Success: completions that finish synchronously will not be queued to
       * the IOCP, so use the workaround recv functions that emulate that. */
      handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
      handle->func_wsarecv = uv_wsarecv_workaround;
      handle->func_wsarecvfrom = uv_wsarecvfrom_workaround;
    } else if (GetLastError() != ERROR_INVALID_FUNCTION) {
      /* ERROR_INVALID_FUNCTION just means the mode is unsupported; anything
       * else is a real failure. */
      return GetLastError();
    }
  }

  handle->socket = socket;

  if (family == AF_INET6) {
    handle->flags |= UV_HANDLE_IPV6;
  } else {
    assert(!(handle->flags & UV_HANDLE_IPV6));
  }

  return 0;
}


/* Initialize a uv_udp_t. If `domain` is not AF_UNSPEC a socket of that
 * family is created eagerly; otherwise socket creation is deferred until
 * bind/send time. Note: `flags` is currently unused here. Returns 0 or a
 * libuv error code. */
int uv__udp_init_ex(uv_loop_t* loop,
                    uv_udp_t* handle,
                    unsigned flags,
                    int domain) {
  uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
  handle->socket = INVALID_SOCKET;
  handle->reqs_pending = 0;
  handle->activecnt = 0;
  handle->func_wsarecv = WSARecv;
  handle->func_wsarecvfrom = WSARecvFrom;
  handle->send_queue_size = 0;
  handle->send_queue_count = 0;
  UV_REQ_INIT(&handle->recv_req, UV_UDP_RECV);
  handle->recv_req.data = handle;

  /* If anything fails beyond this point we need to remove the handle from
   * the handle queue, since it was added by uv__handle_init.
   */

  if (domain != AF_UNSPEC) {
    SOCKET sock;
    DWORD err;

    sock = socket(domain, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET) {
      err = WSAGetLastError();
      QUEUE_REMOVE(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }

    err = uv_udp_set_socket(handle->loop, handle, sock, domain);
    if (err) {
      closesocket(sock);
      QUEUE_REMOVE(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }
  }

  return 0;
}


/* Begin closing the handle: stop reading, close the socket and, if no
 * requests are in flight, schedule the endgame immediately. */
void uv_udp_close(uv_loop_t* loop, uv_udp_t* handle) {
  uv_udp_recv_stop(handle);
  closesocket(handle->socket);
  handle->socket = INVALID_SOCKET;

  uv__handle_closing(handle);

  if (handle->reqs_pending == 0) {
    uv_want_endgame(loop, (uv_handle_t*) handle);
  }
}


/* Finish closing once all outstanding requests have drained. */
void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle) {
  if (handle->flags & UV_HANDLE_CLOSING &&
      handle->reqs_pending == 0) {
    assert(!(handle->flags & UV_HANDLE_CLOSED));
    uv__handle_close(handle);
  }
}


/* Bind the socket to `addr` unless it is already bound. Creates the socket
 * lazily if needed. Returns 0 or a Windows (not libuv) error code. */
static int uv_udp_maybe_bind(uv_udp_t* handle,
                             const struct sockaddr* addr,
                             unsigned int addrlen,
                             unsigned int flags) {
  int r;
  int err;
  DWORD no = 0;

  if (handle->flags & UV_HANDLE_BOUND)
    return 0;

  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6) {
    /* UV_UDP_IPV6ONLY is supported only for IPV6 sockets */
    return ERROR_INVALID_PARAMETER;
  }

  if (handle->socket == INVALID_SOCKET) {
    SOCKET sock = socket(addr->sa_family, SOCK_DGRAM, 0);
    if (sock == INVALID_SOCKET) {
      return WSAGetLastError();
    }

    err = uv_udp_set_socket(handle->loop, handle, sock, addr->sa_family);
    if (err) {
      closesocket(sock);
      return err;
    }
  }

  if (flags & UV_UDP_REUSEADDR) {
    DWORD yes = 1;
    /* Set SO_REUSEADDR on the socket.
     */
    if (setsockopt(handle->socket,
                   SOL_SOCKET,
                   SO_REUSEADDR,
                   (char*) &yes,
                   sizeof yes) == SOCKET_ERROR) {
      err = WSAGetLastError();
      return err;
    }
  }

  if (addr->sa_family == AF_INET6)
    handle->flags |= UV_HANDLE_IPV6;

  if (addr->sa_family == AF_INET6 && !(flags & UV_UDP_IPV6ONLY)) {
    /* On windows IPV6ONLY is on by default. If the user doesn't specify it
     * libuv turns it off. */

    /* TODO: how to handle errors? This may fail if there is no ipv4 stack
     * available, or when run on XP/2003 which have no support for dualstack
     * sockets. For now we're silently ignoring the error. */
    setsockopt(handle->socket,
               IPPROTO_IPV6,
               IPV6_V6ONLY,
               (char*) &no,
               sizeof no);
  }

  r = bind(handle->socket, addr, addrlen);
  if (r == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  handle->flags |= UV_HANDLE_BOUND;

  return 0;
}


/* Post the next overlapped receive on the handle's single recv_req. Two
 * modes: below the active-stream threshold a user buffer is preallocated via
 * alloc_cb and a real WSARecvFrom is posted; above it a zero-length MSG_PEEK
 * read is posted and the data is pulled nonblockingly when it completes. */
static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
  uv_req_t* req;
  uv_buf_t buf;
  DWORD bytes, flags;
  int result;

  assert(handle->flags & UV_HANDLE_READING);
  assert(!(handle->flags & UV_HANDLE_READ_PENDING));

  req = &handle->recv_req;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  /*
   * Preallocate a read buffer if the number of active streams is below
   * the threshold.
   */
  if (loop->active_udp_streams < uv_active_udp_streams_threshold) {
    handle->flags &= ~UV_HANDLE_ZERO_READ;

    handle->recv_buffer = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->recv_buffer);
    if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
      return;
    }
    assert(handle->recv_buffer.base != NULL);

    buf = handle->recv_buffer;
    memset(&handle->recv_from, 0, sizeof handle->recv_from);
    handle->recv_from_len = sizeof handle->recv_from;
    flags = 0;

    result = handle->func_wsarecvfrom(handle->socket,
                                      (WSABUF*) &buf,
                                      1,
                                      &bytes,
                                      &flags,
                                      (struct sockaddr*) &handle->recv_from,
                                      &handle->recv_from_len,
                                      &req->u.io.overlapped,
                                      NULL);

    if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
      /* Process the req without IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      req->u.io.overlapped.InternalHigh = bytes;
      handle->reqs_pending++;
      uv_insert_pending_req(loop, req);
    } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
      /* The req will be processed with IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      handle->reqs_pending++;
    } else {
      /* Make this req pending reporting an error. */
      SET_REQ_ERROR(req, WSAGetLastError());
      uv_insert_pending_req(loop, req);
      handle->reqs_pending++;
    }

  } else {
    /* Zero-read mode: peek with an empty buffer; the datagram stays queued
     * in the kernel until the completion handler reads it for real. */
    handle->flags |= UV_HANDLE_ZERO_READ;

    buf.base = (char*) uv_zero_;
    buf.len = 0;
    flags = MSG_PEEK;

    result = handle->func_wsarecv(handle->socket,
                                  (WSABUF*) &buf,
                                  1,
                                  &bytes,
                                  &flags,
                                  &req->u.io.overlapped,
                                  NULL);

    if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
      /* Process the req without IOCP.
       */
      handle->flags |= UV_HANDLE_READ_PENDING;
      req->u.io.overlapped.InternalHigh = bytes;
      handle->reqs_pending++;
      uv_insert_pending_req(loop, req);
    } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
      /* The req will be processed with IOCP. */
      handle->flags |= UV_HANDLE_READ_PENDING;
      handle->reqs_pending++;
    } else {
      /* Make this req pending reporting an error. */
      SET_REQ_ERROR(req, WSAGetLastError());
      uv_insert_pending_req(loop, req);
      handle->reqs_pending++;
    }
  }
}


/* Start receiving datagrams. Binds to 0.0.0.0 if the handle is unbound.
 * Returns 0, UV_EALREADY if already reading, or a libuv error code. */
int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb,
    uv_udp_recv_cb recv_cb) {
  uv_loop_t* loop = handle->loop;
  int err;

  if (handle->flags & UV_HANDLE_READING) {
    return UV_EALREADY;
  }

  err = uv_udp_maybe_bind(handle,
                          (const struct sockaddr*) &uv_addr_ip4_any_,
                          sizeof(uv_addr_ip4_any_),
                          0);
  if (err)
    return uv_translate_sys_error(err);

  handle->flags |= UV_HANDLE_READING;
  INCREASE_ACTIVE_COUNT(loop, handle);
  loop->active_udp_streams++;

  handle->recv_cb = recv_cb;
  handle->alloc_cb = alloc_cb;

  /* If reading was stopped and then started again, there could still be a recv
   * request pending.
   */
  if (!(handle->flags & UV_HANDLE_READ_PENDING))
    uv_udp_queue_recv(loop, handle);

  return 0;
}


/* Stop receiving. Any already-pending recv request is left to complete and
 * is simply not re-armed. */
int uv__udp_recv_stop(uv_udp_t* handle) {
  if (handle->flags & UV_HANDLE_READING) {
    handle->flags &= ~UV_HANDLE_READING;
    handle->loop->active_udp_streams--;
    /* NOTE(review): `loop` is not declared in this scope; this compiles only
     * if the macro ignores its first argument — confirm against
     * handle-inl.h. */
    DECREASE_ACTIVE_COUNT(loop, handle);
  }

  return 0;
}


/* Post an overlapped WSASendTo for `req`. Returns 0 or a Windows (not libuv)
 * error code; on success the req is registered and reqs_pending bumped. */
static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_udp_send_cb cb) {
  uv_loop_t* loop = handle->loop;
  DWORD result, bytes;

  UV_REQ_INIT(req, UV_UDP_SEND);
  req->handle = handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  result = WSASendTo(handle->socket,
                     (WSABUF*)bufs,
                     nbufs,
                     &bytes,
                     0,
                     addr,
                     addrlen,
                     &req->u.io.overlapped,
                     NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv_insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->send_queue_size += req->u.io.queued_bytes;
    handle->send_queue_count++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    /* Send failed due to an error.
     */
    return WSAGetLastError();
  }

  return 0;
}


/* Completion handler for a recv request: deliver data (or the error) to the
 * user and, unless reading stopped, re-arm the next receive. */
void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
    uv_req_t* req) {
  uv_buf_t buf;
  int partial;

  assert(handle->type == UV_UDP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req)) {
    DWORD err = GET_REQ_SOCK_ERROR(req);
    if (err == WSAEMSGSIZE) {
      /* Not a real error, it just indicates that the received packet was
       * bigger than the receive buffer. */
    } else if (err == WSAECONNRESET || err == WSAENETRESET) {
      /* A previous sendto operation failed; ignore this error. If zero-reading
       * we need to call WSARecv/WSARecvFrom _without_ the MSG_PEEK flag to
       * clear out the error queue. For nonzero reads, immediately queue a new
       * receive. */
      if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
        goto done;
      }
    } else {
      /* A real error occurred. Report the error to the user only if we're
       * currently reading. */
      if (handle->flags & UV_HANDLE_READING) {
        uv_udp_recv_stop(handle);
        buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
              uv_buf_init(NULL, 0) : handle->recv_buffer;
        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
      }
      goto done;
    }
  }

  if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
    /* Successful read */
    partial = !REQ_SUCCESS(req);
    handle->recv_cb(handle,
                    req->u.io.overlapped.InternalHigh,
                    &handle->recv_buffer,
                    (const struct sockaddr*) &handle->recv_from,
                    partial ? UV_UDP_PARTIAL : 0);
  } else if (handle->flags & UV_HANDLE_READING) {
    DWORD bytes, err, flags;
    struct sockaddr_storage from;
    int from_len;

    /* Do a nonblocking receive.
     * TODO: try to read multiple datagrams at once. FIONREAD maybe?
     */
    buf = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
    if (buf.base == NULL || buf.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      goto done;
    }
    assert(buf.base != NULL);

    memset(&from, 0, sizeof from);
    from_len = sizeof from;

    flags = 0;

    if (WSARecvFrom(handle->socket,
                    (WSABUF*)&buf,
                    1,
                    &bytes,
                    &flags,
                    (struct sockaddr*) &from,
                    &from_len,
                    NULL,
                    NULL) != SOCKET_ERROR) {

      /* Message received */
      handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0);
    } else {
      err = WSAGetLastError();
      if (err == WSAEMSGSIZE) {
        /* Message truncated */
        handle->recv_cb(handle,
                        bytes,
                        &buf,
                        (const struct sockaddr*) &from,
                        UV_UDP_PARTIAL);
      } else if (err == WSAEWOULDBLOCK) {
        /* Kernel buffer empty */
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      } else if (err == WSAECONNRESET || err == WSAENETRESET) {
        /* WSAECONNRESET/WSANETRESET is ignored because this just indicates
         * that a previous sendto operation failed.
         */
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      } else {
        /* Any other error that we want to report back to the user. */
        uv_udp_recv_stop(handle);
        handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0);
      }
    }
  }

done:
  /* Post another read if still reading and not closing.
   */
  if ((handle->flags & UV_HANDLE_READING) &&
      !(handle->flags & UV_HANDLE_READ_PENDING)) {
    uv_udp_queue_recv(loop, handle);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}


/* Completion handler for a send request: update queue accounting and invoke
 * the user callback with the translated result. */
void uv_process_udp_send_req(uv_loop_t* loop, uv_udp_t* handle,
    uv_udp_send_t* req) {
  int err;

  assert(handle->type == UV_UDP);

  assert(handle->send_queue_size >= req->u.io.queued_bytes);
  assert(handle->send_queue_count >= 1);
  handle->send_queue_size -= req->u.io.queued_bytes;
  handle->send_queue_count--;

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (req->cb) {
    err = 0;
    if (!REQ_SUCCESS(req)) {
      err = GET_REQ_SOCK_ERROR(req);
    }
    req->cb(req, uv_translate_sys_error(err));
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}


/* Join/leave an IPv4 multicast group. Binds to 0.0.0.0 first if unbound.
 * Returns 0 or a libuv error code. */
static int uv__udp_set_membership4(uv_udp_t* handle,
                                   const struct sockaddr_in* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  int err;
  int optname;
  struct ip_mreq mreq;

  if (handle->flags & UV_HANDLE_IPV6)
    return UV_EINVAL;

  /* If the socket is unbound, bind to inaddr_any.
   */
  err = uv_udp_maybe_bind(handle,
                          (const struct sockaddr*) &uv_addr_ip4_any_,
                          sizeof(uv_addr_ip4_any_),
                          UV_UDP_REUSEADDR);
  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;

  switch (membership) {
    case UV_JOIN_GROUP:
      optname = IP_ADD_MEMBERSHIP;
      break;
    case UV_LEAVE_GROUP:
      optname = IP_DROP_MEMBERSHIP;
      break;
    default:
      return UV_EINVAL;
  }

  if (setsockopt(handle->socket,
                 IPPROTO_IP,
                 optname,
                 (char*) &mreq,
                 sizeof mreq) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


/* Join/leave an IPv6 multicast group. Binds to [::] first if unbound.
 * Returns 0 or a libuv error code. */
int uv__udp_set_membership6(uv_udp_t* handle,
                            const struct sockaddr_in6* multicast_addr,
                            const char* interface_addr,
                            uv_membership membership) {
  int optname;
  int err;
  struct ipv6_mreq mreq;
  struct sockaddr_in6 addr6;

  if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
    return UV_EINVAL;

  err = uv_udp_maybe_bind(handle,
                          (const struct sockaddr*) &uv_addr_ip6_any_,
                          sizeof(uv_addr_ip6_any_),
                          UV_UDP_REUSEADDR);

  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr) {
    /* On Windows the interface is identified by its scope id. */
    if (uv_ip6_addr(interface_addr, 0, &addr6))
      return UV_EINVAL;
    mreq.ipv6mr_interface = addr6.sin6_scope_id;
  } else {
    mreq.ipv6mr_interface = 0;
  }

  mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;

  switch (membership) {
    case UV_JOIN_GROUP:
      optname = IPV6_ADD_MEMBERSHIP;
      break;
    case UV_LEAVE_GROUP:
      optname = IPV6_DROP_MEMBERSHIP;
      break;
    default:
      return UV_EINVAL;
  }

  if (setsockopt(handle->socket,
                 IPPROTO_IPV6,
                 optname,
                 (char*) &mreq,
                 sizeof mreq) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


/* Join/leave an IPv4 source-specific multicast group (SSM).
 * Returns 0 or a libuv error code. */
static int uv__udp_set_source_membership4(uv_udp_t* handle,
                                          const struct sockaddr_in* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in* source_addr,
                                          uv_membership membership) {
  struct ip_mreq_source mreq;
  int optname;
  int err;

  if (handle->flags & UV_HANDLE_IPV6)
    return UV_EINVAL;

  /* If the socket is unbound, bind to inaddr_any. */
  err = uv_udp_maybe_bind(handle,
                          (const struct sockaddr*) &uv_addr_ip4_any_,
                          sizeof(uv_addr_ip4_any_),
                          UV_UDP_REUSEADDR);
  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
  mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_SOURCE_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_SOURCE_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->socket,
                 IPPROTO_IP,
                 optname,
                 (char*) &mreq,
                 sizeof(mreq)) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


/* Join/leave an IPv6 source-specific multicast group (SSM).
 * Returns 0 or a libuv error code. */
int uv__udp_set_source_membership6(uv_udp_t* handle,
                                   const struct sockaddr_in6* multicast_addr,
                                   const char* interface_addr,
                                   const struct sockaddr_in6* source_addr,
                                   uv_membership membership) {
  struct group_source_req mreq;
  struct sockaddr_in6 addr6;
  int optname;
  int err;

  if ((handle->flags & UV_HANDLE_BOUND) && !(handle->flags & UV_HANDLE_IPV6))
    return UV_EINVAL;

  err = uv_udp_maybe_bind(handle,
                          (const struct sockaddr*) &uv_addr_ip6_any_,
                          sizeof(uv_addr_ip6_any_),
                          UV_UDP_REUSEADDR);

  if (err)
    return uv_translate_sys_error(err);

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_ip6_addr(interface_addr, 0, &addr6);
    if (err)
      return err;
    mreq.gsr_interface = addr6.sin6_scope_id;
  } else {
    mreq.gsr_interface = 0;
  }

  STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
  STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
  memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
  memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));

  if (membership == UV_JOIN_GROUP)
    optname = MCAST_JOIN_SOURCE_GROUP;
  else if (membership == UV_LEAVE_GROUP)
    optname = MCAST_LEAVE_SOURCE_GROUP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->socket,
                 IPPROTO_IPV6,
                 optname,
                 (char*) &mreq,
                 sizeof(mreq)) == SOCKET_ERROR) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


/* Public entry: parse `multicast_addr` as IPv4 or IPv6 and dispatch to the
 * matching membership helper. */
int uv_udp_set_membership(uv_udp_t* handle,
                          const char* multicast_addr,
                          const char* interface_addr,
                          uv_membership membership) {
  struct sockaddr_in addr4;
  struct sockaddr_in6 addr6;

  if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0)
    return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
  else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0)
    return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
  else
    return UV_EINVAL;
}


/* Public entry: parse the multicast and source addresses and dispatch to the
 * matching source-specific membership helper. Both addresses must be of the
 * same family. */
int uv_udp_set_source_membership(uv_udp_t* handle,
                                 const char* multicast_addr,
                                 const char* interface_addr,
                                 const char* source_addr,
                                 uv_membership membership) {
  int err;
  struct sockaddr_storage mcast_addr;
  struct sockaddr_in* mcast_addr4;
  struct sockaddr_in6* mcast_addr6;

  struct sockaddr_storage src_addr;
  struct sockaddr_in* src_addr4;
  struct sockaddr_in6* src_addr6;

  mcast_addr4 = (struct sockaddr_in*)&mcast_addr;
  mcast_addr6 = (struct sockaddr_in6*)&mcast_addr;
  src_addr4 = (struct sockaddr_in*)&src_addr;
  src_addr6 = (struct sockaddr_in6*)&src_addr;

  err = uv_ip4_addr(multicast_addr, 0, mcast_addr4);
  if (err) {
    err = uv_ip6_addr(multicast_addr, 0, mcast_addr6);
    if (err)
      return err;
    err = uv_ip6_addr(source_addr, 0, src_addr6);
    if (err)
      return err;
    return uv__udp_set_source_membership6(handle,
                                          mcast_addr6,
                                          interface_addr,
                                          src_addr6,
                                          membership);
  }

  err = uv_ip4_addr(source_addr, 0, src_addr4);
  if (err)
    return err;
  return uv__udp_set_source_membership4(handle,
                                        mcast_addr4,
                                        interface_addr,
                                        src_addr4,
                                        membership);
}


/* Select the outgoing interface for multicast packets. A NULL
 * `interface_addr` resets to the default interface. */
int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
  struct sockaddr_storage addr_st;
  struct sockaddr_in* addr4;
  struct sockaddr_in6* addr6;

  addr4 = (struct sockaddr_in*) &addr_st;
  addr6 = (struct sockaddr_in6*) &addr_st;

  if (!interface_addr) {
    memset(&addr_st, 0, sizeof addr_st);
    if (handle->flags & UV_HANDLE_IPV6) {
      addr_st.ss_family = AF_INET6;
      addr6->sin6_scope_id = 0;
    } else {
      addr_st.ss_family = AF_INET;
      addr4->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
    /* nothing, address was parsed */
  } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
    /* nothing, address was parsed */
  } else {
    return UV_EINVAL;
  }

  if (handle->socket == INVALID_SOCKET)
    return UV_EBADF;

  if (addr_st.ss_family == AF_INET) {
    if (setsockopt(handle->socket,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (char*) &addr4->sin_addr,
                   sizeof(addr4->sin_addr)) == SOCKET_ERROR) {
      return uv_translate_sys_error(WSAGetLastError());
    }
  } else if (addr_st.ss_family == AF_INET6) {
    if (setsockopt(handle->socket,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   (char*) &addr6->sin6_scope_id,
                   sizeof(addr6->sin6_scope_id)) == SOCKET_ERROR) {
      return uv_translate_sys_error(WSAGetLastError());
    }
  } else {
    assert(0 && "unexpected address family");
    abort();
  }

  return 0;
}


/* Enable/disable SO_BROADCAST on the socket. */
int uv_udp_set_broadcast(uv_udp_t* handle, int value) {
  BOOL optval = (BOOL) value;

  if (handle->socket == INVALID_SOCKET)
    return UV_EBADF;

  if (setsockopt(handle->socket,
                 SOL_SOCKET,
                 SO_BROADCAST,
                 (char*) &optval,
                 sizeof optval)) {
    return uv_translate_sys_error(WSAGetLastError());
  }

  return 0;
}


/* Heuristic: the handle counts as bound if getsockname succeeds and returns
 * a nonempty address. */
int uv__udp_is_bound(uv_udp_t* handle) {
  struct sockaddr_storage addr;
  int addrlen;

  addrlen = sizeof(addr);
  if (uv_udp_getsockname(handle, (struct sockaddr*) &addr, &addrlen) != 0)
    return 0;

  return addrlen > 0;
}


/* Adopt an externally created socket into the handle, inheriting its bound
 * and connected state. Returns 0 or a libuv error code. */
int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
  WSAPROTOCOL_INFOW protocol_info;
  int opt_len;
  int err;

  /* Detect the address family of the socket.
   */
  opt_len = (int) sizeof protocol_info;
  if (getsockopt(sock,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &opt_len) == SOCKET_ERROR) {
    return uv_translate_sys_error(GetLastError());
  }

  err = uv_udp_set_socket(handle->loop,
                          handle,
                          sock,
                          protocol_info.iAddressFamily);
  if (err)
    return uv_translate_sys_error(err);

  if (uv__udp_is_bound(handle))
    handle->flags |= UV_HANDLE_BOUND;

  if (uv__udp_is_connected(handle))
    handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}


/* Generate a uv_udp_set_<name>() that validates `value` and applies the
 * IPv4 or IPv6 socket option matching the handle's address family. */
#define SOCKOPT_SETTER(name, option4, option6, validate)                      \
  int uv_udp_set_##name(uv_udp_t* handle, int value) {                        \
    DWORD optval = (DWORD) value;                                             \
                                                                              \
    if (!(validate(value))) {                                                 \
      return UV_EINVAL;                                                       \
    }                                                                         \
                                                                              \
    if (handle->socket == INVALID_SOCKET)                                     \
      return UV_EBADF;                                                        \
                                                                              \
    if (!(handle->flags & UV_HANDLE_IPV6)) {                                  \
      /* Set IPv4 socket option */                                            \
      if (setsockopt(handle->socket,                                          \
                     IPPROTO_IP,                                              \
                     option4,                                                 \
                     (char*) &optval,                                         \
                     sizeof optval)) {                                        \
        return uv_translate_sys_error(WSAGetLastError());                     \
      }                                                                       \
    } else {                                                                  \
      /* Set IPv6 socket option */                                            \
      if (setsockopt(handle->socket,                                          \
                     IPPROTO_IPV6,                                            \
                     option6,                                                 \
                     (char*) &optval,                                         \
                     sizeof optval)) {                                        \
        return uv_translate_sys_error(WSAGetLastError());                     \
      }                                                                       \
    }                                                                         \
    return 0;                                                                 \
  }

#define VALIDATE_TTL(value) ((value) >= 1 && (value) <= 255)
#define VALIDATE_MULTICAST_TTL(value) ((value) >= -1 && (value) <= 255)
#define VALIDATE_MULTICAST_LOOP(value) (1)

/* NOTE(review): IPV6_HOPLIMIT controls delivery of the received hop-limit as
 * ancillary data; the outgoing unicast TTL option is IPV6_UNICAST_HOPS —
 * confirm whether this is intentional. */
SOCKOPT_SETTER(ttl,
               IP_TTL,
               IPV6_HOPLIMIT,
               VALIDATE_TTL)
SOCKOPT_SETTER(multicast_ttl,
               IP_MULTICAST_TTL,
               IPV6_MULTICAST_HOPS,
               VALIDATE_MULTICAST_TTL)
SOCKOPT_SETTER(multicast_loop,
               IP_MULTICAST_LOOP,
               IPV6_MULTICAST_LOOP,
               VALIDATE_MULTICAST_LOOP)

#undef SOCKOPT_SETTER
#undef VALIDATE_TTL
#undef
VALIDATE_MULTICAST_TTL 1029 #undef VALIDATE_MULTICAST_LOOP 1030 1031 1032 /* This function is an egress point, i.e. it returns libuv errors rather than 1033 * system errors. 1034 */ 1035 int uv__udp_bind(uv_udp_t* handle, 1036 const struct sockaddr* addr, 1037 unsigned int addrlen, 1038 unsigned int flags) { 1039 int err; 1040 1041 err = uv_udp_maybe_bind(handle, addr, addrlen, flags); 1042 if (err) 1043 return uv_translate_sys_error(err); 1044 1045 return 0; 1046 } 1047 1048 1049 int uv__udp_connect(uv_udp_t* handle, 1050 const struct sockaddr* addr, 1051 unsigned int addrlen) { 1052 const struct sockaddr* bind_addr; 1053 int err; 1054 1055 if (!(handle->flags & UV_HANDLE_BOUND)) { 1056 if (addrlen == sizeof(uv_addr_ip4_any_)) 1057 bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_; 1058 else if (addrlen == sizeof(uv_addr_ip6_any_)) 1059 bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_; 1060 else 1061 return UV_EINVAL; 1062 1063 err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0); 1064 if (err) 1065 return uv_translate_sys_error(err); 1066 } 1067 1068 err = connect(handle->socket, addr, addrlen); 1069 if (err) 1070 return uv_translate_sys_error(err); 1071 1072 handle->flags |= UV_HANDLE_UDP_CONNECTED; 1073 1074 return 0; 1075 } 1076 1077 1078 int uv__udp_disconnect(uv_udp_t* handle) { 1079 int err; 1080 struct sockaddr addr; 1081 1082 memset(&addr, 0, sizeof(addr)); 1083 1084 err = connect(handle->socket, &addr, sizeof(addr)); 1085 if (err) 1086 return uv_translate_sys_error(err); 1087 1088 handle->flags &= ~UV_HANDLE_UDP_CONNECTED; 1089 return 0; 1090 } 1091 1092 1093 /* This function is an egress point, i.e. it returns libuv errors rather than 1094 * system errors. 
1095 */ 1096 int uv__udp_send(uv_udp_send_t* req, 1097 uv_udp_t* handle, 1098 const uv_buf_t bufs[], 1099 unsigned int nbufs, 1100 const struct sockaddr* addr, 1101 unsigned int addrlen, 1102 uv_udp_send_cb send_cb) { 1103 const struct sockaddr* bind_addr; 1104 int err; 1105 1106 if (!(handle->flags & UV_HANDLE_BOUND)) { 1107 if (addrlen == sizeof(uv_addr_ip4_any_)) 1108 bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_; 1109 else if (addrlen == sizeof(uv_addr_ip6_any_)) 1110 bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_; 1111 else 1112 return UV_EINVAL; 1113 1114 err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0); 1115 if (err) 1116 return uv_translate_sys_error(err); 1117 } 1118 1119 err = uv__send(req, handle, bufs, nbufs, addr, addrlen, send_cb); 1120 if (err) 1121 return uv_translate_sys_error(err); 1122 1123 return 0; 1124 } 1125 1126 1127 int uv__udp_try_send(uv_udp_t* handle, 1128 const uv_buf_t bufs[], 1129 unsigned int nbufs, 1130 const struct sockaddr* addr, 1131 unsigned int addrlen) { 1132 DWORD bytes; 1133 const struct sockaddr* bind_addr; 1134 struct sockaddr_storage converted; 1135 int err; 1136 1137 assert(nbufs > 0); 1138 1139 if (addr != NULL) { 1140 err = uv__convert_to_localhost_if_unspecified(addr, &converted); 1141 if (err) 1142 return err; 1143 } 1144 1145 /* Already sending a message.*/ 1146 if (handle->send_queue_count != 0) 1147 return UV_EAGAIN; 1148 1149 if (!(handle->flags & UV_HANDLE_BOUND)) { 1150 if (addrlen == sizeof(uv_addr_ip4_any_)) 1151 bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_; 1152 else if (addrlen == sizeof(uv_addr_ip6_any_)) 1153 bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_; 1154 else 1155 return UV_EINVAL; 1156 err = uv_udp_maybe_bind(handle, bind_addr, addrlen, 0); 1157 if (err) 1158 return uv_translate_sys_error(err); 1159 } 1160 1161 err = WSASendTo(handle->socket, 1162 (WSABUF*)bufs, 1163 nbufs, 1164 &bytes, 1165 0, 1166 (const struct sockaddr*) &converted, 1167 addrlen, 1168 
NULL, 1169 NULL); 1170 1171 if (err) 1172 return uv_translate_sys_error(WSAGetLastError()); 1173 1174 return bytes; 1175 } 1176