/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */

#include "dmsg_local.h"

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define DMSG_SPAN_MAXDIST	16

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_conn    - List of iocom connections which wish to receive SPAN
 *                  propagation from other connections.  Might contain
 *                  a filter string.  Only iocoms with an open LNK_CONN
 *                  transaction are applicable for SPAN propagation.
 *
 * h2span_relay   - List of links relayed (via SPAN).  Essentially
 *                  each relay structure represents a LNK_SPAN
 *                  transaction that we initiated, versus h2span_link
 *                  which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster - Organizes the shared fsid's.  One structure for
 *                  each cluster.
 *
 * h2span_node    - Organizes the nodes in a cluster.  One structure
 *                  for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link    - Organizes all incoming and outgoing LNK_SPAN message
 *                  transactions related to a node.
 *
 *                  One h2span_link structure for each incoming LNK_SPAN
 *                  transaction.  Links selected for propagation back
 *                  out are also where the outgoing LNK_SPAN messages
 *                  are indexed into (so we can propagate changes).
 *
 *                  The h2span_links use a red-black tree sorted by the
 *                  distance hop metric of the incoming LNK_SPAN.  We
 *                  then select the top N for outgoing.  When the
 *                  topology changes the top N may also change and cause
 *                  new outgoing LNK_SPAN transactions to be opened
 *                  and less desirable ones to be closed, causing
 *                  transactional aborts within the message flow in
 *                  the process.
 *
 * Also note      - All outgoing LNK_SPAN message transactions are also
 *                  entered into a red-black tree for use by the routing
 *                  function.  This is handled by msg.c in the state
 *                  code, not here.
 */

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);
uint32_t DMsgRNSS;

/*
 * A received LNK_CONN transaction enables the SPAN protocol over the
 * connection (and may contain a filter).  Typically one is opened for
 * each mount and several may share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	dmsg_state_t	*state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	uint8_t	peer_type;
	char	cl_label[128];		/* cluster label (typ PEER_BLOCK) */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	fs_label[128];		/* fs label (typ PEER_HAMMER2) */
	void	*opaque;
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	uint32_t	dist;
	uint32_t	rnss;
	struct h2span_relay_queue relayq; /* relay out */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections for which we have an open CONN transaction.
 *
 * The relay structure points to the outgoing LNK_SPAN trans (out_state)
 * and to the incoming LNK_SPAN transaction (in_state).  The relay
 * structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};

typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);

static __inline
void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}
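
/*
 * Illustrative sketch only (kept out of the build): dmsg_termstr() is
 * applied to the fixed-size label arrays copied in from the wire, since
 * a peer is not required to NUL-terminate them.  The macro form derives
 * the buffer size from the array declaration at compile time.  The
 * helper name below is hypothetical.
 */
#if 0
static void
example_termstr_usage(dmsg_msg_t *msg)
{
	/* cl_label/fs_label are char[128] and may arrive unterminated */
	dmsg_termstr(msg->any.lnk_span.cl_label);
	dmsg_termstr(msg->any.lnk_span.fs_label);
	/* both labels are now safe for strcmp()/fprintf() */
}
#endif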

/*
 * Cluster peer_type, uuid, AND label must match for a match.
 */
static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
	if (r == 0)
		r = strcmp(cls1->cl_label, cls2->cl_label);

	return r;
}

/*
 * Match against fs_label/pfs_fsid.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_fsid but
 * we also string-match fs_label.
 */
static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->fs_label, node2->fs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
	return (r);
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);
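
/*
 * Illustrative sketch only (kept out of the build): lookups against these
 * trees use a stack-allocated "dummy" key in which only the fields
 * consulted by the tree's comparator are filled in; RB_FIND() never
 * dereferences anything else.  dmsg_lnk_span() below uses exactly this
 * pattern with dummy_cls and dummy_node.  The helper name is hypothetical.
 */
#if 0
static h2span_cluster_t *
example_cluster_lookup(uuid_t *pfs_clid, uint8_t peer_type, const char *label)
{
	h2span_cluster_t dummy;

	dummy.pfs_clid = *pfs_clid;
	dummy.peer_type = peer_type;
	snprintf(dummy.cl_label, sizeof(dummy.cl_label), "%s", label);

	/* caller must hold cluster_mtx */
	return (RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy));
}
#endif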

/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct dmsg_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);

static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_circ(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);

void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * DMSG_PROTO_LNK - Generic dispatch for LNK-protocol messages.
 * (incoming iocom lock not held)
 *
 * This function is typically called for one-way messages and
 * transaction-opens, since state->func is assigned afterwards, but it
 * will also be called if no state->func was assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	uint32_t icmd = msg->state ? msg->state->icmd : msg->any.head.cmd;

	switch(icmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	case DMSG_LNK_CIRC:
		dmsg_lnk_circ(msg);
		break;
	default:
		msg->iocom->usrmsg_callback(msg, 1);
		/* state invalid after reply */
		break;
	}
}

/*
 * LNK_CONN - iocom identify message reception.
 * (incoming iocom lock not held)
 *
 * The remote node identifies itself to us, sets up a SPAN filter, and
 * gives us the ok to start transmitting SPANs.
 */
void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_media_t *media;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	fprintf(stderr,
		"dmsg_lnk_conn: msg %p cmd %08x state %p "
		"txcmd %08x rxcmd %08x\n",
		msg, msg->any.head.cmd, state,
		state->txcmd, state->rxcmd);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		fprintf(stderr, "LNK_CONN(%08x): %s/%s/%s\n",
			(uint32_t)msg->any.head.msgid,
			dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					 &alloc),
			msg->any.lnk_conn.cl_label,
			msg->any.lnk_conn.fs_label);
		free(alloc);

		conn = dmsg_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		/*
		 * Set up media
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.mediaid,
					 &media->mediaid, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->mediaid = msg->any.lnk_conn.mediaid;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		state->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			msg->iocom->usrmsg_callback(msg, 0);
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(msg->iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Adjust media refs
		 *
		 * Callback will clean out media config / user-opaque state
		 */
		media = state->media;
		--media->refs;
		if (media->refs == 0) {
			fprintf(stderr, "Media shutdown\n");
			TAILQ_REMOVE(&mediaq, media, entry);
			pthread_mutex_unlock(&cluster_mtx);
			msg->iocom->usrmsg_callback(msg, 0);
			pthread_mutex_lock(&cluster_mtx);
			dmsg_free(media);
		}
		state->media = NULL;

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		/* state invalid after reply */
		break;
	default:
		msg->iocom->usrmsg_callback(msg, 1);
#if 0
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
#endif
		break;
	}
	pthread_mutex_unlock(&cluster_mtx);
}
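
/*
 * Illustrative sketch only (kept out of the build, field values are
 * hypothetical): the LNK_CONN transaction received above is opened by
 * the peer roughly as follows, using the same allocation pattern as
 * dmsg_generate_relay() below.  peer_mask/pfs_mask form the SPAN filter.
 */
#if 0
static void
example_send_lnk_conn(dmsg_iocom_t *iocom, uuid_t *pfs_clid, uuid_t *mediaid)
{
	dmsg_msg_t *msg;

	msg = dmsg_msg_alloc(&iocom->circuit0, 0,
			     DMSG_LNK_CONN | DMSGF_CREATE,
			     NULL, NULL);
	msg->any.lnk_conn.pfs_clid = *pfs_clid;
	msg->any.lnk_conn.mediaid = *mediaid;
	msg->any.lnk_conn.peer_mask = (uint64_t)-1;  /* accept any peer_type */
	msg->any.lnk_conn.pfs_mask = (uint64_t)-1;   /* accept any pfs_type */
	dmsg_msg_write(msg);
}
#endif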

/*
 * LNK_SPAN - Spanning tree protocol message reception
 * (incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & DMSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.cl_label);
		dmsg_termstr(msg->any.lnk_span.fs_label);

		/*
		 * Find the cluster
		 */
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.cl_label,
		      dummy_cls.cl_label,
		      sizeof(dummy_cls.cl_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.cl_label,
			      cls->cl_label,
			      sizeof(cls->cl_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
		      sizeof(dummy_node.fs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.fs_label,
			      node->fs_label,
			      sizeof(node->fs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			if (msg->iocom->node_handler) {
				msg->iocom->node_handler(&node->opaque, msg,
							 DMSG_NODEOP_ADD);
			}
		}

		/*
		 * Create the link
		 */
		assert(state->any.link == NULL);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->rnss = msg->any.lnk_span.rnss;
		slink->state = state;
		state->any.link = slink;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr,
			"LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid, &alloc),
			msg->any.lnk_span.cl_label,
			msg->any.lnk_span.fs_label,
			msg->any.lnk_span.dist);
		free(alloc);
#if 0
		dmsg_relay_scan(NULL, node);
#endif
		dmsg_iocom_signal(msg->iocom);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.cl_label,
			state->msg->any.lnk_span.fs_label,
			state->msg->any.lnk_span.dist);
		free(alloc);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (msg->iocom->node_handler) {
				msg->iocom->node_handler(&node->opaque, msg,
							 DMSG_NODEOP_DEL);
			}
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			node->cls = NULL;
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			dmsg_relay_scan(NULL, node);
#endif
		if (node)
			dmsg_iocom_signal(msg->iocom);
	}

	pthread_mutex_unlock(&cluster_mtx);
}
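
/*
 * Illustrative sketch only (kept out of the build, field values are
 * hypothetical): the origin of a SPAN opens the transaction received
 * above by advertising its own {pfs_clid, pfs_fsid, labels} with a
 * small base distance; each relay hop then re-advertises it with
 * dist+1 via dmsg_generate_relay().
 */
#if 0
static void
example_send_lnk_span(dmsg_iocom_t *iocom, uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
	dmsg_msg_t *msg;

	msg = dmsg_msg_alloc(&iocom->circuit0, 0,
			     DMSG_LNK_SPAN | DMSGF_CREATE,
			     NULL, NULL);
	msg->any.lnk_span.pfs_clid = *pfs_clid;
	msg->any.lnk_span.pfs_fsid = *pfs_fsid;
	msg->any.lnk_span.dist = 1;	/* hypothetical base distance */
	snprintf(msg->any.lnk_span.fs_label,
		 sizeof(msg->any.lnk_span.fs_label), "examplefs");
	dmsg_msg_write(msg);
}
#endif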

/*
 * LNK_CIRC - Virtual circuit protocol message reception
 * (incoming iocom lock not held)
 *
 * Handles all cases.
 */
void
dmsg_lnk_circ(dmsg_msg_t *msg)
{
	dmsg_circuit_t *circA;
	dmsg_circuit_t *circB;
	dmsg_state_t *rx_state;
	dmsg_state_t *tx_state;
	dmsg_state_t *state;
	dmsg_state_t dummy;
	dmsg_msg_t *fwd_msg;
	dmsg_iocom_t *iocomA;
	dmsg_iocom_t *iocomB;
	int disconnect;

	/*pthread_mutex_lock(&cluster_mtx);*/

	if (DMsgDebugOpt >= 4)
		fprintf(stderr, "CIRC receive cmd=%08x\n", msg->any.head.cmd);

	switch (msg->any.head.cmd & (DMSGF_CREATE |
				     DMSGF_DELETE |
				     DMSGF_REPLY)) {
	case DMSGF_CREATE:
	case DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (A) wishes to establish a virtual circuit through us to (B).
		 * (B) is specified by lnk_circ.target (the message id for
		 * a LNK_SPAN that (A) received from us which represents (B)).
		 *
		 * Designate the originator of the circuit (the current
		 * remote end) as (A) and the other side as (B).
		 *
		 * Accept the VC but do not reply.  We will wait for the end-
		 * to-end reply to propagate back.
		 */
		iocomA = msg->iocom;

		/*
		 * Locate the open transaction state that the other end
		 * specified in <target>.  This will be an open SPAN
		 * transaction that we transmitted (h2span_relay) over
		 * the interface the LNK_CIRC is being received on.
		 *
		 * (all LNK_CIRCs that we transmit are on circuit0)
		 */
		pthread_mutex_lock(&iocomA->mtx);
		dummy.msgid = msg->any.lnk_circ.target;
		tx_state = RB_FIND(dmsg_state_tree,
				   &iocomA->circuit0.statewr_tree,
				   &dummy);
		pthread_mutex_unlock(&iocomA->mtx);
		if (tx_state == NULL) {
			/* XXX SMP race */
			fprintf(stderr, "dmsg_lnk_circ: no circuit\n");
			dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
			break;
		}
		if (tx_state->icmd != DMSG_LNK_SPAN) {
			/* XXX SMP race */
			fprintf(stderr, "dmsg_lnk_circ: not LNK_SPAN\n");
			dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
			break;
		}

		/* locate h2span_link */
		rx_state = tx_state->any.relay->source_rt;

		/*
		 * (A) wishes to establish a VC through us to the
		 * specified target.
		 *
		 * (A) sends us the msgid of an open SPAN transaction
		 * it received from us as <target>.
		 */
		circA = dmsg_alloc(sizeof(*circA));
		dmsg_circuit_init(iocomA, circA);
		circA->state = msg->state;	/* LNK_CIRC state */
		circA->msgid = msg->state->msgid;
		circA->span_state = tx_state;	/* H2SPAN_RELAY state */
		circA->is_relay = 1;
		circA->refs = 2;		/* state and peer */

		/*
		 * Upgrade received state so we act on both it and its
		 * peer (created below) symmetrically.
		 */
		msg->state->any.circ = circA;
		msg->state->func = dmsg_lnk_circ;

		iocomB = rx_state->iocom;

		circB = dmsg_alloc(sizeof(*circB));
		dmsg_circuit_init(iocomB, circB);

		/*
		 * Create a LNK_CIRC transaction on B
		 */
		fwd_msg = dmsg_msg_alloc(&iocomB->circuit0,
					 0, DMSG_LNK_CIRC | DMSGF_CREATE,
					 dmsg_lnk_circ, circB);
		fwd_msg->state->any.circ = circB;
		fwd_msg->any.lnk_circ.target = rx_state->msgid;
		circB->state = fwd_msg->state;	/* LNK_CIRC state */
		circB->msgid = fwd_msg->any.head.msgid;
		circB->span_state = rx_state;	/* H2SPAN_LINK state */
		circB->is_relay = 0;
		circB->refs = 2;		/* state and peer */

		if (DMsgDebugOpt >= 4)
			fprintf(stderr, "CIRC forward %p->%p\n", circA, circB);

		/*
		 * Link the two circuits together.
		 */
		circA->peer = circB;
		circB->peer = circA;

		if (iocomA < iocomB) {
			pthread_mutex_lock(&iocomA->mtx);
			pthread_mutex_lock(&iocomB->mtx);
		} else {
			pthread_mutex_lock(&iocomB->mtx);
			pthread_mutex_lock(&iocomA->mtx);
		}
		if (RB_INSERT(dmsg_circuit_tree, &iocomA->circuit_tree, circA))
			assert(0);
		if (RB_INSERT(dmsg_circuit_tree, &iocomB->circuit_tree, circB))
			assert(0);
		if (iocomA < iocomB) {
			pthread_mutex_unlock(&iocomB->mtx);
			pthread_mutex_unlock(&iocomA->mtx);
		} else {
			pthread_mutex_unlock(&iocomA->mtx);
			pthread_mutex_unlock(&iocomB->mtx);
		}

		dmsg_msg_write(fwd_msg);

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0)
			break;
		/* FALL THROUGH TO DELETE */
	case DMSGF_DELETE:
		/*
		 * (A) is deleting the virtual circuit, propagate closure
		 * to (B).
		 */
		iocomA = msg->iocom;
		if (msg->state->any.circ == NULL) {
			/* already returned an error/deleted */
			break;
		}
		circA = msg->state->any.circ;
		circB = circA->peer;
		assert(msg->state == circA->state);

		/*
		 * We are closing B's send side.  If B's receive side is
		 * already closed we disconnect the circuit from B's state.
		 */
		disconnect = 0;
		if (circB && (state = circB->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				disconnect = 1;
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
			dmsg_state_reply(state, msg->any.head.error);
		}

		/*
		 * We received a close on A.  If A's send side is already
		 * closed we disconnect the circuit from A's state.
		 */
		if (circA && (state = circA->state) != NULL) {
			if (state->txcmd & DMSGF_DELETE) {
				disconnect = 1;
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
		}

		/*
		 * Disconnect the peer<->peer association
		 */
		if (disconnect) {
			if (circB) {
				circA->peer = NULL;
				circB->peer = NULL;
				dmsg_circuit_drop(circA);
				dmsg_circuit_drop(circB);	/* XXX SMP */
			}
		}
		break;
	case DMSGF_REPLY | DMSGF_CREATE:
	case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (B) is acknowledging the creation of the virtual
		 * circuit.  This propagates all the way back to (A), though
		 * it should be noted that (A) can start issuing commands
		 * via the virtual circuit before seeing this reply.
		 */
		circB = msg->state->any.circ;
		assert(circB);
		circA = circB->peer;
		assert(msg->state == circB->state);
		assert(circA);
		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_state_result(circA->state, msg->any.head.error);
			break;
		}
		/* FALL THROUGH TO DELETE */
	case DMSGF_REPLY | DMSGF_DELETE:
		/*
		 * (B) is deleting the virtual circuit or acknowledging
		 * our deletion of the virtual circuit, propagate closure
		 * to (A).
		 */
		iocomB = msg->iocom;
		circB = msg->state->any.circ;
		circA = circB->peer;
		assert(msg->state == circB->state);

		/*
		 * We received a close on (B), propagate to (A).  If we have
		 * already received the close from (A) we disconnect the state.
		 */
		disconnect = 0;
		if (circA && (state = circA->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				disconnect = 1;
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
			dmsg_state_reply(state, msg->any.head.error);
		}

		/*
		 * We received a close on (B).  If (B)'s send side is already
		 * closed we disconnect the state.
		 */
		if (circB && (state = circB->state) != NULL) {
			if (state->txcmd & DMSGF_DELETE) {
				disconnect = 1;
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
		}

		/*
		 * Disconnect the peer<->peer association
		 */
		if (disconnect) {
			if (circA) {
				circB->peer = NULL;
				circA->peer = NULL;
				dmsg_circuit_drop(circB);
				dmsg_circuit_drop(circA);	/* XXX SMP */
			}
		}
		break;
	}

	/*pthread_mutex_unlock(&cluster_mtx);*/
}
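
/*
 * A note on the two-mutex sequence above: whenever both iocom mutexes
 * must be held, they are acquired in ascending address order so that
 * concurrent circuit setups cannot deadlock A/B against B/A.  A minimal
 * sketch of the same discipline (helper name hypothetical, kept out of
 * the build):
 */
#if 0
static void
example_lock_iocom_pair(dmsg_iocom_t *io1, dmsg_iocom_t *io2)
{
	dmsg_iocom_t *tmp;

	if (io1 > io2) {		/* normalize to ascending order */
		tmp = io1;
		io1 = io2;
		io2 = tmp;
	}
	pthread_mutex_lock(&io1->mtx);
	pthread_mutex_lock(&io2->mtx);
	/* ... critical section; unlock in the reverse order ... */
}
#endif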

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
				h2span_conn_t *conn);

static void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						dmsg_relay_scan_specific(node,
									 conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}

static void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
#ifdef REQUIRE_SYMMETRICAL
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;
#endif

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	if (DMsgDebugOpt > 8)
		fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 * removed the relay, so the relay can only match exactly or
	 * be worse).
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *	 this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->dist || lastrnss != slink->rnss)
				break;
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}

		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}

		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;

		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted; the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 *
		 * pfs_mask is typically used so pure clients can filter
		 * out receiving SPANs for other pure clients.
		 */
		lspan = &slink->state->msg->any.lnk_span;
		lconn = &conn->state->msg->any.lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;
		if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
			break;

#if 0
		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
		    lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
			break;
		}
#endif

		/*
		 * Connection filter, if cluster uuid is not NULL it must
		 * match the span cluster uuid.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    !uuid_is_nil(&lconn->pfs_clid, NULL) &&
		    uuid_compare(&slink->node->cls->pfs_clid,
				 &lconn->pfs_clid, NULL)) {
			break;
		}

		/*
		 * Connection filter, if cluster label is not empty it must
		 * match the span cluster label.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    lconn->cl_label[0] &&
		    strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
			break;
		}

		/*
		 * NOTE! pfs_fsid differentiates nodes within the same cluster
		 *	 so we obviously don't want to match those.  Similarly
		 *	 for fs_label.
		 */

		/*
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->dist >= slink->dist);
		relay = dmsg_generate_relay(conn, slink);
#ifdef REQUIRE_SYMMETRICAL
		lastdist = slink->dist;
		lastrnss = slink->rnss;
#endif

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		fprintf(stderr, "RELAY DELETE FROM EXTRAS\n");
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}

/*
 * Helper function to generate a missing relay.
 *
 * cluster_mtx must be held
 */
static
h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	dmsg_msg_t *msg;

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */

	/*
	 * NOTE: relay->target_rt->any.relay is set to relay by alloc.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->circuit0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	relay->target_rt = msg->state;

	msg->any.lnk_span = slink->state->msg->any.lnk_span;
	msg->any.lnk_span.dist = slink->dist + 1;
	msg->any.lnk_span.rnss = slink->rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	dmsg_msg_write(msg);

	return (relay);
}
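
/*
 * Worked example of the relayed fields above (values hypothetical): a
 * SPAN received with dist=2 and rnss=R is retransmitted with dist=3 and
 * rnss=R+DMsgRNSS.  dmsg_relay_scan_specific() refuses to relay anything
 * whose dist exceeds DMSG_SPAN_MAXDIST (16), so a routing loop, which
 * would otherwise increment dist forever, starves instead of being
 * chased indefinitely.
 */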

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
dmsg_lnk_relay(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & DMSGF_REPLY);

	if (msg->any.head.cmd & DMSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		fprintf(stderr, "RELAY DELETE FROM LNK_RELAY MSG\n");
		if ((relay = state->any.relay) != NULL) {
			dmsg_relay_delete(relay);
		} else {
			dmsg_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}

/*
 * cluster_mtx held by caller
 */
static
void
dmsg_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p "
		"DIST=%d FD %d STATE %p\n",
		relay->source_rt->any.link,
		relay,
		relay->source_rt->any.link->node->cls,
		relay->source_rt->any.link->node,
		relay->source_rt->any.link->dist,
		relay->conn->state->iocom->sock_fd,
		relay->target_rt);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);

	if (relay->target_rt) {
		relay->target_rt->any.relay = NULL;
		dmsg_state_reply(relay->target_rt, 0);
		/* state invalid after reply */
		relay->target_rt = NULL;
	}
	relay->conn = NULL;
	relay->source_rt = NULL;
	dmsg_free(relay);
}

/************************************************************************
 *		MESSAGE ROUTING AND SOURCE VALIDATION			*
 ************************************************************************/

int
dmsg_circuit_route(dmsg_msg_t *msg)
{
	dmsg_iocom_t *iocom = msg->iocom;
	dmsg_circuit_t *circ;
	dmsg_circuit_t *peer;
	dmsg_circuit_t dummy;
	int error = 0;

	/*
	 * Relaying occurs before any state processing; msg->state should
	 * always be NULL.
	 */
	assert(msg->state == NULL);

	/*
	 * Lookup the circuit on the incoming iocom.
	 */
	pthread_mutex_lock(&iocom->mtx);

	dummy.msgid = msg->any.head.circuit;
	circ = RB_FIND(dmsg_circuit_tree, &iocom->circuit_tree, &dummy);
	assert(circ);
	peer = circ->peer;
	dmsg_circuit_hold(peer);

	if (DMsgDebugOpt >= 4) {
		fprintf(stderr,
			"CIRC relay %08x %p->%p\n",
			msg->any.head.cmd, circ, peer);
	}

	msg->iocom = peer->iocom;
	msg->any.head.circuit = peer->msgid;
	dmsg_circuit_drop_locked(msg->circuit);
	msg->circuit = peer;

	pthread_mutex_unlock(&iocom->mtx);

	dmsg_msg_write(msg);
	error = DMSG_IOQ_ERROR_ROUTED;

	return error;
}

/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these dmsg_handles can be pooled by use-case
 * and remain persistent through a client's (or mount point's) life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
dmsg_cluster_get(uuid_t *pfs_clid)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);
	return (cls);
}

void
dmsg_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		dmsg_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the dmsg_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
}

#endif

/*
 * Dumps the spanning tree.
 *
 * DEBUG ONLY
 */
void
dmsg_shell_tree(dmsg_circuit_t *circuit, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		dmsg_circuit_printf(circuit, "Cluster %s %s (%s)\n",
				    dmsg_peer_type_to_str(cls->peer_type),
				    dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
				    cls->cl_label);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			dmsg_circuit_printf(circuit, "    Node %02x %s (%s)\n",
					    node->pfs_type,
					    dmsg_uuid_to_str(&node->pfs_fsid,
							     &uustr),
					    node->fs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				dmsg_circuit_printf(circuit,
					    "\tSLink msgid %016jx "
					    "dist=%d via %d\n",
					    (intmax_t)slink->state->msgid,
					    slink->dist,
					    slink->state->iocom->sock_fd);
				TAILQ_FOREACH(relay, &slink->relayq, entry) {
					dmsg_circuit_printf(circuit,
					    "\t    Relay-out msgid %016jx "
					    "via %d\n",
					    (intmax_t)relay->target_rt->msgid,
					    relay->target_rt->iocom->sock_fd);
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	if (uustr)
		free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}

/*
 * DEBUG ONLY
 *
 * Locate the state representing an incoming LNK_SPAN given its msgid.
 */
int
dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					*statep = slink->state;
					goto found;
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	*statep = NULL;
	return(ENOENT);
found:
	pthread_mutex_unlock(&cluster_mtx);
	return(0);
}

/*
 * Random number sub-sort value to add to SPAN rnss fields on relay.
 * This allows us to differentiate spans with the same <dist> field
 * for relaying purposes.  We must normally limit the number of relays
 * for any given SPAN origination but we must also guarantee that a
 * symmetric reverse path exists, so we use the rnss field as a sub-sort
 * (since there can be thousands or millions if we only match on <dist>),
 * and if there are STILL too many spans we go past the limit.
 */
static
uint32_t
dmsg_rnss(void)
{
	if (DMsgRNSS == 0) {
		pthread_mutex_lock(&cluster_mtx);
		while (DMsgRNSS == 0) {
			srandomdev();
			DMsgRNSS = random();
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
	return(DMsgRNSS);
}
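
/*
 * Worked example (values hypothetical): if this process picked DMsgRNSS=7
 * and another relaying host picked 12, two copies of the same origin SPAN
 * arriving with equal <dist> via different relay chains accumulate
 * different rnss sums (7 vs 12, or 19 for a path crossing both hosts).
 * h2span_link_cmp() can then order them deterministically, so both
 * directions of the spanning tree make the same selection.
 */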