/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/sbuf.h>
#include <sys/cpu_topology.h>

#include <machine/smp.h>

#ifndef NAPICID
#define NAPICID 256
#endif

#define INDENT_BUF_SIZE	(LEVEL_NO * 3)
#define INVALID_ID	(-1)

/* Per-cpu sysctl nodes and info */
struct per_cpu_sysctl_info {
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	char cpu_name[32];
	int physical_id;
	int core_id;
	char physical_siblings[8 * MAXCPU];
	char core_siblings[8 * MAXCPU];
};
typedef struct per_cpu_sysctl_info per_cpu_sysctl_info_t;

static cpu_node_t cpu_topology_nodes[MAXCPU];	/* Memory for the topology */
static cpu_node_t *cpu_root_node;		/* Root node pointer */

static struct sysctl_ctx_list cpu_topology_sysctl_ctx;
static struct sysctl_oid *cpu_topology_sysctl_tree;
static char cpu_topology_members[8 * MAXCPU];
static per_cpu_sysctl_info_t pcpu_sysctl[MAXCPU];

int cpu_topology_levels_number = 1;
cpu_node_t *root_cpu_node;

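/*
 * Note on storage: the tree is carved out of the statically allocated
 * cpu_topology_nodes[] array above; build_topology_tree() hands nodes out
 * sequentially through a last_free_node cursor.  Both cpu_root_node
 * (file-local, used by the lookup helpers below) and root_cpu_node
 * (exported) end up pointing at the root of that tree.
 */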

/*
 * Get the next valid APIC ID, starting from the current one (curr_apicid).
 */
static int
get_next_valid_apicid(int curr_apicid)
{
	int next_apicid = curr_apicid;

	do {
		next_apicid++;
	} while (get_cpuid_from_apicid(next_apicid) == -1 &&
	    next_apicid < NAPICID);
	if (next_apicid == NAPICID) {
		kprintf("Warning: No next valid APICID found. Returning -1\n");
		return -1;
	}
	return next_apicid;
}

/*
 * Generic topology tree builder.  The parameters have the following meaning:
 * - children_no_per_level : the number of children on each level
 * - level_types : the type of each level (THREAD, CORE, CHIP, etc.)
 * - cur_level : the current level of the tree
 * - node : the current node
 * - last_free_node : the next unused node in the global array
 * - apicid : running APIC ID cursor; each leaf consumes the next valid
 *   APIC ID and records the corresponding CPU in its members mask
 */
static void
build_topology_tree(int *children_no_per_level,
		    uint8_t *level_types,
		    int cur_level,
		    cpu_node_t *node,
		    cpu_node_t **last_free_node,
		    int *apicid)
{
	int i;

	node->child_no = children_no_per_level[cur_level];
	node->type = level_types[cur_level];
	node->members = 0;
	node->compute_unit_id = -1;

	if (node->child_no == 0) {
		*apicid = get_next_valid_apicid(*apicid);
		node->members = CPUMASK(get_cpuid_from_apicid(*apicid));
		return;
	}

	if (node->parent_node == NULL)
		root_cpu_node = node;

	for (i = 0; i < node->child_no; i++) {
		node->child_node[i] = *last_free_node;
		(*last_free_node)++;

		node->child_node[i]->parent_node = node;

		build_topology_tree(children_no_per_level,
		    level_types,
		    cur_level + 1,
		    node->child_node[i],
		    last_free_node,
		    apicid);

		node->members |= node->child_node[i]->members;
	}
}

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
/* Shift the elements of a[] left by one, starting at pos, and NULL the tail */
static void
migrate_elements(cpu_node_t **a, int n, int pos)
{
	int i;

	for (i = pos; i < n - 1; i++) {
		a[i] = a[i + 1];
	}
	a[i] = NULL;
}
#endif

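/*
 * For illustration (hypothetical machine, not detected from real hardware):
 * with one package, two cores per chip and two hardware threads per core,
 * build_cpu_topology() below ends up calling build_topology_tree() with
 *
 *	children_no_per_level = { 1, 2, 2, 0 }
 *	level_types           = { PACKAGE_LEVEL, CHIP_LEVEL,
 *				  CORE_LEVEL, THREAD_LEVEL }
 *
 * which carves eight nodes out of cpu_topology_nodes[]: one package node,
 * one chip node, two core nodes and four thread leaves, each leaf holding
 * the cpumask of a single CPU.
 */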

/*
 * Build the CPU topology.  The detection is made by comparing the chip,
 * core and logical IDs of each CPU with the IDs of the BSP.  When we find
 * a match, the CPUs are siblings at that level.
 */
static void
build_cpu_topology(void)
{
	int i;
	int BSPID = 0;
	int threads_per_core = 0;
	int cores_per_chip = 0;
	int chips_per_package = 0;
	int children_no_per_level[LEVEL_NO];
	uint8_t level_types[LEVEL_NO];
	int apicid = -1;
	cpu_node_t *root = &cpu_topology_nodes[0];
	cpu_node_t *last_free_node = root + 1;

	detect_cpu_topology();

	/*
	 * Assume that the topology is uniform.  Find the number of siblings
	 * within the chip and within the core to build up the topology.
	 */
	for (i = 0; i < ncpus; i++) {
		cpumask_t mask = CPUMASK(i);

		if ((mask & smp_active_mask) == 0)
			continue;

		if (get_chip_ID(BSPID) == get_chip_ID(i))
			cores_per_chip++;
		else
			continue;

		if (get_core_number_within_chip(BSPID) ==
		    get_core_number_within_chip(i))
			threads_per_core++;
	}

	cores_per_chip /= threads_per_core;
	chips_per_package = ncpus / (cores_per_chip * threads_per_core);

	if (bootverbose)
		kprintf("CPU Topology: cores_per_chip: %d; threads_per_core: %d; chips_per_package: %d;\n",
		    cores_per_chip, threads_per_core, chips_per_package);

	if (threads_per_core > 1) {		/* HT available - 4 levels */
		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = threads_per_core;
		children_no_per_level[3] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;
		level_types[3] = THREAD_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 4;
	} else if (cores_per_chip > 1) {	/* No HT available - 3 levels */
		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 3;
	} else {		/* No HT and no multi-core - 2 levels */
		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 2;
	}

	cpu_root_node = root;

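	/*
	 * AMD compute-unit fixup: when fix_amd_topology() succeeds, i.e. the
	 * CPU exposes compute-unit IDs (typically Bulldozer-style parts where
	 * two "cores" share one compute unit), the loop below regroups cores
	 * carrying the same compute_unit_id under a freshly allocated
	 * intermediate CORE_LEVEL node and demotes the grouped cores to
	 * THREAD_LEVEL, so they show up as siblings much like Intel
	 * hyperthreads do.
	 */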
#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
	if (fix_amd_topology() == 0) {
		int visited[MAXCPU], i, j, pos, cpuid;
		cpu_node_t *leaf, *parent;

		bzero(visited, MAXCPU * sizeof(int));

		for (i = 0; i < ncpus; i++) {
			if (visited[i] == 0) {
				pos = 0;
				visited[i] = 1;
				leaf = get_cpu_node_by_cpuid(i);

				if (leaf->type == CORE_LEVEL) {
					parent = leaf->parent_node;

					last_free_node->child_node[0] = leaf;
					last_free_node->child_no = 1;
					last_free_node->members = leaf->members;
					last_free_node->compute_unit_id = leaf->compute_unit_id;
					last_free_node->parent_node = parent;
					last_free_node->type = CORE_LEVEL;

					for (j = 0; j < parent->child_no; j++) {
						if (parent->child_node[j] != leaf) {
							cpuid = BSFCPUMASK(parent->child_node[j]->members);
							if (visited[cpuid] == 0 &&
							    parent->child_node[j]->compute_unit_id == leaf->compute_unit_id) {
								last_free_node->child_node[last_free_node->child_no] = parent->child_node[j];
								last_free_node->child_no++;
								last_free_node->members |= parent->child_node[j]->members;

								parent->child_node[j]->type = THREAD_LEVEL;
								parent->child_node[j]->parent_node = last_free_node;
								visited[cpuid] = 1;

								migrate_elements(parent->child_node, parent->child_no, j);
								parent->child_no--;
								j--;
							}
						} else {
							pos = j;
						}
					}
					if (last_free_node->child_no > 1) {
						parent->child_node[pos] = last_free_node;
						leaf->type = THREAD_LEVEL;
						leaf->parent_node = last_free_node;
						last_free_node++;
					}
				}
			}
		}
	}
#endif
}

/* Recursive function helper to print the CPU topology tree */
static void
print_cpu_topology_tree_sysctl_helper(cpu_node_t *node,
				      struct sbuf *sb,
				      char *buf,
				      int buf_len,
				      int last)
{
	int i;
	int bsr_member;

	sbuf_bcat(sb, buf, buf_len);
	if (last) {
		sbuf_printf(sb, "\\-");
		buf[buf_len] = ' '; buf_len++;
		buf[buf_len] = ' '; buf_len++;
	} else {
		sbuf_printf(sb, "|-");
		buf[buf_len] = '|'; buf_len++;
		buf[buf_len] = ' '; buf_len++;
	}

	bsr_member = BSRCPUMASK(node->members);

	if (node->type == PACKAGE_LEVEL) {
		sbuf_printf(sb, "PACKAGE MEMBERS: ");
	} else if (node->type == CHIP_LEVEL) {
		sbuf_printf(sb, "CHIP ID %d: ",
		    get_chip_ID(bsr_member));
	} else if (node->type == CORE_LEVEL) {
		if (node->compute_unit_id != -1) {
			sbuf_printf(sb, "Compute Unit ID %d: ",
			    node->compute_unit_id);
		} else {
			sbuf_printf(sb, "CORE ID %d: ",
			    get_core_number_within_chip(bsr_member));
		}
	} else if (node->type == THREAD_LEVEL) {
		if (node->compute_unit_id != -1) {
			sbuf_printf(sb, "CORE ID %d: ",
			    get_core_number_within_chip(bsr_member));
		} else {
			sbuf_printf(sb, "THREAD ID %d: ",
			    get_logical_CPU_number_within_core(bsr_member));
		}
	} else {
		sbuf_printf(sb, "UNKNOWN: ");
	}
	CPUSET_FOREACH(i, node->members) {
		sbuf_printf(sb, "cpu%d ", i);
	}
	sbuf_printf(sb, "\n");

	for (i = 0; i < node->child_no; i++) {
		print_cpu_topology_tree_sysctl_helper(node->child_node[i],
		    sb, buf, buf_len, i == (node->child_no - 1));
	}
}

/* SYSCTL PROCEDURE for printing the CPU Topology tree */
static int
print_cpu_topology_tree_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;
	char buf[INDENT_BUF_SIZE];

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL) {
		return (ENOMEM);
	}
	sbuf_printf(sb, "\n");
	print_cpu_topology_tree_sysctl_helper(cpu_root_node, sb, buf, 0, 1);

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

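/*
 * For illustration only: on a hypothetical machine with one package, two
 * cores and two threads per core, reading hw.cpu_topology.tree might produce
 * output roughly like this:
 *
 *	\-PACKAGE MEMBERS: cpu0 cpu1 cpu2 cpu3
 *	  \-CHIP ID 0: cpu0 cpu1 cpu2 cpu3
 *	    |-CORE ID 0: cpu0 cpu1
 *	    | |-THREAD ID 0: cpu0
 *	    | \-THREAD ID 1: cpu1
 *	    \-CORE ID 1: cpu2 cpu3
 *	      |-THREAD ID 0: cpu2
 *	      \-THREAD ID 1: cpu3
 *
 * The exact IDs and cpu numbering depend on how the hardware enumerates
 * its APIC IDs.
 */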

/* SYSCTL PROCEDURE for printing the CPU Topology level description */
static int
print_cpu_topology_level_description_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	if (cpu_topology_levels_number == 4)		/* HT available */
		sbuf_printf(sb, "0 - thread; 1 - core; 2 - socket; 3 - anything");
	else if (cpu_topology_levels_number == 3)	/* No HT available */
		sbuf_printf(sb, "0 - core; 1 - socket; 2 - anything");
	else if (cpu_topology_levels_number == 2)	/* No HT and no multi-core */
		sbuf_printf(sb, "0 - socket; 1 - anything");
	else
		sbuf_printf(sb, "Unknown");

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* Find a cpu_node_t by a mask */
static cpu_node_t *
get_cpu_node_by_cpumask(cpu_node_t *node, cpumask_t mask)
{
	cpu_node_t *found = NULL;
	int i;

	if (node->members == mask) {
		return node;
	}

	for (i = 0; i < node->child_no; i++) {
		found = get_cpu_node_by_cpumask(node->child_node[i], mask);
		if (found != NULL) {
			return found;
		}
	}
	return NULL;
}

cpu_node_t *
get_cpu_node_by_cpuid(int cpuid)
{
	cpumask_t mask = CPUMASK(cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	return get_cpu_node_by_cpumask(cpu_root_node, mask);
}

/* Get the mask of siblings at level_type for the given cpuid */
cpumask_t
get_cpumask_from_level(int cpuid, uint8_t level_type)
{
	cpu_node_t *node;
	cpumask_t mask = CPUMASK(cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	node = get_cpu_node_by_cpumask(cpu_root_node, mask);
	if (node == NULL) {
		return 0;
	}

	while (node != NULL) {
		if (node->type == level_type) {
			return node->members;
		}
		node = node->parent_node;
	}

	return 0;
}

/* Initialize the per-cpu sysctl info (pcpu_sysctl[]) */
static void
init_pcpu_topology_sysctl(void)
{
	int cpu;
	int i;
	cpumask_t mask;
	struct sbuf sb;

	for (i = 0; i < ncpus; i++) {
		sbuf_new(&sb, pcpu_sysctl[i].cpu_name,
		    sizeof(pcpu_sysctl[i].cpu_name), SBUF_FIXEDLEN);
		sbuf_printf(&sb, "cpu%d", i);
		sbuf_finish(&sb);

		/* Get physical siblings */
		mask = get_cpumask_from_level(i, CHIP_LEVEL);
		if (mask == 0) {
			pcpu_sysctl[i].physical_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].physical_siblings,
		    sizeof(pcpu_sysctl[i].physical_siblings), SBUF_FIXEDLEN);
		CPUSET_FOREACH(cpu, mask) {
			sbuf_printf(&sb, "cpu%d ", cpu);
		}
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].physical_id = get_chip_ID(i);

		/* Get core siblings */
		mask = get_cpumask_from_level(i, CORE_LEVEL);
		if (mask == 0) {
			pcpu_sysctl[i].core_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].core_siblings,
		    sizeof(pcpu_sysctl[i].core_siblings), SBUF_FIXEDLEN);
		CPUSET_FOREACH(cpu, mask) {
			sbuf_printf(&sb, "cpu%d ", cpu);
		}
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].core_id = get_core_number_within_chip(i);
	}
}

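/*
 * Sketch of the sysctl namespace assembled below (the values shown are
 * examples for a hypothetical two-threads-per-core machine):
 *
 *	hw.cpu_topology.tree			- ASCII tree dump (see above)
 *	hw.cpu_topology.level_description	- "0 - thread; 1 - core; ..."
 *	hw.cpu_topology.members			- "cpu0 cpu1 cpu2 cpu3"
 *	hw.cpu_topology.cpu0.physical_id	- 0
 *	hw.cpu_topology.cpu0.physical_siblings	- "cpu0 cpu1 cpu2 cpu3"
 *	hw.cpu_topology.cpu0.core_id		- 0
 *	hw.cpu_topology.cpu0.core_siblings	- "cpu0 cpu1"
 */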

/*
 * Build the SYSCTL structure for revealing the CPU topology to user-space.
 */
static void
build_sysctl_cpu_topology(void)
{
	int i;
	struct sbuf sb;

	/* SYSCTL new leaf for "cpu_topology" */
	sysctl_ctx_init(&cpu_topology_sysctl_ctx);
	cpu_topology_sysctl_tree = SYSCTL_ADD_NODE(&cpu_topology_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    "cpu_topology",
	    CTLFLAG_RD, 0, "");

	/* SYSCTL cpu_topology "tree" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "tree", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_tree_sysctl, "A",
	    "Tree print of CPU topology");

	/* SYSCTL cpu_topology "level_description" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "level_description", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_level_description_sysctl, "A",
	    "Level description of CPU topology");

	/* SYSCTL cpu_topology "members" entry */
	sbuf_new(&sb, cpu_topology_members,
	    sizeof(cpu_topology_members), SBUF_FIXEDLEN);
	CPUSET_FOREACH(i, cpu_root_node->members) {
		sbuf_printf(&sb, "cpu%d ", i);
	}
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	SYSCTL_ADD_STRING(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "members", CTLFLAG_RD,
	    cpu_topology_members, 0,
	    "Members of the CPU Topology");

	/* SYSCTL per_cpu info */
	for (i = 0; i < ncpus; i++) {
		/* New leaf : hw.cpu_topology.cpux */
		sysctl_ctx_init(&pcpu_sysctl[i].sysctl_ctx);
		pcpu_sysctl[i].sysctl_tree = SYSCTL_ADD_NODE(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
		    OID_AUTO,
		    pcpu_sysctl[i].cpu_name,
		    CTLFLAG_RD, 0, "");

		/* Check if the physical_id found is valid */
		if (pcpu_sysctl[i].physical_id == INVALID_ID) {
			continue;
		}

		/* Add physical id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].physical_id, 0,
		    "Physical ID");

		/* Add physical siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].physical_siblings, 0,
		    "Physical siblings");

		/* Check if the core_id found is valid */
		if (pcpu_sysctl[i].core_id == INVALID_ID) {
			continue;
		}

		/* Add core id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].core_id, 0,
		    "Core ID");

		/* Add core siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].core_siblings, 0,
		    "Core siblings");
	}
}

/* Build the CPU topology tree and the sysctl nodes describing it */
static void
init_cpu_topology(void)
{
	build_cpu_topology();

	init_pcpu_topology_sysctl();
	build_sysctl_cpu_topology();
}
SYSINIT(cpu_topology, SI_BOOT2_CPU_TOPOLOGY, SI_ORDER_FIRST,
    init_cpu_topology, NULL)