/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/processor.h>
#include <sys/disp.h>
#include <sys/group.h>
#include <sys/pghw.h>
#include <sys/bitset.h>
#include <sys/lgrp.h>
#include <sys/cmt.h>
#include <sys/cpu_pm.h>

/*
 * CMT scheduler / dispatcher support
 *
 * This file implements CMT scheduler support using Processor Groups.
 * The CMT processor group class creates and maintains the CMT class
 * specific processor group pg_cmt_t.
 *
 * ----------------------------  <-- pg_cmt_t *
 * | pghw_t                    |
 * ----------------------------
 * | CMT class specific data   |
 * | - hierarchy linkage       |
 * | - CMT load balancing data |
 * | - active CPU group/bitset |
 * ----------------------------
 *
 * The scheduler/dispatcher leverages knowledge of the performance
 * relevant CMT sharing relationships existing between cpus to implement
 * optimized affinity, load balancing, and coalescence policies.
 *
 * Load balancing policy seeks to improve performance by minimizing
 * contention over shared processor resources / facilities. Affinity
 * policies seek to improve cache and TLB utilization. Coalescence
 * policies improve resource utilization and ultimately power efficiency.
 *
 * The CMT PGs created by this class are already arranged into a
 * hierarchy (which is done in the pghw layer). To implement the top-down
 * CMT load balancing algorithm, the CMT PGs additionally maintain
 * parent, child and sibling hierarchy relationships.
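 * For example, a PG representing a shared physical chip will typically
 * sit above a PG representing a shared cache, which in turn sits above
 * a PG representing a shared instruction pipeline.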
 * Parent PGs always contain a superset of their children's resources,
 * each PG can have at most one parent, and siblings are the group of PGs
 * sharing the same parent.
 *
 * On NUMA systems, the CMT load balancing algorithm balances across the
 * CMT PGs within their respective lgroups. On UMA based systems, there
 * exists a top level group of PGs to balance across. On NUMA systems multiple
 * top level groups are instantiated, where the top level balancing begins by
 * balancing across the CMT PGs within their respective (per lgroup) top level
 * groups.
 */
static cmt_lgrp_t	*cmt_lgrps = NULL;	/* cmt_lgrps list head */
static cmt_lgrp_t	*cpu0_lgrp = NULL;	/* boot CPU's initial lgrp */
						/* used for null_proc_lpa */
cmt_lgrp_t		*cmt_root = NULL;	/* Reference to root cmt pg */

static int		is_cpu0 = 1;	/* true if this is boot CPU context */

/*
 * Array of hardware sharing relationships that are blacklisted.
 * PGs won't be instantiated for blacklisted hardware sharing relationships.
 */
static int		cmt_hw_blacklisted[PGHW_NUM_COMPONENTS];

/*
 * Set this to non-zero to disable CMT scheduling
 * This must be done via kmdb -d, as /etc/system will be too late
 */
int			cmt_sched_disabled = 0;

/*
 * Status codes for CMT lineage validation
 * See pg_cmt_lineage_validate() below
 */
typedef enum cmt_lineage_validation {
	CMT_LINEAGE_VALID,
	CMT_LINEAGE_NON_CONCENTRIC,
	CMT_LINEAGE_PG_SPANS_LGRPS,
	CMT_LINEAGE_NON_PROMOTABLE,
	CMT_LINEAGE_REPAIRED,
	CMT_LINEAGE_UNRECOVERABLE
} cmt_lineage_validation_t;

/*
 * Status of the current lineage under construction.
 * One must be holding cpu_lock to change this.
 */
cmt_lineage_validation_t	cmt_lineage_status = CMT_LINEAGE_VALID;

/*
 * Power domain definitions (on x86) are defined by ACPI, and
 * therefore may be subject to BIOS bugs.
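 * PG_CMT_HW_SUSPECT() below identifies such power management domains so
 * that their sharing relationships can be treated as suspect.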
 */
#define	PG_CMT_HW_SUSPECT(hw)	PGHW_IS_PM_DOMAIN(hw)

/*
 * Macro to test if PG is managed by the CMT PG class
 */
#define	IS_CMT_PG(pg)	(((pg_t *)(pg))->pg_class->pgc_id == pg_cmt_class_id)

static pg_cid_t		pg_cmt_class_id;	/* PG class id */

static pg_t		*pg_cmt_alloc();
static void		pg_cmt_free(pg_t *);
static void		pg_cmt_cpu_init(cpu_t *, cpu_pg_t *);
static void		pg_cmt_cpu_fini(cpu_t *, cpu_pg_t *);
static void		pg_cmt_cpu_active(cpu_t *);
static void		pg_cmt_cpu_inactive(cpu_t *);
static void		pg_cmt_cpupart_in(cpu_t *, cpupart_t *);
static void		pg_cmt_cpupart_move(cpu_t *, cpupart_t *, cpupart_t *);
static char		*pg_cmt_policy_name(pg_t *);
static void		pg_cmt_hier_sort(pg_cmt_t **, int);
static pg_cmt_t		*pg_cmt_hier_rank(pg_cmt_t *, pg_cmt_t *);
static int		pg_cmt_cpu_belongs(pg_t *, cpu_t *);
static int		pg_cmt_hw(pghw_type_t);
static cmt_lgrp_t	*pg_cmt_find_lgrp(lgrp_handle_t);
static cmt_lgrp_t	*pg_cmt_lgrp_create(lgrp_handle_t);
static void		cmt_ev_thread_swtch(pg_t *, cpu_t *, hrtime_t,
			    kthread_t *, kthread_t *);
static void		cmt_ev_thread_swtch_pwr(pg_t *, cpu_t *, hrtime_t,
			    kthread_t *, kthread_t *);
static void		cmt_ev_thread_remain_pwr(pg_t *, cpu_t *, kthread_t *);
static cmt_lineage_validation_t	pg_cmt_lineage_validate(pg_cmt_t **, int *,
			    cpu_pg_t *);

/*
 * CMT PG ops
 */
struct pg_ops pg_ops_cmt = {
	pg_cmt_alloc,
	pg_cmt_free,
	pg_cmt_cpu_init,
	pg_cmt_cpu_fini,
	pg_cmt_cpu_active,
	pg_cmt_cpu_inactive,
	pg_cmt_cpupart_in,
	NULL,			/* cpupart_out */
	pg_cmt_cpupart_move,
	pg_cmt_cpu_belongs,
	pg_cmt_policy_name,
};

/*
 * Initialize the CMT PG class
 */
void
pg_cmt_class_init(void)
{
	if (cmt_sched_disabled)
		return;

	pg_cmt_class_id = pg_class_register("cmt", &pg_ops_cmt, PGR_PHYSICAL);
}

/*
 * Called to indicate a new CPU has started up so
 * that either t0 or the slave startup thread can
 * be accounted for.
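 * The CPU's utilization is accounted for by generating a thread switch
 * event from the CPU's idle thread to its current thread.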
 */
void
pg_cmt_cpu_startup(cpu_t *cp)
{
	pg_ev_thread_swtch(cp, gethrtime_unscaled(), cp->cpu_idle_thread,
	    cp->cpu_thread);
}

/*
 * Return non-zero if thread can migrate between "from" and "to"
 * without a performance penalty
 */
int
pg_cmt_can_migrate(cpu_t *from, cpu_t *to)
{
	if (from->cpu_physid->cpu_cacheid ==
	    to->cpu_physid->cpu_cacheid)
		return (1);
	return (0);
}

/*
 * CMT class specific PG allocation
 */
static pg_t *
pg_cmt_alloc(void)
{
	return (kmem_zalloc(sizeof (pg_cmt_t), KM_NOSLEEP));
}

/*
 * Class specific PG de-allocation
 */
static void
pg_cmt_free(pg_t *pg)
{
	ASSERT(pg != NULL);
	ASSERT(IS_CMT_PG(pg));

	kmem_free((pg_cmt_t *)pg, sizeof (pg_cmt_t));
}

/*
 * Given a hardware sharing relationship, return which dispatcher
 * policies should be implemented to optimize performance and efficiency
 */
static pg_cmt_policy_t
pg_cmt_policy(pghw_type_t hw)
{
	pg_cmt_policy_t p;

	/*
	 * Give the platform a chance to override the default
	 */
	if ((p = pg_plat_cmt_policy(hw)) != CMT_NO_POLICY)
		return (p);

	switch (hw) {
	case PGHW_IPIPE:
	case PGHW_FPU:
	case PGHW_CHIP:
		return (CMT_BALANCE);
	case PGHW_CACHE:
		return (CMT_AFFINITY);
	case PGHW_POW_ACTIVE:
	case PGHW_POW_IDLE:
		return (CMT_BALANCE);
	default:
		return (CMT_NO_POLICY);
	}
}

/*
 * Rank the importance of optimizing for the pg1 relationship vs.
 * the pg2 relationship.
 */
static pg_cmt_t *
pg_cmt_hier_rank(pg_cmt_t *pg1, pg_cmt_t *pg2)
{
	pghw_type_t hw1 = ((pghw_t *)pg1)->pghw_hw;
	pghw_type_t hw2 = ((pghw_t *)pg2)->pghw_hw;

	/*
	 * A power domain is only important if CPUPM is enabled.
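	 * If CPU power management is disabled, rank the non power domain
	 * PG as the more important of the two.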
	 */
	if (cpupm_get_policy() == CPUPM_POLICY_DISABLED) {
		if (PGHW_IS_PM_DOMAIN(hw1) && !PGHW_IS_PM_DOMAIN(hw2))
			return (pg2);
		if (PGHW_IS_PM_DOMAIN(hw2) && !PGHW_IS_PM_DOMAIN(hw1))
			return (pg1);
	}

	/*
	 * Otherwise, ask the platform
	 */
	if (pg_plat_hw_rank(hw1, hw2) == hw1)
		return (pg1);
	else
		return (pg2);
}

/*
 * Initialize CMT callbacks for the given PG
 */
static void
cmt_callback_init(pg_t *pg)
{
	switch (((pghw_t *)pg)->pghw_hw) {
	case PGHW_POW_ACTIVE:
		pg->pg_cb.thread_swtch = cmt_ev_thread_swtch_pwr;
		pg->pg_cb.thread_remain = cmt_ev_thread_remain_pwr;
		break;
	default:
		pg->pg_cb.thread_swtch = cmt_ev_thread_swtch;

	}
}

/*
 * Promote PG above its current parent.
 * This is only legal if PG has an equal or greater number of CPUs than its
 * parent.
 *
 * This routine operates on the CPU specific processor group data (for the CPUs
 * in the PG being promoted), and may be invoked from a context where one CPU's
 * PG data is under construction. In this case the argument "pgdata", if not
 * NULL, is a reference to the CPU's under-construction PG data.
 */
static void
cmt_hier_promote(pg_cmt_t *pg, cpu_pg_t *pgdata)
{
	pg_cmt_t	*parent;
	group_t		*children;
	cpu_t		*cpu;
	group_iter_t	iter;
	pg_cpu_itr_t	cpu_iter;
	int		r;
	int		err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	parent = pg->cmt_parent;
	if (parent == NULL) {
		/*
		 * Nothing to do
		 */
		return;
	}

	ASSERT(PG_NUM_CPUS((pg_t *)pg) >= PG_NUM_CPUS((pg_t *)parent));

	/*
	 * We're changing around the hierarchy, which is actively traversed
	 * by the dispatcher. Pause CPUs to ensure exclusivity.
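	 * The CPUs are resumed via start_cpus() once the promotion is
	 * complete.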
	 */
	pause_cpus(NULL);

	/*
	 * If necessary, update the parent's sibling set, replacing parent
	 * with PG.
	 */
	if (parent->cmt_siblings) {
		if (group_remove(parent->cmt_siblings, parent, GRP_NORESIZE)
		    != -1) {
			r = group_add(parent->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(r != -1);
		}
	}

	/*
	 * If the parent is at the top of the hierarchy, replace its entry
	 * in the root lgroup's group of top level PGs.
	 */
	if (parent->cmt_parent == NULL &&
	    parent->cmt_siblings != &cmt_root->cl_pgs) {
		if (group_remove(&cmt_root->cl_pgs, parent, GRP_NORESIZE)
		    != -1) {
			r = group_add(&cmt_root->cl_pgs, pg, GRP_NORESIZE);
			ASSERT(r != -1);
		}
	}

	/*
	 * We assume (and therefore assert) that the PG being promoted is an
	 * only child of its parent. Update the parent's children set
	 * replacing PG's entry with the parent (since the parent is becoming
	 * the child). Then have PG and the parent swap children sets.
	 */
	ASSERT(GROUP_SIZE(parent->cmt_children) <= 1);
	if (group_remove(parent->cmt_children, pg, GRP_NORESIZE) != -1) {
		r = group_add(parent->cmt_children, parent, GRP_NORESIZE);
		ASSERT(r != -1);
	}

	children = pg->cmt_children;
	pg->cmt_children = parent->cmt_children;
	parent->cmt_children = children;

	/*
	 * Update the sibling references for PG and its parent
	 */
	pg->cmt_siblings = parent->cmt_siblings;
	parent->cmt_siblings = pg->cmt_children;

	/*
	 * Update any cached lineages in the per CPU pg data.
	 */
	PG_CPU_ITR_INIT(pg, cpu_iter);
	while ((cpu = pg_cpu_next(&cpu_iter)) != NULL) {
		int		idx;
		pg_cmt_t	*cpu_pg;
		cpu_pg_t	*pgd;	/* CPU's PG data */

		/*
		 * The CPU whose lineage is under construction still
		 * references the bootstrap CPU PG data structure.
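		 * In that case, operate on the caller supplied "pgdata"
		 * rather than cpu->cpu_pg.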
		 */
		if (pg_cpu_is_bootstrapped(cpu))
			pgd = pgdata;
		else
			pgd = cpu->cpu_pg;

		/*
		 * Iterate over the CPU's PGs updating the children
		 * of the PG being promoted, since they have a new parent.
		 */
		group_iter_init(&iter);
		while ((cpu_pg = group_iterate(&pgd->cmt_pgs, &iter)) != NULL) {
			if (cpu_pg->cmt_parent == pg) {
				cpu_pg->cmt_parent = parent;
			}
		}

		/*
		 * Update the CMT load balancing lineage
		 */
		if ((idx = group_find(&pgd->cmt_pgs, (void *)pg)) == -1) {
			/*
			 * Unless this is the CPU whose lineage is being
			 * constructed, the PG being promoted should be
			 * in the lineage.
			 */
			ASSERT(pg_cpu_is_bootstrapped(cpu));
			continue;
		}

		ASSERT(GROUP_ACCESS(&pgd->cmt_pgs, idx - 1) == parent);
		ASSERT(idx > 0);

		/*
		 * Have the child and the parent swap places in the CPU's
		 * lineage
		 */
		group_remove_at(&pgd->cmt_pgs, idx);
		group_remove_at(&pgd->cmt_pgs, idx - 1);
		err = group_add_at(&pgd->cmt_pgs, parent, idx);
		ASSERT(err == 0);
		err = group_add_at(&pgd->cmt_pgs, pg, idx - 1);
		ASSERT(err == 0);
	}

	/*
	 * Update the parent references for PG and its parent
	 */
	pg->cmt_parent = parent->cmt_parent;
	parent->cmt_parent = pg;

	start_cpus();
}

/*
 * CMT class callback for a new CPU entering the system
 *
 * This routine operates on the CPU specific processor group data (for the CPU
 * being initialized). The argument "pgdata" is a reference to the CPU's PG
 * data to be constructed.
 *
 * Because cp->cpu_pg is used by the dispatcher to access the CPU's PG data,
 * it references a "bootstrap" structure while this callback runs.
 * pg_cmt_cpu_init() and the routines it calls must be careful to operate only
 * on the "pgdata" argument, and not cp->cpu_pg.
 */
static void
pg_cmt_cpu_init(cpu_t *cp, cpu_pg_t *pgdata)
{
	pg_cmt_t	*pg;
	group_t		*cmt_pgs;
	int		levels, level;
	pghw_type_t	hw;
	pg_t		*pg_cache = NULL;
	pg_cmt_t	*cpu_cmt_hier[PGHW_NUM_COMPONENTS];
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;
	cmt_lineage_validation_t	lineage_status;

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(pg_cpu_is_bootstrapped(cp));

	if (cmt_sched_disabled)
		return;

	/*
	 * A new CPU is coming into the system.
	 * Interrogate the platform to see if the CPU
	 * has any performance or efficiency relevant
	 * sharing relationships
	 */
	cmt_pgs = &pgdata->cmt_pgs;
	pgdata->cmt_lineage = NULL;

	bzero(cpu_cmt_hier, sizeof (cpu_cmt_hier));
	levels = 0;
	for (hw = PGHW_START; hw < PGHW_NUM_COMPONENTS; hw++) {

		pg_cmt_policy_t	policy;

		/*
		 * We're only interested in the hw sharing relationships
		 * for which we know how to optimize.
		 */
		policy = pg_cmt_policy(hw);
		if (policy == CMT_NO_POLICY ||
		    pg_plat_hw_shared(cp, hw) == 0)
			continue;

		/*
		 * Continue if the hardware sharing relationship has been
		 * blacklisted.
		 */
		if (cmt_hw_blacklisted[hw]) {
			continue;
		}

		/*
		 * Find (or create) the PG associated with
		 * the hw sharing relationship in which cp
		 * belongs.
		 *
		 * Determine if a suitable PG already
		 * exists, or if one needs to be created.
		 */
		pg = (pg_cmt_t *)pghw_place_cpu(cp, hw);
		if (pg == NULL) {
			/*
			 * Create a new one.
			 * Initialize the common...
			 */
			pg = (pg_cmt_t *)pg_create(pg_cmt_class_id);

			/* ... physical ... */
			pghw_init((pghw_t *)pg, cp, hw);

			/*
			 * ... and CMT specific portions of the
			 * structure.
			 */
			pg->cmt_policy = policy;

			/* CMT event callbacks */
			cmt_callback_init((pg_t *)pg);

			bitset_init(&pg->cmt_cpus_actv_set);
			group_create(&pg->cmt_cpus_actv);
		} else {
			ASSERT(IS_CMT_PG(pg));
		}

		/* Add the CPU to the PG */
		pg_cpu_add((pg_t *)pg, cp, pgdata);

		/*
		 * Ensure capacity of the active CPU group/bitset
		 */
		group_expand(&pg->cmt_cpus_actv,
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		if (cp->cpu_seqid >=
		    bitset_capacity(&pg->cmt_cpus_actv_set)) {
			bitset_resize(&pg->cmt_cpus_actv_set,
			    cp->cpu_seqid + 1);
		}

		/*
		 * Build a lineage of CMT PGs for load balancing / coalescence
		 */
		if (policy & (CMT_BALANCE | CMT_COALESCE)) {
			cpu_cmt_hier[levels++] = pg;
		}

		/* Cache this for later */
		if (hw == PGHW_CACHE)
			pg_cache = (pg_t *)pg;
	}

	group_expand(cmt_pgs, levels);

	if (cmt_root == NULL)
		cmt_root = pg_cmt_lgrp_create(lgrp_plat_root_hand());

	/*
	 * Find the lgrp that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);
	if ((lgrp = pg_cmt_find_lgrp(lgrp_handle)) == NULL)
		lgrp = pg_cmt_lgrp_create(lgrp_handle);

	/*
	 * Sort the PGs in the lineage by number of CPUs, in ascending order
	 */
	pg_cmt_hier_sort(cpu_cmt_hier, levels);

	/*
	 * Examine the lineage and validate it.
	 * This routine will also try to fix the lineage along with the
	 * rest of the PG hierarchy should it detect an issue.
	 *
	 * If it returns anything other than VALID or REPAIRED, an
	 * unrecoverable error has occurred, and we cannot proceed.
	 */
	lineage_status = pg_cmt_lineage_validate(cpu_cmt_hier, &levels, pgdata);
	if ((lineage_status != CMT_LINEAGE_VALID) &&
	    (lineage_status != CMT_LINEAGE_REPAIRED)) {
		/*
		 * In the case of an unrecoverable error where CMT scheduling
		 * has been disabled, assert that the under construction CPU's
		 * PG data has an empty CMT load balancing lineage.
		 */
		ASSERT((cmt_sched_disabled == 0) ||
		    (GROUP_SIZE(&(pgdata->cmt_pgs)) == 0));
		return;
	}

	/*
	 * For existing PGs in the lineage, verify that the parent is
	 * correct, as the generation in the lineage may have changed
	 * as a result of the sorting. Start the traversal at the top
	 * of the lineage, moving down.
	 */
	for (level = levels - 1; level >= 0; ) {
		int reorg;

		reorg = 0;
		pg = cpu_cmt_hier[level];

		/*
		 * Promote PGs at an incorrect generation into place.
		 */
		while (pg->cmt_parent &&
		    pg->cmt_parent != cpu_cmt_hier[level + 1]) {
			cmt_hier_promote(pg, pgdata);
			reorg++;
		}
		if (reorg > 0)
			level = levels - 1;
		else
			level--;
	}

	/*
	 * For each of the PGs in the CPU's lineage:
	 *	- Add an entry in the CPU sorted CMT PG group
	 *	  which is used for top down CMT load balancing
	 *	- Tie the PG into the CMT hierarchy by connecting
	 *	  it to its parent and siblings.
	 */
	for (level = 0; level < levels; level++) {
		uint_t		children;
		int		err;

		pg = cpu_cmt_hier[level];
		err = group_add_at(cmt_pgs, pg, levels - level - 1);
		ASSERT(err == 0);

		if (level == 0)
			pgdata->cmt_lineage = (pg_t *)pg;

		if (pg->cmt_siblings != NULL) {
			/* Already initialized */
			ASSERT(pg->cmt_parent == NULL ||
			    pg->cmt_parent == cpu_cmt_hier[level + 1]);
			ASSERT(pg->cmt_siblings == &lgrp->cl_pgs ||
			    ((pg->cmt_parent != NULL) &&
			    pg->cmt_siblings == pg->cmt_parent->cmt_children));
			continue;
		}

		if ((level + 1) == levels) {
			pg->cmt_parent = NULL;

			pg->cmt_siblings = &lgrp->cl_pgs;
			children = ++lgrp->cl_npgs;
			if (cmt_root != lgrp)
				cmt_root->cl_npgs++;
		} else {
			pg->cmt_parent = cpu_cmt_hier[level + 1];

			/*
			 * A good parent keeps track of their children.
			 * The parent's children group is also the PG's
			 * siblings.
			 */
			if (pg->cmt_parent->cmt_children == NULL) {
				pg->cmt_parent->cmt_children =
				    kmem_zalloc(sizeof (group_t), KM_SLEEP);
				group_create(pg->cmt_parent->cmt_children);
			}
			pg->cmt_siblings = pg->cmt_parent->cmt_children;
			children = ++pg->cmt_parent->cmt_nchildren;
		}

		group_expand(pg->cmt_siblings, children);
		group_expand(&cmt_root->cl_pgs, cmt_root->cl_npgs);
	}

	/*
	 * Cache the chip and core IDs in the cpu_t->cpu_physid structure
	 * for fast lookups later.
	 */
	if (cp->cpu_physid) {
		cp->cpu_physid->cpu_chipid =
		    pg_plat_hw_instance_id(cp, PGHW_CHIP);
		cp->cpu_physid->cpu_coreid = pg_plat_get_core_id(cp);

		/*
		 * If this cpu has a PG representing shared cache, then set
		 * cpu_cacheid to that PG's logical id
		 */
		if (pg_cache)
			cp->cpu_physid->cpu_cacheid = pg_cache->pg_id;
	}

	/* CPU0 only initialization */
	if (is_cpu0) {
		pg_cmt_cpu_startup(cp);
		is_cpu0 = 0;
		cpu0_lgrp = lgrp;
	}

}

/*
 * Class callback when a CPU is leaving the system (deletion)
 *
 * "pgdata" is a reference to the CPU's PG data to be deconstructed.
 *
 * Because cp->cpu_pg is used by the dispatcher to access the CPU's PG data,
 * it references a "bootstrap" structure across this function's invocation.
 * pg_cmt_cpu_fini() and the routines it calls must be careful to operate only
 * on the "pgdata" argument, and not cp->cpu_pg.
 */
static void
pg_cmt_cpu_fini(cpu_t *cp, cpu_pg_t *pgdata)
{
	group_iter_t	i;
	pg_cmt_t	*pg;
	group_t		*pgs, *cmt_pgs;
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;

	if (cmt_sched_disabled)
		return;

	ASSERT(pg_cpu_is_bootstrapped(cp));

	pgs = &pgdata->pgs;
	cmt_pgs = &pgdata->cmt_pgs;

	/*
	 * Find the lgroup that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);

	lgrp = pg_cmt_find_lgrp(lgrp_handle);
	if (ncpus == 1 && lgrp != cpu0_lgrp) {
		/*
		 * One might wonder how we could be deconfiguring the
		 * only CPU in the system.
		 *
		 * On Starcat systems when null_proc_lpa is detected,
		 * the boot CPU (which is already configured into a leaf
		 * lgroup), is moved into the root lgroup.
		 * This is done by deconfiguring it from both lgroups and
		 * processor groups, and then later reconfiguring it back in.
		 * This call to pg_cmt_cpu_fini() is part of that
		 * deconfiguration.
		 *
		 * This special case is detected by noting that the platform
		 * has changed the CPU's lgrp affiliation (since it now
		 * belongs in the root). In this case, use the cmt_lgrp_t
		 * cached for the boot CPU, since this is what needs to be
		 * torn down.
		 */
		lgrp = cpu0_lgrp;
	}

	ASSERT(lgrp != NULL);

	/*
	 * First, clean up anything load balancing specific for each of
	 * the CPU's PGs that participated in CMT load balancing
	 */
	pg = (pg_cmt_t *)pgdata->cmt_lineage;
	while (pg != NULL) {

		/*
		 * Remove the PG from the CPU's load balancing lineage
		 */
		(void) group_remove(cmt_pgs, pg, GRP_RESIZE);

		/*
		 * If it's about to become empty, destroy its children
		 * group, and remove its reference from its siblings.
		 * This is done here (rather than below) to avoid removing
		 * our reference from a PG that we just eliminated.
		 */
		if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 1) {
			if (pg->cmt_children != NULL)
				group_destroy(pg->cmt_children);
			if (pg->cmt_siblings != NULL) {
				if (pg->cmt_siblings == &lgrp->cl_pgs)
					lgrp->cl_npgs--;
				else
					pg->cmt_parent->cmt_nchildren--;
			}
		}
		pg = pg->cmt_parent;
	}
	ASSERT(GROUP_SIZE(cmt_pgs) == 0);

	/*
	 * Now that the load balancing lineage updates have happened,
	 * remove the CPU from all its PGs (destroying any that become
	 * empty).
	 */
	group_iter_init(&i);
	while ((pg = group_iterate(pgs, &i)) != NULL) {
		if (IS_CMT_PG(pg) == 0)
			continue;

		pg_cpu_delete((pg_t *)pg, cp, pgdata);
		/*
		 * Deleting the CPU from the PG changes the CPU's
		 * PG group over which we are actively iterating.
		 * Re-initialize the iteration
		 */
		group_iter_init(&i);

		if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 0) {

			/*
			 * The PG has become zero sized, so destroy it.
			 */
			group_destroy(&pg->cmt_cpus_actv);
			bitset_fini(&pg->cmt_cpus_actv_set);
			pghw_fini((pghw_t *)pg);

			pg_destroy((pg_t *)pg);
		}
	}
}

/*
 * Class callback when a CPU is entering a cpu partition
 */
static void
pg_cmt_cpupart_in(cpu_t *cp, cpupart_t *pp)
{
	group_t		*pgs;
	pg_t		*pg;
	group_iter_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;

	/*
	 * Ensure that the new partition's PG bitset
	 * is large enough for all CMT PG's to which cp
	 * belongs
	 */
	group_iter_init(&i);
	while ((pg = group_iterate(pgs, &i)) != NULL) {
		if (IS_CMT_PG(pg) == 0)
			continue;

		if (bitset_capacity(&pp->cp_cmt_pgs) <= pg->pg_id)
			bitset_resize(&pp->cp_cmt_pgs, pg->pg_id + 1);
	}
}

/*
 * Class callback when a CPU is actually moving partitions
 */
static void
pg_cmt_cpupart_move(cpu_t *cp, cpupart_t *oldpp, cpupart_t *newpp)
{
	cpu_t		*cpp;
	group_t		*pgs;
	pg_t		*pg;
	group_iter_t	pg_iter;
	pg_cpu_itr_t	cpu_iter;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&pg_iter);

	/*
	 * Iterate over the CPU's CMT PGs
	 */
	while ((pg = group_iterate(pgs, &pg_iter)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Add the PG to the bitset in the new partition.
		 */
		bitset_add(&newpp->cp_cmt_pgs, pg->pg_id);

		/*
		 * Remove the PG from the bitset in the old partition
		 * if the last of the PG's CPUs have left.
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_iter);
		while ((cpp = pg_cpu_next(&cpu_iter)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == oldpp->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found)
			bitset_del(&cp->cpu_part->cp_cmt_pgs, pg->pg_id);
	}
}

/*
 * Class callback when a CPU becomes active (online)
 *
 * This is called in a context where CPUs are paused
 */
static void
pg_cmt_cpu_active(cpu_t *cp)
{
	int		err;
	group_iter_t	i;
	pg_cmt_t	*pg;
	group_t		*pgs;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&i);

	/*
	 * Iterate over the CPU's PGs
	 */
	while ((pg = group_iterate(pgs, &i)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		err = group_add(&pg->cmt_cpus_actv, cp, GRP_NORESIZE);
		ASSERT(err == 0);

		/*
		 * If this is the first active CPU in the PG, and it
		 * represents a hardware sharing relationship over which
		 * CMT load balancing is performed, add it as a candidate
		 * for balancing with its siblings.
		 */
		if (GROUP_SIZE(&pg->cmt_cpus_actv) == 1 &&
		    (pg->cmt_policy & (CMT_BALANCE | CMT_COALESCE))) {
			err = group_add(pg->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(err == 0);

			/*
			 * If this is a top level PG, add it as a balancing
			 * candidate when balancing within the root lgroup.
			 */
			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				err = group_add(&cmt_root->cl_pgs, pg,
				    GRP_NORESIZE);
				ASSERT(err == 0);
			}
		}

		/*
		 * Notate the CPU in the PG's active CPU bitset.
		 * Also notate the PG as being active in its associated
		 * partition
		 */
		bitset_add(&pg->cmt_cpus_actv_set, cp->cpu_seqid);
		bitset_add(&cp->cpu_part->cp_cmt_pgs, ((pg_t *)pg)->pg_id);
	}
}

/*
 * Class callback when a CPU goes inactive (offline)
 *
 * This is called in a context where CPUs are paused
 */
static void
pg_cmt_cpu_inactive(cpu_t *cp)
{
	int		err;
	group_t		*pgs;
	pg_cmt_t	*pg;
	cpu_t		*cpp;
	group_iter_t	i;
	pg_cpu_itr_t	cpu_itr;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&i);

	while ((pg = group_iterate(pgs, &i)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Remove the CPU from the CMT PG's active CPU group
		 * and bitmap
		 */
		err = group_remove(&pg->cmt_cpus_actv, cp, GRP_NORESIZE);
		ASSERT(err == 0);

		bitset_del(&pg->cmt_cpus_actv_set, cp->cpu_seqid);

		/*
		 * If there are no more active CPUs in this PG over which
		 * load was balanced, remove it as a balancing candidate.
		 */
		if (GROUP_SIZE(&pg->cmt_cpus_actv) == 0 &&
		    (pg->cmt_policy & (CMT_BALANCE | CMT_COALESCE))) {
			err = group_remove(pg->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(err == 0);

			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				err = group_remove(&cmt_root->cl_pgs, pg,
				    GRP_NORESIZE);
				ASSERT(err == 0);
			}
		}

		/*
		 * Assert the number of active CPUs does not exceed
		 * the total number of CPUs in the PG
		 */
		ASSERT(GROUP_SIZE(&pg->cmt_cpus_actv) <=
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		/*
		 * Update the PG bitset in the CPU's old partition
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_itr);
		while ((cpp = pg_cpu_next(&cpu_itr)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == cp->cpu_part->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found) {
			bitset_del(&cp->cpu_part->cp_cmt_pgs,
			    ((pg_t *)pg)->pg_id);
		}
	}
}

/*
 * Return non-zero if the CPU belongs in the given PG
 */
static int
pg_cmt_cpu_belongs(pg_t *pg, cpu_t *cp)
{
	cpu_t	*pg_cpu;

	pg_cpu = GROUP_ACCESS(&pg->pg_cpus, 0);

	ASSERT(pg_cpu != NULL);

	/*
	 * The CPU belongs if, given the nature of the hardware sharing
	 * relationship represented by the PG, the CPU has that
	 * relationship with some other CPU already in the PG
	 */
	if (pg_plat_cpus_share(cp, pg_cpu, ((pghw_t *)pg)->pghw_hw))
		return (1);

	return (0);
}

/*
 * Sort the CPU's CMT hierarchy, where "size" is the number of levels.
 */
static void
pg_cmt_hier_sort(pg_cmt_t **hier, int size)
{
	int		i, j, inc;
	pg_t		*tmp;
	pg_t		**h = (pg_t **)hier;

	/*
	 * First sort by number of CPUs
	 */
	inc = size / 2;
	while (inc > 0) {
		for (i = inc; i < size; i++) {
			j = i;
			tmp = h[i];
			while ((j >= inc) &&
			    (PG_NUM_CPUS(h[j - inc]) > PG_NUM_CPUS(tmp))) {
				h[j] = h[j - inc];
				j = j - inc;
			}
			h[j] = tmp;
		}
		if (inc == 2)
			inc = 1;
		else
			inc = (inc * 5) / 11;
	}

	/*
	 * Break ties by asking the platform.
	 * Determine if h[i] outranks h[i + 1] and if so, swap them.
	 */
	for (i = 0; i < size - 1; i++) {
		if ((PG_NUM_CPUS(h[i]) == PG_NUM_CPUS(h[i + 1])) &&
		    pg_cmt_hier_rank(hier[i], hier[i + 1]) == hier[i]) {
			tmp = h[i];
			h[i] = h[i + 1];
			h[i + 1] = tmp;
		}
	}
}

/*
 * Return a cmt_lgrp_t * given an lgroup handle.
 */
static cmt_lgrp_t *
pg_cmt_find_lgrp(lgrp_handle_t hand)
{
	cmt_lgrp_t	*lgrp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	lgrp = cmt_lgrps;
	while (lgrp != NULL) {
		if (lgrp->cl_hand == hand)
			break;
		lgrp = lgrp->cl_next;
	}
	return (lgrp);
}

/*
 * Create a cmt_lgrp_t with the specified handle.
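 * The new cmt_lgrp_t is linked onto the head of the global cmt_lgrps list,
 * which is protected by cpu_lock.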
 */
static cmt_lgrp_t *
pg_cmt_lgrp_create(lgrp_handle_t hand)
{
	cmt_lgrp_t	*lgrp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	lgrp = kmem_zalloc(sizeof (cmt_lgrp_t), KM_SLEEP);

	lgrp->cl_hand = hand;
	lgrp->cl_npgs = 0;
	lgrp->cl_next = cmt_lgrps;
	cmt_lgrps = lgrp;
	group_create(&lgrp->cl_pgs);

	return (lgrp);
}

/*
 * Interfaces to enable and disable power aware dispatching.
 * The caller must be holding cpu_lock.
 *
 * Return 0 on success and -1 on failure.
 */
int
cmt_pad_enable(pghw_type_t type)
{
	group_t		*hwset;
	group_iter_t	iter;
	pg_cmt_t	*pg;

	ASSERT(PGHW_IS_PM_DOMAIN(type));
	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((hwset = pghw_set_lookup(type)) == NULL ||
	    cmt_hw_blacklisted[type]) {
		/*
		 * Unable to find any instances of the specified type
		 * of power domain, or the power domains have been blacklisted.
		 */
		return (-1);
	}

	/*
	 * Iterate over the power domains, setting the default dispatcher
	 * policy for power/performance optimization.
	 *
	 * Simply setting the policy isn't enough in the case where the power
	 * domain is an only child of another PG. Because the dispatcher walks
	 * the PG hierarchy in a top down fashion, the higher up PG's policy
	 * will dominate. So promote the power domain above its parent if both
	 * PG and its parent have the same CPUs to ensure its policy
	 * dominates.
	 */
	group_iter_init(&iter);
	while ((pg = group_iterate(hwset, &iter)) != NULL) {
		/*
		 * If the power domain is an only child to a parent
		 * not implementing the same policy, promote the child
		 * above the parent to activate the policy.
		 */
		pg->cmt_policy = pg_cmt_policy(((pghw_t *)pg)->pghw_hw);
		while ((pg->cmt_parent != NULL) &&
		    (pg->cmt_parent->cmt_policy != pg->cmt_policy) &&
		    (PG_NUM_CPUS((pg_t *)pg) ==
		    PG_NUM_CPUS((pg_t *)pg->cmt_parent))) {
			cmt_hier_promote(pg, NULL);
		}
	}

	return (0);
}

int
cmt_pad_disable(pghw_type_t type)
{
	group_t		*hwset;
	group_iter_t	iter;
	pg_cmt_t	*pg;
	pg_cmt_t	*child;

	ASSERT(PGHW_IS_PM_DOMAIN(type));
	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((hwset = pghw_set_lookup(type)) == NULL) {
		/*
		 * Unable to find any instances of the specified type of
		 * power domain.
		 */
		return (-1);
	}
	/*
	 * Iterate over the power domains, setting the default dispatcher
	 * policy for performance optimization (load balancing).
	 */
	group_iter_init(&iter);
	while ((pg = group_iterate(hwset, &iter)) != NULL) {

		/*
		 * If the power domain has an only child that implements
		 * a policy other than load balancing, promote the child
		 * above the power domain to ensure its policy dominates.
		 */
		if (pg->cmt_children != NULL &&
		    GROUP_SIZE(pg->cmt_children) == 1) {
			child = GROUP_ACCESS(pg->cmt_children, 0);
			if ((child->cmt_policy & CMT_BALANCE) == 0) {
				cmt_hier_promote(child, NULL);
			}
		}
		pg->cmt_policy = CMT_BALANCE;
	}
	return (0);
}

/* ARGSUSED */
static void
cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
    kthread_t *new)
{
	pg_cmt_t	*cmt_pg = (pg_cmt_t *)pg;

	if (old == cp->cpu_idle_thread) {
		atomic_add_32(&cmt_pg->cmt_utilization, 1);
	} else if (new == cp->cpu_idle_thread) {
		atomic_add_32(&cmt_pg->cmt_utilization, -1);
	}
}

/*
 * Macro to test whether a thread is currently runnable on a CPU in a PG.
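 * That is, the thread is in the TS_RUN state, and the CPU of its dispatch
 * queue is in the PG's active CPU set.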
 */
#define	THREAD_RUNNABLE_IN_PG(t, pg)					\
	((t)->t_state == TS_RUN &&					\
	    (t)->t_disp_queue->disp_cpu &&				\
	    bitset_in_set(&(pg)->cmt_cpus_actv_set,			\
	    (t)->t_disp_queue->disp_cpu->cpu_seqid))

static void
cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
    kthread_t *new)
{
	pg_cmt_t	*cmt = (pg_cmt_t *)pg;
	cpupm_domain_t	*dom;
	uint32_t	u;

	if (old == cp->cpu_idle_thread) {
		ASSERT(new != cp->cpu_idle_thread);
		u = atomic_add_32_nv(&cmt->cmt_utilization, 1);
		if (u == 1) {
			/*
			 * Notify the CPU power manager that the domain
			 * is non-idle.
			 */
			dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
			cpupm_utilization_event(cp, now, dom,
			    CPUPM_DOM_BUSY_FROM_IDLE);
		}
	} else if (new == cp->cpu_idle_thread) {
		ASSERT(old != cp->cpu_idle_thread);
		u = atomic_add_32_nv(&cmt->cmt_utilization, -1);
		if (u == 0) {
			/*
			 * The domain is idle, notify the CPU power
			 * manager.
			 *
			 * Avoid notifying if the thread is simply migrating
			 * between CPUs in the domain.
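			 * (i.e. the outgoing thread is still runnable on a
			 * CPU in this PG, so the domain isn't really idle).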
13448906SEric.Saxe@Sun.COM */ 13458906SEric.Saxe@Sun.COM if (!THREAD_RUNNABLE_IN_PG(old, cmt)) { 13468906SEric.Saxe@Sun.COM dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle; 13478906SEric.Saxe@Sun.COM cpupm_utilization_event(cp, now, dom, 13488906SEric.Saxe@Sun.COM CPUPM_DOM_IDLE_FROM_BUSY); 13498906SEric.Saxe@Sun.COM } 13508906SEric.Saxe@Sun.COM } 13518906SEric.Saxe@Sun.COM } 13528906SEric.Saxe@Sun.COM } 13538906SEric.Saxe@Sun.COM 13548906SEric.Saxe@Sun.COM /* ARGSUSED */ 13558906SEric.Saxe@Sun.COM static void 13568906SEric.Saxe@Sun.COM cmt_ev_thread_remain_pwr(pg_t *pg, cpu_t *cp, kthread_t *t) 13578906SEric.Saxe@Sun.COM { 13588906SEric.Saxe@Sun.COM pg_cmt_t *cmt = (pg_cmt_t *)pg; 13598906SEric.Saxe@Sun.COM cpupm_domain_t *dom; 13608906SEric.Saxe@Sun.COM 13618906SEric.Saxe@Sun.COM dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle; 13628906SEric.Saxe@Sun.COM cpupm_utilization_event(cp, (hrtime_t)0, dom, CPUPM_DOM_REMAIN_BUSY); 13638906SEric.Saxe@Sun.COM } 13648906SEric.Saxe@Sun.COM 13658906SEric.Saxe@Sun.COM /* 13668906SEric.Saxe@Sun.COM * Return the name of the CMT scheduling policy 13678906SEric.Saxe@Sun.COM * being implemented across this PG 13688906SEric.Saxe@Sun.COM */ 13698906SEric.Saxe@Sun.COM static char * 13708906SEric.Saxe@Sun.COM pg_cmt_policy_name(pg_t *pg) 13718906SEric.Saxe@Sun.COM { 13728906SEric.Saxe@Sun.COM pg_cmt_policy_t policy; 13738906SEric.Saxe@Sun.COM 13748906SEric.Saxe@Sun.COM policy = ((pg_cmt_t *)pg)->cmt_policy; 13758906SEric.Saxe@Sun.COM 13768906SEric.Saxe@Sun.COM if (policy & CMT_AFFINITY) { 13778906SEric.Saxe@Sun.COM if (policy & CMT_BALANCE) 13788906SEric.Saxe@Sun.COM return ("Load Balancing & Affinity"); 13798906SEric.Saxe@Sun.COM else if (policy & CMT_COALESCE) 13808906SEric.Saxe@Sun.COM return ("Load Coalescence & Affinity"); 13818906SEric.Saxe@Sun.COM else 13828906SEric.Saxe@Sun.COM return ("Affinity"); 13838906SEric.Saxe@Sun.COM } else { 13848906SEric.Saxe@Sun.COM if (policy & CMT_BALANCE) 13858906SEric.Saxe@Sun.COM return ("Load Balancing"); 13868906SEric.Saxe@Sun.COM else if (policy & CMT_COALESCE) 13878906SEric.Saxe@Sun.COM return ("Load Coalescence"); 13888906SEric.Saxe@Sun.COM else 13898906SEric.Saxe@Sun.COM return ("None"); 13908906SEric.Saxe@Sun.COM } 13918906SEric.Saxe@Sun.COM } 13928906SEric.Saxe@Sun.COM 13938906SEric.Saxe@Sun.COM /* 13948906SEric.Saxe@Sun.COM * Prune PG, and all other instances of PG's hardware sharing relationship 13958906SEric.Saxe@Sun.COM * from the PG hierarchy. 1396*9438SEric.Saxe@Sun.COM * 1397*9438SEric.Saxe@Sun.COM * This routine operates on the CPU specific processor group data (for the CPUs 1398*9438SEric.Saxe@Sun.COM * in the PG being pruned), and may be invoked from a context where one CPU's 1399*9438SEric.Saxe@Sun.COM * PG data is under construction. In this case the argument "pgdata", if not 1400*9438SEric.Saxe@Sun.COM * NULL, is a reference to the CPU's under-construction PG data. 
14018906SEric.Saxe@Sun.COM */ 14028906SEric.Saxe@Sun.COM static int 1403*9438SEric.Saxe@Sun.COM pg_cmt_prune(pg_cmt_t *pg_bad, pg_cmt_t **lineage, int *sz, cpu_pg_t *pgdata) 14048906SEric.Saxe@Sun.COM { 14058906SEric.Saxe@Sun.COM group_t *hwset, *children; 14068906SEric.Saxe@Sun.COM int i, j, r, size = *sz; 14078906SEric.Saxe@Sun.COM group_iter_t hw_iter, child_iter; 14088906SEric.Saxe@Sun.COM pg_cpu_itr_t cpu_iter; 14098906SEric.Saxe@Sun.COM pg_cmt_t *pg, *child; 14108906SEric.Saxe@Sun.COM cpu_t *cpu; 14118906SEric.Saxe@Sun.COM int cap_needed; 14128906SEric.Saxe@Sun.COM pghw_type_t hw; 14138906SEric.Saxe@Sun.COM 14148906SEric.Saxe@Sun.COM ASSERT(MUTEX_HELD(&cpu_lock)); 14158906SEric.Saxe@Sun.COM 14168906SEric.Saxe@Sun.COM hw = ((pghw_t *)pg_bad)->pghw_hw; 14178906SEric.Saxe@Sun.COM 14188906SEric.Saxe@Sun.COM if (hw == PGHW_POW_ACTIVE) { 14198906SEric.Saxe@Sun.COM cmn_err(CE_NOTE, "!Active CPUPM domain groups look suspect. " 14208906SEric.Saxe@Sun.COM "Event Based CPUPM Unavailable"); 14218906SEric.Saxe@Sun.COM } else if (hw == PGHW_POW_IDLE) { 14228906SEric.Saxe@Sun.COM cmn_err(CE_NOTE, "!Idle CPUPM domain groups look suspect. " 14238906SEric.Saxe@Sun.COM "Dispatcher assisted CPUPM disabled."); 14248906SEric.Saxe@Sun.COM } 14258906SEric.Saxe@Sun.COM 14268906SEric.Saxe@Sun.COM /* 14278906SEric.Saxe@Sun.COM * Find and eliminate the PG from the lineage. 14288906SEric.Saxe@Sun.COM */ 14298906SEric.Saxe@Sun.COM for (i = 0; i < size; i++) { 14308906SEric.Saxe@Sun.COM if (lineage[i] == pg_bad) { 14318906SEric.Saxe@Sun.COM for (j = i; j < size - 1; j++) 14328906SEric.Saxe@Sun.COM lineage[j] = lineage[j + 1]; 14338906SEric.Saxe@Sun.COM *sz = size - 1; 14348906SEric.Saxe@Sun.COM break; 14358906SEric.Saxe@Sun.COM } 14368906SEric.Saxe@Sun.COM } 14378906SEric.Saxe@Sun.COM 14388906SEric.Saxe@Sun.COM /* 14398906SEric.Saxe@Sun.COM * We'll prune all instances of the hardware sharing relationship 14408906SEric.Saxe@Sun.COM * represented by pg. But before we do that (and pause CPUs) we need 14418906SEric.Saxe@Sun.COM * to ensure the hierarchy's groups are properly sized. 14428906SEric.Saxe@Sun.COM */ 14438906SEric.Saxe@Sun.COM hwset = pghw_set_lookup(hw); 14448906SEric.Saxe@Sun.COM 14458906SEric.Saxe@Sun.COM /* 14468906SEric.Saxe@Sun.COM * Blacklist the hardware so that future groups won't be created. 14478906SEric.Saxe@Sun.COM */ 14488906SEric.Saxe@Sun.COM cmt_hw_blacklisted[hw] = 1; 14498906SEric.Saxe@Sun.COM 14508906SEric.Saxe@Sun.COM /* 14518906SEric.Saxe@Sun.COM * For each of the PGs being pruned, ensure sufficient capacity in 14528906SEric.Saxe@Sun.COM * the siblings set for the PG's children 14538906SEric.Saxe@Sun.COM */ 14548906SEric.Saxe@Sun.COM group_iter_init(&hw_iter); 14558906SEric.Saxe@Sun.COM while ((pg = group_iterate(hwset, &hw_iter)) != NULL) { 14568906SEric.Saxe@Sun.COM /* 14578906SEric.Saxe@Sun.COM * PG is being pruned, but if it is bringing up more than 14588906SEric.Saxe@Sun.COM * one child, ask for more capacity in the siblings group. 
14598906SEric.Saxe@Sun.COM */ 14608906SEric.Saxe@Sun.COM cap_needed = 0; 14618906SEric.Saxe@Sun.COM if (pg->cmt_children && 14628906SEric.Saxe@Sun.COM GROUP_SIZE(pg->cmt_children) > 1) { 14638906SEric.Saxe@Sun.COM cap_needed = GROUP_SIZE(pg->cmt_children) - 1; 14648906SEric.Saxe@Sun.COM 14658906SEric.Saxe@Sun.COM group_expand(pg->cmt_siblings, 14668906SEric.Saxe@Sun.COM GROUP_SIZE(pg->cmt_siblings) + cap_needed); 14678408SEric.Saxe@Sun.COM 14688408SEric.Saxe@Sun.COM /* 14698906SEric.Saxe@Sun.COM * If this is a top level group, also ensure the 14708906SEric.Saxe@Sun.COM * capacity in the root lgrp level CMT grouping. 14718408SEric.Saxe@Sun.COM */ 14728906SEric.Saxe@Sun.COM if (pg->cmt_parent == NULL && 14738906SEric.Saxe@Sun.COM pg->cmt_siblings != &cmt_root->cl_pgs) { 14748906SEric.Saxe@Sun.COM group_expand(&cmt_root->cl_pgs, 14758906SEric.Saxe@Sun.COM GROUP_SIZE(&cmt_root->cl_pgs) + cap_needed); 14768408SEric.Saxe@Sun.COM } 14778906SEric.Saxe@Sun.COM } 14788906SEric.Saxe@Sun.COM } 14798408SEric.Saxe@Sun.COM 14808906SEric.Saxe@Sun.COM /* 14818906SEric.Saxe@Sun.COM * We're operating on the PG hierarchy. Pause CPUs to ensure 14828906SEric.Saxe@Sun.COM * exclusivity with respect to the dispatcher. 14838906SEric.Saxe@Sun.COM */ 14848906SEric.Saxe@Sun.COM pause_cpus(NULL); 14858408SEric.Saxe@Sun.COM 14868906SEric.Saxe@Sun.COM /* 14878906SEric.Saxe@Sun.COM * Prune all PG instances of the hardware sharing relationship 14888906SEric.Saxe@Sun.COM * represented by pg. 14898906SEric.Saxe@Sun.COM */ 14908906SEric.Saxe@Sun.COM group_iter_init(&hw_iter); 14918906SEric.Saxe@Sun.COM while ((pg = group_iterate(hwset, &hw_iter)) != NULL) { 14928408SEric.Saxe@Sun.COM 14938408SEric.Saxe@Sun.COM /* 14948906SEric.Saxe@Sun.COM * Remove PG from its group of siblings, if it's there. 14958906SEric.Saxe@Sun.COM */ 14968906SEric.Saxe@Sun.COM if (pg->cmt_siblings) { 14978906SEric.Saxe@Sun.COM (void) group_remove(pg->cmt_siblings, pg, GRP_NORESIZE); 14988906SEric.Saxe@Sun.COM } 14998906SEric.Saxe@Sun.COM if (pg->cmt_parent == NULL && 15008906SEric.Saxe@Sun.COM pg->cmt_siblings != &cmt_root->cl_pgs) { 15018906SEric.Saxe@Sun.COM (void) group_remove(&cmt_root->cl_pgs, pg, 15028906SEric.Saxe@Sun.COM GRP_NORESIZE); 15038906SEric.Saxe@Sun.COM } 15048906SEric.Saxe@Sun.COM /* 15059036SEric.Saxe@Sun.COM * Move PG's children from its children set to its parent's 15069036SEric.Saxe@Sun.COM * children set. Note that the parent's children set and PG's 15079036SEric.Saxe@Sun.COM * siblings set are the same thing. 15089036SEric.Saxe@Sun.COM * 15099036SEric.Saxe@Sun.COM * Because we are iterating over the same group that we are 15109036SEric.Saxe@Sun.COM * operating on (removing the children), first add all of PG's 15119036SEric.Saxe@Sun.COM * children to the parent's children set, and once we are done 15129036SEric.Saxe@Sun.COM * iterating, empty PG's children set.
15138906SEric.Saxe@Sun.COM */ 15148906SEric.Saxe@Sun.COM if (pg->cmt_children != NULL) { 15158906SEric.Saxe@Sun.COM children = pg->cmt_children; 15168906SEric.Saxe@Sun.COM 15178906SEric.Saxe@Sun.COM group_iter_init(&child_iter); 15188906SEric.Saxe@Sun.COM while ((child = group_iterate(children, &child_iter)) 15198906SEric.Saxe@Sun.COM != NULL) { 15209036SEric.Saxe@Sun.COM if (pg->cmt_siblings != NULL) { 15218906SEric.Saxe@Sun.COM r = group_add(pg->cmt_siblings, child, 15228906SEric.Saxe@Sun.COM GRP_NORESIZE); 15238906SEric.Saxe@Sun.COM ASSERT(r == 0); 15248906SEric.Saxe@Sun.COM } 15258906SEric.Saxe@Sun.COM } 15269036SEric.Saxe@Sun.COM group_empty(pg->cmt_children); 15278906SEric.Saxe@Sun.COM } 15288906SEric.Saxe@Sun.COM 15298906SEric.Saxe@Sun.COM /* 15308906SEric.Saxe@Sun.COM * Reset the callbacks to the defaults 15318906SEric.Saxe@Sun.COM */ 15328906SEric.Saxe@Sun.COM pg_callback_set_defaults((pg_t *)pg); 15338906SEric.Saxe@Sun.COM 15348906SEric.Saxe@Sun.COM /* 15358906SEric.Saxe@Sun.COM * Update all the CPU lineages in each of PG's CPUs 15368408SEric.Saxe@Sun.COM */ 15378906SEric.Saxe@Sun.COM PG_CPU_ITR_INIT(pg, cpu_iter); 15388906SEric.Saxe@Sun.COM while ((cpu = pg_cpu_next(&cpu_iter)) != NULL) { 15398906SEric.Saxe@Sun.COM pg_cmt_t *cpu_pg; 15408906SEric.Saxe@Sun.COM group_iter_t liter; /* Iterator for the lineage */ 1541*9438SEric.Saxe@Sun.COM cpu_pg_t *cpd; /* CPU's PG data */ 1542*9438SEric.Saxe@Sun.COM 1543*9438SEric.Saxe@Sun.COM /* 1544*9438SEric.Saxe@Sun.COM * If the CPU's lineage is still under construction, it 1545*9438SEric.Saxe@Sun.COM * references the bootstrap CPU PG data structure. 1546*9438SEric.Saxe@Sun.COM */ 1547*9438SEric.Saxe@Sun.COM if (pg_cpu_is_bootstrapped(cpu)) 1548*9438SEric.Saxe@Sun.COM cpd = pgdata; 1549*9438SEric.Saxe@Sun.COM else 1550*9438SEric.Saxe@Sun.COM cpd = cpu->cpu_pg; 15518906SEric.Saxe@Sun.COM 15528906SEric.Saxe@Sun.COM /* 15538906SEric.Saxe@Sun.COM * Iterate over the CPU's PGs updating the children 15548906SEric.Saxe@Sun.COM * of the PG being pruned, since they have a new 15558906SEric.Saxe@Sun.COM * parent and siblings set.
15568906SEric.Saxe@Sun.COM */ 15578906SEric.Saxe@Sun.COM group_iter_init(&liter); 1558*9438SEric.Saxe@Sun.COM while ((cpu_pg = group_iterate(&cpd->pgs, 1559*9438SEric.Saxe@Sun.COM &liter)) != NULL) { 15608906SEric.Saxe@Sun.COM if (cpu_pg->cmt_parent == pg) { 15618906SEric.Saxe@Sun.COM cpu_pg->cmt_parent = pg->cmt_parent; 15628906SEric.Saxe@Sun.COM cpu_pg->cmt_siblings = pg->cmt_siblings; 15638906SEric.Saxe@Sun.COM } 15648906SEric.Saxe@Sun.COM } 15658906SEric.Saxe@Sun.COM 15668906SEric.Saxe@Sun.COM /* 15678906SEric.Saxe@Sun.COM * Update the CPU's lineages 15688906SEric.Saxe@Sun.COM */ 1569*9438SEric.Saxe@Sun.COM (void) group_remove(&cpd->pgs, pg, GRP_NORESIZE); 1570*9438SEric.Saxe@Sun.COM (void) group_remove(&cpd->cmt_pgs, pg, GRP_NORESIZE); 15718408SEric.Saxe@Sun.COM } 15728906SEric.Saxe@Sun.COM } 15738906SEric.Saxe@Sun.COM start_cpus(); 15748906SEric.Saxe@Sun.COM return (0); 15758906SEric.Saxe@Sun.COM } 15768906SEric.Saxe@Sun.COM 15778906SEric.Saxe@Sun.COM /* 15788906SEric.Saxe@Sun.COM * Disable CMT scheduling 15798906SEric.Saxe@Sun.COM */ 15808906SEric.Saxe@Sun.COM static void 15818906SEric.Saxe@Sun.COM pg_cmt_disable(void) 15828906SEric.Saxe@Sun.COM { 1583*9438SEric.Saxe@Sun.COM cpu_t *cpu; 1584*9438SEric.Saxe@Sun.COM 1585*9438SEric.Saxe@Sun.COM ASSERT(MUTEX_HELD(&cpu_lock)); 15868906SEric.Saxe@Sun.COM 15878906SEric.Saxe@Sun.COM pause_cpus(NULL); 15888906SEric.Saxe@Sun.COM cpu = cpu_list; 15898906SEric.Saxe@Sun.COM 15908906SEric.Saxe@Sun.COM do { 15918906SEric.Saxe@Sun.COM if (cpu->cpu_pg) 15928906SEric.Saxe@Sun.COM group_empty(&cpu->cpu_pg->cmt_pgs); 15938906SEric.Saxe@Sun.COM } while ((cpu = cpu->cpu_next) != cpu_list); 15948906SEric.Saxe@Sun.COM 15958906SEric.Saxe@Sun.COM cmt_sched_disabled = 1; 15968906SEric.Saxe@Sun.COM start_cpus(); 15978906SEric.Saxe@Sun.COM cmn_err(CE_NOTE, "!CMT thread placement optimizations unavailable"); 15988906SEric.Saxe@Sun.COM } 15998408SEric.Saxe@Sun.COM 16009036SEric.Saxe@Sun.COM /* 16019036SEric.Saxe@Sun.COM * CMT lineage validation 16029036SEric.Saxe@Sun.COM * 16039036SEric.Saxe@Sun.COM * This routine is invoked by pg_cmt_cpu_init() to validate the integrity 16049036SEric.Saxe@Sun.COM * of the PGs in a CPU's lineage. This is necessary because it's possible that 16059036SEric.Saxe@Sun.COM * some groupings (power domain groupings in particular) may be defined by 16069036SEric.Saxe@Sun.COM * sources that are buggy (e.g. BIOS bugs). In such cases, it may not be 16079036SEric.Saxe@Sun.COM * possible to integrate those groupings into the CMT PG hierarchy, if doing 16089036SEric.Saxe@Sun.COM * so would violate the subset invariant of the hierarchy, which says that 16099036SEric.Saxe@Sun.COM * a PG must be a subset of its parent (if it has one). 16109036SEric.Saxe@Sun.COM * 16119036SEric.Saxe@Sun.COM * pg_cmt_lineage_validate()'s purpose is to detect grouping definitions that 16129036SEric.Saxe@Sun.COM * would result in a violation of this invariant. If a violation is found, 16139036SEric.Saxe@Sun.COM * and the PG is of a grouping type whose definition is known to originate from 16149036SEric.Saxe@Sun.COM * suspect sources (BIOS), then pg_cmt_prune() will be invoked to prune the 16159036SEric.Saxe@Sun.COM * PG (and all other instances of PG's sharing relationship type) from the 16169036SEric.Saxe@Sun.COM * hierarchy. Further, future instances of that sharing relationship type won't 16179036SEric.Saxe@Sun.COM * be instantiated.
If the grouping definition doesn't originate from suspect 16189036SEric.Saxe@Sun.COM * sources, then pg_cmt_disable() will be invoked to log an error, and disable 16199036SEric.Saxe@Sun.COM * CMT scheduling altogether. 16209036SEric.Saxe@Sun.COM * 16219036SEric.Saxe@Sun.COM * This routine is invoked after the CPU has been added to the PGs in which 16229036SEric.Saxe@Sun.COM * it belongs, but before those PGs have been added to (or had their place 16239036SEric.Saxe@Sun.COM * adjusted in) the CMT PG hierarchy. 16249036SEric.Saxe@Sun.COM * 16259036SEric.Saxe@Sun.COM * The first argument is the CPU's PG lineage (essentially an array of PGs in 16269036SEric.Saxe@Sun.COM * which the CPU belongs) that has already been sorted in ascending order 16279036SEric.Saxe@Sun.COM * by CPU count. Some of the PGs in the CPU's lineage may already have other 16289036SEric.Saxe@Sun.COM * CPUs in them, and have already been integrated into the CMT hierarchy. 16299036SEric.Saxe@Sun.COM * 16309036SEric.Saxe@Sun.COM * The addition of this new CPU to these pre-existing PGs means that those 16319036SEric.Saxe@Sun.COM * PGs may need to be promoted up in the hierarchy to satisfy the subset 16329036SEric.Saxe@Sun.COM * invariant. In addition to testing the subset invariant for the lineage, 16339036SEric.Saxe@Sun.COM * this routine also verifies that the addition of the new CPU to the 16349036SEric.Saxe@Sun.COM * existing PGs wouldn't cause the subset invariant to be violated in 16359036SEric.Saxe@Sun.COM * the existing lineages. 16369036SEric.Saxe@Sun.COM * 16379036SEric.Saxe@Sun.COM * This routine will normally return one of the following: 16389036SEric.Saxe@Sun.COM * CMT_LINEAGE_VALID - There were no problems detected with the lineage. 16399036SEric.Saxe@Sun.COM * CMT_LINEAGE_REPAIRED - Problems were detected, but repaired via pruning. 16409036SEric.Saxe@Sun.COM * 16419036SEric.Saxe@Sun.COM * Otherwise, this routine will return a value indicating which error it 16429036SEric.Saxe@Sun.COM * was unable to recover from (and set cmt_lineage_status along the way). 1643*9438SEric.Saxe@Sun.COM * 1644*9438SEric.Saxe@Sun.COM * 1645*9438SEric.Saxe@Sun.COM * This routine operates on the CPU specific processor group data (for the CPU 1646*9438SEric.Saxe@Sun.COM * whose lineage is being validated), which is under construction. 1647*9438SEric.Saxe@Sun.COM * "pgdata" is a reference to the CPU's under-construction PG data. 1648*9438SEric.Saxe@Sun.COM * This routine must be careful to operate only on "pgdata", and not cp->cpu_pg.
16499036SEric.Saxe@Sun.COM */ 16509036SEric.Saxe@Sun.COM static cmt_lineage_validation_t 1651*9438SEric.Saxe@Sun.COM pg_cmt_lineage_validate(pg_cmt_t **lineage, int *sz, cpu_pg_t *pgdata) 16528906SEric.Saxe@Sun.COM { 16539036SEric.Saxe@Sun.COM int i, j, size; 16549036SEric.Saxe@Sun.COM pg_cmt_t *pg, *pg_next, *pg_bad, *pg_tmp; 16558906SEric.Saxe@Sun.COM cpu_t *cp; 16568906SEric.Saxe@Sun.COM pg_cpu_itr_t cpu_iter; 16579036SEric.Saxe@Sun.COM lgrp_handle_t lgrp; 16588906SEric.Saxe@Sun.COM 16598906SEric.Saxe@Sun.COM ASSERT(MUTEX_HELD(&cpu_lock)); 16608906SEric.Saxe@Sun.COM 16618906SEric.Saxe@Sun.COM revalidate: 16628906SEric.Saxe@Sun.COM size = *sz; 16638906SEric.Saxe@Sun.COM pg_bad = NULL; 16649036SEric.Saxe@Sun.COM lgrp = LGRP_NULL_HANDLE; 16659036SEric.Saxe@Sun.COM for (i = 0; i < size; i++) { 16668906SEric.Saxe@Sun.COM 16678906SEric.Saxe@Sun.COM pg = lineage[i]; 16689036SEric.Saxe@Sun.COM if (i < size - 1) 16699036SEric.Saxe@Sun.COM pg_next = lineage[i + 1]; 16709036SEric.Saxe@Sun.COM else 16719036SEric.Saxe@Sun.COM pg_next = NULL; 16728408SEric.Saxe@Sun.COM 16738906SEric.Saxe@Sun.COM /* 16748906SEric.Saxe@Sun.COM * We assume that the lineage has already been sorted 16758906SEric.Saxe@Sun.COM * by the number of CPUs. In fact, we depend on it. 16768906SEric.Saxe@Sun.COM */ 16779036SEric.Saxe@Sun.COM ASSERT(pg_next == NULL || 16789036SEric.Saxe@Sun.COM (PG_NUM_CPUS((pg_t *)pg) <= PG_NUM_CPUS((pg_t *)pg_next))); 16798906SEric.Saxe@Sun.COM 16808906SEric.Saxe@Sun.COM /* 16819036SEric.Saxe@Sun.COM * Check to make sure that the existing parent of PG (if any) 16829036SEric.Saxe@Sun.COM * is either in the PG's lineage, or the PG has more CPUs than 16839036SEric.Saxe@Sun.COM * its existing parent and can and should be promoted above its 16849036SEric.Saxe@Sun.COM * parent. 16859036SEric.Saxe@Sun.COM * 16869036SEric.Saxe@Sun.COM * Since the PG topology is in the middle of being changed, we 16879036SEric.Saxe@Sun.COM * need to check whether the PG's existing parent (if any) is 16889036SEric.Saxe@Sun.COM * part of its lineage (and therefore should contain the new 16899036SEric.Saxe@Sun.COM * CPU). If not, it means that the addition of the new CPU 16909036SEric.Saxe@Sun.COM * should have made this PG have more CPUs than its parent, and 16919036SEric.Saxe@Sun.COM * this PG should be promoted to be above its existing parent 16929036SEric.Saxe@Sun.COM * now. We need to verify all of this to defend against a buggy 16939036SEric.Saxe@Sun.COM * BIOS giving bad power domain CPU groupings. Sigh. 16949036SEric.Saxe@Sun.COM */ 16959036SEric.Saxe@Sun.COM if (pg->cmt_parent) { 16969036SEric.Saxe@Sun.COM /* 16979036SEric.Saxe@Sun.COM * Determine if cmt_parent is in this lineage 16989036SEric.Saxe@Sun.COM */ 16999036SEric.Saxe@Sun.COM for (j = 0; j < size; j++) { 17009036SEric.Saxe@Sun.COM pg_tmp = lineage[j]; 17019036SEric.Saxe@Sun.COM if (pg_tmp == pg->cmt_parent) 17029036SEric.Saxe@Sun.COM break; 17039036SEric.Saxe@Sun.COM } 17049036SEric.Saxe@Sun.COM if (pg_tmp != pg->cmt_parent) { 17059036SEric.Saxe@Sun.COM /* 17069036SEric.Saxe@Sun.COM * cmt_parent is not in the lineage, verify 17079036SEric.Saxe@Sun.COM * it is a proper subset of PG. 17089036SEric.Saxe@Sun.COM */ 17099036SEric.Saxe@Sun.COM if (PG_NUM_CPUS((pg_t *)pg->cmt_parent) >= 17109036SEric.Saxe@Sun.COM PG_NUM_CPUS((pg_t *)pg)) { 17119036SEric.Saxe@Sun.COM /* 17129036SEric.Saxe@Sun.COM * Not a proper subset if pg has less 17139036SEric.Saxe@Sun.COM * CPUs than cmt_parent... 
17149036SEric.Saxe@Sun.COM */ 17159036SEric.Saxe@Sun.COM cmt_lineage_status = 17169036SEric.Saxe@Sun.COM CMT_LINEAGE_NON_PROMOTABLE; 17179036SEric.Saxe@Sun.COM goto handle_error; 17189036SEric.Saxe@Sun.COM } 17199036SEric.Saxe@Sun.COM } 17209036SEric.Saxe@Sun.COM } 17219036SEric.Saxe@Sun.COM 17229036SEric.Saxe@Sun.COM /* 17239036SEric.Saxe@Sun.COM * Walk each of the CPUs in the PG's group and perform 17249036SEric.Saxe@Sun.COM * consistency checks along the way. 17258906SEric.Saxe@Sun.COM */ 17268906SEric.Saxe@Sun.COM PG_CPU_ITR_INIT((pg_t *)pg, cpu_iter); 17278906SEric.Saxe@Sun.COM while ((cp = pg_cpu_next(&cpu_iter)) != NULL) { 17289036SEric.Saxe@Sun.COM /* 17299036SEric.Saxe@Sun.COM * Verify that there aren't any CPUs contained in PG 17309036SEric.Saxe@Sun.COM * that the next PG in the lineage (which is larger 17319036SEric.Saxe@Sun.COM * or same size) doesn't also contain. 17329036SEric.Saxe@Sun.COM */ 17339036SEric.Saxe@Sun.COM if (pg_next != NULL && 17349036SEric.Saxe@Sun.COM pg_cpu_find((pg_t *)pg_next, cp) == B_FALSE) { 17358906SEric.Saxe@Sun.COM cmt_lineage_status = CMT_LINEAGE_NON_CONCENTRIC; 17368906SEric.Saxe@Sun.COM goto handle_error; 17378906SEric.Saxe@Sun.COM } 17389036SEric.Saxe@Sun.COM 17399036SEric.Saxe@Sun.COM /* 17409036SEric.Saxe@Sun.COM * Verify that all the CPUs in the PG are in the same 17419036SEric.Saxe@Sun.COM * lgroup. 17429036SEric.Saxe@Sun.COM */ 17439036SEric.Saxe@Sun.COM if (lgrp == LGRP_NULL_HANDLE) { 17449036SEric.Saxe@Sun.COM lgrp = lgrp_plat_cpu_to_hand(cp->cpu_id); 17459036SEric.Saxe@Sun.COM } else if (lgrp_plat_cpu_to_hand(cp->cpu_id) != lgrp) { 17469036SEric.Saxe@Sun.COM cmt_lineage_status = CMT_LINEAGE_PG_SPANS_LGRPS; 17479036SEric.Saxe@Sun.COM goto handle_error; 17489036SEric.Saxe@Sun.COM } 17498906SEric.Saxe@Sun.COM } 17508408SEric.Saxe@Sun.COM } 17518408SEric.Saxe@Sun.COM 17528906SEric.Saxe@Sun.COM handle_error: 17539036SEric.Saxe@Sun.COM /* 17549036SEric.Saxe@Sun.COM * Some of these validation errors can result when the CPU grouping 17559036SEric.Saxe@Sun.COM * information is derived from buggy sources (for example, incorrect 17569036SEric.Saxe@Sun.COM * ACPI tables on x86 systems). 17579036SEric.Saxe@Sun.COM * 17589036SEric.Saxe@Sun.COM * We'll try to recover in such cases by pruning out the illegal 17599036SEric.Saxe@Sun.COM * groupings from the PG hierarchy, which means that we won't optimize 17609036SEric.Saxe@Sun.COM * for those levels, but we will for the remaining ones. 17619036SEric.Saxe@Sun.COM */ 17628906SEric.Saxe@Sun.COM switch (cmt_lineage_status) { 17638906SEric.Saxe@Sun.COM case CMT_LINEAGE_VALID: 17648906SEric.Saxe@Sun.COM case CMT_LINEAGE_REPAIRED: 17658906SEric.Saxe@Sun.COM break; 17669036SEric.Saxe@Sun.COM case CMT_LINEAGE_PG_SPANS_LGRPS: 17679036SEric.Saxe@Sun.COM /* 17689036SEric.Saxe@Sun.COM * We've detected a PG whose CPUs span lgroups. 17699036SEric.Saxe@Sun.COM * 17709036SEric.Saxe@Sun.COM * This isn't supported, as the dispatcher isn't allowed 17719036SEric.Saxe@Sun.COM * to do CMT thread placement across lgroups, as this would 17729036SEric.Saxe@Sun.COM * conflict with policies implementing MPO thread affinity. 17739036SEric.Saxe@Sun.COM * 17749036SEric.Saxe@Sun.COM * The handling for this falls through to the next case.
17759036SEric.Saxe@Sun.COM */ 17769036SEric.Saxe@Sun.COM case CMT_LINEAGE_NON_PROMOTABLE: 17779036SEric.Saxe@Sun.COM /* 17789036SEric.Saxe@Sun.COM * We've detected a PG that already exists in another CPU's 17799036SEric.Saxe@Sun.COM * lineage that cannot legally be promoted into place 17809036SEric.Saxe@Sun.COM * without breaking the invariants of the hierarchy. 17819036SEric.Saxe@Sun.COM */ 17829036SEric.Saxe@Sun.COM if (PG_CMT_HW_SUSPECT(((pghw_t *)pg)->pghw_hw)) { 1783*9438SEric.Saxe@Sun.COM if (pg_cmt_prune(pg, lineage, sz, pgdata) == 0) { 17849036SEric.Saxe@Sun.COM cmt_lineage_status = CMT_LINEAGE_REPAIRED; 17859036SEric.Saxe@Sun.COM goto revalidate; 17869036SEric.Saxe@Sun.COM } 17879036SEric.Saxe@Sun.COM } 17889036SEric.Saxe@Sun.COM /* 17899036SEric.Saxe@Sun.COM * Something went wrong trying to prune out the bad level. 17909036SEric.Saxe@Sun.COM * Disable CMT scheduling altogether. 17919036SEric.Saxe@Sun.COM */ 17929036SEric.Saxe@Sun.COM pg_cmt_disable(); 17939036SEric.Saxe@Sun.COM break; 17948906SEric.Saxe@Sun.COM case CMT_LINEAGE_NON_CONCENTRIC: 17958408SEric.Saxe@Sun.COM /* 17969036SEric.Saxe@Sun.COM * We've detected a non-concentric PG lineage, which means that 17979036SEric.Saxe@Sun.COM * there's a PG in the lineage that has CPUs that the next PG 17989036SEric.Saxe@Sun.COM * over in the lineage (which is the same size or larger) 17999036SEric.Saxe@Sun.COM * doesn't have. 18008906SEric.Saxe@Sun.COM * 18019036SEric.Saxe@Sun.COM * In this case, we examine the two PGs to see if either 18029036SEric.Saxe@Sun.COM * grouping is defined by potentially buggy sources. 18038906SEric.Saxe@Sun.COM * 18048906SEric.Saxe@Sun.COM * If one has fewer CPUs than the other, and contains CPUs 18058906SEric.Saxe@Sun.COM * not found in the parent, and it is an untrusted enumeration, 18068906SEric.Saxe@Sun.COM * then prune it. If both have the same number of CPUs, then 18078906SEric.Saxe@Sun.COM * prune the one that is untrusted. 18088906SEric.Saxe@Sun.COM * 18098906SEric.Saxe@Sun.COM * This process repeats until we have a concentric lineage, 18108906SEric.Saxe@Sun.COM * or we would have to prune out a level derived from what we 18118906SEric.Saxe@Sun.COM * thought was a reliable source, in which case CMT scheduling 18128906SEric.Saxe@Sun.COM * is disabled altogether. 18138408SEric.Saxe@Sun.COM */ 18149036SEric.Saxe@Sun.COM if ((PG_NUM_CPUS((pg_t *)pg) < PG_NUM_CPUS((pg_t *)pg_next)) && 18158906SEric.Saxe@Sun.COM (PG_CMT_HW_SUSPECT(((pghw_t *)pg)->pghw_hw))) { 18168906SEric.Saxe@Sun.COM pg_bad = pg; 18178906SEric.Saxe@Sun.COM } else if (PG_NUM_CPUS((pg_t *)pg) == 18189036SEric.Saxe@Sun.COM PG_NUM_CPUS((pg_t *)pg_next)) { 18199036SEric.Saxe@Sun.COM if (PG_CMT_HW_SUSPECT(((pghw_t *)pg_next)->pghw_hw)) { 18209036SEric.Saxe@Sun.COM pg_bad = pg_next; 18218906SEric.Saxe@Sun.COM } else if (PG_CMT_HW_SUSPECT(((pghw_t *)pg)->pghw_hw)) { 18228906SEric.Saxe@Sun.COM pg_bad = pg; 18238906SEric.Saxe@Sun.COM } 18248906SEric.Saxe@Sun.COM } 18258906SEric.Saxe@Sun.COM if (pg_bad) { 1826*9438SEric.Saxe@Sun.COM if (pg_cmt_prune(pg_bad, lineage, sz, pgdata) == 0) { 18278906SEric.Saxe@Sun.COM cmt_lineage_status = CMT_LINEAGE_REPAIRED; 18288906SEric.Saxe@Sun.COM goto revalidate; 18298408SEric.Saxe@Sun.COM } 18308906SEric.Saxe@Sun.COM } 18319036SEric.Saxe@Sun.COM /* 18329036SEric.Saxe@Sun.COM * Something went wrong trying to identify and/or prune out 18339036SEric.Saxe@Sun.COM * the bad level. Disable CMT scheduling altogether.
18349036SEric.Saxe@Sun.COM */ 18359036SEric.Saxe@Sun.COM pg_cmt_disable(); 18369036SEric.Saxe@Sun.COM break; 18378906SEric.Saxe@Sun.COM default: 18388906SEric.Saxe@Sun.COM /* 18399036SEric.Saxe@Sun.COM * If we're here, we've encountered a validation error for 18409036SEric.Saxe@Sun.COM * which we don't know how to recover. In this case, disable 18419036SEric.Saxe@Sun.COM * CMT scheduling altogether. 18428906SEric.Saxe@Sun.COM */ 18439036SEric.Saxe@Sun.COM cmt_lineage_status = CMT_LINEAGE_UNRECOVERABLE; 18448906SEric.Saxe@Sun.COM pg_cmt_disable(); 18458408SEric.Saxe@Sun.COM } 18469036SEric.Saxe@Sun.COM return (cmt_lineage_status); 18478408SEric.Saxe@Sun.COM } 1848
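/*
 * Illustrative sketch only -- not part of the original source. It shows
 * roughly how a caller (such as pg_cmt_cpu_init(), per the block comment
 * above) might consume pg_cmt_lineage_validate()'s return value. The
 * function name "example_cmt_lineage_check" and the surrounding bookkeeping
 * are hypothetical; the real lineage construction and hierarchy linkage
 * happen elsewhere in this file.
 */
#if 0
static void
example_cmt_lineage_check(pg_cmt_t **lineage, int *levels, cpu_pg_t *pgdata)
{
	cmt_lineage_validation_t status;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * The lineage is assumed to already be sorted in ascending order
	 * by CPU count, and "pgdata" references the CPU's under-construction
	 * PG data (not cp->cpu_pg).
	 */
	status = pg_cmt_lineage_validate(lineage, levels, pgdata);

	switch (status) {
	case CMT_LINEAGE_VALID:
	case CMT_LINEAGE_REPAIRED:
		/*
		 * The (possibly pruned) lineage can be integrated into the
		 * CMT PG hierarchy.
		 */
		break;
	default:
		/*
		 * Unrecoverable validation error; pg_cmt_lineage_validate()
		 * has already disabled CMT scheduling via pg_cmt_disable().
		 */
		break;
	}
}
#endif	/* Illustrative sketch */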