/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/systm.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/kstat.h>
#include <sys/processor.h>
#include <sys/disp.h>
#include <sys/group.h>
#include <sys/pghw.h>
#include <sys/bitset.h>
#include <sys/lgrp.h>
#include <sys/cmt.h>
#include <sys/cpu_pm.h>

/*
 * CMT scheduler / dispatcher support
 *
 * This file implements CMT scheduler support using Processor Groups.
 * The CMT processor group class creates and maintains the CMT class
 * specific processor group pg_cmt_t.
 *
 * ----------------------------  <-- pg_cmt_t *
 * | pghw_t                   |
 * ----------------------------
 * | CMT class specific data  |
 * | - hierarchy linkage      |
 * | - CMT load balancing data|
 * | - active CPU group/bitset|
 * ----------------------------
 *
 * The scheduler/dispatcher leverages knowledge of the performance
 * relevant CMT sharing relationships existing between CPUs to implement
 * optimized affinity, load balancing, and coalescence policies.
 *
 * Load balancing policy seeks to improve performance by minimizing
 * contention over shared processor resources / facilities. Affinity
 * policies seek to improve cache and TLB utilization. Coalescence
 * policies improve resource utilization and ultimately power efficiency.
 *
 * The CMT PGs created by this class are already arranged into a
 * hierarchy (which is done in the pghw layer). To implement the top-down
 * CMT load balancing algorithm, the CMT PGs additionally maintain
 * parent, child and sibling hierarchy relationships.
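 *
 * As an illustrative sketch only (the levels that actually exist depend
 * on which sharing relationships the platform reports), a single CPU's
 * portion of the hierarchy might look like:
 *
 *	chip (socket)			<- parent PG: most CPUs
 *	    shared cache		<- child of the chip PG
 *	        integer pipeline	<- leaf PG: fewest CPUs
 *
 * Each level is a pg_cmt_t, and the top-down balancing walk visits the
 * chip level before the cache and pipeline levels.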
 * Parent PGs always contain a superset of their children's resources,
 * each PG can have at most one parent, and siblings are the group of PGs
 * sharing the same parent.
 *
 * On NUMA systems, the CMT load balancing algorithm balances across the
 * CMT PGs within their respective lgroups. On UMA based systems, there
 * exists a top level group of PGs to balance across. On NUMA systems multiple
 * top level groups are instantiated, where the top level balancing begins by
 * balancing across the CMT PGs within their respective (per lgroup) top level
 * groups.
 */
static cmt_lgrp_t	*cmt_lgrps = NULL;	/* cmt_lgrps list head */
static cmt_lgrp_t	*cpu0_lgrp = NULL;	/* boot CPU's initial lgrp */
						/* used for null_proc_lpa */
cmt_lgrp_t		*cmt_root = NULL;	/* Reference to root cmt pg */

static int		is_cpu0 = 1;	/* true if this is boot CPU context */

/*
 * Array of hardware sharing relationships that are blacklisted.
 * PGs won't be instantiated for blacklisted hardware sharing relationships.
 */
static int		cmt_hw_blacklisted[PGHW_NUM_COMPONENTS];

/*
 * Set this to non-zero to disable CMT scheduling
 * This must be done via kmdb -d, as /etc/system will be too late
 */
int			cmt_sched_disabled = 0;

/*
 * Status codes for CMT lineage validation
 * See pg_cmt_lineage_validate() below
 */
typedef enum cmt_lineage_validation {
	CMT_LINEAGE_VALID,
	CMT_LINEAGE_NON_CONCENTRIC,
	CMT_LINEAGE_PG_SPANS_LGRPS,
	CMT_LINEAGE_NON_PROMOTABLE,
	CMT_LINEAGE_REPAIRED,
	CMT_LINEAGE_UNRECOVERABLE
} cmt_lineage_validation_t;

/*
 * Status of the current lineage under construction.
 * One must be holding cpu_lock to change this.
 */
cmt_lineage_validation_t	cmt_lineage_status = CMT_LINEAGE_VALID;

/*
 * Power domain definitions (on x86) are defined by ACPI, and
 * therefore may be subject to BIOS bugs.
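 * Hierarchies that such a "suspect" relationship renders inconsistent are
 * repaired by pruning the offending PGs (see pg_cmt_prune() below), and the
 * relationship is then blacklisted via cmt_hw_blacklisted[] so that no
 * further PGs are created for it.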
 */
#define	PG_CMT_HW_SUSPECT(hw)	PGHW_IS_PM_DOMAIN(hw)

/*
 * Macro to test if PG is managed by the CMT PG class
 */
#define	IS_CMT_PG(pg)	(((pg_t *)(pg))->pg_class->pgc_id == pg_cmt_class_id)

static pg_cid_t		pg_cmt_class_id;	/* PG class id */

static pg_t		*pg_cmt_alloc();
static void		pg_cmt_free(pg_t *);
static void		pg_cmt_cpu_init(cpu_t *, cpu_pg_t *);
static void		pg_cmt_cpu_fini(cpu_t *, cpu_pg_t *);
static void		pg_cmt_cpu_active(cpu_t *);
static void		pg_cmt_cpu_inactive(cpu_t *);
static void		pg_cmt_cpupart_in(cpu_t *, cpupart_t *);
static void		pg_cmt_cpupart_move(cpu_t *, cpupart_t *, cpupart_t *);
static char		*pg_cmt_policy_name(pg_t *);
static void		pg_cmt_hier_sort(pg_cmt_t **, int);
static pg_cmt_t		*pg_cmt_hier_rank(pg_cmt_t *, pg_cmt_t *);
static int		pg_cmt_cpu_belongs(pg_t *, cpu_t *);
static int		pg_cmt_hw(pghw_type_t);
static cmt_lgrp_t	*pg_cmt_find_lgrp(lgrp_handle_t);
static cmt_lgrp_t	*pg_cmt_lgrp_create(lgrp_handle_t);
static void		cmt_ev_thread_swtch(pg_t *, cpu_t *, hrtime_t,
			    kthread_t *, kthread_t *);
static void		cmt_ev_thread_swtch_pwr(pg_t *, cpu_t *, hrtime_t,
			    kthread_t *, kthread_t *);
static void		cmt_ev_thread_remain_pwr(pg_t *, cpu_t *, kthread_t *);
static cmt_lineage_validation_t	pg_cmt_lineage_validate(pg_cmt_t **, int *);


/*
 * CMT PG ops
 */
struct pg_ops pg_ops_cmt = {
	pg_cmt_alloc,
	pg_cmt_free,
	pg_cmt_cpu_init,
	pg_cmt_cpu_fini,
	pg_cmt_cpu_active,
	pg_cmt_cpu_inactive,
	pg_cmt_cpupart_in,
	NULL,			/* cpupart_out */
	pg_cmt_cpupart_move,
	pg_cmt_cpu_belongs,
	pg_cmt_policy_name,
};

/*
 * Initialize the CMT PG class
 */
void
pg_cmt_class_init(void)
{
	if (cmt_sched_disabled)
		return;

	pg_cmt_class_id = pg_class_register("cmt", &pg_ops_cmt, PGR_PHYSICAL);
}

/*
 * Called to indicate a new CPU has started up so
 * that either t0 or the slave startup thread can
 * be accounted for.
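 * (The switch event issued below charges the CPU's current thread as having
 * just switched on in place of the idle thread, so the PG utilization
 * accounting starts out consistent.)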
 */
void
pg_cmt_cpu_startup(cpu_t *cp)
{
	pg_ev_thread_swtch(cp, gethrtime_unscaled(), cp->cpu_idle_thread,
	    cp->cpu_thread);
}

/*
 * Return non-zero if thread can migrate between "from" and "to"
 * without a performance penalty
 */
int
pg_cmt_can_migrate(cpu_t *from, cpu_t *to)
{
	if (from->cpu_physid->cpu_cacheid ==
	    to->cpu_physid->cpu_cacheid)
		return (1);
	return (0);
}

/*
 * CMT class specific PG allocation
 */
static pg_t *
pg_cmt_alloc(void)
{
	return (kmem_zalloc(sizeof (pg_cmt_t), KM_NOSLEEP));
}

/*
 * Class specific PG de-allocation
 */
static void
pg_cmt_free(pg_t *pg)
{
	ASSERT(pg != NULL);
	ASSERT(IS_CMT_PG(pg));

	kmem_free((pg_cmt_t *)pg, sizeof (pg_cmt_t));
}

/*
 * Given a hardware sharing relationship, return which dispatcher
 * policies should be implemented to optimize performance and efficiency
 */
static pg_cmt_policy_t
pg_cmt_policy(pghw_type_t hw)
{
	pg_cmt_policy_t p;

	/*
	 * Give the platform a chance to override the default
	 */
	if ((p = pg_plat_cmt_policy(hw)) != CMT_NO_POLICY)
		return (p);

	switch (hw) {
	case PGHW_IPIPE:
	case PGHW_FPU:
	case PGHW_CHIP:
		return (CMT_BALANCE);
	case PGHW_CACHE:
		return (CMT_AFFINITY);
	case PGHW_POW_ACTIVE:
	case PGHW_POW_IDLE:
		return (CMT_BALANCE);
	default:
		return (CMT_NO_POLICY);
	}
}

/*
 * Rank the importance of optimizing for the pg1 relationship vs.
 * the pg2 relationship.
 */
static pg_cmt_t *
pg_cmt_hier_rank(pg_cmt_t *pg1, pg_cmt_t *pg2)
{
	pghw_type_t hw1 = ((pghw_t *)pg1)->pghw_hw;
	pghw_type_t hw2 = ((pghw_t *)pg2)->pghw_hw;

	/*
	 * A power domain is only important if CPUPM is enabled.
	 */
	if (cpupm_get_policy() == CPUPM_POLICY_DISABLED) {
		if (PGHW_IS_PM_DOMAIN(hw1) && !PGHW_IS_PM_DOMAIN(hw2))
			return (pg2);
		if (PGHW_IS_PM_DOMAIN(hw2) && !PGHW_IS_PM_DOMAIN(hw1))
			return (pg1);
	}

	/*
	 * Otherwise, ask the platform
	 */
	if (pg_plat_hw_rank(hw1, hw2) == hw1)
		return (pg1);
	else
		return (pg2);
}

/*
 * Initialize CMT callbacks for the given PG
 */
static void
cmt_callback_init(pg_t *pg)
{
	switch (((pghw_t *)pg)->pghw_hw) {
	case PGHW_POW_ACTIVE:
		pg->pg_cb.thread_swtch = cmt_ev_thread_swtch_pwr;
		pg->pg_cb.thread_remain = cmt_ev_thread_remain_pwr;
		break;
	default:
		pg->pg_cb.thread_swtch = cmt_ev_thread_swtch;

	}
}

/*
 * Promote PG above its current parent.
 * This is only legal if PG has an equal or greater number of CPUs
 * than its parent.
 */
static void
cmt_hier_promote(pg_cmt_t *pg)
{
	pg_cmt_t	*parent;
	group_t		*children;
	cpu_t		*cpu;
	group_iter_t	iter;
	pg_cpu_itr_t	cpu_iter;
	int		r;
	int		err;

	ASSERT(MUTEX_HELD(&cpu_lock));

	parent = pg->cmt_parent;
	if (parent == NULL) {
		/*
		 * Nothing to do
		 */
		return;
	}

	ASSERT(PG_NUM_CPUS((pg_t *)pg) >= PG_NUM_CPUS((pg_t *)parent));

	/*
	 * We're changing around the hierarchy, which is actively traversed
	 * by the dispatcher. Pause CPUs to ensure exclusivity.
	 */
	pause_cpus(NULL);

	/*
	 * If necessary, update the parent's sibling set, replacing parent
	 * with PG.
	 */
	if (parent->cmt_siblings) {
		if (group_remove(parent->cmt_siblings, parent, GRP_NORESIZE)
		    != -1) {
			r = group_add(parent->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(r != -1);
		}
	}

	/*
	 * If the parent is at the top of the hierarchy, replace its entry
	 * in the root lgroup's group of top level PGs.
	 */
	if (parent->cmt_parent == NULL &&
	    parent->cmt_siblings != &cmt_root->cl_pgs) {
		if (group_remove(&cmt_root->cl_pgs, parent, GRP_NORESIZE)
		    != -1) {
			r = group_add(&cmt_root->cl_pgs, pg, GRP_NORESIZE);
			ASSERT(r != -1);
		}
	}

	/*
	 * We assume (and therefore assert) that the PG being promoted is an
	 * only child of its parent. Update the parent's children set
	 * replacing PG's entry with the parent (since the parent is becoming
	 * the child). Then have PG and the parent swap children sets.
	 */
	ASSERT(GROUP_SIZE(parent->cmt_children) <= 1);
	if (group_remove(parent->cmt_children, pg, GRP_NORESIZE) != -1) {
		r = group_add(parent->cmt_children, parent, GRP_NORESIZE);
		ASSERT(r != -1);
	}

	children = pg->cmt_children;
	pg->cmt_children = parent->cmt_children;
	parent->cmt_children = children;

	/*
	 * Update the sibling references for PG and its parent
	 */
	pg->cmt_siblings = parent->cmt_siblings;
	parent->cmt_siblings = pg->cmt_children;

	/*
	 * Update any cached lineages in the per CPU pg data.
	 */
	PG_CPU_ITR_INIT(pg, cpu_iter);
	while ((cpu = pg_cpu_next(&cpu_iter)) != NULL) {
		int		idx;
		group_t		*pgs;
		pg_cmt_t	*cpu_pg;

		/*
		 * Iterate over the CPU's PGs updating the children
		 * of the PG being promoted, since they have a new parent.
		 */
		pgs = &cpu->cpu_pg->pgs;
		group_iter_init(&iter);
		while ((cpu_pg = group_iterate(pgs, &iter)) != NULL) {
			if (cpu_pg->cmt_parent == pg) {
				cpu_pg->cmt_parent = parent;
			}
		}

		/*
		 * Update the CMT load balancing lineage
		 */
		pgs = &cpu->cpu_pg->cmt_pgs;
		if ((idx = group_find(pgs, (void *)pg)) == -1) {
			/*
			 * Unless this is the CPU whose lineage is being
			 * constructed, the PG being promoted should be
			 * in the lineage.
			 */
			ASSERT(GROUP_SIZE(pgs) == 0);
			continue;
		}

		ASSERT(GROUP_ACCESS(pgs, idx - 1) == parent);
		ASSERT(idx > 0);

		/*
		 * Have the child and the parent swap places in the CPU's
		 * lineage
		 */
		group_remove_at(pgs, idx);
		group_remove_at(pgs, idx - 1);
		err = group_add_at(pgs, parent, idx);
		ASSERT(err == 0);
		err = group_add_at(pgs, pg, idx - 1);
		ASSERT(err == 0);
	}

	/*
	 * Update the parent references for PG and its parent
	 */
	pg->cmt_parent = parent->cmt_parent;
	parent->cmt_parent = pg;

	start_cpus();
}

/*
 * CMT class callback for a new CPU entering the system
 */
static void
pg_cmt_cpu_init(cpu_t *cp, cpu_pg_t *cpu_pg)
{
	pg_cmt_t	*pg;
	group_t		*cmt_pgs;
	int		levels, level;
	pghw_type_t	hw;
	pg_t		*pg_cache = NULL;
	pg_cmt_t	*cpu_cmt_hier[PGHW_NUM_COMPONENTS];
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;
	cmt_lineage_validation_t	lineage_status;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	/*
	 * A new CPU is coming into the system.
	 * Interrogate the platform to see if the CPU
	 * has any performance or efficiency relevant
	 * sharing relationships
	 */
	cmt_pgs = &cpu_pg->cmt_pgs;
	cpu_pg->cmt_lineage = NULL;

	bzero(cpu_cmt_hier, sizeof (cpu_cmt_hier));
	levels = 0;
	for (hw = PGHW_START; hw < PGHW_NUM_COMPONENTS; hw++) {

		pg_cmt_policy_t	policy;

		/*
		 * We're only interested in the hw sharing relationships
		 * for which we know how to optimize.
		 */
		policy = pg_cmt_policy(hw);
		if (policy == CMT_NO_POLICY ||
		    pg_plat_hw_shared(cp, hw) == 0)
			continue;

		/*
		 * Continue if the hardware sharing relationship has been
		 * blacklisted.
		 */
		if (cmt_hw_blacklisted[hw]) {
			continue;
		}

		/*
		 * Find (or create) the PG associated with
		 * the hw sharing relationship in which cp
		 * belongs.
		 *
		 * Determine if a suitable PG already
		 * exists, or if one needs to be created.
		 */
		pg = (pg_cmt_t *)pghw_place_cpu(cp, hw);
		if (pg == NULL) {
			/*
			 * Create a new one.
			 * Initialize the common...
			 */
			pg = (pg_cmt_t *)pg_create(pg_cmt_class_id);

			/* ... physical ... */
			pghw_init((pghw_t *)pg, cp, hw);

			/*
			 * ... and CMT specific portions of the
			 * structure.
			 */
			pg->cmt_policy = policy;

			/* CMT event callbacks */
			cmt_callback_init((pg_t *)pg);

			bitset_init(&pg->cmt_cpus_actv_set);
			group_create(&pg->cmt_cpus_actv);
		} else {
			ASSERT(IS_CMT_PG(pg));
		}

		/* Add the CPU to the PG */
		pg_cpu_add((pg_t *)pg, cp, cpu_pg);

		/*
		 * Ensure capacity of the active CPU group/bitset
		 */
		group_expand(&pg->cmt_cpus_actv,
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		if (cp->cpu_seqid >=
		    bitset_capacity(&pg->cmt_cpus_actv_set)) {
			bitset_resize(&pg->cmt_cpus_actv_set,
			    cp->cpu_seqid + 1);
		}

		/*
		 * Build a lineage of CMT PGs for load balancing / coalescence
		 */
		if (policy & (CMT_BALANCE | CMT_COALESCE)) {
			cpu_cmt_hier[levels++] = pg;
		}

		/* Cache this for later */
		if (hw == PGHW_CACHE)
			pg_cache = (pg_t *)pg;
	}

	group_expand(cmt_pgs, levels);

	if (cmt_root == NULL)
		cmt_root = pg_cmt_lgrp_create(lgrp_plat_root_hand());

	/*
	 * Find the lgrp that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);
	if ((lgrp = pg_cmt_find_lgrp(lgrp_handle)) == NULL)
		lgrp = pg_cmt_lgrp_create(lgrp_handle);

	/*
	 * Ascendingly sort the PGs in the lineage by number of CPUs
	 */
	pg_cmt_hier_sort(cpu_cmt_hier, levels);

	/*
	 * Examine the lineage and validate it.
	 * This routine will also try to fix the lineage along with the
	 * rest of the PG hierarchy should it detect an issue.
	 *
	 * If it returns anything other than VALID or REPAIRED, an
	 * unrecoverable error has occurred, and we cannot proceed.
	 */
	lineage_status = pg_cmt_lineage_validate(cpu_cmt_hier, &levels);
	if ((lineage_status != CMT_LINEAGE_VALID) &&
	    (lineage_status != CMT_LINEAGE_REPAIRED))
		return;

	/*
	 * For existing PGs in the lineage, verify that the parent is
	 * correct, as the generation in the lineage may have changed
	 * as a result of the sorting. Start the traversal at the top
	 * of the lineage, moving down.
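	 * (Each promotion can perturb levels that were already visited, so
	 * the loop below restarts from the top of the lineage whenever it
	 * reorganizes anything.)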
	 */
	for (level = levels - 1; level >= 0; ) {
		int reorg;

		reorg = 0;
		pg = cpu_cmt_hier[level];

		/*
		 * Promote PGs at an incorrect generation into place.
		 */
		while (pg->cmt_parent &&
		    pg->cmt_parent != cpu_cmt_hier[level + 1]) {
			cmt_hier_promote(pg);
			reorg++;
		}
		if (reorg > 0)
			level = levels - 1;
		else
			level--;
	}

	/*
	 * For each of the PGs in the CPU's lineage:
	 *	- Add an entry in the CPU sorted CMT PG group
	 *	  which is used for top down CMT load balancing
	 *	- Tie the PG into the CMT hierarchy by connecting
	 *	  it to its parent and siblings.
	 */
	for (level = 0; level < levels; level++) {
		uint_t		children;
		int		err;

		pg = cpu_cmt_hier[level];
		err = group_add_at(cmt_pgs, pg, levels - level - 1);
		ASSERT(err == 0);

		if (level == 0)
			cpu_pg->cmt_lineage = (pg_t *)pg;

		if (pg->cmt_siblings != NULL) {
			/* Already initialized */
			ASSERT(pg->cmt_parent == NULL ||
			    pg->cmt_parent == cpu_cmt_hier[level + 1]);
			ASSERT(pg->cmt_siblings == &lgrp->cl_pgs ||
			    ((pg->cmt_parent != NULL) &&
			    pg->cmt_siblings == pg->cmt_parent->cmt_children));
			continue;
		}

		if ((level + 1) == levels) {
			pg->cmt_parent = NULL;

			pg->cmt_siblings = &lgrp->cl_pgs;
			children = ++lgrp->cl_npgs;
			if (cmt_root != lgrp)
				cmt_root->cl_npgs++;
		} else {
			pg->cmt_parent = cpu_cmt_hier[level + 1];

			/*
			 * A good parent keeps track of their children.
			 * The parent's children group is also the PG's
			 * siblings.
			 */
			if (pg->cmt_parent->cmt_children == NULL) {
				pg->cmt_parent->cmt_children =
				    kmem_zalloc(sizeof (group_t), KM_SLEEP);
				group_create(pg->cmt_parent->cmt_children);
			}
			pg->cmt_siblings = pg->cmt_parent->cmt_children;
			children = ++pg->cmt_parent->cmt_nchildren;
		}

		group_expand(pg->cmt_siblings, children);
		group_expand(&cmt_root->cl_pgs, cmt_root->cl_npgs);
	}

	/*
	 * Cache the chip and core IDs in the cpu_t->cpu_physid structure
	 * for fast lookups later.
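	 * (pg_cmt_can_migrate() above, for example, compares the cached
	 * cpu_cacheid values rather than walking the PG hierarchy.)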
	 */
	if (cp->cpu_physid) {
		cp->cpu_physid->cpu_chipid =
		    pg_plat_hw_instance_id(cp, PGHW_CHIP);
		cp->cpu_physid->cpu_coreid = pg_plat_get_core_id(cp);

		/*
		 * If this cpu has a PG representing shared cache, then set
		 * cpu_cacheid to that PG's logical id
		 */
		if (pg_cache)
			cp->cpu_physid->cpu_cacheid = pg_cache->pg_id;
	}

	/* CPU0 only initialization */
	if (is_cpu0) {
		pg_cmt_cpu_startup(cp);
		is_cpu0 = 0;
		cpu0_lgrp = lgrp;
	}

}

/*
 * Class callback when a CPU is leaving the system (deletion)
 */
static void
pg_cmt_cpu_fini(cpu_t *cp, cpu_pg_t *cpu_pg)
{
	group_iter_t	i;
	pg_cmt_t	*pg;
	group_t		*pgs, *cmt_pgs;
	lgrp_handle_t	lgrp_handle;
	cmt_lgrp_t	*lgrp;

	if (cmt_sched_disabled)
		return;

	pgs = &cpu_pg->pgs;
	cmt_pgs = &cpu_pg->cmt_pgs;

	/*
	 * Find the lgroup that encapsulates this CPU's CMT hierarchy
	 */
	lgrp_handle = lgrp_plat_cpu_to_hand(cp->cpu_id);

	lgrp = pg_cmt_find_lgrp(lgrp_handle);
	if (ncpus == 1 && lgrp != cpu0_lgrp) {
		/*
		 * One might wonder how we could be deconfiguring the
		 * only CPU in the system.
		 *
		 * On Starcat systems when null_proc_lpa is detected,
		 * the boot CPU (which is already configured into a leaf
		 * lgroup), is moved into the root lgroup. This is done by
		 * deconfiguring it from both lgroups and processor
		 * groups, and then later reconfiguring it back in. This
		 * call to pg_cmt_cpu_fini() is part of that deconfiguration.
		 *
		 * This special case is detected by noting that the platform
		 * has changed the CPU's lgrp affiliation (since it now
		 * belongs in the root). In this case, use the cmt_lgrp_t
		 * cached for the boot CPU, since this is what needs to be
		 * torn down.
		 */
		lgrp = cpu0_lgrp;
	}

	ASSERT(lgrp != NULL);

	/*
	 * First, clean up anything load balancing specific for each of
	 * the CPU's PGs that participated in CMT load balancing
	 */
	pg = (pg_cmt_t *)cpu_pg->cmt_lineage;
	while (pg != NULL) {

		/*
		 * Remove the PG from the CPU's load balancing lineage
		 */
		(void) group_remove(cmt_pgs, pg, GRP_RESIZE);

		/*
		 * If it's about to become empty, destroy its children
		 * group, and remove its reference from its siblings.
		 * This is done here (rather than below) to avoid removing
		 * our reference from a PG that we just eliminated.
		 */
		if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 1) {
			if (pg->cmt_children != NULL)
				group_destroy(pg->cmt_children);
			if (pg->cmt_siblings != NULL) {
				if (pg->cmt_siblings == &lgrp->cl_pgs)
					lgrp->cl_npgs--;
				else
					pg->cmt_parent->cmt_nchildren--;
			}
		}
		pg = pg->cmt_parent;
	}
	ASSERT(GROUP_SIZE(cmt_pgs) == 0);

	/*
	 * Now that the load balancing lineage updates have happened,
	 * remove the CPU from all its PGs (destroying any that become
	 * empty).
	 */
	group_iter_init(&i);
	while ((pg = group_iterate(pgs, &i)) != NULL) {
		if (IS_CMT_PG(pg) == 0)
			continue;

		pg_cpu_delete((pg_t *)pg, cp, cpu_pg);
		/*
		 * Deleting the CPU from the PG changes the CPU's
		 * PG group over which we are actively iterating.
		 * Re-initialize the iteration.
		 */
		group_iter_init(&i);

		if (GROUP_SIZE(&((pg_t *)pg)->pg_cpus) == 0) {

			/*
			 * The PG has become zero sized, so destroy it.
			 */
			group_destroy(&pg->cmt_cpus_actv);
			bitset_fini(&pg->cmt_cpus_actv_set);
			pghw_fini((pghw_t *)pg);

			pg_destroy((pg_t *)pg);
		}
	}
}

/*
 * Class callback when a CPU is entering a cpu partition
 */
static void
pg_cmt_cpupart_in(cpu_t *cp, cpupart_t *pp)
{
	group_t		*pgs;
	pg_t		*pg;
	group_iter_t	i;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;

	/*
	 * Ensure that the new partition's PG bitset
	 * is large enough for all CMT PGs to which cp
	 * belongs
	 */
	group_iter_init(&i);
	while ((pg = group_iterate(pgs, &i)) != NULL) {
		if (IS_CMT_PG(pg) == 0)
			continue;

		if (bitset_capacity(&pp->cp_cmt_pgs) <= pg->pg_id)
			bitset_resize(&pp->cp_cmt_pgs, pg->pg_id + 1);
	}
}

/*
 * Class callback when a CPU is actually moving partitions
 */
static void
pg_cmt_cpupart_move(cpu_t *cp, cpupart_t *oldpp, cpupart_t *newpp)
{
	cpu_t		*cpp;
	group_t		*pgs;
	pg_t		*pg;
	group_iter_t	pg_iter;
	pg_cpu_itr_t	cpu_iter;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&pg_iter);

	/*
	 * Iterate over the CPU's CMT PGs
	 */
	while ((pg = group_iterate(pgs, &pg_iter)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Add the PG to the bitset in the new partition.
		 */
		bitset_add(&newpp->cp_cmt_pgs, pg->pg_id);

		/*
		 * Remove the PG from the bitset in the old partition
		 * if the last of the PG's CPUs have left.
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_iter);
		while ((cpp = pg_cpu_next(&cpu_iter)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == oldpp->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found)
			bitset_del(&cp->cpu_part->cp_cmt_pgs, pg->pg_id);
	}
}

/*
 * Class callback when a CPU becomes active (online)
 *
 * This is called in a context where CPUs are paused
 */
static void
pg_cmt_cpu_active(cpu_t *cp)
{
	int		err;
	group_iter_t	i;
	pg_cmt_t	*pg;
	group_t		*pgs;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&i);

	/*
	 * Iterate over the CPU's PGs
	 */
	while ((pg = group_iterate(pgs, &i)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		err = group_add(&pg->cmt_cpus_actv, cp, GRP_NORESIZE);
		ASSERT(err == 0);

		/*
		 * If this is the first active CPU in the PG, and it
		 * represents a hardware sharing relationship over which
		 * CMT load balancing is performed, add it as a candidate
		 * for balancing with its siblings.
		 */
		if (GROUP_SIZE(&pg->cmt_cpus_actv) == 1 &&
		    (pg->cmt_policy & (CMT_BALANCE | CMT_COALESCE))) {
			err = group_add(pg->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(err == 0);

			/*
			 * If this is a top level PG, add it as a balancing
			 * candidate when balancing within the root lgroup.
			 */
			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				err = group_add(&cmt_root->cl_pgs, pg,
				    GRP_NORESIZE);
				ASSERT(err == 0);
			}
		}

		/*
		 * Notate the CPU in the PG's active CPU bitset.
		 * Also notate the PG as being active in its associated
		 * partition
		 */
		bitset_add(&pg->cmt_cpus_actv_set, cp->cpu_seqid);
		bitset_add(&cp->cpu_part->cp_cmt_pgs, ((pg_t *)pg)->pg_id);
	}
}

/*
 * Class callback when a CPU goes inactive (offline)
 *
 * This is called in a context where CPUs are paused
 */
static void
pg_cmt_cpu_inactive(cpu_t *cp)
{
	int		err;
	group_t		*pgs;
	pg_cmt_t	*pg;
	cpu_t		*cpp;
	group_iter_t	i;
	pg_cpu_itr_t	cpu_itr;
	boolean_t	found;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (cmt_sched_disabled)
		return;

	pgs = &cp->cpu_pg->pgs;
	group_iter_init(&i);

	while ((pg = group_iterate(pgs, &i)) != NULL) {

		if (IS_CMT_PG(pg) == 0)
			continue;

		/*
		 * Remove the CPU from the CMT PG's active CPU group
		 * bitmap
		 */
		err = group_remove(&pg->cmt_cpus_actv, cp, GRP_NORESIZE);
		ASSERT(err == 0);

		bitset_del(&pg->cmt_cpus_actv_set, cp->cpu_seqid);

		/*
		 * If there are no more active CPUs in this PG over which
		 * load was balanced, remove it as a balancing candidate.
		 */
		if (GROUP_SIZE(&pg->cmt_cpus_actv) == 0 &&
		    (pg->cmt_policy & (CMT_BALANCE | CMT_COALESCE))) {
			err = group_remove(pg->cmt_siblings, pg, GRP_NORESIZE);
			ASSERT(err == 0);

			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				err = group_remove(&cmt_root->cl_pgs, pg,
				    GRP_NORESIZE);
				ASSERT(err == 0);
			}
		}

		/*
		 * Assert the number of active CPUs does not exceed
		 * the total number of CPUs in the PG
		 */
		ASSERT(GROUP_SIZE(&pg->cmt_cpus_actv) <=
		    GROUP_SIZE(&((pg_t *)pg)->pg_cpus));

		/*
		 * Update the PG bitset in the CPU's old partition
		 */
		found = B_FALSE;
		PG_CPU_ITR_INIT(pg, cpu_itr);
		while ((cpp = pg_cpu_next(&cpu_itr)) != NULL) {
			if (cpp == cp)
				continue;
			if (CPU_ACTIVE(cpp) &&
			    cpp->cpu_part->cp_id == cp->cpu_part->cp_id) {
				found = B_TRUE;
				break;
			}
		}
		if (!found) {
			bitset_del(&cp->cpu_part->cp_cmt_pgs,
			    ((pg_t *)pg)->pg_id);
		}
	}
}

/*
 * Return non-zero if the CPU belongs in the given PG
 */
static int
pg_cmt_cpu_belongs(pg_t *pg, cpu_t *cp)
{
	cpu_t	*pg_cpu;

	pg_cpu = GROUP_ACCESS(&pg->pg_cpus, 0);

	ASSERT(pg_cpu != NULL);

	/*
	 * The CPU belongs if, given the nature of the hardware sharing
	 * relationship represented by the PG, the CPU has that
	 * relationship with some other CPU already in the PG
	 */
	if (pg_plat_cpus_share(cp, pg_cpu, ((pghw_t *)pg)->pghw_hw))
		return (1);

	return (0);
}

/*
 * Sort the CPU's CMT hierarchy, where "size" is the number of levels.
 */
static void
pg_cmt_hier_sort(pg_cmt_t **hier, int size)
{
	int		i, j, inc;
	pg_t		*tmp;
	pg_t		**h = (pg_t **)hier;

	/*
	 * First sort by number of CPUs
	 */
	inc = size / 2;
	while (inc > 0) {
		for (i = inc; i < size; i++) {
			j = i;
			tmp = h[i];
			while ((j >= inc) &&
			    (PG_NUM_CPUS(h[j - inc]) > PG_NUM_CPUS(tmp))) {
				h[j] = h[j - inc];
				j = j - inc;
			}
			h[j] = tmp;
		}
		if (inc == 2)
			inc = 1;
		else
			inc = (inc * 5) / 11;
	}

	/*
	 * Break ties by asking the platform.
	 * Determine if h[i] outranks h[i + 1] and if so, swap them.
	 */
	for (i = 0; i < size - 1; i++) {
		if ((PG_NUM_CPUS(h[i]) == PG_NUM_CPUS(h[i + 1])) &&
		    pg_cmt_hier_rank(hier[i], hier[i + 1]) == hier[i]) {
			tmp = h[i];
			h[i] = h[i + 1];
			h[i + 1] = tmp;
		}
	}
}

/*
 * Return a cmt_lgrp_t * given an lgroup handle.
 */
static cmt_lgrp_t *
pg_cmt_find_lgrp(lgrp_handle_t hand)
{
	cmt_lgrp_t	*lgrp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	lgrp = cmt_lgrps;
	while (lgrp != NULL) {
		if (lgrp->cl_hand == hand)
			break;
		lgrp = lgrp->cl_next;
	}
	return (lgrp);
}

/*
 * Create a cmt_lgrp_t with the specified handle.
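 * The caller must be holding cpu_lock. The new lgrp is linked onto the
 * global cmt_lgrps list so that pg_cmt_find_lgrp() can locate it later.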
 */
static cmt_lgrp_t *
pg_cmt_lgrp_create(lgrp_handle_t hand)
{
	cmt_lgrp_t	*lgrp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	lgrp = kmem_zalloc(sizeof (cmt_lgrp_t), KM_SLEEP);

	lgrp->cl_hand = hand;
	lgrp->cl_npgs = 0;
	lgrp->cl_next = cmt_lgrps;
	cmt_lgrps = lgrp;
	group_create(&lgrp->cl_pgs);

	return (lgrp);
}

/*
 * Interfaces to enable and disable power aware dispatching
 * The caller must be holding cpu_lock.
 *
 * Return 0 on success and -1 on failure.
 */
int
cmt_pad_enable(pghw_type_t type)
{
	group_t		*hwset;
	group_iter_t	iter;
	pg_cmt_t	*pg;

	ASSERT(PGHW_IS_PM_DOMAIN(type));
	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((hwset = pghw_set_lookup(type)) == NULL ||
	    cmt_hw_blacklisted[type]) {
		/*
		 * Unable to find any instances of the specified type
		 * of power domain, or the power domains have been blacklisted.
		 */
		return (-1);
	}

	/*
	 * Iterate over the power domains, setting the default dispatcher
	 * policy for power/performance optimization.
	 *
	 * Simply setting the policy isn't enough in the case where the power
	 * domain is an only child of another PG. Because the dispatcher walks
	 * the PG hierarchy in a top down fashion, the higher up PG's policy
	 * will dominate. So promote the power domain above its parent if both
	 * PG and its parent have the same CPUs to ensure its policy
	 * dominates.
	 */
	group_iter_init(&iter);
	while ((pg = group_iterate(hwset, &iter)) != NULL) {
		/*
		 * If the power domain is an only child to a parent
		 * not implementing the same policy, promote the child
		 * above the parent to activate the policy.
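		 *
		 * For example (illustrative only), if a power domain spans
		 * exactly the same CPUs as the chip PG above it, the domain
		 * is promoted above the chip so that the dispatcher's top
		 * down walk encounters its power aware policy first.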
		 */
		pg->cmt_policy = pg_cmt_policy(((pghw_t *)pg)->pghw_hw);
		while ((pg->cmt_parent != NULL) &&
		    (pg->cmt_parent->cmt_policy != pg->cmt_policy) &&
		    (PG_NUM_CPUS((pg_t *)pg) ==
		    PG_NUM_CPUS((pg_t *)pg->cmt_parent))) {
			cmt_hier_promote(pg);
		}
	}

	return (0);
}

int
cmt_pad_disable(pghw_type_t type)
{
	group_t		*hwset;
	group_iter_t	iter;
	pg_cmt_t	*pg;
	pg_cmt_t	*child;

	ASSERT(PGHW_IS_PM_DOMAIN(type));
	ASSERT(MUTEX_HELD(&cpu_lock));

	if ((hwset = pghw_set_lookup(type)) == NULL) {
		/*
		 * Unable to find any instances of the specified type of
		 * power domain.
		 */
		return (-1);
	}
	/*
	 * Iterate over the power domains, setting the default dispatcher
	 * policy for performance optimization (load balancing).
	 */
	group_iter_init(&iter);
	while ((pg = group_iterate(hwset, &iter)) != NULL) {

		/*
		 * If the power domain has an only child that implements
		 * policy other than load balancing, promote the child
		 * above the power domain to ensure its policy dominates.
		 */
		if (pg->cmt_children != NULL &&
		    GROUP_SIZE(pg->cmt_children) == 1) {
			child = GROUP_ACCESS(pg->cmt_children, 0);
			if ((child->cmt_policy & CMT_BALANCE) == 0) {
				cmt_hier_promote(child);
			}
		}
		pg->cmt_policy = CMT_BALANCE;
	}
	return (0);
}

/* ARGSUSED */
static void
cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
    kthread_t *new)
{
	pg_cmt_t	*cmt_pg = (pg_cmt_t *)pg;

	if (old == cp->cpu_idle_thread) {
		atomic_add_32(&cmt_pg->cmt_utilization, 1);
	} else if (new == cp->cpu_idle_thread) {
		atomic_add_32(&cmt_pg->cmt_utilization, -1);
	}
}

/*
 * Macro to test whether a thread is currently runnable on a CPU in a PG.
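 * That is, the thread is TS_RUN and is enqueued on the dispatch queue of
 * a CPU that is in the PG's active CPU set.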
 */
#define	THREAD_RUNNABLE_IN_PG(t, pg)					\
	((t)->t_state == TS_RUN &&					\
	    (t)->t_disp_queue->disp_cpu &&				\
	    bitset_in_set(&(pg)->cmt_cpus_actv_set,			\
	    (t)->t_disp_queue->disp_cpu->cpu_seqid))

static void
cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
    kthread_t *new)
{
	pg_cmt_t	*cmt = (pg_cmt_t *)pg;
	cpupm_domain_t	*dom;
	uint32_t	u;

	if (old == cp->cpu_idle_thread) {
		ASSERT(new != cp->cpu_idle_thread);
		u = atomic_add_32_nv(&cmt->cmt_utilization, 1);
		if (u == 1) {
			/*
			 * Notify the CPU power manager that the domain
			 * is non-idle.
			 */
			dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
			cpupm_utilization_event(cp, now, dom,
			    CPUPM_DOM_BUSY_FROM_IDLE);
		}
	} else if (new == cp->cpu_idle_thread) {
		ASSERT(old != cp->cpu_idle_thread);
		u = atomic_add_32_nv(&cmt->cmt_utilization, -1);
		if (u == 0) {
			/*
			 * The domain is idle, notify the CPU power
			 * manager.
			 *
			 * Avoid notifying if the thread is simply migrating
			 * between CPUs in the domain.
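			 * (If the outgoing thread is still runnable on
			 * another CPU in the PG, the domain as a whole is
			 * not actually going idle.)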
			 */
			if (!THREAD_RUNNABLE_IN_PG(old, cmt)) {
				dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
				cpupm_utilization_event(cp, now, dom,
				    CPUPM_DOM_IDLE_FROM_BUSY);
			}
		}
	}
}

/* ARGSUSED */
static void
cmt_ev_thread_remain_pwr(pg_t *pg, cpu_t *cp, kthread_t *t)
{
	pg_cmt_t	*cmt = (pg_cmt_t *)pg;
	cpupm_domain_t	*dom;

	dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
	cpupm_utilization_event(cp, (hrtime_t)0, dom, CPUPM_DOM_REMAIN_BUSY);
}

/*
 * Return the name of the CMT scheduling policy
 * being implemented across this PG
 */
static char *
pg_cmt_policy_name(pg_t *pg)
{
	pg_cmt_policy_t policy;

	policy = ((pg_cmt_t *)pg)->cmt_policy;

	if (policy & CMT_AFFINITY) {
		if (policy & CMT_BALANCE)
			return ("Load Balancing & Affinity");
		else if (policy & CMT_COALESCE)
			return ("Load Coalescence & Affinity");
		else
			return ("Affinity");
	} else {
		if (policy & CMT_BALANCE)
			return ("Load Balancing");
		else if (policy & CMT_COALESCE)
			return ("Load Coalescence");
		else
			return ("None");
	}
}

/*
 * Prune PG, and all other instances of PG's hardware sharing relationship
 * from the PG hierarchy.
 */
static int
pg_cmt_prune(pg_cmt_t *pg_bad, pg_cmt_t **lineage, int *sz)
{
	group_t		*hwset, *children;
	int		i, j, r, size = *sz;
	group_iter_t	hw_iter, child_iter;
	pg_cpu_itr_t	cpu_iter;
	pg_cmt_t	*pg, *child;
	cpu_t		*cpu;
	int		cap_needed;
	pghw_type_t	hw;

	ASSERT(MUTEX_HELD(&cpu_lock));

	hw = ((pghw_t *)pg_bad)->pghw_hw;

	if (hw == PGHW_POW_ACTIVE) {
		cmn_err(CE_NOTE, "!Active CPUPM domain groups look suspect. "
		    "Event Based CPUPM Unavailable");
	} else if (hw == PGHW_POW_IDLE) {
		cmn_err(CE_NOTE, "!Idle CPUPM domain groups look suspect. "
		    "Dispatcher assisted CPUPM disabled.");
	}

	/*
	 * Find and eliminate the PG from the lineage.
	 */
	for (i = 0; i < size; i++) {
		if (lineage[i] == pg_bad) {
			for (j = i; j < size - 1; j++)
				lineage[j] = lineage[j + 1];
			*sz = size - 1;
			break;
		}
	}

	/*
	 * We'll prune all instances of the hardware sharing relationship
	 * represented by pg. But before we do that (and pause CPUs) we need
	 * to ensure the hierarchy's groups are properly sized.
	 */
	hwset = pghw_set_lookup(hw);

	/*
	 * Blacklist the hardware so that future groups won't be created.
	 */
	cmt_hw_blacklisted[hw] = 1;

	/*
	 * For each of the PGs being pruned, ensure sufficient capacity in
	 * the siblings set for the PG's children
	 */
	group_iter_init(&hw_iter);
	while ((pg = group_iterate(hwset, &hw_iter)) != NULL) {
		/*
		 * PG is being pruned, but if it is bringing up more than
		 * one child, ask for more capacity in the siblings group.
		 */
		cap_needed = 0;
		if (pg->cmt_children &&
		    GROUP_SIZE(pg->cmt_children) > 1) {
			cap_needed = GROUP_SIZE(pg->cmt_children) - 1;

			group_expand(pg->cmt_siblings,
			    GROUP_SIZE(pg->cmt_siblings) + cap_needed);

			/*
			 * If this is a top level group, also ensure the
			 * capacity in the root lgrp level CMT grouping.
			 */
			if (pg->cmt_parent == NULL &&
			    pg->cmt_siblings != &cmt_root->cl_pgs) {
				group_expand(&cmt_root->cl_pgs,
				    GROUP_SIZE(&cmt_root->cl_pgs) + cap_needed);
			}
		}
	}

	/*
	 * We're operating on the PG hierarchy. Pause CPUs to ensure
	 * exclusivity with respect to the dispatcher.
	 */
	pause_cpus(NULL);

	/*
	 * Prune all PG instances of the hardware sharing relationship
	 * represented by pg.
	 */
	group_iter_init(&hw_iter);
	while ((pg = group_iterate(hwset, &hw_iter)) != NULL) {

		/*
		 * Remove PG from its group of siblings, if it's there.
		 */
		if (pg->cmt_siblings) {
			(void) group_remove(pg->cmt_siblings, pg, GRP_NORESIZE);
		}
		if (pg->cmt_parent == NULL &&
		    pg->cmt_siblings != &cmt_root->cl_pgs) {
			(void) group_remove(&cmt_root->cl_pgs, pg,
			    GRP_NORESIZE);
		}
		/*
		 * Move PG's children from its children set to its parent's
		 * children set. Note that the parent's children set and PG's
		 * siblings set are the same thing.
		 *
		 * Because we are iterating over the same group that we are
		 * operating on (removing the children), first add all of PG's
		 * children to the parent's children set, and once we are done
		 * iterating, empty PG's children set.
		 */
		if (pg->cmt_children != NULL) {
			children = pg->cmt_children;

			group_iter_init(&child_iter);
			while ((child = group_iterate(children, &child_iter))
			    != NULL) {
				if (pg->cmt_siblings != NULL) {
					r = group_add(pg->cmt_siblings, child,
					    GRP_NORESIZE);
					ASSERT(r == 0);
				}
			}
			group_empty(pg->cmt_children);
		}

		/*
		 * Reset the callbacks to the defaults
		 */
		pg_callback_set_defaults((pg_t *)pg);

		/*
		 * Update the PG lineage of each of PG's CPUs
		 */
		PG_CPU_ITR_INIT(pg, cpu_iter);
		while ((cpu = pg_cpu_next(&cpu_iter)) != NULL) {
			group_t		*pgs;
			pg_cmt_t	*cpu_pg;
			group_iter_t	liter;	/* Iterator for the lineage */

			/*
			 * Iterate over the CPU's PGs updating the children
			 * of the PG being pruned, since they now have a new
			 * parent and siblings set.
			 */
			pgs = &cpu->cpu_pg->pgs;
			group_iter_init(&liter);
			while ((cpu_pg = group_iterate(pgs, &liter)) != NULL) {
				if (cpu_pg->cmt_parent == pg) {
					cpu_pg->cmt_parent = pg->cmt_parent;
					cpu_pg->cmt_siblings = pg->cmt_siblings;
				}
			}

			/*
			 * Update the CPU's lineages
			 */
			pgs = &cpu->cpu_pg->cmt_pgs;
			(void) group_remove(pgs, pg, GRP_NORESIZE);
			pgs = &cpu->cpu_pg->pgs;
			(void) group_remove(pgs, pg, GRP_NORESIZE);
		}
	}
	start_cpus();
	return (0);
}
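
/*
 * Illustrative example (hypothetical topology): if a CPU's lineage is
 * { core PG, power domain PG, chip PG } and the power domain grouping turns
 * out to be bogus, pruning removes every PG of that sharing relationship
 * from the hierarchy, re-parents their children to the pruned PGs' former
 * parents, and blacklists the relationship so that no future PGs of that
 * type are created. The surviving lineage is { core PG, chip PG }.
 */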

/*
 * Disable CMT scheduling
 */
static void
pg_cmt_disable(void)
{
	cpu_t	*cpu;

	pause_cpus(NULL);
	cpu = cpu_list;

	do {
		if (cpu->cpu_pg)
			group_empty(&cpu->cpu_pg->cmt_pgs);
	} while ((cpu = cpu->cpu_next) != cpu_list);

	cmt_sched_disabled = 1;
	start_cpus();
	cmn_err(CE_NOTE, "!CMT thread placement optimizations unavailable");
}

/*
 * CMT lineage validation
 *
 * This routine is invoked by pg_cmt_cpu_init() to validate the integrity
 * of the PGs in a CPU's lineage. This is necessary because it's possible that
 * some groupings (power domain groupings in particular) may be defined by
 * sources that are buggy (e.g. BIOS bugs). In such cases, it may not be
 * possible to integrate those groupings into the CMT PG hierarchy, if doing
 * so would violate the subset invariant of the hierarchy, which says that
 * a PG must be a subset of its parent (if it has one).
 *
 * pg_cmt_lineage_validate()'s purpose is to detect grouping definitions that
 * would result in a violation of this invariant. If a violation is found,
 * and the PG is of a grouping type whose definition is known to originate from
 * suspect sources (BIOS), then pg_cmt_prune() will be invoked to prune the
 * PG (and all other instances of the PG's sharing relationship type) from the
 * hierarchy. Further, future instances of that sharing relationship type won't
 * be instantiated. If the grouping definition doesn't originate from suspect
 * sources, then pg_cmt_disable() will be invoked to log an error and disable
 * CMT scheduling altogether.
 *
 * This routine is invoked after the CPU has been added to the PGs in which
 * it belongs, but before those PGs have been added to (or had their place
 * adjusted in) the CMT PG hierarchy.
 *
 * The first argument is the CPU's PG lineage (essentially an array of PGs to
 * which the CPU belongs) that has already been sorted in ascending order
 * by CPU count. Some of the PGs in the CPU's lineage may already have other
 * CPUs in them, and have already been integrated into the CMT hierarchy.
 *
 * The addition of this new CPU to these pre-existing PGs means that those
 * PGs may need to be promoted up in the hierarchy to satisfy the subset
 * invariant. In addition to testing the subset invariant for the lineage,
 * this routine also verifies that the addition of the new CPU to the
 * existing PGs wouldn't cause the subset invariant to be violated in
 * the existing lineages.
 *
 * This routine will normally return one of the following:
 * CMT_LINEAGE_VALID - There were no problems detected with the lineage.
 * CMT_LINEAGE_REPAIRED - Problems were detected, but repaired via pruning.
 *
 * Otherwise, this routine will return a value indicating which error it
 * was unable to recover from (and set cmt_lineage_status along the way).
 */
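/*
 * Illustrative example (hypothetical CPU counts): the lineage
 * { core PG: CPUs 0-1, cache PG: CPUs 0-3, chip PG: CPUs 0-7 } satisfies
 * the subset invariant, since each PG's CPUs are contained in the next
 * (equal or larger) PG. If the middle PG instead contained CPUs 0-2 and 8,
 * it would hold a CPU (8) that the chip PG does not, making the lineage
 * non-concentric (CMT_LINEAGE_NON_CONCENTRIC below).
 */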
static cmt_lineage_validation_t
pg_cmt_lineage_validate(pg_cmt_t **lineage, int *sz)
{
	int		i, j, size;
	pg_cmt_t	*pg, *pg_next, *pg_bad, *pg_tmp;
	cpu_t		*cp;
	pg_cpu_itr_t	cpu_iter;
	lgrp_handle_t	lgrp;

	ASSERT(MUTEX_HELD(&cpu_lock));

revalidate:
	size = *sz;
	pg_bad = NULL;
	lgrp = LGRP_NULL_HANDLE;
	for (i = 0; i < size; i++) {

		pg = lineage[i];
		if (i < size - 1)
			pg_next = lineage[i + 1];
		else
			pg_next = NULL;

		/*
		 * We assume that the lineage has already been sorted
		 * by the number of CPUs. In fact, we depend on it.
		 */
		ASSERT(pg_next == NULL ||
		    (PG_NUM_CPUS((pg_t *)pg) <= PG_NUM_CPUS((pg_t *)pg_next)));

		/*
		 * Check to make sure that the existing parent of PG (if any)
		 * is either in the PG's lineage, or the PG has more CPUs than
		 * its existing parent and can and should be promoted above its
		 * parent.
		 *
		 * Since the PG topology is in the middle of being changed, we
		 * need to check whether the PG's existing parent (if any) is
		 * part of its lineage (and therefore should contain the new
		 * CPU). If not, it means that the addition of the new CPU
		 * should have made this PG have more CPUs than its parent, and
		 * this PG should be promoted to be above its existing parent
		 * now. We need to verify all of this to defend against a buggy
		 * BIOS giving bad power domain CPU groupings. Sigh.
		 */
		if (pg->cmt_parent) {
			/*
			 * Determine if cmt_parent is in this lineage
			 */
			for (j = 0; j < size; j++) {
				pg_tmp = lineage[j];
				if (pg_tmp == pg->cmt_parent)
					break;
			}
			if (pg_tmp != pg->cmt_parent) {
				/*
				 * cmt_parent is not in the lineage, verify
				 * it is a proper subset of PG.
				 */
				if (PG_NUM_CPUS((pg_t *)pg->cmt_parent) >=
				    PG_NUM_CPUS((pg_t *)pg)) {
					/*
					 * Not a proper subset if pg has fewer
					 * CPUs than cmt_parent...
					 */
					cmt_lineage_status =
					    CMT_LINEAGE_NON_PROMOTABLE;
					goto handle_error;
				}
			}
		}

		/*
		 * Walk each of the CPUs in the PG's group and perform
		 * consistency checks along the way.
		 */
		PG_CPU_ITR_INIT((pg_t *)pg, cpu_iter);
		while ((cp = pg_cpu_next(&cpu_iter)) != NULL) {
			/*
			 * Verify that there aren't any CPUs contained in PG
			 * that the next PG in the lineage (which is larger
			 * or the same size) doesn't also contain.
			 */
			if (pg_next != NULL &&
			    pg_cpu_find((pg_t *)pg_next, cp) == B_FALSE) {
				cmt_lineage_status = CMT_LINEAGE_NON_CONCENTRIC;
				goto handle_error;
			}

			/*
			 * Verify that all the CPUs in the PG are in the same
			 * lgroup.
			 */
			if (lgrp == LGRP_NULL_HANDLE) {
				lgrp = lgrp_plat_cpu_to_hand(cp->cpu_id);
			} else if (lgrp_plat_cpu_to_hand(cp->cpu_id) != lgrp) {
				cmt_lineage_status = CMT_LINEAGE_PG_SPANS_LGRPS;
				goto handle_error;
			}
		}
	}

handle_error:
	/*
	 * Some of these validation errors can result when the CPU grouping
	 * information is derived from buggy sources (for example, incorrect
	 * ACPI tables on x86 systems).
	 *
	 * We'll try to recover in such cases by pruning out the illegal
	 * groupings from the PG hierarchy, which means that we won't optimize
	 * for those levels, but we will for the remaining ones.
	 */
	switch (cmt_lineage_status) {
	case CMT_LINEAGE_VALID:
	case CMT_LINEAGE_REPAIRED:
		break;
	case CMT_LINEAGE_PG_SPANS_LGRPS:
		/*
		 * We've detected a PG whose CPUs span lgroups.
		 *
		 * This isn't supported, as the dispatcher isn't allowed
		 * to do CMT thread placement across lgroups, as this would
		 * conflict with policies implementing MPO thread affinity.
		 *
		 * The handling for this falls through to the next case.
		 */
	case CMT_LINEAGE_NON_PROMOTABLE:
		/*
		 * We've detected a PG that already exists in another CPU's
		 * lineage that cannot legally be promoted into place
		 * without breaking the invariants of the hierarchy.
		 */
		if (PG_CMT_HW_SUSPECT(((pghw_t *)pg)->pghw_hw)) {
			if (pg_cmt_prune(pg, lineage, sz) == 0) {
				cmt_lineage_status = CMT_LINEAGE_REPAIRED;
				goto revalidate;
			}
		}
		/*
		 * Something went wrong trying to prune out the bad level.
		 * Disable CMT scheduling altogether.
		 */
		pg_cmt_disable();
		break;
	case CMT_LINEAGE_NON_CONCENTRIC:
		/*
		 * We've detected a non-concentric PG lineage, which means that
		 * there's a PG in the lineage that has CPUs that the next PG
		 * over in the lineage (which is the same size or larger)
		 * doesn't have.
		 *
		 * In this case, we examine the two PGs to see if either
		 * grouping is defined by potentially buggy sources.
		 *
		 * If one has fewer CPUs than the other, and contains CPUs
		 * not found in the parent, and it is an untrusted enumeration,
		 * then prune it. If both have the same number of CPUs, then
		 * prune the one that is untrusted.
		 *
		 * This process repeats until we have a concentric lineage,
		 * or we would have to prune out a level derived from what we
		 * thought was a reliable source, in which case CMT scheduling
		 * is disabled altogether.
		 */
		if ((PG_NUM_CPUS((pg_t *)pg) < PG_NUM_CPUS((pg_t *)pg_next)) &&
		    (PG_CMT_HW_SUSPECT(((pghw_t *)pg)->pghw_hw))) {
			pg_bad = pg;
		} else if (PG_NUM_CPUS((pg_t *)pg) ==
		    PG_NUM_CPUS((pg_t *)pg_next)) {
			if (PG_CMT_HW_SUSPECT(((pghw_t *)pg_next)->pghw_hw)) {
				pg_bad = pg_next;
			} else if (PG_CMT_HW_SUSPECT(((pghw_t *)pg)->pghw_hw)) {
				pg_bad = pg;
			}
		}
		if (pg_bad) {
			if (pg_cmt_prune(pg_bad, lineage, sz) == 0) {
				cmt_lineage_status = CMT_LINEAGE_REPAIRED;
				goto revalidate;
			}
		}
		/*
		 * Something went wrong trying to identify and/or prune out
		 * the bad level. Disable CMT scheduling altogether.
		 */
		pg_cmt_disable();
		break;
	default:
		/*
		 * If we're here, we've encountered a validation error for
		 * which we don't know how to recover. In this case, disable
		 * CMT scheduling altogether.
		 */
		cmt_lineage_status = CMT_LINEAGE_UNRECOVERABLE;
		pg_cmt_disable();
	}
	return (cmt_lineage_status);
}
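
/*
 * Usage sketch (illustrative; the authoritative handling lives in
 * pg_cmt_cpu_init()): a caller is expected to treat CMT_LINEAGE_VALID and
 * CMT_LINEAGE_REPAIRED as "proceed with hierarchy construction", and any
 * other status as "the lineage was unusable and CMT scheduling has already
 * been disabled", e.g.:
 *
 *	cmt_lineage_validation_t result;
 *
 *	result = pg_cmt_lineage_validate(lineage, &sz);
 *	if (result != CMT_LINEAGE_VALID && result != CMT_LINEAGE_REPAIRED)
 *		return;
 */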