1*0Sstevel@tonic-gate /* 2*0Sstevel@tonic-gate * CDDL HEADER START 3*0Sstevel@tonic-gate * 4*0Sstevel@tonic-gate * The contents of this file are subject to the terms of the 5*0Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 6*0Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 7*0Sstevel@tonic-gate * with the License. 8*0Sstevel@tonic-gate * 9*0Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10*0Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 11*0Sstevel@tonic-gate * See the License for the specific language governing permissions 12*0Sstevel@tonic-gate * and limitations under the License. 13*0Sstevel@tonic-gate * 14*0Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 15*0Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16*0Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 17*0Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 18*0Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 19*0Sstevel@tonic-gate * 20*0Sstevel@tonic-gate * CDDL HEADER END 21*0Sstevel@tonic-gate */ 22*0Sstevel@tonic-gate /* 23*0Sstevel@tonic-gate * Copyright 2004 Sun Microsystems, Inc. All rights reserved. 24*0Sstevel@tonic-gate * Use is subject to license terms. 25*0Sstevel@tonic-gate */ 26*0Sstevel@tonic-gate 27*0Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 28*0Sstevel@tonic-gate 29*0Sstevel@tonic-gate /* 30*0Sstevel@tonic-gate * DR memory support routines. 
31*0Sstevel@tonic-gate */ 32*0Sstevel@tonic-gate 33*0Sstevel@tonic-gate #include <sys/note.h> 34*0Sstevel@tonic-gate #include <sys/debug.h> 35*0Sstevel@tonic-gate #include <sys/types.h> 36*0Sstevel@tonic-gate #include <sys/errno.h> 37*0Sstevel@tonic-gate #include <sys/param.h> 38*0Sstevel@tonic-gate #include <sys/dditypes.h> 39*0Sstevel@tonic-gate #include <sys/kmem.h> 40*0Sstevel@tonic-gate #include <sys/conf.h> 41*0Sstevel@tonic-gate #include <sys/ddi.h> 42*0Sstevel@tonic-gate #include <sys/sunddi.h> 43*0Sstevel@tonic-gate #include <sys/sunndi.h> 44*0Sstevel@tonic-gate #include <sys/ddi_impldefs.h> 45*0Sstevel@tonic-gate #include <sys/ndi_impldefs.h> 46*0Sstevel@tonic-gate #include <sys/sysmacros.h> 47*0Sstevel@tonic-gate #include <sys/machsystm.h> 48*0Sstevel@tonic-gate #include <sys/spitregs.h> 49*0Sstevel@tonic-gate #include <sys/cpuvar.h> 50*0Sstevel@tonic-gate #include <sys/promif.h> 51*0Sstevel@tonic-gate #include <vm/seg_kmem.h> 52*0Sstevel@tonic-gate #include <sys/lgrp.h> 53*0Sstevel@tonic-gate #include <sys/platform_module.h> 54*0Sstevel@tonic-gate 55*0Sstevel@tonic-gate #include <vm/page.h> 56*0Sstevel@tonic-gate 57*0Sstevel@tonic-gate #include <sys/dr.h> 58*0Sstevel@tonic-gate #include <sys/dr_util.h> 59*0Sstevel@tonic-gate 60*0Sstevel@tonic-gate extern struct memlist *phys_install; 61*0Sstevel@tonic-gate 62*0Sstevel@tonic-gate /* TODO: push this reference below drmach line */ 63*0Sstevel@tonic-gate extern int kcage_on; 64*0Sstevel@tonic-gate 65*0Sstevel@tonic-gate /* for the DR*INTERNAL_ERROR macros. see sys/dr.h. 
*/ 66*0Sstevel@tonic-gate static char *dr_ie_fmt = "%M% %d"; 67*0Sstevel@tonic-gate 68*0Sstevel@tonic-gate static int dr_post_detach_mem_unit(dr_mem_unit_t *mp); 69*0Sstevel@tonic-gate static int dr_reserve_mem_spans(memhandle_t *mhp, 70*0Sstevel@tonic-gate struct memlist *mlist); 71*0Sstevel@tonic-gate static int dr_select_mem_target(dr_handle_t *hp, 72*0Sstevel@tonic-gate dr_mem_unit_t *mp, struct memlist *ml); 73*0Sstevel@tonic-gate static void dr_init_mem_unit_data(dr_mem_unit_t *mp); 74*0Sstevel@tonic-gate 75*0Sstevel@tonic-gate static struct memlist *memlist_dup(struct memlist *); 76*0Sstevel@tonic-gate static int memlist_canfit(struct memlist *s_mlist, 77*0Sstevel@tonic-gate struct memlist *t_mlist); 78*0Sstevel@tonic-gate static struct memlist *memlist_del_span(struct memlist *mlist, 79*0Sstevel@tonic-gate uint64_t base, uint64_t len); 80*0Sstevel@tonic-gate static struct memlist *memlist_cat_span(struct memlist *mlist, 81*0Sstevel@tonic-gate uint64_t base, uint64_t len); 82*0Sstevel@tonic-gate 83*0Sstevel@tonic-gate extern void page_unretire_pages(void); 84*0Sstevel@tonic-gate 85*0Sstevel@tonic-gate /* 86*0Sstevel@tonic-gate * dr_mem_unit_t.sbm_flags 87*0Sstevel@tonic-gate */ 88*0Sstevel@tonic-gate #define DR_MFLAG_RESERVED 0x01 /* mem unit reserved for delete */ 89*0Sstevel@tonic-gate #define DR_MFLAG_SOURCE 0x02 /* source brd of copy/rename op */ 90*0Sstevel@tonic-gate #define DR_MFLAG_TARGET 0x04 /* target brd of copy/rename op */ 91*0Sstevel@tonic-gate #define DR_MFLAG_MEMUPSIZE 0x08 /* move from big to small board */ 92*0Sstevel@tonic-gate #define DR_MFLAG_MEMDOWNSIZE 0x10 /* move from small to big board */ 93*0Sstevel@tonic-gate #define DR_MFLAG_MEMRESIZE 0x18 /* move to different size board */ 94*0Sstevel@tonic-gate #define DR_MFLAG_RELOWNER 0x20 /* memory release (delete) owner */ 95*0Sstevel@tonic-gate #define DR_MFLAG_RELDONE 0x40 /* memory release (delete) done */ 96*0Sstevel@tonic-gate 97*0Sstevel@tonic-gate /* helper macros */ 
98*0Sstevel@tonic-gate #define _ptob64(p) ((uint64_t)(p) << PAGESHIFT) 99*0Sstevel@tonic-gate #define _b64top(b) ((pgcnt_t)((b) >> PAGESHIFT)) 100*0Sstevel@tonic-gate 101*0Sstevel@tonic-gate static struct memlist * 102*0Sstevel@tonic-gate dr_get_memlist(dr_mem_unit_t *mp) 103*0Sstevel@tonic-gate { 104*0Sstevel@tonic-gate struct memlist *mlist = NULL; 105*0Sstevel@tonic-gate sbd_error_t *err; 106*0Sstevel@tonic-gate static fn_t f = "dr_get_memlist"; 107*0Sstevel@tonic-gate 108*0Sstevel@tonic-gate PR_MEM("%s for %s...\n", f, mp->sbm_cm.sbdev_path); 109*0Sstevel@tonic-gate 110*0Sstevel@tonic-gate /* 111*0Sstevel@tonic-gate * Return cached memlist, if present. 112*0Sstevel@tonic-gate * This memlist will be present following an 113*0Sstevel@tonic-gate * unconfigure (a.k.a: detach) of this memunit. 114*0Sstevel@tonic-gate * It should only be used in the case were a configure 115*0Sstevel@tonic-gate * is bringing this memunit back in without going 116*0Sstevel@tonic-gate * through the disconnect and connect states. 
117*0Sstevel@tonic-gate */ 118*0Sstevel@tonic-gate if (mp->sbm_mlist) { 119*0Sstevel@tonic-gate PR_MEM("%s: found cached memlist\n", f); 120*0Sstevel@tonic-gate 121*0Sstevel@tonic-gate mlist = memlist_dup(mp->sbm_mlist); 122*0Sstevel@tonic-gate } else { 123*0Sstevel@tonic-gate uint64_t basepa = _ptob64(mp->sbm_basepfn); 124*0Sstevel@tonic-gate 125*0Sstevel@tonic-gate /* attempt to construct a memlist using phys_install */ 126*0Sstevel@tonic-gate 127*0Sstevel@tonic-gate /* round down to slice base address */ 128*0Sstevel@tonic-gate basepa &= ~(mp->sbm_slice_size - 1); 129*0Sstevel@tonic-gate 130*0Sstevel@tonic-gate /* get a copy of phys_install to edit */ 131*0Sstevel@tonic-gate memlist_read_lock(); 132*0Sstevel@tonic-gate mlist = memlist_dup(phys_install); 133*0Sstevel@tonic-gate memlist_read_unlock(); 134*0Sstevel@tonic-gate 135*0Sstevel@tonic-gate /* trim lower irrelevant span */ 136*0Sstevel@tonic-gate if (mlist) 137*0Sstevel@tonic-gate mlist = memlist_del_span(mlist, 0ull, basepa); 138*0Sstevel@tonic-gate 139*0Sstevel@tonic-gate /* trim upper irrelevant span */ 140*0Sstevel@tonic-gate if (mlist) { 141*0Sstevel@tonic-gate uint64_t endpa; 142*0Sstevel@tonic-gate 143*0Sstevel@tonic-gate basepa += mp->sbm_slice_size; 144*0Sstevel@tonic-gate endpa = _ptob64(physmax + 1); 145*0Sstevel@tonic-gate if (endpa > basepa) 146*0Sstevel@tonic-gate mlist = memlist_del_span( 147*0Sstevel@tonic-gate mlist, 148*0Sstevel@tonic-gate basepa, 149*0Sstevel@tonic-gate endpa - basepa); 150*0Sstevel@tonic-gate } 151*0Sstevel@tonic-gate 152*0Sstevel@tonic-gate if (mlist) { 153*0Sstevel@tonic-gate /* successfully built a memlist */ 154*0Sstevel@tonic-gate PR_MEM("%s: derived memlist from phys_install\n", f); 155*0Sstevel@tonic-gate } 156*0Sstevel@tonic-gate 157*0Sstevel@tonic-gate /* if no mlist yet, try platform layer */ 158*0Sstevel@tonic-gate if (!mlist) { 159*0Sstevel@tonic-gate err = drmach_mem_get_memlist( 160*0Sstevel@tonic-gate mp->sbm_cm.sbdev_id, &mlist); 161*0Sstevel@tonic-gate 
if (err) { 162*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 163*0Sstevel@tonic-gate mlist = NULL; /* paranoia */ 164*0Sstevel@tonic-gate } 165*0Sstevel@tonic-gate } 166*0Sstevel@tonic-gate } 167*0Sstevel@tonic-gate 168*0Sstevel@tonic-gate PR_MEM("%s: memlist for %s\n", f, mp->sbm_cm.sbdev_path); 169*0Sstevel@tonic-gate PR_MEMLIST_DUMP(mlist); 170*0Sstevel@tonic-gate 171*0Sstevel@tonic-gate return (mlist); 172*0Sstevel@tonic-gate } 173*0Sstevel@tonic-gate 174*0Sstevel@tonic-gate typedef struct { 175*0Sstevel@tonic-gate kcondvar_t cond; 176*0Sstevel@tonic-gate kmutex_t lock; 177*0Sstevel@tonic-gate int error; 178*0Sstevel@tonic-gate int done; 179*0Sstevel@tonic-gate } dr_release_mem_sync_t; 180*0Sstevel@tonic-gate 181*0Sstevel@tonic-gate /* 182*0Sstevel@tonic-gate * Memory has been logically removed by the time this routine is called. 183*0Sstevel@tonic-gate */ 184*0Sstevel@tonic-gate static void 185*0Sstevel@tonic-gate dr_mem_del_done(void *arg, int error) 186*0Sstevel@tonic-gate { 187*0Sstevel@tonic-gate dr_release_mem_sync_t *ds = arg; 188*0Sstevel@tonic-gate 189*0Sstevel@tonic-gate mutex_enter(&ds->lock); 190*0Sstevel@tonic-gate ds->error = error; 191*0Sstevel@tonic-gate ds->done = 1; 192*0Sstevel@tonic-gate cv_signal(&ds->cond); 193*0Sstevel@tonic-gate mutex_exit(&ds->lock); 194*0Sstevel@tonic-gate } 195*0Sstevel@tonic-gate 196*0Sstevel@tonic-gate /* 197*0Sstevel@tonic-gate * When we reach here the memory being drained should have 198*0Sstevel@tonic-gate * already been reserved in dr_pre_release_mem(). 199*0Sstevel@tonic-gate * Our only task here is to kick off the "drain" and wait 200*0Sstevel@tonic-gate * for it to finish. 
201*0Sstevel@tonic-gate */ 202*0Sstevel@tonic-gate void 203*0Sstevel@tonic-gate dr_release_mem(dr_common_unit_t *cp) 204*0Sstevel@tonic-gate { 205*0Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)cp; 206*0Sstevel@tonic-gate int err; 207*0Sstevel@tonic-gate dr_release_mem_sync_t rms; 208*0Sstevel@tonic-gate static fn_t f = "dr_release_mem"; 209*0Sstevel@tonic-gate 210*0Sstevel@tonic-gate /* check that this memory unit has been reserved */ 211*0Sstevel@tonic-gate if (!(mp->sbm_flags & DR_MFLAG_RELOWNER)) { 212*0Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 213*0Sstevel@tonic-gate return; 214*0Sstevel@tonic-gate } 215*0Sstevel@tonic-gate 216*0Sstevel@tonic-gate bzero((void *) &rms, sizeof (rms)); 217*0Sstevel@tonic-gate 218*0Sstevel@tonic-gate mutex_init(&rms.lock, NULL, MUTEX_DRIVER, NULL); 219*0Sstevel@tonic-gate cv_init(&rms.cond, NULL, CV_DRIVER, NULL); 220*0Sstevel@tonic-gate 221*0Sstevel@tonic-gate mutex_enter(&rms.lock); 222*0Sstevel@tonic-gate err = kphysm_del_start(mp->sbm_memhandle, 223*0Sstevel@tonic-gate dr_mem_del_done, (void *) &rms); 224*0Sstevel@tonic-gate if (err == KPHYSM_OK) { 225*0Sstevel@tonic-gate /* wait for completion or interrupt */ 226*0Sstevel@tonic-gate while (!rms.done) { 227*0Sstevel@tonic-gate if (cv_wait_sig(&rms.cond, &rms.lock) == 0) { 228*0Sstevel@tonic-gate /* then there is a pending UNIX signal */ 229*0Sstevel@tonic-gate (void) kphysm_del_cancel(mp->sbm_memhandle); 230*0Sstevel@tonic-gate 231*0Sstevel@tonic-gate /* wait for completion */ 232*0Sstevel@tonic-gate while (!rms.done) 233*0Sstevel@tonic-gate cv_wait(&rms.cond, &rms.lock); 234*0Sstevel@tonic-gate } 235*0Sstevel@tonic-gate } 236*0Sstevel@tonic-gate /* get the result of the memory delete operation */ 237*0Sstevel@tonic-gate err = rms.error; 238*0Sstevel@tonic-gate } 239*0Sstevel@tonic-gate mutex_exit(&rms.lock); 240*0Sstevel@tonic-gate 241*0Sstevel@tonic-gate cv_destroy(&rms.cond); 242*0Sstevel@tonic-gate mutex_destroy(&rms.lock); 243*0Sstevel@tonic-gate 
244*0Sstevel@tonic-gate if (err != KPHYSM_OK) { 245*0Sstevel@tonic-gate int e_code; 246*0Sstevel@tonic-gate 247*0Sstevel@tonic-gate switch (err) { 248*0Sstevel@tonic-gate case KPHYSM_ENOWORK: 249*0Sstevel@tonic-gate e_code = ESBD_NOERROR; 250*0Sstevel@tonic-gate break; 251*0Sstevel@tonic-gate 252*0Sstevel@tonic-gate case KPHYSM_EHANDLE: 253*0Sstevel@tonic-gate case KPHYSM_ESEQUENCE: 254*0Sstevel@tonic-gate e_code = ESBD_INTERNAL; 255*0Sstevel@tonic-gate break; 256*0Sstevel@tonic-gate 257*0Sstevel@tonic-gate case KPHYSM_ENOTVIABLE: 258*0Sstevel@tonic-gate e_code = ESBD_MEM_NOTVIABLE; 259*0Sstevel@tonic-gate break; 260*0Sstevel@tonic-gate 261*0Sstevel@tonic-gate case KPHYSM_EREFUSED: 262*0Sstevel@tonic-gate e_code = ESBD_MEM_REFUSED; 263*0Sstevel@tonic-gate break; 264*0Sstevel@tonic-gate 265*0Sstevel@tonic-gate case KPHYSM_ENONRELOC: 266*0Sstevel@tonic-gate e_code = ESBD_MEM_NONRELOC; 267*0Sstevel@tonic-gate break; 268*0Sstevel@tonic-gate 269*0Sstevel@tonic-gate case KPHYSM_ECANCELLED: 270*0Sstevel@tonic-gate e_code = ESBD_MEM_CANCELLED; 271*0Sstevel@tonic-gate break; 272*0Sstevel@tonic-gate 273*0Sstevel@tonic-gate case KPHYSM_ERESOURCE: 274*0Sstevel@tonic-gate e_code = ESBD_MEMFAIL; 275*0Sstevel@tonic-gate break; 276*0Sstevel@tonic-gate 277*0Sstevel@tonic-gate default: 278*0Sstevel@tonic-gate cmn_err(CE_WARN, 279*0Sstevel@tonic-gate "%s: unexpected kphysm error code %d," 280*0Sstevel@tonic-gate " id 0x%p", 281*0Sstevel@tonic-gate f, err, mp->sbm_cm.sbdev_id); 282*0Sstevel@tonic-gate 283*0Sstevel@tonic-gate e_code = ESBD_IO; 284*0Sstevel@tonic-gate break; 285*0Sstevel@tonic-gate } 286*0Sstevel@tonic-gate 287*0Sstevel@tonic-gate if (e_code != ESBD_NOERROR) { 288*0Sstevel@tonic-gate dr_dev_err(CE_IGNORE, &mp->sbm_cm, e_code); 289*0Sstevel@tonic-gate } 290*0Sstevel@tonic-gate } 291*0Sstevel@tonic-gate } 292*0Sstevel@tonic-gate 293*0Sstevel@tonic-gate void 294*0Sstevel@tonic-gate dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp) 295*0Sstevel@tonic-gate { 
296*0Sstevel@tonic-gate _NOTE(ARGUNUSED(hp)) 297*0Sstevel@tonic-gate 298*0Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)cp; 299*0Sstevel@tonic-gate struct memlist *ml, *mc; 300*0Sstevel@tonic-gate sbd_error_t *err; 301*0Sstevel@tonic-gate static fn_t f = "dr_attach_mem"; 302*0Sstevel@tonic-gate 303*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 304*0Sstevel@tonic-gate 305*0Sstevel@tonic-gate dr_lock_status(hp->h_bd); 306*0Sstevel@tonic-gate err = drmach_configure(cp->sbdev_id, 0); 307*0Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 308*0Sstevel@tonic-gate if (err) { 309*0Sstevel@tonic-gate DRERR_SET_C(&cp->sbdev_error, &err); 310*0Sstevel@tonic-gate return; 311*0Sstevel@tonic-gate } 312*0Sstevel@tonic-gate 313*0Sstevel@tonic-gate ml = dr_get_memlist(mp); 314*0Sstevel@tonic-gate for (mc = ml; mc; mc = mc->next) { 315*0Sstevel@tonic-gate int rv; 316*0Sstevel@tonic-gate sbd_error_t *err; 317*0Sstevel@tonic-gate 318*0Sstevel@tonic-gate rv = kphysm_add_memory_dynamic( 319*0Sstevel@tonic-gate (pfn_t)(mc->address >> PAGESHIFT), 320*0Sstevel@tonic-gate (pgcnt_t)(mc->size >> PAGESHIFT)); 321*0Sstevel@tonic-gate if (rv != KPHYSM_OK) { 322*0Sstevel@tonic-gate /* 323*0Sstevel@tonic-gate * translate kphysm error and 324*0Sstevel@tonic-gate * store in devlist error 325*0Sstevel@tonic-gate */ 326*0Sstevel@tonic-gate switch (rv) { 327*0Sstevel@tonic-gate case KPHYSM_ERESOURCE: 328*0Sstevel@tonic-gate rv = ESBD_NOMEM; 329*0Sstevel@tonic-gate break; 330*0Sstevel@tonic-gate 331*0Sstevel@tonic-gate case KPHYSM_EFAULT: 332*0Sstevel@tonic-gate rv = ESBD_FAULT; 333*0Sstevel@tonic-gate break; 334*0Sstevel@tonic-gate 335*0Sstevel@tonic-gate default: 336*0Sstevel@tonic-gate rv = ESBD_INTERNAL; 337*0Sstevel@tonic-gate break; 338*0Sstevel@tonic-gate } 339*0Sstevel@tonic-gate 340*0Sstevel@tonic-gate if (rv == ESBD_INTERNAL) { 341*0Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 342*0Sstevel@tonic-gate } else 343*0Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, rv); 
344*0Sstevel@tonic-gate break; 345*0Sstevel@tonic-gate } 346*0Sstevel@tonic-gate 347*0Sstevel@tonic-gate err = drmach_mem_add_span( 348*0Sstevel@tonic-gate mp->sbm_cm.sbdev_id, mc->address, mc->size); 349*0Sstevel@tonic-gate if (err) { 350*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 351*0Sstevel@tonic-gate break; 352*0Sstevel@tonic-gate } 353*0Sstevel@tonic-gate } 354*0Sstevel@tonic-gate 355*0Sstevel@tonic-gate memlist_delete(ml); 356*0Sstevel@tonic-gate 357*0Sstevel@tonic-gate /* back out if configure failed */ 358*0Sstevel@tonic-gate if (mp->sbm_cm.sbdev_error != NULL) { 359*0Sstevel@tonic-gate dr_lock_status(hp->h_bd); 360*0Sstevel@tonic-gate err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE); 361*0Sstevel@tonic-gate if (err) 362*0Sstevel@tonic-gate sbd_err_clear(&err); 363*0Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 364*0Sstevel@tonic-gate } 365*0Sstevel@tonic-gate } 366*0Sstevel@tonic-gate 367*0Sstevel@tonic-gate #define DR_SCRUB_VALUE 0x0d0e0a0d0b0e0e0fULL 368*0Sstevel@tonic-gate 369*0Sstevel@tonic-gate static void 370*0Sstevel@tonic-gate dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist) 371*0Sstevel@tonic-gate { 372*0Sstevel@tonic-gate #ifdef DEBUG 373*0Sstevel@tonic-gate clock_t stime = lbolt; 374*0Sstevel@tonic-gate #endif /* DEBUG */ 375*0Sstevel@tonic-gate 376*0Sstevel@tonic-gate struct memlist *ml; 377*0Sstevel@tonic-gate uint64_t scrub_value = DR_SCRUB_VALUE; 378*0Sstevel@tonic-gate processorid_t cpuid; 379*0Sstevel@tonic-gate static fn_t f = "dr_mem_ecache_scrub"; 380*0Sstevel@tonic-gate 381*0Sstevel@tonic-gate cpuid = drmach_mem_cpu_affinity(mp->sbm_cm.sbdev_id); 382*0Sstevel@tonic-gate affinity_set(cpuid); 383*0Sstevel@tonic-gate 384*0Sstevel@tonic-gate PR_MEM("%s: using proc %d, memlist...\n", f, 385*0Sstevel@tonic-gate (cpuid == CPU_CURRENT) ? 
CPU->cpu_id : cpuid); 386*0Sstevel@tonic-gate PR_MEMLIST_DUMP(mlist); 387*0Sstevel@tonic-gate 388*0Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) { 389*0Sstevel@tonic-gate uint64_t dst_pa; 390*0Sstevel@tonic-gate uint64_t nbytes; 391*0Sstevel@tonic-gate 392*0Sstevel@tonic-gate /* calculate the destination physical address */ 393*0Sstevel@tonic-gate dst_pa = ml->address; 394*0Sstevel@tonic-gate if (ml->address & PAGEOFFSET) 395*0Sstevel@tonic-gate cmn_err(CE_WARN, 396*0Sstevel@tonic-gate "%s: address (0x%llx) not on " 397*0Sstevel@tonic-gate "page boundary", f, ml->address); 398*0Sstevel@tonic-gate 399*0Sstevel@tonic-gate nbytes = ml->size; 400*0Sstevel@tonic-gate if (ml->size & PAGEOFFSET) 401*0Sstevel@tonic-gate cmn_err(CE_WARN, 402*0Sstevel@tonic-gate "%s: size (0x%llx) not on " 403*0Sstevel@tonic-gate "page boundary", f, ml->size); 404*0Sstevel@tonic-gate 405*0Sstevel@tonic-gate /*LINTED*/ 406*0Sstevel@tonic-gate while (nbytes > 0) { 407*0Sstevel@tonic-gate /* write 64 bits to dst_pa */ 408*0Sstevel@tonic-gate stdphys(dst_pa, scrub_value); 409*0Sstevel@tonic-gate 410*0Sstevel@tonic-gate /* increment/decrement by cacheline sizes */ 411*0Sstevel@tonic-gate dst_pa += DRMACH_COHERENCY_UNIT; 412*0Sstevel@tonic-gate nbytes -= DRMACH_COHERENCY_UNIT; 413*0Sstevel@tonic-gate } 414*0Sstevel@tonic-gate } 415*0Sstevel@tonic-gate 416*0Sstevel@tonic-gate /* 417*0Sstevel@tonic-gate * flush this cpu's ecache and take care to ensure 418*0Sstevel@tonic-gate * that all of it's bus transactions have retired. 
419*0Sstevel@tonic-gate */ 420*0Sstevel@tonic-gate drmach_cpu_flush_ecache_sync(); 421*0Sstevel@tonic-gate 422*0Sstevel@tonic-gate affinity_clear(); 423*0Sstevel@tonic-gate 424*0Sstevel@tonic-gate #ifdef DEBUG 425*0Sstevel@tonic-gate stime = lbolt - stime; 426*0Sstevel@tonic-gate PR_MEM("%s: scrub ticks = %ld (%ld secs)\n", f, stime, stime / hz); 427*0Sstevel@tonic-gate #endif /* DEBUG */ 428*0Sstevel@tonic-gate } 429*0Sstevel@tonic-gate 430*0Sstevel@tonic-gate /* 431*0Sstevel@tonic-gate * This function marks as clean, all the faulty pages that belong to the 432*0Sstevel@tonic-gate * board that is copy-renamed since they are not likely to be bad pages 433*0Sstevel@tonic-gate * after the rename. This includes the retired pages on the board. 434*0Sstevel@tonic-gate */ 435*0Sstevel@tonic-gate 436*0Sstevel@tonic-gate static void 437*0Sstevel@tonic-gate dr_memlist_clrpages(struct memlist *r_ml) 438*0Sstevel@tonic-gate { 439*0Sstevel@tonic-gate struct memlist *t_ml; 440*0Sstevel@tonic-gate page_t *pp, *epp; 441*0Sstevel@tonic-gate pfn_t pfn, epfn; 442*0Sstevel@tonic-gate struct memseg *seg; 443*0Sstevel@tonic-gate 444*0Sstevel@tonic-gate if (r_ml == NULL) 445*0Sstevel@tonic-gate return; 446*0Sstevel@tonic-gate 447*0Sstevel@tonic-gate for (t_ml = r_ml; (t_ml != NULL); t_ml = t_ml->next) { 448*0Sstevel@tonic-gate pfn = _b64top(t_ml->address); 449*0Sstevel@tonic-gate epfn = _b64top(t_ml->address + t_ml->size); 450*0Sstevel@tonic-gate 451*0Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) { 452*0Sstevel@tonic-gate if (pfn >= seg->pages_end || epfn < seg->pages_base) 453*0Sstevel@tonic-gate continue; 454*0Sstevel@tonic-gate 455*0Sstevel@tonic-gate pp = seg->pages; 456*0Sstevel@tonic-gate if (pfn > seg->pages_base) 457*0Sstevel@tonic-gate pp += pfn - seg->pages_base; 458*0Sstevel@tonic-gate 459*0Sstevel@tonic-gate epp = seg->epages; 460*0Sstevel@tonic-gate if (epfn < seg->pages_end) 461*0Sstevel@tonic-gate epp -= seg->pages_end - epfn; 
462*0Sstevel@tonic-gate 463*0Sstevel@tonic-gate ASSERT(pp < epp); 464*0Sstevel@tonic-gate while (pp < epp) { 465*0Sstevel@tonic-gate if (page_isfaulty((page_t *)pp)) 466*0Sstevel@tonic-gate page_clrtoxic_flag((page_t *)pp, 467*0Sstevel@tonic-gate PAGE_IS_FAULTY); 468*0Sstevel@tonic-gate pp++; 469*0Sstevel@tonic-gate } 470*0Sstevel@tonic-gate } 471*0Sstevel@tonic-gate } 472*0Sstevel@tonic-gate } 473*0Sstevel@tonic-gate 474*0Sstevel@tonic-gate static int 475*0Sstevel@tonic-gate dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp) 476*0Sstevel@tonic-gate { 477*0Sstevel@tonic-gate time_t copytime; 478*0Sstevel@tonic-gate drmachid_t cr_id; 479*0Sstevel@tonic-gate dr_sr_handle_t *srhp; 480*0Sstevel@tonic-gate struct memlist *c_ml, *d_ml, *r_ml; 481*0Sstevel@tonic-gate sbd_error_t *err; 482*0Sstevel@tonic-gate static fn_t f = "dr_move_memory"; 483*0Sstevel@tonic-gate 484*0Sstevel@tonic-gate PR_MEM("%s: (INLINE) moving memory from %s to %s\n", 485*0Sstevel@tonic-gate f, 486*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 487*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 488*0Sstevel@tonic-gate 489*0Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE); 490*0Sstevel@tonic-gate ASSERT(s_mp->sbm_peer == t_mp); 491*0Sstevel@tonic-gate ASSERT(s_mp->sbm_mlist); 492*0Sstevel@tonic-gate 493*0Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET); 494*0Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 495*0Sstevel@tonic-gate 496*0Sstevel@tonic-gate /* 497*0Sstevel@tonic-gate * create a memlist of spans to copy by removing 498*0Sstevel@tonic-gate * the spans that have been deleted, if any, from 499*0Sstevel@tonic-gate * the full source board memlist. s_mp->sbm_del_mlist 500*0Sstevel@tonic-gate * will be NULL if there were no spans deleted from 501*0Sstevel@tonic-gate * the source board. 
502*0Sstevel@tonic-gate */ 503*0Sstevel@tonic-gate c_ml = memlist_dup(s_mp->sbm_mlist); 504*0Sstevel@tonic-gate d_ml = s_mp->sbm_del_mlist; 505*0Sstevel@tonic-gate while (d_ml != NULL) { 506*0Sstevel@tonic-gate c_ml = memlist_del_span(c_ml, d_ml->address, d_ml->size); 507*0Sstevel@tonic-gate d_ml = d_ml->next; 508*0Sstevel@tonic-gate } 509*0Sstevel@tonic-gate 510*0Sstevel@tonic-gate /* 511*0Sstevel@tonic-gate * create a copy of the memlist to be used for retiring pages. 512*0Sstevel@tonic-gate */ 513*0Sstevel@tonic-gate r_ml = memlist_dup(c_ml); 514*0Sstevel@tonic-gate 515*0Sstevel@tonic-gate affinity_set(drmach_mem_cpu_affinity(t_mp->sbm_cm.sbdev_id)); 516*0Sstevel@tonic-gate 517*0Sstevel@tonic-gate err = drmach_copy_rename_init( 518*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_id, _ptob64(t_mp->sbm_slice_offset), 519*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_id, c_ml, &cr_id); 520*0Sstevel@tonic-gate if (err) { 521*0Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 522*0Sstevel@tonic-gate affinity_clear(); 523*0Sstevel@tonic-gate memlist_delete(r_ml); 524*0Sstevel@tonic-gate return (-1); 525*0Sstevel@tonic-gate } 526*0Sstevel@tonic-gate 527*0Sstevel@tonic-gate srhp = dr_get_sr_handle(hp); 528*0Sstevel@tonic-gate ASSERT(srhp); 529*0Sstevel@tonic-gate 530*0Sstevel@tonic-gate copytime = lbolt; 531*0Sstevel@tonic-gate 532*0Sstevel@tonic-gate /* Quiesce the OS. */ 533*0Sstevel@tonic-gate if (dr_suspend(srhp)) { 534*0Sstevel@tonic-gate cmn_err(CE_WARN, "%s: failed to quiesce OS" 535*0Sstevel@tonic-gate " for copy-rename", f); 536*0Sstevel@tonic-gate 537*0Sstevel@tonic-gate dr_release_sr_handle(srhp); 538*0Sstevel@tonic-gate err = drmach_copy_rename_fini(cr_id); 539*0Sstevel@tonic-gate if (err) { 540*0Sstevel@tonic-gate /* 541*0Sstevel@tonic-gate * no error is expected since the program has 542*0Sstevel@tonic-gate * not yet run. 
543*0Sstevel@tonic-gate */ 544*0Sstevel@tonic-gate 545*0Sstevel@tonic-gate /* catch this in debug kernels */ 546*0Sstevel@tonic-gate ASSERT(0); 547*0Sstevel@tonic-gate 548*0Sstevel@tonic-gate sbd_err_clear(&err); 549*0Sstevel@tonic-gate } 550*0Sstevel@tonic-gate 551*0Sstevel@tonic-gate /* suspend error reached via hp */ 552*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_error = hp->h_err; 553*0Sstevel@tonic-gate hp->h_err = NULL; 554*0Sstevel@tonic-gate 555*0Sstevel@tonic-gate affinity_clear(); 556*0Sstevel@tonic-gate memlist_delete(r_ml); 557*0Sstevel@tonic-gate return (-1); 558*0Sstevel@tonic-gate } 559*0Sstevel@tonic-gate 560*0Sstevel@tonic-gate /* 561*0Sstevel@tonic-gate * Rename memory for lgroup. 562*0Sstevel@tonic-gate * Source and target board numbers are packaged in arg. 563*0Sstevel@tonic-gate */ 564*0Sstevel@tonic-gate { 565*0Sstevel@tonic-gate dr_board_t *t_bp, *s_bp; 566*0Sstevel@tonic-gate 567*0Sstevel@tonic-gate s_bp = s_mp->sbm_cm.sbdev_bp; 568*0Sstevel@tonic-gate t_bp = t_mp->sbm_cm.sbdev_bp; 569*0Sstevel@tonic-gate 570*0Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_RENAME, 571*0Sstevel@tonic-gate (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16))); 572*0Sstevel@tonic-gate } 573*0Sstevel@tonic-gate 574*0Sstevel@tonic-gate drmach_copy_rename(cr_id); 575*0Sstevel@tonic-gate 576*0Sstevel@tonic-gate /* 577*0Sstevel@tonic-gate * Clear pages that have been marked as faulty since we are 578*0Sstevel@tonic-gate * changing the physical memory for the pages. 579*0Sstevel@tonic-gate */ 580*0Sstevel@tonic-gate dr_memlist_clrpages(r_ml); 581*0Sstevel@tonic-gate 582*0Sstevel@tonic-gate /* Resume the OS. 
*/ 583*0Sstevel@tonic-gate dr_resume(srhp); 584*0Sstevel@tonic-gate 585*0Sstevel@tonic-gate copytime = lbolt - copytime; 586*0Sstevel@tonic-gate 587*0Sstevel@tonic-gate dr_release_sr_handle(srhp); 588*0Sstevel@tonic-gate err = drmach_copy_rename_fini(cr_id); 589*0Sstevel@tonic-gate if (err) 590*0Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 591*0Sstevel@tonic-gate 592*0Sstevel@tonic-gate affinity_clear(); 593*0Sstevel@tonic-gate 594*0Sstevel@tonic-gate PR_MEM("%s: copy-rename elapsed time = %ld ticks (%ld secs)\n", 595*0Sstevel@tonic-gate f, copytime, copytime / hz); 596*0Sstevel@tonic-gate 597*0Sstevel@tonic-gate memlist_delete(r_ml); 598*0Sstevel@tonic-gate 599*0Sstevel@tonic-gate /* Unretire any pages cleared after copy-rename */ 600*0Sstevel@tonic-gate page_unretire_pages(); 601*0Sstevel@tonic-gate 602*0Sstevel@tonic-gate /* return -1 if dr_suspend or copy/rename recorded an error */ 603*0Sstevel@tonic-gate return (err == NULL ? 0 : -1); 604*0Sstevel@tonic-gate } 605*0Sstevel@tonic-gate 606*0Sstevel@tonic-gate /* 607*0Sstevel@tonic-gate * If detaching node contains memory that is "non-permanent" 608*0Sstevel@tonic-gate * then the memory adr's are simply cleared. If the memory 609*0Sstevel@tonic-gate * is non-relocatable, then do a copy-rename. 
610*0Sstevel@tonic-gate */ 611*0Sstevel@tonic-gate void 612*0Sstevel@tonic-gate dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp) 613*0Sstevel@tonic-gate { 614*0Sstevel@tonic-gate int rv = 0; 615*0Sstevel@tonic-gate dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp; 616*0Sstevel@tonic-gate dr_mem_unit_t *t_mp; 617*0Sstevel@tonic-gate dr_state_t state; 618*0Sstevel@tonic-gate static fn_t f = "dr_detach_mem"; 619*0Sstevel@tonic-gate 620*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 621*0Sstevel@tonic-gate 622*0Sstevel@tonic-gate /* lookup target mem unit and target board structure, if any */ 623*0Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 624*0Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 625*0Sstevel@tonic-gate ASSERT(t_mp != NULL); 626*0Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 627*0Sstevel@tonic-gate } else { 628*0Sstevel@tonic-gate t_mp = NULL; 629*0Sstevel@tonic-gate } 630*0Sstevel@tonic-gate 631*0Sstevel@tonic-gate /* verify mem unit's state is UNREFERENCED */ 632*0Sstevel@tonic-gate state = s_mp->sbm_cm.sbdev_state; 633*0Sstevel@tonic-gate if (state != DR_STATE_UNREFERENCED) { 634*0Sstevel@tonic-gate dr_dev_err(CE_IGNORE, &s_mp->sbm_cm, ESBD_STATE); 635*0Sstevel@tonic-gate return; 636*0Sstevel@tonic-gate } 637*0Sstevel@tonic-gate 638*0Sstevel@tonic-gate /* verify target mem unit's state is UNREFERENCED, if any */ 639*0Sstevel@tonic-gate if (t_mp != NULL) { 640*0Sstevel@tonic-gate state = t_mp->sbm_cm.sbdev_state; 641*0Sstevel@tonic-gate if (state != DR_STATE_UNREFERENCED) { 642*0Sstevel@tonic-gate dr_dev_err(CE_IGNORE, &t_mp->sbm_cm, ESBD_STATE); 643*0Sstevel@tonic-gate return; 644*0Sstevel@tonic-gate } 645*0Sstevel@tonic-gate } 646*0Sstevel@tonic-gate 647*0Sstevel@tonic-gate /* 648*0Sstevel@tonic-gate * Scrub deleted memory. This will cause all cachelines 649*0Sstevel@tonic-gate * referencing the memory to only be in the local cpu's 650*0Sstevel@tonic-gate * ecache. 
651*0Sstevel@tonic-gate */ 652*0Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_RELDONE) { 653*0Sstevel@tonic-gate /* no del mlist for src<=dst mem size copy/rename */ 654*0Sstevel@tonic-gate if (s_mp->sbm_del_mlist) 655*0Sstevel@tonic-gate dr_mem_ecache_scrub(s_mp, s_mp->sbm_del_mlist); 656*0Sstevel@tonic-gate } 657*0Sstevel@tonic-gate if (t_mp != NULL && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) { 658*0Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist); 659*0Sstevel@tonic-gate dr_mem_ecache_scrub(t_mp, t_mp->sbm_del_mlist); 660*0Sstevel@tonic-gate } 661*0Sstevel@tonic-gate 662*0Sstevel@tonic-gate /* 663*0Sstevel@tonic-gate * If there is no target board (no copy/rename was needed), then 664*0Sstevel@tonic-gate * we're done! 665*0Sstevel@tonic-gate */ 666*0Sstevel@tonic-gate if (t_mp == NULL) { 667*0Sstevel@tonic-gate sbd_error_t *err; 668*0Sstevel@tonic-gate /* 669*0Sstevel@tonic-gate * Reprogram interconnect hardware and disable 670*0Sstevel@tonic-gate * memory controllers for memory node that's going away. 671*0Sstevel@tonic-gate */ 672*0Sstevel@tonic-gate 673*0Sstevel@tonic-gate err = drmach_mem_disable(s_mp->sbm_cm.sbdev_id); 674*0Sstevel@tonic-gate if (err) { 675*0Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 676*0Sstevel@tonic-gate rv = -1; 677*0Sstevel@tonic-gate } 678*0Sstevel@tonic-gate } else { 679*0Sstevel@tonic-gate rv = dr_move_memory(hp, s_mp, t_mp); 680*0Sstevel@tonic-gate PR_MEM("%s: %s memory COPY-RENAME (board %d -> %d)\n", 681*0Sstevel@tonic-gate f, 682*0Sstevel@tonic-gate rv ? 
"FAILED" : "COMPLETED", 683*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_bp->b_num, 684*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num); 685*0Sstevel@tonic-gate 686*0Sstevel@tonic-gate if (rv != 0) 687*0Sstevel@tonic-gate (void) dr_cancel_mem(s_mp); 688*0Sstevel@tonic-gate } 689*0Sstevel@tonic-gate 690*0Sstevel@tonic-gate if (rv == 0) { 691*0Sstevel@tonic-gate sbd_error_t *err; 692*0Sstevel@tonic-gate 693*0Sstevel@tonic-gate dr_lock_status(hp->h_bd); 694*0Sstevel@tonic-gate err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id, 695*0Sstevel@tonic-gate DRMACH_DEVI_REMOVE); 696*0Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 697*0Sstevel@tonic-gate if (err) 698*0Sstevel@tonic-gate sbd_err_clear(&err); 699*0Sstevel@tonic-gate } 700*0Sstevel@tonic-gate } 701*0Sstevel@tonic-gate 702*0Sstevel@tonic-gate #ifndef _STARFIRE 703*0Sstevel@tonic-gate /* 704*0Sstevel@tonic-gate * XXX workaround for certain lab configurations (see also starcat drmach.c) 705*0Sstevel@tonic-gate * Temporary code to get around observed incorrect results from 706*0Sstevel@tonic-gate * kphysm_del_span_query when the queried span contains address spans 707*0Sstevel@tonic-gate * not occupied by memory in between spans that do have memory. 708*0Sstevel@tonic-gate * This routine acts as a wrapper to kphysm_del_span_query. It builds 709*0Sstevel@tonic-gate * a memlist from phys_install of spans that exist between base and 710*0Sstevel@tonic-gate * base + npages, inclusively. Kphysm_del_span_query is called for each 711*0Sstevel@tonic-gate * node in the memlist with the results accumulated in *mp. 
712*0Sstevel@tonic-gate */ 713*0Sstevel@tonic-gate static int 714*0Sstevel@tonic-gate dr_del_span_query(pfn_t base, pgcnt_t npages, memquery_t *mp) 715*0Sstevel@tonic-gate { 716*0Sstevel@tonic-gate uint64_t pa = _ptob64(base); 717*0Sstevel@tonic-gate uint64_t sm = ~ (137438953472ull - 1); 718*0Sstevel@tonic-gate uint64_t sa = pa & sm; 719*0Sstevel@tonic-gate struct memlist *mlist, *ml; 720*0Sstevel@tonic-gate int rv; 721*0Sstevel@tonic-gate 722*0Sstevel@tonic-gate npages = npages; /* silence lint */ 723*0Sstevel@tonic-gate memlist_read_lock(); 724*0Sstevel@tonic-gate mlist = memlist_dup(phys_install); 725*0Sstevel@tonic-gate memlist_read_unlock(); 726*0Sstevel@tonic-gate 727*0Sstevel@tonic-gate again: 728*0Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) { 729*0Sstevel@tonic-gate if ((ml->address & sm) != sa) { 730*0Sstevel@tonic-gate mlist = memlist_del_span(mlist, ml->address, ml->size); 731*0Sstevel@tonic-gate goto again; 732*0Sstevel@tonic-gate } 733*0Sstevel@tonic-gate } 734*0Sstevel@tonic-gate 735*0Sstevel@tonic-gate mp->phys_pages = 0; 736*0Sstevel@tonic-gate mp->managed = 0; 737*0Sstevel@tonic-gate mp->nonrelocatable = 0; 738*0Sstevel@tonic-gate mp->first_nonrelocatable = (pfn_t)-1; /* XXX */ 739*0Sstevel@tonic-gate mp->last_nonrelocatable = 0; 740*0Sstevel@tonic-gate 741*0Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) { 742*0Sstevel@tonic-gate memquery_t mq; 743*0Sstevel@tonic-gate 744*0Sstevel@tonic-gate rv = kphysm_del_span_query( 745*0Sstevel@tonic-gate _b64top(ml->address), _b64top(ml->size), &mq); 746*0Sstevel@tonic-gate if (rv) 747*0Sstevel@tonic-gate break; 748*0Sstevel@tonic-gate 749*0Sstevel@tonic-gate mp->phys_pages += mq.phys_pages; 750*0Sstevel@tonic-gate mp->managed += mq.managed; 751*0Sstevel@tonic-gate mp->nonrelocatable += mq.nonrelocatable; 752*0Sstevel@tonic-gate 753*0Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 754*0Sstevel@tonic-gate if (mq.first_nonrelocatable < mp->first_nonrelocatable) 755*0Sstevel@tonic-gate 
mp->first_nonrelocatable = 756*0Sstevel@tonic-gate mq.first_nonrelocatable; 757*0Sstevel@tonic-gate if (mq.last_nonrelocatable > mp->last_nonrelocatable) 758*0Sstevel@tonic-gate mp->last_nonrelocatable = 759*0Sstevel@tonic-gate mq.last_nonrelocatable; 760*0Sstevel@tonic-gate } 761*0Sstevel@tonic-gate } 762*0Sstevel@tonic-gate 763*0Sstevel@tonic-gate if (mp->nonrelocatable == 0) 764*0Sstevel@tonic-gate mp->first_nonrelocatable = 0; /* XXX */ 765*0Sstevel@tonic-gate 766*0Sstevel@tonic-gate memlist_delete(mlist); 767*0Sstevel@tonic-gate return (rv); 768*0Sstevel@tonic-gate } 769*0Sstevel@tonic-gate 770*0Sstevel@tonic-gate #define kphysm_del_span_query dr_del_span_query 771*0Sstevel@tonic-gate #endif /* _STARFIRE */ 772*0Sstevel@tonic-gate 773*0Sstevel@tonic-gate /* 774*0Sstevel@tonic-gate * NOTE: This routine is only partially smart about multiple 775*0Sstevel@tonic-gate * mem-units. Need to make mem-status structure smart 776*0Sstevel@tonic-gate * about them also. 777*0Sstevel@tonic-gate */ 778*0Sstevel@tonic-gate int 779*0Sstevel@tonic-gate dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp) 780*0Sstevel@tonic-gate { 781*0Sstevel@tonic-gate int m, mix; 782*0Sstevel@tonic-gate memdelstat_t mdst; 783*0Sstevel@tonic-gate memquery_t mq; 784*0Sstevel@tonic-gate dr_board_t *bp; 785*0Sstevel@tonic-gate dr_mem_unit_t *mp; 786*0Sstevel@tonic-gate sbd_mem_stat_t *msp; 787*0Sstevel@tonic-gate static fn_t f = "dr_mem_status"; 788*0Sstevel@tonic-gate 789*0Sstevel@tonic-gate bp = hp->h_bd; 790*0Sstevel@tonic-gate devset &= DR_DEVS_PRESENT(bp); 791*0Sstevel@tonic-gate 792*0Sstevel@tonic-gate for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) { 793*0Sstevel@tonic-gate int rv; 794*0Sstevel@tonic-gate sbd_error_t *err; 795*0Sstevel@tonic-gate drmach_status_t pstat; 796*0Sstevel@tonic-gate dr_mem_unit_t *p_mp; 797*0Sstevel@tonic-gate 798*0Sstevel@tonic-gate if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0) 799*0Sstevel@tonic-gate continue; 800*0Sstevel@tonic-gate 
801*0Sstevel@tonic-gate mp = dr_get_mem_unit(bp, m); 802*0Sstevel@tonic-gate 803*0Sstevel@tonic-gate if (mp->sbm_cm.sbdev_state == DR_STATE_EMPTY) { 804*0Sstevel@tonic-gate /* present, but not fully initialized */ 805*0Sstevel@tonic-gate continue; 806*0Sstevel@tonic-gate } 807*0Sstevel@tonic-gate 808*0Sstevel@tonic-gate if (mp->sbm_cm.sbdev_id == (drmachid_t)0) 809*0Sstevel@tonic-gate continue; 810*0Sstevel@tonic-gate 811*0Sstevel@tonic-gate /* fetch platform status */ 812*0Sstevel@tonic-gate err = drmach_status(mp->sbm_cm.sbdev_id, &pstat); 813*0Sstevel@tonic-gate if (err) { 814*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 815*0Sstevel@tonic-gate continue; 816*0Sstevel@tonic-gate } 817*0Sstevel@tonic-gate 818*0Sstevel@tonic-gate msp = &dsp->d_mem; 819*0Sstevel@tonic-gate bzero((caddr_t)msp, sizeof (*msp)); 820*0Sstevel@tonic-gate 821*0Sstevel@tonic-gate strncpy(msp->ms_cm.c_id.c_name, pstat.type, 822*0Sstevel@tonic-gate sizeof (msp->ms_cm.c_id.c_name)); 823*0Sstevel@tonic-gate msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type; 824*0Sstevel@tonic-gate msp->ms_cm.c_id.c_unit = SBD_NULL_UNIT; 825*0Sstevel@tonic-gate msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond; 826*0Sstevel@tonic-gate msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy | pstat.busy; 827*0Sstevel@tonic-gate msp->ms_cm.c_time = mp->sbm_cm.sbdev_time; 828*0Sstevel@tonic-gate msp->ms_cm.c_ostate = mp->sbm_cm.sbdev_ostate; 829*0Sstevel@tonic-gate 830*0Sstevel@tonic-gate msp->ms_totpages = mp->sbm_npages; 831*0Sstevel@tonic-gate msp->ms_basepfn = mp->sbm_basepfn; 832*0Sstevel@tonic-gate msp->ms_pageslost = mp->sbm_pageslost; 833*0Sstevel@tonic-gate msp->ms_cage_enabled = kcage_on; 834*0Sstevel@tonic-gate 835*0Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_RESERVED) 836*0Sstevel@tonic-gate p_mp = mp->sbm_peer; 837*0Sstevel@tonic-gate else 838*0Sstevel@tonic-gate p_mp = NULL; 839*0Sstevel@tonic-gate 840*0Sstevel@tonic-gate if (p_mp == NULL) { 841*0Sstevel@tonic-gate msp->ms_peer_is_target = 0; 
842*0Sstevel@tonic-gate msp->ms_peer_ap_id[0] = '\0'; 843*0Sstevel@tonic-gate } else if (p_mp->sbm_flags & DR_MFLAG_RESERVED) { 844*0Sstevel@tonic-gate char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 845*0Sstevel@tonic-gate char *minor; 846*0Sstevel@tonic-gate 847*0Sstevel@tonic-gate /* 848*0Sstevel@tonic-gate * b_dip doesn't have to be held for ddi_pathname() 849*0Sstevel@tonic-gate * because the board struct (dr_board_t) will be 850*0Sstevel@tonic-gate * destroyed before b_dip detaches. 851*0Sstevel@tonic-gate */ 852*0Sstevel@tonic-gate (void) ddi_pathname(bp->b_dip, path); 853*0Sstevel@tonic-gate minor = strchr(p_mp->sbm_cm.sbdev_path, ':'); 854*0Sstevel@tonic-gate 855*0Sstevel@tonic-gate snprintf(msp->ms_peer_ap_id, 856*0Sstevel@tonic-gate sizeof (msp->ms_peer_ap_id), "%s%s", 857*0Sstevel@tonic-gate path, (minor == NULL) ? "" : minor); 858*0Sstevel@tonic-gate 859*0Sstevel@tonic-gate kmem_free(path, MAXPATHLEN); 860*0Sstevel@tonic-gate 861*0Sstevel@tonic-gate if (p_mp->sbm_flags & DR_MFLAG_TARGET) 862*0Sstevel@tonic-gate msp->ms_peer_is_target = 1; 863*0Sstevel@tonic-gate } 864*0Sstevel@tonic-gate 865*0Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_RELOWNER) 866*0Sstevel@tonic-gate rv = kphysm_del_status(mp->sbm_memhandle, &mdst); 867*0Sstevel@tonic-gate else 868*0Sstevel@tonic-gate rv = KPHYSM_EHANDLE; /* force 'if' to fail */ 869*0Sstevel@tonic-gate 870*0Sstevel@tonic-gate if (rv == KPHYSM_OK) { 871*0Sstevel@tonic-gate /* 872*0Sstevel@tonic-gate * Any pages above managed is "free", 873*0Sstevel@tonic-gate * i.e. it's collected. 874*0Sstevel@tonic-gate */ 875*0Sstevel@tonic-gate msp->ms_detpages += (uint_t)(mdst.collected + 876*0Sstevel@tonic-gate mdst.phys_pages - mdst.managed); 877*0Sstevel@tonic-gate } else { 878*0Sstevel@tonic-gate /* 879*0Sstevel@tonic-gate * If we're UNREFERENCED or UNCONFIGURED, 880*0Sstevel@tonic-gate * then the number of detached pages is 881*0Sstevel@tonic-gate * however many pages are on the board. 882*0Sstevel@tonic-gate * I.e. 
detached = not in use by OS. 883*0Sstevel@tonic-gate */ 884*0Sstevel@tonic-gate switch (msp->ms_cm.c_ostate) { 885*0Sstevel@tonic-gate /* 886*0Sstevel@tonic-gate * changed to use cfgadm states 887*0Sstevel@tonic-gate * 888*0Sstevel@tonic-gate * was: 889*0Sstevel@tonic-gate * case DR_STATE_UNREFERENCED: 890*0Sstevel@tonic-gate * case DR_STATE_UNCONFIGURED: 891*0Sstevel@tonic-gate */ 892*0Sstevel@tonic-gate case SBD_STAT_UNCONFIGURED: 893*0Sstevel@tonic-gate msp->ms_detpages = msp->ms_totpages; 894*0Sstevel@tonic-gate break; 895*0Sstevel@tonic-gate 896*0Sstevel@tonic-gate default: 897*0Sstevel@tonic-gate break; 898*0Sstevel@tonic-gate } 899*0Sstevel@tonic-gate } 900*0Sstevel@tonic-gate 901*0Sstevel@tonic-gate /* 902*0Sstevel@tonic-gate * kphysm_del_span_query can report non-reloc pages = total 903*0Sstevel@tonic-gate * pages for memory that is not yet configured 904*0Sstevel@tonic-gate */ 905*0Sstevel@tonic-gate if (mp->sbm_cm.sbdev_state != DR_STATE_UNCONFIGURED) { 906*0Sstevel@tonic-gate 907*0Sstevel@tonic-gate rv = kphysm_del_span_query(mp->sbm_basepfn, 908*0Sstevel@tonic-gate mp->sbm_npages, &mq); 909*0Sstevel@tonic-gate 910*0Sstevel@tonic-gate if (rv == KPHYSM_OK) { 911*0Sstevel@tonic-gate msp->ms_managed_pages = mq.managed; 912*0Sstevel@tonic-gate msp->ms_noreloc_pages = mq.nonrelocatable; 913*0Sstevel@tonic-gate msp->ms_noreloc_first = 914*0Sstevel@tonic-gate mq.first_nonrelocatable; 915*0Sstevel@tonic-gate msp->ms_noreloc_last = 916*0Sstevel@tonic-gate mq.last_nonrelocatable; 917*0Sstevel@tonic-gate msp->ms_cm.c_sflags = 0; 918*0Sstevel@tonic-gate if (mq.nonrelocatable) { 919*0Sstevel@tonic-gate SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE, 920*0Sstevel@tonic-gate msp->ms_cm.c_sflags); 921*0Sstevel@tonic-gate } 922*0Sstevel@tonic-gate } else { 923*0Sstevel@tonic-gate PR_MEM("%s: kphysm_del_span_query() = %d\n", 924*0Sstevel@tonic-gate f, rv); 925*0Sstevel@tonic-gate } 926*0Sstevel@tonic-gate } 927*0Sstevel@tonic-gate 928*0Sstevel@tonic-gate /* 929*0Sstevel@tonic-gate 
* Check source unit state during copy-rename 930*0Sstevel@tonic-gate */ 931*0Sstevel@tonic-gate if ((mp->sbm_flags & DR_MFLAG_SOURCE) && 932*0Sstevel@tonic-gate (mp->sbm_cm.sbdev_state == DR_STATE_UNREFERENCED || 933*0Sstevel@tonic-gate mp->sbm_cm.sbdev_state == DR_STATE_RELEASE)) 934*0Sstevel@tonic-gate msp->ms_cm.c_ostate = SBD_STAT_CONFIGURED; 935*0Sstevel@tonic-gate 936*0Sstevel@tonic-gate mix++; 937*0Sstevel@tonic-gate dsp++; 938*0Sstevel@tonic-gate } 939*0Sstevel@tonic-gate 940*0Sstevel@tonic-gate return (mix); 941*0Sstevel@tonic-gate } 942*0Sstevel@tonic-gate 943*0Sstevel@tonic-gate int 944*0Sstevel@tonic-gate dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum) 945*0Sstevel@tonic-gate { 946*0Sstevel@tonic-gate _NOTE(ARGUNUSED(hp)) 947*0Sstevel@tonic-gate 948*0Sstevel@tonic-gate int err_flag = 0; 949*0Sstevel@tonic-gate int d; 950*0Sstevel@tonic-gate sbd_error_t *err; 951*0Sstevel@tonic-gate static fn_t f = "dr_pre_attach_mem"; 952*0Sstevel@tonic-gate 953*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 954*0Sstevel@tonic-gate 955*0Sstevel@tonic-gate for (d = 0; d < devnum; d++) { 956*0Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d]; 957*0Sstevel@tonic-gate dr_state_t state; 958*0Sstevel@tonic-gate 959*0Sstevel@tonic-gate cmn_err(CE_CONT, "OS configure %s", mp->sbm_cm.sbdev_path); 960*0Sstevel@tonic-gate 961*0Sstevel@tonic-gate state = mp->sbm_cm.sbdev_state; 962*0Sstevel@tonic-gate switch (state) { 963*0Sstevel@tonic-gate case DR_STATE_UNCONFIGURED: 964*0Sstevel@tonic-gate PR_MEM("%s: recovering from UNCONFIG for %s\n", 965*0Sstevel@tonic-gate f, 966*0Sstevel@tonic-gate mp->sbm_cm.sbdev_path); 967*0Sstevel@tonic-gate 968*0Sstevel@tonic-gate /* use memlist cached by dr_post_detach_mem_unit */ 969*0Sstevel@tonic-gate ASSERT(mp->sbm_mlist != NULL); 970*0Sstevel@tonic-gate PR_MEM("%s: re-configuring cached memlist for %s:\n", 971*0Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 972*0Sstevel@tonic-gate 
			PR_MEMLIST_DUMP(mp->sbm_mlist);

			/* kphysm del handle should have been freed */
			ASSERT((mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

			/* an UNCONFIGURED unit also needs re-enabling */
			/*FALLTHROUGH*/

		case DR_STATE_CONNECTED:
			PR_MEM("%s: reprogramming mem hardware on %s\n",
			    f, mp->sbm_cm.sbdev_bp->b_path);

			PR_MEM("%s: enabling %s\n",
			    f, mp->sbm_cm.sbdev_path);

			err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
				err_flag = 1;
			}
			break;

		default:
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_STATE);
			err_flag = 1;
			break;
		}

		/* exit for loop if error encountered */
		if (err_flag)
			break;
	}

	return (err_flag ? -1 : 0);
}

/*
 * Post-attach: for each unit, verify the memory actually appeared in
 * phys_install, register its spans with the platform layer, and drop
 * any memlist cached across an unconfigure/configure cycle.
 * Returns 0; per-unit failures are recorded in sbdev_error.
 */
int
dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d;
	static fn_t	f = "dr_post_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		struct memlist	*mlist, *ml;

		mlist = dr_get_memlist(mp);
		if (mlist == NULL) {
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_MEMFAIL);
			continue;
		}

		/*
		 * Verify the memory really did successfully attach
		 * by checking for its existence in phys_install.
		 */
		memlist_read_lock();
		if (memlist_intersect(phys_install, mlist) == 0) {
			memlist_read_unlock();

			DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);

			PR_MEM("%s: %s memlist not in phys_install",
			    f, mp->sbm_cm.sbdev_path);

			memlist_delete(mlist);
			continue;
		}
		memlist_read_unlock();

		/* tell the platform layer about each attached span */
		for (ml = mlist; ml != NULL; ml = ml->next) {
			sbd_error_t *err;

			err = drmach_mem_add_span(
				mp->sbm_cm.sbdev_id,
				ml->address,
				ml->size);
			if (err)
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
		}

		memlist_delete(mlist);

		/*
		 * Destroy cached memlist, if any.
		 * There will be a cached memlist in sbm_mlist if
		 * this board is being configured directly after
		 * an unconfigure.
		 * To support this transition, dr_post_detach_mem
		 * left a copy of the last known memlist in sbm_mlist.
		 * This memlist could differ from any derived from
		 * hardware if while this memunit was last configured
		 * the system detected and deleted bad pages from
		 * phys_install. The location of those bad pages
		 * will be reflected in the cached memlist.
		 */
		if (mp->sbm_mlist) {
			memlist_delete(mp->sbm_mlist);
			mp->sbm_mlist = NULL;
		}

		/*
		 * TODO: why is this call to dr_init_mem_unit_data here?
		 * this has been done at discovery or connect time, so this is
		 * probably redundant and unnecessary.
		 */
		dr_init_mem_unit_data(mp);
	}

	return (0);
}

/*
 * Pre-detach: announce each memory unit about to be unconfigured.
 * No state is changed here; always returns 0.
 */
int
dr_pre_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int d;

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];

		cmn_err(CE_CONT, "OS unconfigure %s", mp->sbm_cm.sbdev_path);
	}

	return (0);
}


/*
 * Post-detach: finish the detach of each unit via
 * dr_post_detach_mem_unit().  Returns -1 if any unit failed, else 0.
 */
int
dr_post_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d, rv;
	static fn_t	f = "dr_post_detach_mem";

	PR_MEM("%s...\n", f);

	rv = 0;
	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];

		/* each unit must belong to the board this handle targets */
		ASSERT(mp->sbm_cm.sbdev_bp == hp->h_bd);

		if (dr_post_detach_mem_unit(mp))
			rv = -1;
	}

	return (rv);
}

/*
 * Add each span in 'ml' back to the OS (kphysm) and to the platform
 * layer (drmach) for memory unit 'mp'.  Failures are logged and
 * recorded in sbdev_error; the remaining spans are still processed.
 */
static void
dr_add_memory_spans(dr_mem_unit_t *mp, struct memlist *ml)
{
	static fn_t	f = "dr_add_memory_spans";

	PR_MEM("%s...", f);
	PR_MEMLIST_DUMP(ml);

#ifdef DEBUG
	memlist_read_lock();
	if (memlist_intersect(phys_install, ml)) {
		PR_MEM("%s:WARNING: memlist intersects with phys_install\n", f);
	}
	memlist_read_unlock();
#endif

	for (; ml; ml = ml->next) {
		pfn_t	base;
		pgcnt_t	npgs;
		int	rv;
		sbd_error_t *err;

		base = _b64top(ml->address);
		npgs = _b64top(ml->size);

		rv = kphysm_add_memory_dynamic(base, npgs);

		/*
		 * NOTE(review): the platform span is registered even when
		 * kphysm_add_memory_dynamic() failed above — presumably
		 * deliberate, to keep drmach's view consistent; confirm.
		 */
		err = drmach_mem_add_span(
			mp->sbm_cm.sbdev_id,
			ml->address,
			ml->size);

		if (err)
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);

		if (rv != KPHYSM_OK) {
			cmn_err(CE_WARN, "%s:"
				" unexpected kphysm_add_memory_dynamic"
				" return value %d;"
				" basepfn=0x%lx, npages=%ld\n",
				f, rv, base, npgs);

			continue;
		}
	}
}

/*
 * Complete the detach of source unit 's_mp' after release: verify the
 * deleted spans left phys_install, rename source/target memlists when a
 * copy/rename was performed, and clean up unit state.
 * Returns 0 on success, -1 if an error was recorded on either unit.
 */
static int
dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
{
	uint64_t	sz = s_mp->sbm_slice_size;
	uint64_t	sm = sz - 1;
	/* old and new below refer to PAs before and after copy-rename */
	uint64_t	s_old_basepa, s_new_basepa;
	uint64_t	t_old_basepa, t_new_basepa;
	uint64_t	t_new_smallsize = 0;
	dr_mem_unit_t	*t_mp, *x_mp;
	struct memlist	*ml;
	int		rv;
	sbd_error_t	*err;
	static fn_t	f = "dr_post_detach_mem_unit";

	PR_MEM("%s...\n", f);

	/* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
	PR_MEM("%s: %s: deleted memlist (EMPTY maybe okay):\n",
	    f, s_mp->sbm_cm.sbdev_path);
	PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);

	/* sanity check */
	ASSERT(s_mp->sbm_del_mlist == NULL ||
	    (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);

	if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
		t_mp = s_mp->sbm_peer;
		ASSERT(t_mp != NULL);
		ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
		ASSERT(t_mp->sbm_peer == s_mp);
1205*0Sstevel@tonic-gate 1206*0Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_RELDONE); 1207*0Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist); 1208*0Sstevel@tonic-gate 1209*0Sstevel@tonic-gate PR_MEM("%s: target %s: deleted memlist:\n", 1210*0Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 1211*0Sstevel@tonic-gate PR_MEMLIST_DUMP(t_mp->sbm_del_mlist); 1212*0Sstevel@tonic-gate } else { 1213*0Sstevel@tonic-gate /* this is no target unit */ 1214*0Sstevel@tonic-gate t_mp = NULL; 1215*0Sstevel@tonic-gate } 1216*0Sstevel@tonic-gate 1217*0Sstevel@tonic-gate /* 1218*0Sstevel@tonic-gate * Verify the memory really did successfully detach 1219*0Sstevel@tonic-gate * by checking for its non-existence in phys_install. 1220*0Sstevel@tonic-gate */ 1221*0Sstevel@tonic-gate rv = 0; 1222*0Sstevel@tonic-gate memlist_read_lock(); 1223*0Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_RELDONE) { 1224*0Sstevel@tonic-gate x_mp = s_mp; 1225*0Sstevel@tonic-gate rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist); 1226*0Sstevel@tonic-gate } 1227*0Sstevel@tonic-gate if (rv == 0 && t_mp && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) { 1228*0Sstevel@tonic-gate x_mp = t_mp; 1229*0Sstevel@tonic-gate rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist); 1230*0Sstevel@tonic-gate } 1231*0Sstevel@tonic-gate memlist_read_unlock(); 1232*0Sstevel@tonic-gate 1233*0Sstevel@tonic-gate if (rv) { 1234*0Sstevel@tonic-gate /* error: memlist still in phys_install */ 1235*0Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&x_mp->sbm_cm); 1236*0Sstevel@tonic-gate } 1237*0Sstevel@tonic-gate 1238*0Sstevel@tonic-gate /* 1239*0Sstevel@tonic-gate * clean mem unit state and bail out if an error has been recorded. 
1240*0Sstevel@tonic-gate */ 1241*0Sstevel@tonic-gate rv = 0; 1242*0Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error) { 1243*0Sstevel@tonic-gate PR_MEM("%s: %s flags=%x", f, 1244*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags); 1245*0Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm); 1246*0Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&s_mp->sbm_cm); 1247*0Sstevel@tonic-gate dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED); 1248*0Sstevel@tonic-gate rv = -1; 1249*0Sstevel@tonic-gate } 1250*0Sstevel@tonic-gate if (t_mp != NULL && t_mp->sbm_cm.sbdev_error != NULL) { 1251*0Sstevel@tonic-gate PR_MEM("%s: %s flags=%x", f, 1252*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags); 1253*0Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 1254*0Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 1255*0Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED); 1256*0Sstevel@tonic-gate rv = -1; 1257*0Sstevel@tonic-gate } 1258*0Sstevel@tonic-gate if (rv) 1259*0Sstevel@tonic-gate goto cleanup; 1260*0Sstevel@tonic-gate 1261*0Sstevel@tonic-gate s_old_basepa = _ptob64(s_mp->sbm_basepfn); 1262*0Sstevel@tonic-gate err = drmach_mem_get_base_physaddr(s_mp->sbm_cm.sbdev_id, 1263*0Sstevel@tonic-gate &s_new_basepa); 1264*0Sstevel@tonic-gate ASSERT(err == NULL); 1265*0Sstevel@tonic-gate 1266*0Sstevel@tonic-gate PR_MEM("%s:s_old_basepa: 0x%llx\n", f, s_old_basepa); 1267*0Sstevel@tonic-gate PR_MEM("%s:s_new_basepa: 0x%llx\n", f, s_new_basepa); 1268*0Sstevel@tonic-gate 1269*0Sstevel@tonic-gate if (t_mp != NULL) { 1270*0Sstevel@tonic-gate struct memlist *s_copy_mlist; 1271*0Sstevel@tonic-gate 1272*0Sstevel@tonic-gate t_old_basepa = _ptob64(t_mp->sbm_basepfn); 1273*0Sstevel@tonic-gate err = drmach_mem_get_base_physaddr(t_mp->sbm_cm.sbdev_id, 1274*0Sstevel@tonic-gate &t_new_basepa); 1275*0Sstevel@tonic-gate ASSERT(err == NULL); 1276*0Sstevel@tonic-gate 1277*0Sstevel@tonic-gate PR_MEM("%s:t_old_basepa: 0x%llx\n", f, t_old_basepa); 
1278*0Sstevel@tonic-gate PR_MEM("%s:t_new_basepa: 0x%llx\n", f, t_new_basepa); 1279*0Sstevel@tonic-gate 1280*0Sstevel@tonic-gate /* 1281*0Sstevel@tonic-gate * Construct copy list with original source addresses. 1282*0Sstevel@tonic-gate * Used to add back excess target mem. 1283*0Sstevel@tonic-gate */ 1284*0Sstevel@tonic-gate s_copy_mlist = memlist_dup(s_mp->sbm_mlist); 1285*0Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 1286*0Sstevel@tonic-gate s_copy_mlist = memlist_del_span(s_copy_mlist, 1287*0Sstevel@tonic-gate ml->address, ml->size); 1288*0Sstevel@tonic-gate } 1289*0Sstevel@tonic-gate 1290*0Sstevel@tonic-gate PR_MEM("%s: source copy list:\n:", f); 1291*0Sstevel@tonic-gate PR_MEMLIST_DUMP(s_copy_mlist); 1292*0Sstevel@tonic-gate 1293*0Sstevel@tonic-gate /* 1294*0Sstevel@tonic-gate * We had to swap mem-units, so update 1295*0Sstevel@tonic-gate * memlists accordingly with new base 1296*0Sstevel@tonic-gate * addresses. 1297*0Sstevel@tonic-gate */ 1298*0Sstevel@tonic-gate for (ml = t_mp->sbm_mlist; ml; ml = ml->next) { 1299*0Sstevel@tonic-gate ml->address -= t_old_basepa; 1300*0Sstevel@tonic-gate ml->address += t_new_basepa; 1301*0Sstevel@tonic-gate } 1302*0Sstevel@tonic-gate 1303*0Sstevel@tonic-gate /* 1304*0Sstevel@tonic-gate * There is no need to explicitly rename the target delete 1305*0Sstevel@tonic-gate * memlist, because sbm_del_mlist and sbm_mlist always 1306*0Sstevel@tonic-gate * point to the same memlist for a copy/rename operation. 
1307*0Sstevel@tonic-gate */ 1308*0Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 1309*0Sstevel@tonic-gate 1310*0Sstevel@tonic-gate PR_MEM("%s: renamed target memlist and delete memlist:\n", f); 1311*0Sstevel@tonic-gate PR_MEMLIST_DUMP(t_mp->sbm_mlist); 1312*0Sstevel@tonic-gate 1313*0Sstevel@tonic-gate for (ml = s_mp->sbm_mlist; ml; ml = ml->next) { 1314*0Sstevel@tonic-gate ml->address -= s_old_basepa; 1315*0Sstevel@tonic-gate ml->address += s_new_basepa; 1316*0Sstevel@tonic-gate } 1317*0Sstevel@tonic-gate 1318*0Sstevel@tonic-gate PR_MEM("%s: renamed source memlist:\n", f); 1319*0Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_mlist); 1320*0Sstevel@tonic-gate 1321*0Sstevel@tonic-gate /* 1322*0Sstevel@tonic-gate * Keep track of dynamically added segments 1323*0Sstevel@tonic-gate * since they cannot be split if we need to delete 1324*0Sstevel@tonic-gate * excess source memory later for this board. 1325*0Sstevel@tonic-gate */ 1326*0Sstevel@tonic-gate if (t_mp->sbm_dyn_segs) 1327*0Sstevel@tonic-gate memlist_delete(t_mp->sbm_dyn_segs); 1328*0Sstevel@tonic-gate t_mp->sbm_dyn_segs = s_mp->sbm_dyn_segs; 1329*0Sstevel@tonic-gate s_mp->sbm_dyn_segs = NULL; 1330*0Sstevel@tonic-gate 1331*0Sstevel@tonic-gate /* 1332*0Sstevel@tonic-gate * If the target memory range with the new target base PA 1333*0Sstevel@tonic-gate * extends beyond the usable slice, prevent any "target excess" 1334*0Sstevel@tonic-gate * from being added back after this copy/rename and 1335*0Sstevel@tonic-gate * calculate the new smaller size of the target board 1336*0Sstevel@tonic-gate * to be set as part of target cleanup. The base + npages 1337*0Sstevel@tonic-gate * must only include the range of memory up to the end of 1338*0Sstevel@tonic-gate * this slice. This will only be used after a category 4 1339*0Sstevel@tonic-gate * large-to-small target type copy/rename - see comments 1340*0Sstevel@tonic-gate * in dr_select_mem_target. 
1341*0Sstevel@tonic-gate */ 1342*0Sstevel@tonic-gate if (((t_new_basepa & sm) + _ptob64(t_mp->sbm_npages)) > sz) { 1343*0Sstevel@tonic-gate t_new_smallsize = sz - (t_new_basepa & sm); 1344*0Sstevel@tonic-gate } 1345*0Sstevel@tonic-gate 1346*0Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_MEMRESIZE && 1347*0Sstevel@tonic-gate t_new_smallsize == 0) { 1348*0Sstevel@tonic-gate struct memlist *t_excess_mlist; 1349*0Sstevel@tonic-gate 1350*0Sstevel@tonic-gate /* 1351*0Sstevel@tonic-gate * Add back excess target memory. 1352*0Sstevel@tonic-gate * Subtract out the portion of the target memory 1353*0Sstevel@tonic-gate * node that was taken over by the source memory 1354*0Sstevel@tonic-gate * node. 1355*0Sstevel@tonic-gate */ 1356*0Sstevel@tonic-gate t_excess_mlist = memlist_dup(t_mp->sbm_mlist); 1357*0Sstevel@tonic-gate for (ml = s_copy_mlist; ml; ml = ml->next) { 1358*0Sstevel@tonic-gate t_excess_mlist = 1359*0Sstevel@tonic-gate memlist_del_span(t_excess_mlist, 1360*0Sstevel@tonic-gate ml->address, ml->size); 1361*0Sstevel@tonic-gate } 1362*0Sstevel@tonic-gate 1363*0Sstevel@tonic-gate /* 1364*0Sstevel@tonic-gate * Update dynamically added segs 1365*0Sstevel@tonic-gate */ 1366*0Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 1367*0Sstevel@tonic-gate t_mp->sbm_dyn_segs = 1368*0Sstevel@tonic-gate memlist_del_span(t_mp->sbm_dyn_segs, 1369*0Sstevel@tonic-gate ml->address, ml->size); 1370*0Sstevel@tonic-gate } 1371*0Sstevel@tonic-gate for (ml = t_excess_mlist; ml; ml = ml->next) { 1372*0Sstevel@tonic-gate t_mp->sbm_dyn_segs = 1373*0Sstevel@tonic-gate memlist_cat_span(t_mp->sbm_dyn_segs, 1374*0Sstevel@tonic-gate ml->address, ml->size); 1375*0Sstevel@tonic-gate } 1376*0Sstevel@tonic-gate PR_MEM("%s: %s: updated dynamic seg list:\n", 1377*0Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 1378*0Sstevel@tonic-gate PR_MEMLIST_DUMP(t_mp->sbm_dyn_segs); 1379*0Sstevel@tonic-gate 1380*0Sstevel@tonic-gate PR_MEM("%s: adding back remaining portion" 
1381*0Sstevel@tonic-gate " of %s, memlist:\n", 1382*0Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 1383*0Sstevel@tonic-gate PR_MEMLIST_DUMP(t_excess_mlist); 1384*0Sstevel@tonic-gate 1385*0Sstevel@tonic-gate dr_add_memory_spans(s_mp, t_excess_mlist); 1386*0Sstevel@tonic-gate memlist_delete(t_excess_mlist); 1387*0Sstevel@tonic-gate } 1388*0Sstevel@tonic-gate memlist_delete(s_copy_mlist); 1389*0Sstevel@tonic-gate 1390*0Sstevel@tonic-gate #ifdef DEBUG 1391*0Sstevel@tonic-gate /* 1392*0Sstevel@tonic-gate * Renaming s_mp->sbm_del_mlist is not necessary. This 1393*0Sstevel@tonic-gate * list is not used beyond this point, and in fact, is 1394*0Sstevel@tonic-gate * disposed of at the end of this function. 1395*0Sstevel@tonic-gate */ 1396*0Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 1397*0Sstevel@tonic-gate ml->address -= s_old_basepa; 1398*0Sstevel@tonic-gate ml->address += s_new_basepa; 1399*0Sstevel@tonic-gate } 1400*0Sstevel@tonic-gate 1401*0Sstevel@tonic-gate PR_MEM("%s: renamed source delete memlist", f); 1402*0Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_del_mlist); 1403*0Sstevel@tonic-gate #endif 1404*0Sstevel@tonic-gate 1405*0Sstevel@tonic-gate } 1406*0Sstevel@tonic-gate 1407*0Sstevel@tonic-gate if (t_mp != NULL) { 1408*0Sstevel@tonic-gate /* delete target's entire address space */ 1409*0Sstevel@tonic-gate err = drmach_mem_del_span( 1410*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_id, t_old_basepa & ~ sm, sz); 1411*0Sstevel@tonic-gate if (err) 1412*0Sstevel@tonic-gate DRERR_SET_C(&t_mp->sbm_cm.sbdev_error, &err); 1413*0Sstevel@tonic-gate ASSERT(err == NULL); 1414*0Sstevel@tonic-gate 1415*0Sstevel@tonic-gate /* 1416*0Sstevel@tonic-gate * After the copy/rename, the original address space 1417*0Sstevel@tonic-gate * for the source board (which is now located on the 1418*0Sstevel@tonic-gate * target board) may now have some excess to be deleted. 
1419*0Sstevel@tonic-gate * The amount is calculated by masking the slice 1420*0Sstevel@tonic-gate * info and keeping the slice offset from t_new_basepa. 1421*0Sstevel@tonic-gate */ 1422*0Sstevel@tonic-gate err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id, 1423*0Sstevel@tonic-gate s_old_basepa & ~ sm, t_new_basepa & sm); 1424*0Sstevel@tonic-gate if (err) 1425*0Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 1426*0Sstevel@tonic-gate ASSERT(err == NULL); 1427*0Sstevel@tonic-gate 1428*0Sstevel@tonic-gate } else { 1429*0Sstevel@tonic-gate /* delete board's entire address space */ 1430*0Sstevel@tonic-gate err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id, 1431*0Sstevel@tonic-gate s_old_basepa & ~ sm, sz); 1432*0Sstevel@tonic-gate if (err) 1433*0Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 1434*0Sstevel@tonic-gate ASSERT(err == NULL); 1435*0Sstevel@tonic-gate } 1436*0Sstevel@tonic-gate 1437*0Sstevel@tonic-gate cleanup: 1438*0Sstevel@tonic-gate /* clean up target mem unit */ 1439*0Sstevel@tonic-gate if (t_mp != NULL) { 1440*0Sstevel@tonic-gate memlist_delete(t_mp->sbm_del_mlist); 1441*0Sstevel@tonic-gate /* no need to delete sbm_mlist, it shares sbm_del_mlist */ 1442*0Sstevel@tonic-gate 1443*0Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 1444*0Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 1445*0Sstevel@tonic-gate t_mp->sbm_peer = NULL; 1446*0Sstevel@tonic-gate t_mp->sbm_flags = 0; 1447*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 1448*0Sstevel@tonic-gate dr_init_mem_unit_data(t_mp); 1449*0Sstevel@tonic-gate 1450*0Sstevel@tonic-gate /* reduce target size if new PAs go past end of usable slice */ 1451*0Sstevel@tonic-gate if (t_new_smallsize > 0) { 1452*0Sstevel@tonic-gate t_mp->sbm_npages = _b64top(t_new_smallsize); 1453*0Sstevel@tonic-gate PR_MEM("%s: target new size 0x%llx bytes\n", 1454*0Sstevel@tonic-gate f, t_new_smallsize); 1455*0Sstevel@tonic-gate } 1456*0Sstevel@tonic-gate } 1457*0Sstevel@tonic-gate if (t_mp != NULL && 
t_mp->sbm_cm.sbdev_error == NULL) { 1458*0Sstevel@tonic-gate /* 1459*0Sstevel@tonic-gate * now that copy/rename has completed, undo this 1460*0Sstevel@tonic-gate * work that was done in dr_release_mem_done. 1461*0Sstevel@tonic-gate */ 1462*0Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 1463*0Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 1464*0Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED); 1465*0Sstevel@tonic-gate } 1466*0Sstevel@tonic-gate 1467*0Sstevel@tonic-gate /* 1468*0Sstevel@tonic-gate * clean up (source) board's mem unit structure. 1469*0Sstevel@tonic-gate * NOTE: sbm_mlist is retained if no error has been record (in other 1470*0Sstevel@tonic-gate * words, when s_mp->sbm_cm.sbdev_error is NULL). This memlist is 1471*0Sstevel@tonic-gate * referred to elsewhere as the cached memlist. The cached memlist 1472*0Sstevel@tonic-gate * is used to re-attach (configure back in) this memunit from the 1473*0Sstevel@tonic-gate * unconfigured state. The memlist is retained because it may 1474*0Sstevel@tonic-gate * represent bad pages that were detected while the memory was 1475*0Sstevel@tonic-gate * configured into the OS. The OS deletes bad pages from phys_install. 1476*0Sstevel@tonic-gate * Those deletes, if any, will be represented in the cached mlist. 
1477*0Sstevel@tonic-gate */ 1478*0Sstevel@tonic-gate if (s_mp->sbm_del_mlist && s_mp->sbm_del_mlist != s_mp->sbm_mlist) 1479*0Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 1480*0Sstevel@tonic-gate 1481*0Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error && s_mp->sbm_mlist) { 1482*0Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 1483*0Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 1484*0Sstevel@tonic-gate } 1485*0Sstevel@tonic-gate 1486*0Sstevel@tonic-gate if (s_mp->sbm_dyn_segs != NULL && s_mp->sbm_cm.sbdev_error == 0) { 1487*0Sstevel@tonic-gate memlist_delete(s_mp->sbm_dyn_segs); 1488*0Sstevel@tonic-gate s_mp->sbm_dyn_segs = NULL; 1489*0Sstevel@tonic-gate } 1490*0Sstevel@tonic-gate 1491*0Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 1492*0Sstevel@tonic-gate s_mp->sbm_peer = NULL; 1493*0Sstevel@tonic-gate s_mp->sbm_flags = 0; 1494*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 1495*0Sstevel@tonic-gate dr_init_mem_unit_data(s_mp); 1496*0Sstevel@tonic-gate 1497*0Sstevel@tonic-gate PR_MEM("%s: cached memlist for %s:", f, s_mp->sbm_cm.sbdev_path); 1498*0Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_mlist); 1499*0Sstevel@tonic-gate 1500*0Sstevel@tonic-gate return (0); 1501*0Sstevel@tonic-gate } 1502*0Sstevel@tonic-gate 1503*0Sstevel@tonic-gate /* 1504*0Sstevel@tonic-gate * Successful return from this function will have the memory 1505*0Sstevel@tonic-gate * handle in bp->b_dev[..mem-unit...].sbm_memhandle allocated 1506*0Sstevel@tonic-gate * and waiting. This routine's job is to select the memory that 1507*0Sstevel@tonic-gate * actually has to be released (detached) which may not necessarily 1508*0Sstevel@tonic-gate * be the same memory node that came in in devlist[], 1509*0Sstevel@tonic-gate * i.e. a copy-rename is needed. 
1510*0Sstevel@tonic-gate */ 1511*0Sstevel@tonic-gate int 1512*0Sstevel@tonic-gate dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum) 1513*0Sstevel@tonic-gate { 1514*0Sstevel@tonic-gate int d; 1515*0Sstevel@tonic-gate int err_flag = 0; 1516*0Sstevel@tonic-gate static fn_t f = "dr_pre_release_mem"; 1517*0Sstevel@tonic-gate 1518*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 1519*0Sstevel@tonic-gate 1520*0Sstevel@tonic-gate for (d = 0; d < devnum; d++) { 1521*0Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d]; 1522*0Sstevel@tonic-gate int rv; 1523*0Sstevel@tonic-gate memquery_t mq; 1524*0Sstevel@tonic-gate struct memlist *ml; 1525*0Sstevel@tonic-gate 1526*0Sstevel@tonic-gate if (mp->sbm_cm.sbdev_error) { 1527*0Sstevel@tonic-gate err_flag = 1; 1528*0Sstevel@tonic-gate continue; 1529*0Sstevel@tonic-gate } else if (!kcage_on) { 1530*0Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_KCAGE_OFF); 1531*0Sstevel@tonic-gate err_flag = 1; 1532*0Sstevel@tonic-gate continue; 1533*0Sstevel@tonic-gate } 1534*0Sstevel@tonic-gate 1535*0Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_RESERVED) { 1536*0Sstevel@tonic-gate /* 1537*0Sstevel@tonic-gate * Board is currently involved in a delete 1538*0Sstevel@tonic-gate * memory operation. Can't detach this guy until 1539*0Sstevel@tonic-gate * that operation completes. 1540*0Sstevel@tonic-gate */ 1541*0Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_INVAL); 1542*0Sstevel@tonic-gate err_flag = 1; 1543*0Sstevel@tonic-gate break; 1544*0Sstevel@tonic-gate } 1545*0Sstevel@tonic-gate 1546*0Sstevel@tonic-gate /* 1547*0Sstevel@tonic-gate * Check whether the detaching memory requires a 1548*0Sstevel@tonic-gate * copy-rename. 
1549*0Sstevel@tonic-gate */ 1550*0Sstevel@tonic-gate ASSERT(mp->sbm_npages != 0); 1551*0Sstevel@tonic-gate rv = kphysm_del_span_query( 1552*0Sstevel@tonic-gate mp->sbm_basepfn, mp->sbm_npages, &mq); 1553*0Sstevel@tonic-gate if (rv != KPHYSM_OK) { 1554*0Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 1555*0Sstevel@tonic-gate err_flag = 1; 1556*0Sstevel@tonic-gate break; 1557*0Sstevel@tonic-gate } 1558*0Sstevel@tonic-gate 1559*0Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 1560*0Sstevel@tonic-gate if (!(dr_cmd_flags(hp) & 1561*0Sstevel@tonic-gate (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) { 1562*0Sstevel@tonic-gate /* caller wasn't prompted for a suspend */ 1563*0Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, 1564*0Sstevel@tonic-gate ESBD_QUIESCE_REQD); 1565*0Sstevel@tonic-gate err_flag = 1; 1566*0Sstevel@tonic-gate break; 1567*0Sstevel@tonic-gate } 1568*0Sstevel@tonic-gate } 1569*0Sstevel@tonic-gate 1570*0Sstevel@tonic-gate /* flags should be clean at this time */ 1571*0Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 1572*0Sstevel@tonic-gate 1573*0Sstevel@tonic-gate ASSERT(mp->sbm_mlist == NULL); /* should be null */ 1574*0Sstevel@tonic-gate ASSERT(mp->sbm_del_mlist == NULL); /* should be null */ 1575*0Sstevel@tonic-gate if (mp->sbm_mlist != NULL) { 1576*0Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 1577*0Sstevel@tonic-gate mp->sbm_mlist = NULL; 1578*0Sstevel@tonic-gate } 1579*0Sstevel@tonic-gate 1580*0Sstevel@tonic-gate ml = dr_get_memlist(mp); 1581*0Sstevel@tonic-gate if (ml == NULL) { 1582*0Sstevel@tonic-gate err_flag = 1; 1583*0Sstevel@tonic-gate PR_MEM("%s: no memlist found for %s\n", 1584*0Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 1585*0Sstevel@tonic-gate continue; 1586*0Sstevel@tonic-gate } 1587*0Sstevel@tonic-gate 1588*0Sstevel@tonic-gate /* allocate a kphysm handle */ 1589*0Sstevel@tonic-gate rv = kphysm_del_gethandle(&mp->sbm_memhandle); 1590*0Sstevel@tonic-gate if (rv != KPHYSM_OK) { 1591*0Sstevel@tonic-gate memlist_delete(ml); 
1592*0Sstevel@tonic-gate 1593*0Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 1594*0Sstevel@tonic-gate err_flag = 1; 1595*0Sstevel@tonic-gate break; 1596*0Sstevel@tonic-gate } 1597*0Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RELOWNER; 1598*0Sstevel@tonic-gate 1599*0Sstevel@tonic-gate if ((mq.nonrelocatable != 0) || 1600*0Sstevel@tonic-gate dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) { 1601*0Sstevel@tonic-gate /* 1602*0Sstevel@tonic-gate * Either the detaching memory node contains 1603*0Sstevel@tonic-gate * non-reloc memory or we failed to reserve the 1604*0Sstevel@tonic-gate * detaching memory node (which did _not_ have 1605*0Sstevel@tonic-gate * any non-reloc memory, i.e. some non-reloc mem 1606*0Sstevel@tonic-gate * got onboard). 1607*0Sstevel@tonic-gate */ 1608*0Sstevel@tonic-gate 1609*0Sstevel@tonic-gate if (dr_select_mem_target(hp, mp, ml)) { 1610*0Sstevel@tonic-gate int rv; 1611*0Sstevel@tonic-gate 1612*0Sstevel@tonic-gate /* 1613*0Sstevel@tonic-gate * We had no luck locating a target 1614*0Sstevel@tonic-gate * memory node to be the recipient of 1615*0Sstevel@tonic-gate * the non-reloc memory on the node 1616*0Sstevel@tonic-gate * we're trying to detach. 1617*0Sstevel@tonic-gate * Clean up be disposing the mem handle 1618*0Sstevel@tonic-gate * and the mem list. 
1619*0Sstevel@tonic-gate */ 1620*0Sstevel@tonic-gate rv = kphysm_del_release(mp->sbm_memhandle); 1621*0Sstevel@tonic-gate if (rv != KPHYSM_OK) { 1622*0Sstevel@tonic-gate /* 1623*0Sstevel@tonic-gate * can do nothing but complain 1624*0Sstevel@tonic-gate * and hope helpful for debug 1625*0Sstevel@tonic-gate */ 1626*0Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected" 1627*0Sstevel@tonic-gate " kphysm_del_release return" 1628*0Sstevel@tonic-gate " value %d", 1629*0Sstevel@tonic-gate f, rv); 1630*0Sstevel@tonic-gate } 1631*0Sstevel@tonic-gate mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 1632*0Sstevel@tonic-gate 1633*0Sstevel@tonic-gate memlist_delete(ml); 1634*0Sstevel@tonic-gate 1635*0Sstevel@tonic-gate /* make sure sbm_flags is clean */ 1636*0Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 1637*0Sstevel@tonic-gate 1638*0Sstevel@tonic-gate dr_dev_err(CE_WARN, 1639*0Sstevel@tonic-gate &mp->sbm_cm, ESBD_NO_TARGET); 1640*0Sstevel@tonic-gate 1641*0Sstevel@tonic-gate err_flag = 1; 1642*0Sstevel@tonic-gate break; 1643*0Sstevel@tonic-gate } 1644*0Sstevel@tonic-gate 1645*0Sstevel@tonic-gate /* 1646*0Sstevel@tonic-gate * ml is not memlist_delete'd here because 1647*0Sstevel@tonic-gate * it has been assigned to mp->sbm_mlist 1648*0Sstevel@tonic-gate * by dr_select_mem_target. 
1649*0Sstevel@tonic-gate */ 1650*0Sstevel@tonic-gate } else { 1651*0Sstevel@tonic-gate /* no target needed to detach this board */ 1652*0Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RESERVED; 1653*0Sstevel@tonic-gate mp->sbm_peer = NULL; 1654*0Sstevel@tonic-gate mp->sbm_del_mlist = ml; 1655*0Sstevel@tonic-gate mp->sbm_mlist = ml; 1656*0Sstevel@tonic-gate mp->sbm_cm.sbdev_busy = 1; 1657*0Sstevel@tonic-gate } 1658*0Sstevel@tonic-gate #ifdef DEBUG 1659*0Sstevel@tonic-gate ASSERT(mp->sbm_mlist != NULL); 1660*0Sstevel@tonic-gate 1661*0Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_SOURCE) { 1662*0Sstevel@tonic-gate PR_MEM("%s: release of %s requires copy/rename;" 1663*0Sstevel@tonic-gate " selected target board %s\n", 1664*0Sstevel@tonic-gate f, 1665*0Sstevel@tonic-gate mp->sbm_cm.sbdev_path, 1666*0Sstevel@tonic-gate mp->sbm_peer->sbm_cm.sbdev_path); 1667*0Sstevel@tonic-gate } else { 1668*0Sstevel@tonic-gate PR_MEM("%s: copy/rename not required to release %s\n", 1669*0Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 1670*0Sstevel@tonic-gate } 1671*0Sstevel@tonic-gate 1672*0Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RELOWNER); 1673*0Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RESERVED); 1674*0Sstevel@tonic-gate #endif 1675*0Sstevel@tonic-gate } 1676*0Sstevel@tonic-gate 1677*0Sstevel@tonic-gate return (err_flag ? -1 : 0); 1678*0Sstevel@tonic-gate } 1679*0Sstevel@tonic-gate 1680*0Sstevel@tonic-gate void 1681*0Sstevel@tonic-gate dr_release_mem_done(dr_common_unit_t *cp) 1682*0Sstevel@tonic-gate { 1683*0Sstevel@tonic-gate dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp; 1684*0Sstevel@tonic-gate dr_mem_unit_t *t_mp, *mp; 1685*0Sstevel@tonic-gate int rv; 1686*0Sstevel@tonic-gate static fn_t f = "dr_release_mem_done"; 1687*0Sstevel@tonic-gate 1688*0Sstevel@tonic-gate /* 1689*0Sstevel@tonic-gate * This unit will be flagged with DR_MFLAG_SOURCE, if it 1690*0Sstevel@tonic-gate * has a target unit. 
	 */
	if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
		t_mp = s_mp->sbm_peer;
		ASSERT(t_mp != NULL);
		ASSERT(t_mp->sbm_peer == s_mp);
		ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
		ASSERT(t_mp->sbm_flags & DR_MFLAG_RESERVED);
	} else {
		/* this is no target unit */
		t_mp = NULL;
	}

	/* free delete handle */
	ASSERT(s_mp->sbm_flags & DR_MFLAG_RELOWNER);
	ASSERT(s_mp->sbm_flags & DR_MFLAG_RESERVED);
	rv = kphysm_del_release(s_mp->sbm_memhandle);
	if (rv != KPHYSM_OK) {
		/*
		 * can do nothing but complain
		 * and hope helpful for debug
		 */
		cmn_err(CE_WARN, "%s: unexpected kphysm_del_release"
			" return value %d", f, rv);
	}
	s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER;

	/*
	 * If an error was encountered during release, clean up
	 * the source (and target, if present) unit data.
	 */
/* XXX Can we know that sbdev_error was encountered during release? */
	if (s_mp->sbm_cm.sbdev_error != NULL) {
		PR_MEM("%s: %s: error %d noted\n",
			f,
			s_mp->sbm_cm.sbdev_path,
			s_mp->sbm_cm.sbdev_error->e_code);

		if (t_mp != NULL) {
			/*
			 * sbm_del_mlist aliases sbm_mlist on the target
			 * (asserted below); clear the alias first so the
			 * memlist is freed exactly once via sbm_mlist.
			 */
			ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);
			t_mp->sbm_del_mlist = NULL;

			if (t_mp->sbm_mlist != NULL) {
				memlist_delete(t_mp->sbm_mlist);
				t_mp->sbm_mlist = NULL;
			}

			t_mp->sbm_peer = NULL;
			t_mp->sbm_flags = 0;
			t_mp->sbm_cm.sbdev_busy = 0;
		}

		/* source lists may be distinct; avoid a double free */
		if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
			memlist_delete(s_mp->sbm_del_mlist);
		s_mp->sbm_del_mlist = NULL;

		if (s_mp->sbm_mlist != NULL) {
			memlist_delete(s_mp->sbm_mlist);
			s_mp->sbm_mlist = NULL;
		}

		s_mp->sbm_peer = NULL;
		s_mp->sbm_flags = 0;
		s_mp->sbm_cm.sbdev_busy = 0;

		/* bail out */
		return;
	}

	DR_DEV_SET_RELEASED(&s_mp->sbm_cm);
	dr_device_transition(&s_mp->sbm_cm, DR_STATE_RELEASE);

	if (t_mp != NULL) {
		/*
		 * the kphysm delete operation that drained the source
		 * board also drained this target board.  Since the source
		 * board drain is now known to have succeeded, we know this
		 * target board is drained too.
		 *
		 * because DR_DEV_SET_RELEASED and dr_device_transition
		 * is done here, the dr_release_dev_done should not
		 * fail.
		 */
		DR_DEV_SET_RELEASED(&t_mp->sbm_cm);
		dr_device_transition(&t_mp->sbm_cm, DR_STATE_RELEASE);

		/*
		 * NOTE: do not transition target's board state,
		 * even if the mem-unit was the last configure
		 * unit of the board.  When copy/rename completes
		 * this mem-unit will transitioned back to
		 * the configured state.  In the meantime, the
		 * board's must remain as is.
		 */
	}

	/* if board(s) had deleted memory, verify it is gone */
	rv = 0;
	memlist_read_lock();
	if (s_mp->sbm_del_mlist != NULL) {
		mp = s_mp;
		rv = memlist_intersect(phys_install, mp->sbm_del_mlist);
	}
	if (rv == 0 && t_mp && t_mp->sbm_del_mlist != NULL) {
		mp = t_mp;
		rv = memlist_intersect(phys_install, mp->sbm_del_mlist);
	}
	memlist_read_unlock();
	if (rv) {
		/* deleted spans still present in phys_install: fatal */
		cmn_err(CE_WARN, "%s: %smem-unit (%d.%d): "
			"deleted memory still found in phys_install",
			f,
			(mp == t_mp ? "target " : ""),
			mp->sbm_cm.sbdev_bp->b_num,
			mp->sbm_cm.sbdev_unum);

		DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm);
		return;
	}

	s_mp->sbm_flags |= DR_MFLAG_RELDONE;
	if (t_mp != NULL)
		t_mp->sbm_flags |= DR_MFLAG_RELDONE;

	/* this should not fail */
	if (dr_release_dev_done(&s_mp->sbm_cm) != 0) {
		/* catch this in debug kernels */
		ASSERT(0);
		return;
	}

	PR_MEM("%s: marking %s release DONE\n",
		f, s_mp->sbm_cm.sbdev_path);

	s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;

	if (t_mp != NULL) {
		/* should not fail */
		rv = dr_release_dev_done(&t_mp->sbm_cm);
		if (rv != 0) {
			/* catch this in debug kernels */
			ASSERT(0);
			return;
		}

		PR_MEM("%s: marking %s release DONE\n",
			f, t_mp->sbm_cm.sbdev_path);

		t_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED;
	}
}

/*ARGSUSED*/
int
dr_disconnect_mem(dr_mem_unit_t *mp)
{
	static fn_t	f = "dr_disconnect_mem";
	update_membounds_t umb;

#ifdef DEBUG
	int state = mp->sbm_cm.sbdev_state;
	ASSERT(state == DR_STATE_CONNECTED ||
		state == DR_STATE_UNCONFIGURED);
#endif

	PR_MEM("%s...\n", f);

	/* free cached memlists; they may alias, so avoid a double free */
	if (mp->sbm_del_mlist && mp->sbm_del_mlist != mp->sbm_mlist)
		memlist_delete(mp->sbm_del_mlist);
	mp->sbm_del_mlist = NULL;

	if (mp->sbm_mlist) {
		memlist_delete(mp->sbm_mlist);
		mp->sbm_mlist = NULL;
	}

	/*
	 * Remove memory from lgroup
	 * For now, only board info is required.
	 */
	umb.u_board = mp->sbm_cm.sbdev_bp->b_num;
	umb.u_base = (uint64_t)-1;
	umb.u_len = (uint64_t)-1;

	lgrp_plat_config(LGRP_CONFIG_MEM_DEL, (uintptr_t)&umb);

	return (0);
}

/*
 * Undo the effects of a memory release operation, returning the
 * source mem-unit (and its copy/rename target, if one was selected)
 * to the configured state.  Returns 0 on success, -1 if the unit
 * cannot be cancelled from its current state.
 */
int
dr_cancel_mem(dr_mem_unit_t *s_mp)
{
	dr_mem_unit_t	*t_mp;
	dr_state_t	state;
	static fn_t	f = "dr_cancel_mem";

	state = s_mp->sbm_cm.sbdev_state;

	if (s_mp->sbm_flags & DR_MFLAG_TARGET) {
		/* must cancel source board, not target board */
		/* TODO: set error */
		return (-1);
	} else if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
		t_mp = s_mp->sbm_peer;
		ASSERT(t_mp != NULL);
		ASSERT(t_mp->sbm_peer == s_mp);

		/* must always match the source board's state */
		/* TODO: is this assertion correct? */
		ASSERT(t_mp->sbm_cm.sbdev_state == state);
	} else {
		/* this is no target unit */
		t_mp = NULL;
	}

	switch (state) {
	case DR_STATE_UNREFERENCED:	/* state set by dr_release_dev_done */
		ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

		/* re-add any memory spans that were deleted */
		if (t_mp != NULL && t_mp->sbm_del_mlist != NULL) {
			PR_MEM("%s: undoing target %s memory delete\n",
				f, t_mp->sbm_cm.sbdev_path);
			dr_add_memory_spans(t_mp, t_mp->sbm_del_mlist);

			DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
		}

		if (s_mp->sbm_del_mlist != NULL) {
			PR_MEM("%s: undoing %s memory delete\n",
				f, s_mp->sbm_cm.sbdev_path);

			dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist);
		}

		/*FALLTHROUGH*/

	/* TODO: should no longer be possible to see the release state here */
	case DR_STATE_RELEASE:	/* state set by dr_release_mem_done */

		ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

		if (t_mp != NULL) {
			/*
			 * Target's sbm_del_mlist aliases sbm_mlist
			 * (asserted below); clear the alias first so
			 * the memlist is freed exactly once.
			 */
			ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);
			t_mp->sbm_del_mlist = NULL;

			if (t_mp->sbm_mlist != NULL) {
				memlist_delete(t_mp->sbm_mlist);
				t_mp->sbm_mlist = NULL;
			}

			t_mp->sbm_peer = NULL;
			t_mp->sbm_flags = 0;
			t_mp->sbm_cm.sbdev_busy = 0;
			dr_init_mem_unit_data(t_mp);

			DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);

			dr_device_transition(
				&t_mp->sbm_cm, DR_STATE_CONFIGURED);
		}

		/* source lists may be distinct; avoid a double free */
		if (s_mp->sbm_del_mlist != s_mp->sbm_mlist)
			memlist_delete(s_mp->sbm_del_mlist);
		s_mp->sbm_del_mlist = NULL;

		if (s_mp->sbm_mlist != NULL) {
			memlist_delete(s_mp->sbm_mlist);
			s_mp->sbm_mlist = NULL;
		}

		s_mp->sbm_peer = NULL;
		s_mp->sbm_flags = 0;
		s_mp->sbm_cm.sbdev_busy = 0;
		dr_init_mem_unit_data(s_mp);

		return (0);

	default:
		PR_MEM("%s: WARNING unexpected state (%d) for %s\n",
			f, (int)state, s_mp->sbm_cm.sbdev_path);

		return (-1);
	}
	/*NOTREACHED*/
}

void
dr_init_mem_unit(dr_mem_unit_t *mp) 1978*0Sstevel@tonic-gate { 1979*0Sstevel@tonic-gate dr_state_t new_state; 1980*0Sstevel@tonic-gate 1981*0Sstevel@tonic-gate 1982*0Sstevel@tonic-gate if (DR_DEV_IS_ATTACHED(&mp->sbm_cm)) { 1983*0Sstevel@tonic-gate new_state = DR_STATE_CONFIGURED; 1984*0Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 1985*0Sstevel@tonic-gate } else if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) { 1986*0Sstevel@tonic-gate new_state = DR_STATE_CONNECTED; 1987*0Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 1988*0Sstevel@tonic-gate } else if (mp->sbm_cm.sbdev_id != (drmachid_t)0) { 1989*0Sstevel@tonic-gate new_state = DR_STATE_OCCUPIED; 1990*0Sstevel@tonic-gate } else { 1991*0Sstevel@tonic-gate new_state = DR_STATE_EMPTY; 1992*0Sstevel@tonic-gate } 1993*0Sstevel@tonic-gate 1994*0Sstevel@tonic-gate if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) 1995*0Sstevel@tonic-gate dr_init_mem_unit_data(mp); 1996*0Sstevel@tonic-gate 1997*0Sstevel@tonic-gate /* delay transition until fully initialized */ 1998*0Sstevel@tonic-gate dr_device_transition(&mp->sbm_cm, new_state); 1999*0Sstevel@tonic-gate } 2000*0Sstevel@tonic-gate 2001*0Sstevel@tonic-gate static void 2002*0Sstevel@tonic-gate dr_init_mem_unit_data(dr_mem_unit_t *mp) 2003*0Sstevel@tonic-gate { 2004*0Sstevel@tonic-gate drmachid_t id = mp->sbm_cm.sbdev_id; 2005*0Sstevel@tonic-gate uint64_t bytes; 2006*0Sstevel@tonic-gate sbd_error_t *err; 2007*0Sstevel@tonic-gate static fn_t f = "dr_init_mem_unit_data"; 2008*0Sstevel@tonic-gate update_membounds_t umb; 2009*0Sstevel@tonic-gate 2010*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 2011*0Sstevel@tonic-gate 2012*0Sstevel@tonic-gate /* a little sanity checking */ 2013*0Sstevel@tonic-gate ASSERT(mp->sbm_peer == NULL); 2014*0Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 2015*0Sstevel@tonic-gate 2016*0Sstevel@tonic-gate /* get basepfn of mem unit */ 2017*0Sstevel@tonic-gate err = drmach_mem_get_base_physaddr(id, &bytes); 2018*0Sstevel@tonic-gate if (err) { 
2019*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 2020*0Sstevel@tonic-gate mp->sbm_basepfn = (pfn_t)-1; 2021*0Sstevel@tonic-gate } else 2022*0Sstevel@tonic-gate mp->sbm_basepfn = _b64top(bytes); 2023*0Sstevel@tonic-gate 2024*0Sstevel@tonic-gate /* attempt to get number of pages from PDA */ 2025*0Sstevel@tonic-gate err = drmach_mem_get_size(id, &bytes); 2026*0Sstevel@tonic-gate if (err) { 2027*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 2028*0Sstevel@tonic-gate mp->sbm_npages = 0; 2029*0Sstevel@tonic-gate } else 2030*0Sstevel@tonic-gate mp->sbm_npages = _b64top(bytes); 2031*0Sstevel@tonic-gate 2032*0Sstevel@tonic-gate /* if didn't work, calculate using memlist */ 2033*0Sstevel@tonic-gate if (mp->sbm_npages == 0) { 2034*0Sstevel@tonic-gate struct memlist *ml, *mlist; 2035*0Sstevel@tonic-gate /* 2036*0Sstevel@tonic-gate * Either we couldn't open the PDA or our 2037*0Sstevel@tonic-gate * PDA has garbage in it. We must have the 2038*0Sstevel@tonic-gate * page count consistent and whatever the 2039*0Sstevel@tonic-gate * OS states has precedence over the PDA 2040*0Sstevel@tonic-gate * so let's check the kernel. 2041*0Sstevel@tonic-gate */ 2042*0Sstevel@tonic-gate /* TODO: curious comment. it suggests pda query should happen if this fails */ 2043*0Sstevel@tonic-gate PR_MEM("%s: PDA query failed for npages." 
2044*0Sstevel@tonic-gate " Checking memlist for %s\n", 2045*0Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 2046*0Sstevel@tonic-gate 2047*0Sstevel@tonic-gate mlist = dr_get_memlist(mp); 2048*0Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) 2049*0Sstevel@tonic-gate mp->sbm_npages += btop(ml->size); 2050*0Sstevel@tonic-gate memlist_delete(mlist); 2051*0Sstevel@tonic-gate } 2052*0Sstevel@tonic-gate 2053*0Sstevel@tonic-gate err = drmach_mem_get_alignment(id, &bytes); 2054*0Sstevel@tonic-gate if (err) { 2055*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 2056*0Sstevel@tonic-gate mp->sbm_alignment_mask = 0; 2057*0Sstevel@tonic-gate } else 2058*0Sstevel@tonic-gate mp->sbm_alignment_mask = _b64top(bytes); 2059*0Sstevel@tonic-gate 2060*0Sstevel@tonic-gate err = drmach_mem_get_slice_size(id, &bytes); 2061*0Sstevel@tonic-gate if (err) { 2062*0Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 2063*0Sstevel@tonic-gate mp->sbm_slice_size = 0; /* paranoia */ 2064*0Sstevel@tonic-gate } else 2065*0Sstevel@tonic-gate mp->sbm_slice_size = bytes; 2066*0Sstevel@tonic-gate 2067*0Sstevel@tonic-gate /* 2068*0Sstevel@tonic-gate * Add memory to lgroup 2069*0Sstevel@tonic-gate */ 2070*0Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 2071*0Sstevel@tonic-gate umb.u_base = (uint64_t)mp->sbm_basepfn << MMU_PAGESHIFT; 2072*0Sstevel@tonic-gate umb.u_len = (uint64_t)mp->sbm_npages << MMU_PAGESHIFT; 2073*0Sstevel@tonic-gate 2074*0Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb); 2075*0Sstevel@tonic-gate 2076*0Sstevel@tonic-gate PR_MEM("%s: %s (basepfn = 0x%x, npgs = %d)\n", 2077*0Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages); 2078*0Sstevel@tonic-gate } 2079*0Sstevel@tonic-gate 2080*0Sstevel@tonic-gate static int 2081*0Sstevel@tonic-gate dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *ml) 2082*0Sstevel@tonic-gate { 2083*0Sstevel@tonic-gate int err; 2084*0Sstevel@tonic-gate pfn_t base; 
2085*0Sstevel@tonic-gate pgcnt_t npgs; 2086*0Sstevel@tonic-gate struct memlist *mc; 2087*0Sstevel@tonic-gate static fn_t f = "dr_reserve_mem_spans"; 2088*0Sstevel@tonic-gate 2089*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 2090*0Sstevel@tonic-gate 2091*0Sstevel@tonic-gate /* 2092*0Sstevel@tonic-gate * Walk the supplied memlist scheduling each span for removal 2093*0Sstevel@tonic-gate * with kphysm_del_span. It is possible that a span may intersect 2094*0Sstevel@tonic-gate * an area occupied by the cage. 2095*0Sstevel@tonic-gate */ 2096*0Sstevel@tonic-gate for (mc = ml; mc != NULL; mc = mc->next) { 2097*0Sstevel@tonic-gate base = _b64top(mc->address); 2098*0Sstevel@tonic-gate npgs = _b64top(mc->size); 2099*0Sstevel@tonic-gate 2100*0Sstevel@tonic-gate err = kphysm_del_span(*mhp, base, npgs); 2101*0Sstevel@tonic-gate if (err != KPHYSM_OK) { 2102*0Sstevel@tonic-gate cmn_err(CE_WARN, "%s memory reserve failed." 2103*0Sstevel@tonic-gate " unexpected kphysm_del_span return value %d;" 2104*0Sstevel@tonic-gate " basepfn=0x%lx npages=%ld", 2105*0Sstevel@tonic-gate f, err, base, npgs); 2106*0Sstevel@tonic-gate 2107*0Sstevel@tonic-gate return (-1); 2108*0Sstevel@tonic-gate } 2109*0Sstevel@tonic-gate } 2110*0Sstevel@tonic-gate 2111*0Sstevel@tonic-gate return (0); 2112*0Sstevel@tonic-gate } 2113*0Sstevel@tonic-gate 2114*0Sstevel@tonic-gate /* debug counters */ 2115*0Sstevel@tonic-gate int dr_smt_realigned; 2116*0Sstevel@tonic-gate int dr_smt_preference[4]; 2117*0Sstevel@tonic-gate 2118*0Sstevel@tonic-gate #ifdef DEBUG 2119*0Sstevel@tonic-gate uint_t dr_ignore_board; /* if bit[bnum-1] set, board won't be candidate */ 2120*0Sstevel@tonic-gate #endif 2121*0Sstevel@tonic-gate 2122*0Sstevel@tonic-gate /* 2123*0Sstevel@tonic-gate * Find and reserve a copy/rename target board suitable for the 2124*0Sstevel@tonic-gate * given source board. 
2125*0Sstevel@tonic-gate * All boards in the system are examined and categorized in relation to 2126*0Sstevel@tonic-gate * their memory size versus the source board's memory size. Order of 2127*0Sstevel@tonic-gate * preference is: 2128*0Sstevel@tonic-gate * 1st: board has same memory size 2129*0Sstevel@tonic-gate * 2nd: board has larger memory size 2130*0Sstevel@tonic-gate * 3rd: board has smaller memory size 2131*0Sstevel@tonic-gate * 4th: board has smaller memory size, available memory will be reduced. 2132*0Sstevel@tonic-gate * Boards in category 3 and 4 will have their MC's reprogrammed to locate the 2133*0Sstevel@tonic-gate * span to which the MC responds to address span that appropriately covers 2134*0Sstevel@tonic-gate * the nonrelocatable span of the source board. 2135*0Sstevel@tonic-gate */ 2136*0Sstevel@tonic-gate static int 2137*0Sstevel@tonic-gate dr_select_mem_target(dr_handle_t *hp, 2138*0Sstevel@tonic-gate dr_mem_unit_t *s_mp, struct memlist *s_ml) 2139*0Sstevel@tonic-gate { 2140*0Sstevel@tonic-gate pgcnt_t sz = _b64top(s_mp->sbm_slice_size); 2141*0Sstevel@tonic-gate pgcnt_t sm = sz - 1; /* mem_slice_mask */ 2142*0Sstevel@tonic-gate pfn_t s_phi, t_phi; 2143*0Sstevel@tonic-gate 2144*0Sstevel@tonic-gate int n_sets = 4; /* same, larger, smaller, clipped */ 2145*0Sstevel@tonic-gate int preference; /* lower value is higher preference */ 2146*0Sstevel@tonic-gate int n_units_per_set; 2147*0Sstevel@tonic-gate int idx; 2148*0Sstevel@tonic-gate dr_mem_unit_t **sets; 2149*0Sstevel@tonic-gate 2150*0Sstevel@tonic-gate int t_bd; 2151*0Sstevel@tonic-gate int t_unit; 2152*0Sstevel@tonic-gate int rv; 2153*0Sstevel@tonic-gate int allow_src_memrange_modify; 2154*0Sstevel@tonic-gate int allow_targ_memrange_modify; 2155*0Sstevel@tonic-gate drmachid_t t_id; 2156*0Sstevel@tonic-gate dr_board_t *s_bp, *t_bp; 2157*0Sstevel@tonic-gate dr_mem_unit_t *t_mp, *c_mp; 2158*0Sstevel@tonic-gate struct memlist *d_ml, *t_ml, *x_ml; 2159*0Sstevel@tonic-gate memquery_t s_mq = {0}; 
2160*0Sstevel@tonic-gate static fn_t f = "dr_select_mem_target"; 2161*0Sstevel@tonic-gate 2162*0Sstevel@tonic-gate PR_MEM("%s...\n", f); 2163*0Sstevel@tonic-gate 2164*0Sstevel@tonic-gate ASSERT(s_ml != NULL); 2165*0Sstevel@tonic-gate 2166*0Sstevel@tonic-gate n_units_per_set = MAX_BOARDS * MAX_MEM_UNITS_PER_BOARD; 2167*0Sstevel@tonic-gate sets = GETSTRUCT(dr_mem_unit_t *, n_units_per_set * n_sets); 2168*0Sstevel@tonic-gate 2169*0Sstevel@tonic-gate s_bp = hp->h_bd; 2170*0Sstevel@tonic-gate /* calculate the offset into the slice of the last source board pfn */ 2171*0Sstevel@tonic-gate ASSERT(s_mp->sbm_npages != 0); 2172*0Sstevel@tonic-gate s_phi = (s_mp->sbm_basepfn + s_mp->sbm_npages - 1) & sm; 2173*0Sstevel@tonic-gate 2174*0Sstevel@tonic-gate allow_src_memrange_modify = drmach_allow_memrange_modify(s_bp->b_id); 2175*0Sstevel@tonic-gate 2176*0Sstevel@tonic-gate /* 2177*0Sstevel@tonic-gate * Make one pass through all memory units on all boards 2178*0Sstevel@tonic-gate * and categorize them with respect to the source board. 2179*0Sstevel@tonic-gate */ 2180*0Sstevel@tonic-gate for (t_bd = 0; t_bd < MAX_BOARDS; t_bd++) { 2181*0Sstevel@tonic-gate /* 2182*0Sstevel@tonic-gate * The board structs are a contiguous array 2183*0Sstevel@tonic-gate * so we take advantage of that to find the 2184*0Sstevel@tonic-gate * correct board struct pointer for a given 2185*0Sstevel@tonic-gate * board number. 
2186*0Sstevel@tonic-gate */ 2187*0Sstevel@tonic-gate t_bp = dr_lookup_board(t_bd); 2188*0Sstevel@tonic-gate 2189*0Sstevel@tonic-gate /* source board can not be its own target */ 2190*0Sstevel@tonic-gate if (s_bp->b_num == t_bp->b_num) 2191*0Sstevel@tonic-gate continue; 2192*0Sstevel@tonic-gate 2193*0Sstevel@tonic-gate for (t_unit = 0; t_unit < MAX_MEM_UNITS_PER_BOARD; t_unit++) { 2194*0Sstevel@tonic-gate 2195*0Sstevel@tonic-gate t_mp = dr_get_mem_unit(t_bp, t_unit); 2196*0Sstevel@tonic-gate 2197*0Sstevel@tonic-gate /* this memory node must be attached */ 2198*0Sstevel@tonic-gate if (!DR_DEV_IS_ATTACHED(&t_mp->sbm_cm)) 2199*0Sstevel@tonic-gate continue; 2200*0Sstevel@tonic-gate 2201*0Sstevel@tonic-gate /* source unit can not be its own target */ 2202*0Sstevel@tonic-gate if (s_mp == t_mp) { 2203*0Sstevel@tonic-gate /* catch this is debug kernels */ 2204*0Sstevel@tonic-gate ASSERT(0); 2205*0Sstevel@tonic-gate continue; 2206*0Sstevel@tonic-gate } 2207*0Sstevel@tonic-gate 2208*0Sstevel@tonic-gate /* 2209*0Sstevel@tonic-gate * this memory node must not already be reserved 2210*0Sstevel@tonic-gate * by some other memory delete operation. 2211*0Sstevel@tonic-gate */ 2212*0Sstevel@tonic-gate if (t_mp->sbm_flags & DR_MFLAG_RESERVED) 2213*0Sstevel@tonic-gate continue; 2214*0Sstevel@tonic-gate 2215*0Sstevel@tonic-gate /* 2216*0Sstevel@tonic-gate * categorize the memory node 2217*0Sstevel@tonic-gate * If this is a smaller memory node, create a 2218*0Sstevel@tonic-gate * temporary, edited copy of the source board's 2219*0Sstevel@tonic-gate * memlist containing only the span of the non- 2220*0Sstevel@tonic-gate * relocatable pages. 
2221*0Sstevel@tonic-gate */ 2222*0Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 2223*0Sstevel@tonic-gate t_id = t_mp->sbm_cm.sbdev_bp->b_id; 2224*0Sstevel@tonic-gate allow_targ_memrange_modify = 2225*0Sstevel@tonic-gate drmach_allow_memrange_modify(t_id); 2226*0Sstevel@tonic-gate if (t_mp->sbm_npages == s_mp->sbm_npages && 2227*0Sstevel@tonic-gate t_phi == s_phi) { 2228*0Sstevel@tonic-gate preference = 0; 2229*0Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 2230*0Sstevel@tonic-gate } else if (t_mp->sbm_npages > s_mp->sbm_npages && 2231*0Sstevel@tonic-gate t_phi > s_phi) { 2232*0Sstevel@tonic-gate /* 2233*0Sstevel@tonic-gate * Selecting this target will require modifying 2234*0Sstevel@tonic-gate * the source and/or target physical address 2235*0Sstevel@tonic-gate * ranges. Skip if not supported by platform. 2236*0Sstevel@tonic-gate */ 2237*0Sstevel@tonic-gate if (!allow_src_memrange_modify || 2238*0Sstevel@tonic-gate !allow_targ_memrange_modify) { 2239*0Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 2240*0Sstevel@tonic-gate "range relocation not supported " 2241*0Sstevel@tonic-gate "by platform\n", f, 2242*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 2243*0Sstevel@tonic-gate continue; 2244*0Sstevel@tonic-gate } 2245*0Sstevel@tonic-gate preference = 1; 2246*0Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 2247*0Sstevel@tonic-gate } else { 2248*0Sstevel@tonic-gate pfn_t pfn = 0; 2249*0Sstevel@tonic-gate 2250*0Sstevel@tonic-gate /* 2251*0Sstevel@tonic-gate * Selecting this target will require modifying 2252*0Sstevel@tonic-gate * the source and/or target physical address 2253*0Sstevel@tonic-gate * ranges. Skip if not supported by platform. 
2254*0Sstevel@tonic-gate */ 2255*0Sstevel@tonic-gate if (!allow_src_memrange_modify || 2256*0Sstevel@tonic-gate !allow_targ_memrange_modify) { 2257*0Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 2258*0Sstevel@tonic-gate "range relocation not supported " 2259*0Sstevel@tonic-gate "by platform\n", f, 2260*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 2261*0Sstevel@tonic-gate continue; 2262*0Sstevel@tonic-gate } 2263*0Sstevel@tonic-gate 2264*0Sstevel@tonic-gate /* 2265*0Sstevel@tonic-gate * Check if its mc can be programmed to relocate 2266*0Sstevel@tonic-gate * the active address range to match the 2267*0Sstevel@tonic-gate * nonrelocatable span of the source board. 2268*0Sstevel@tonic-gate */ 2269*0Sstevel@tonic-gate preference = 2; 2270*0Sstevel@tonic-gate 2271*0Sstevel@tonic-gate if (s_mq.phys_pages == 0) { 2272*0Sstevel@tonic-gate /* 2273*0Sstevel@tonic-gate * find non-relocatable span on 2274*0Sstevel@tonic-gate * source board. 2275*0Sstevel@tonic-gate */ 2276*0Sstevel@tonic-gate rv = kphysm_del_span_query( 2277*0Sstevel@tonic-gate s_mp->sbm_basepfn, 2278*0Sstevel@tonic-gate s_mp->sbm_npages, &s_mq); 2279*0Sstevel@tonic-gate if (rv != KPHYSM_OK) { 2280*0Sstevel@tonic-gate PR_MEM("%s: %s: unexpected" 2281*0Sstevel@tonic-gate " kphysm_del_span_query" 2282*0Sstevel@tonic-gate " return value %d;" 2283*0Sstevel@tonic-gate " basepfn 0x%lx, npages %ld\n", 2284*0Sstevel@tonic-gate f, 2285*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 2286*0Sstevel@tonic-gate rv, 2287*0Sstevel@tonic-gate s_mp->sbm_basepfn, 2288*0Sstevel@tonic-gate s_mp->sbm_npages); 2289*0Sstevel@tonic-gate 2290*0Sstevel@tonic-gate /* paranoia */ 2291*0Sstevel@tonic-gate s_mq.phys_pages = 0; 2292*0Sstevel@tonic-gate 2293*0Sstevel@tonic-gate continue; 2294*0Sstevel@tonic-gate } 2295*0Sstevel@tonic-gate 2296*0Sstevel@tonic-gate /* more paranoia */ 2297*0Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 2298*0Sstevel@tonic-gate ASSERT(s_mq.nonrelocatable != 0); 2299*0Sstevel@tonic-gate 
2300*0Sstevel@tonic-gate /* 2301*0Sstevel@tonic-gate * this should not happen 2302*0Sstevel@tonic-gate * if it does, it simply means that 2303*0Sstevel@tonic-gate * we can not proceed with qualifying 2304*0Sstevel@tonic-gate * this target candidate. 2305*0Sstevel@tonic-gate */ 2306*0Sstevel@tonic-gate if (s_mq.nonrelocatable == 0) 2307*0Sstevel@tonic-gate continue; 2308*0Sstevel@tonic-gate 2309*0Sstevel@tonic-gate PR_MEM("%s: %s: nonrelocatable" 2310*0Sstevel@tonic-gate " span (0x%lx..0x%lx)\n", 2311*0Sstevel@tonic-gate f, 2312*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 2313*0Sstevel@tonic-gate s_mq.first_nonrelocatable, 2314*0Sstevel@tonic-gate s_mq.last_nonrelocatable); 2315*0Sstevel@tonic-gate } 2316*0Sstevel@tonic-gate 2317*0Sstevel@tonic-gate /* 2318*0Sstevel@tonic-gate * Round down the starting pfn of the 2319*0Sstevel@tonic-gate * nonrelocatable span on the source board 2320*0Sstevel@tonic-gate * to nearest programmable boundary possible 2321*0Sstevel@tonic-gate * with this target candidate. 2322*0Sstevel@tonic-gate */ 2323*0Sstevel@tonic-gate pfn = s_mq.first_nonrelocatable & 2324*0Sstevel@tonic-gate ~t_mp->sbm_alignment_mask; 2325*0Sstevel@tonic-gate 2326*0Sstevel@tonic-gate /* skip candidate if memory is too small */ 2327*0Sstevel@tonic-gate if (pfn + t_mp->sbm_npages < 2328*0Sstevel@tonic-gate s_mq.last_nonrelocatable) 2329*0Sstevel@tonic-gate continue; 2330*0Sstevel@tonic-gate 2331*0Sstevel@tonic-gate /* 2332*0Sstevel@tonic-gate * reprogramming an mc to relocate its 2333*0Sstevel@tonic-gate * active address range means the beginning 2334*0Sstevel@tonic-gate * address to which the DIMMS respond will 2335*0Sstevel@tonic-gate * be somewhere above the slice boundary 2336*0Sstevel@tonic-gate * address. The larger the size of memory 2337*0Sstevel@tonic-gate * on this unit, the more likely part of it 2338*0Sstevel@tonic-gate * will exist beyond the end of the slice. 
2339*0Sstevel@tonic-gate * The portion of the memory that does is 2340*0Sstevel@tonic-gate * unavailable to the system until the mc 2341*0Sstevel@tonic-gate * reprogrammed to a more favorable base 2342*0Sstevel@tonic-gate * address. 2343*0Sstevel@tonic-gate * An attempt is made to avoid the loss by 2344*0Sstevel@tonic-gate * recalculating the mc base address relative 2345*0Sstevel@tonic-gate * to the end of the slice. This may produce 2346*0Sstevel@tonic-gate * a more favorable result. If not, we lower 2347*0Sstevel@tonic-gate * the board's preference rating so that it 2348*0Sstevel@tonic-gate * is one the last candidate boards to be 2349*0Sstevel@tonic-gate * considered. 2350*0Sstevel@tonic-gate */ 2351*0Sstevel@tonic-gate if ((pfn + t_mp->sbm_npages) & ~sm) { 2352*0Sstevel@tonic-gate pfn_t p; 2353*0Sstevel@tonic-gate 2354*0Sstevel@tonic-gate ASSERT(sz >= t_mp->sbm_npages); 2355*0Sstevel@tonic-gate 2356*0Sstevel@tonic-gate /* 2357*0Sstevel@tonic-gate * calculate an alternative starting 2358*0Sstevel@tonic-gate * address relative to the end of the 2359*0Sstevel@tonic-gate * slice's address space. 2360*0Sstevel@tonic-gate */ 2361*0Sstevel@tonic-gate p = pfn & ~sm; 2362*0Sstevel@tonic-gate p = p + (sz - t_mp->sbm_npages); 2363*0Sstevel@tonic-gate p = p & ~t_mp->sbm_alignment_mask; 2364*0Sstevel@tonic-gate 2365*0Sstevel@tonic-gate if ((p > s_mq.first_nonrelocatable) || 2366*0Sstevel@tonic-gate (p + t_mp->sbm_npages < 2367*0Sstevel@tonic-gate s_mq.last_nonrelocatable)) { 2368*0Sstevel@tonic-gate 2369*0Sstevel@tonic-gate /* 2370*0Sstevel@tonic-gate * alternative starting addr 2371*0Sstevel@tonic-gate * won't work. Lower preference 2372*0Sstevel@tonic-gate * rating of this board, since 2373*0Sstevel@tonic-gate * some number of pages will 2374*0Sstevel@tonic-gate * unavailable for use. 
2375*0Sstevel@tonic-gate */ 2376*0Sstevel@tonic-gate preference = 3; 2377*0Sstevel@tonic-gate } else { 2378*0Sstevel@tonic-gate dr_smt_realigned++; 2379*0Sstevel@tonic-gate pfn = p; 2380*0Sstevel@tonic-gate } 2381*0Sstevel@tonic-gate } 2382*0Sstevel@tonic-gate 2383*0Sstevel@tonic-gate /* 2384*0Sstevel@tonic-gate * translate calculated pfn to an offset 2385*0Sstevel@tonic-gate * relative to the slice boundary. If the 2386*0Sstevel@tonic-gate * candidate board is selected, this offset 2387*0Sstevel@tonic-gate * will be used to calculate the values 2388*0Sstevel@tonic-gate * programmed into the mc. 2389*0Sstevel@tonic-gate */ 2390*0Sstevel@tonic-gate t_mp->sbm_slice_offset = pfn & sm; 2391*0Sstevel@tonic-gate PR_MEM("%s: %s:" 2392*0Sstevel@tonic-gate " proposed mc offset 0x%lx\n", 2393*0Sstevel@tonic-gate f, 2394*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 2395*0Sstevel@tonic-gate t_mp->sbm_slice_offset); 2396*0Sstevel@tonic-gate } 2397*0Sstevel@tonic-gate 2398*0Sstevel@tonic-gate dr_smt_preference[preference]++; 2399*0Sstevel@tonic-gate 2400*0Sstevel@tonic-gate /* calculate index to start of preference set */ 2401*0Sstevel@tonic-gate idx = n_units_per_set * preference; 2402*0Sstevel@tonic-gate /* calculate offset to respective element */ 2403*0Sstevel@tonic-gate idx += t_bd * MAX_MEM_UNITS_PER_BOARD + t_unit; 2404*0Sstevel@tonic-gate 2405*0Sstevel@tonic-gate ASSERT(idx < n_units_per_set * n_sets); 2406*0Sstevel@tonic-gate sets[idx] = t_mp; 2407*0Sstevel@tonic-gate } 2408*0Sstevel@tonic-gate } 2409*0Sstevel@tonic-gate 2410*0Sstevel@tonic-gate /* 2411*0Sstevel@tonic-gate * NOTE: this would be a good place to sort each candidate 2412*0Sstevel@tonic-gate * set in to some desired order, e.g. memory size in ascending 2413*0Sstevel@tonic-gate * order. Without an additional sorting step here, the order 2414*0Sstevel@tonic-gate * within a set is ascending board number order. 
2415*0Sstevel@tonic-gate */ 2416*0Sstevel@tonic-gate 2417*0Sstevel@tonic-gate c_mp = NULL; 2418*0Sstevel@tonic-gate x_ml = NULL; 2419*0Sstevel@tonic-gate t_ml = NULL; 2420*0Sstevel@tonic-gate for (idx = 0; idx < n_units_per_set * n_sets; idx++) { 2421*0Sstevel@tonic-gate memquery_t mq; 2422*0Sstevel@tonic-gate 2423*0Sstevel@tonic-gate /* cleanup t_ml after previous pass */ 2424*0Sstevel@tonic-gate if (t_ml != NULL) { 2425*0Sstevel@tonic-gate memlist_delete(t_ml); 2426*0Sstevel@tonic-gate t_ml = NULL; 2427*0Sstevel@tonic-gate } 2428*0Sstevel@tonic-gate 2429*0Sstevel@tonic-gate /* get candidate target board mem unit */ 2430*0Sstevel@tonic-gate t_mp = sets[idx]; 2431*0Sstevel@tonic-gate if (t_mp == NULL) 2432*0Sstevel@tonic-gate continue; 2433*0Sstevel@tonic-gate 2434*0Sstevel@tonic-gate /* get target board memlist */ 2435*0Sstevel@tonic-gate t_ml = dr_get_memlist(t_mp); 2436*0Sstevel@tonic-gate if (t_ml == NULL) { 2437*0Sstevel@tonic-gate cmn_err(CE_WARN, "%s: no memlist for" 2438*0Sstevel@tonic-gate " mem-unit %d, board %d", 2439*0Sstevel@tonic-gate f, 2440*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num, 2441*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_unum); 2442*0Sstevel@tonic-gate 2443*0Sstevel@tonic-gate continue; 2444*0Sstevel@tonic-gate } 2445*0Sstevel@tonic-gate 2446*0Sstevel@tonic-gate /* get appropriate source board memlist */ 2447*0Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 2448*0Sstevel@tonic-gate if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) { 2449*0Sstevel@tonic-gate spgcnt_t excess; 2450*0Sstevel@tonic-gate 2451*0Sstevel@tonic-gate /* 2452*0Sstevel@tonic-gate * make a copy of the source board memlist 2453*0Sstevel@tonic-gate * then edit it to remove the spans that 2454*0Sstevel@tonic-gate * are outside the calculated span of 2455*0Sstevel@tonic-gate * [pfn..s_mq.last_nonrelocatable]. 
2456*0Sstevel@tonic-gate */ 2457*0Sstevel@tonic-gate if (x_ml != NULL) 2458*0Sstevel@tonic-gate memlist_delete(x_ml); 2459*0Sstevel@tonic-gate 2460*0Sstevel@tonic-gate x_ml = memlist_dup(s_ml); 2461*0Sstevel@tonic-gate if (x_ml == NULL) { 2462*0Sstevel@tonic-gate PR_MEM("%s: memlist_dup failed\n", f); 2463*0Sstevel@tonic-gate /* TODO: should abort */ 2464*0Sstevel@tonic-gate continue; 2465*0Sstevel@tonic-gate } 2466*0Sstevel@tonic-gate 2467*0Sstevel@tonic-gate /* trim off lower portion */ 2468*0Sstevel@tonic-gate excess = t_mp->sbm_slice_offset - 2469*0Sstevel@tonic-gate (s_mp->sbm_basepfn & sm); 2470*0Sstevel@tonic-gate 2471*0Sstevel@tonic-gate if (excess > 0) { 2472*0Sstevel@tonic-gate x_ml = memlist_del_span( 2473*0Sstevel@tonic-gate x_ml, 2474*0Sstevel@tonic-gate _ptob64(s_mp->sbm_basepfn), 2475*0Sstevel@tonic-gate _ptob64(excess)); 2476*0Sstevel@tonic-gate } 2477*0Sstevel@tonic-gate ASSERT(x_ml); 2478*0Sstevel@tonic-gate 2479*0Sstevel@tonic-gate /* 2480*0Sstevel@tonic-gate * Since this candidate target board is smaller 2481*0Sstevel@tonic-gate * than the source board, s_mq must have been 2482*0Sstevel@tonic-gate * initialized in previous loop while processing 2483*0Sstevel@tonic-gate * this or some other candidate board. 2484*0Sstevel@tonic-gate * FIXME: this is weak. 
2485*0Sstevel@tonic-gate */ 2486*0Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 2487*0Sstevel@tonic-gate 2488*0Sstevel@tonic-gate /* trim off upper portion */ 2489*0Sstevel@tonic-gate excess = (s_mp->sbm_basepfn + s_mp->sbm_npages) 2490*0Sstevel@tonic-gate - (s_mq.last_nonrelocatable + 1); 2491*0Sstevel@tonic-gate if (excess > 0) { 2492*0Sstevel@tonic-gate pfn_t p; 2493*0Sstevel@tonic-gate 2494*0Sstevel@tonic-gate p = s_mq.last_nonrelocatable + 1; 2495*0Sstevel@tonic-gate x_ml = memlist_del_span( 2496*0Sstevel@tonic-gate x_ml, 2497*0Sstevel@tonic-gate _ptob64(p), 2498*0Sstevel@tonic-gate _ptob64(excess)); 2499*0Sstevel@tonic-gate } 2500*0Sstevel@tonic-gate 2501*0Sstevel@tonic-gate PR_MEM("%s: %s: edited source memlist:\n", 2502*0Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 2503*0Sstevel@tonic-gate PR_MEMLIST_DUMP(x_ml); 2504*0Sstevel@tonic-gate 2505*0Sstevel@tonic-gate #ifdef DEBUG 2506*0Sstevel@tonic-gate /* sanity check memlist */ 2507*0Sstevel@tonic-gate d_ml = x_ml; 2508*0Sstevel@tonic-gate while (d_ml->next != NULL) 2509*0Sstevel@tonic-gate d_ml = d_ml->next; 2510*0Sstevel@tonic-gate 2511*0Sstevel@tonic-gate ASSERT(d_ml->address + d_ml->size == 2512*0Sstevel@tonic-gate _ptob64(s_mq.last_nonrelocatable + 1)); 2513*0Sstevel@tonic-gate #endif 2514*0Sstevel@tonic-gate 2515*0Sstevel@tonic-gate /* 2516*0Sstevel@tonic-gate * x_ml now describes only the portion of the 2517*0Sstevel@tonic-gate * source board that will be moved during the 2518*0Sstevel@tonic-gate * copy/rename operation. 2519*0Sstevel@tonic-gate */ 2520*0Sstevel@tonic-gate d_ml = x_ml; 2521*0Sstevel@tonic-gate } else { 2522*0Sstevel@tonic-gate /* use original memlist; all spans will be moved */ 2523*0Sstevel@tonic-gate d_ml = s_ml; 2524*0Sstevel@tonic-gate } 2525*0Sstevel@tonic-gate 2526*0Sstevel@tonic-gate /* verify target can support source memory spans. 
*/ 2527*0Sstevel@tonic-gate if (memlist_canfit(d_ml, t_ml) == 0) { 2528*0Sstevel@tonic-gate PR_MEM("%s: source memlist won't" 2529*0Sstevel@tonic-gate " fit in target memlist\n", f); 2530*0Sstevel@tonic-gate PR_MEM("%s: source memlist:\n", f); 2531*0Sstevel@tonic-gate PR_MEMLIST_DUMP(d_ml); 2532*0Sstevel@tonic-gate PR_MEM("%s: target memlist:\n", f); 2533*0Sstevel@tonic-gate PR_MEMLIST_DUMP(t_ml); 2534*0Sstevel@tonic-gate 2535*0Sstevel@tonic-gate continue; 2536*0Sstevel@tonic-gate } 2537*0Sstevel@tonic-gate 2538*0Sstevel@tonic-gate /* NOTE: the value of d_ml is not used beyond this point */ 2539*0Sstevel@tonic-gate 2540*0Sstevel@tonic-gate PR_MEM("%s: checking for no-reloc in %s, " 2541*0Sstevel@tonic-gate " basepfn=0x%lx, npages=%ld\n", 2542*0Sstevel@tonic-gate f, 2543*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 2544*0Sstevel@tonic-gate t_mp->sbm_basepfn, 2545*0Sstevel@tonic-gate t_mp->sbm_npages); 2546*0Sstevel@tonic-gate 2547*0Sstevel@tonic-gate rv = kphysm_del_span_query( 2548*0Sstevel@tonic-gate t_mp->sbm_basepfn, t_mp->sbm_npages, &mq); 2549*0Sstevel@tonic-gate if (rv != KPHYSM_OK) { 2550*0Sstevel@tonic-gate PR_MEM("%s: kphysm_del_span_query:" 2551*0Sstevel@tonic-gate " unexpected return value %d\n", f, rv); 2552*0Sstevel@tonic-gate 2553*0Sstevel@tonic-gate continue; 2554*0Sstevel@tonic-gate } 2555*0Sstevel@tonic-gate 2556*0Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 2557*0Sstevel@tonic-gate PR_MEM("%s: candidate %s has" 2558*0Sstevel@tonic-gate " nonrelocatable span [0x%lx..0x%lx]\n", 2559*0Sstevel@tonic-gate f, 2560*0Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 2561*0Sstevel@tonic-gate mq.first_nonrelocatable, 2562*0Sstevel@tonic-gate mq.last_nonrelocatable); 2563*0Sstevel@tonic-gate 2564*0Sstevel@tonic-gate continue; 2565*0Sstevel@tonic-gate } 2566*0Sstevel@tonic-gate 2567*0Sstevel@tonic-gate #ifdef DEBUG 2568*0Sstevel@tonic-gate /* 2569*0Sstevel@tonic-gate * This is a debug tool for excluding certain boards 2570*0Sstevel@tonic-gate * from being 
selected as a target board candidate. 2571*0Sstevel@tonic-gate * dr_ignore_board is only tested by this driver. 2572*0Sstevel@tonic-gate * It must be set with adb, obp, /etc/system or your 2573*0Sstevel@tonic-gate * favorite debugger. 2574*0Sstevel@tonic-gate */ 2575*0Sstevel@tonic-gate if (dr_ignore_board & 2576*0Sstevel@tonic-gate (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) { 2577*0Sstevel@tonic-gate PR_MEM("%s: dr_ignore_board flag set," 2578*0Sstevel@tonic-gate " ignoring %s as candidate\n", 2579*0Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 2580*0Sstevel@tonic-gate continue; 2581*0Sstevel@tonic-gate } 2582*0Sstevel@tonic-gate #endif 2583*0Sstevel@tonic-gate 2584*0Sstevel@tonic-gate /* 2585*0Sstevel@tonic-gate * Reserve excess source board memory, if any. 2586*0Sstevel@tonic-gate * 2587*0Sstevel@tonic-gate * When the number of pages on the candidate target 2588*0Sstevel@tonic-gate * board is less than the number of pages on the source, 2589*0Sstevel@tonic-gate * then some spans (clearly) of the source board's address 2590*0Sstevel@tonic-gate * space will not be covered by physical memory after the 2591*0Sstevel@tonic-gate * copy/rename completes. The following code block 2592*0Sstevel@tonic-gate * schedules those spans to be deleted. 
2593*0Sstevel@tonic-gate */ 2594*0Sstevel@tonic-gate if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) { 2595*0Sstevel@tonic-gate pfn_t pfn; 2596*0Sstevel@tonic-gate uint64_t s_del_pa; 2597*0Sstevel@tonic-gate struct memlist *ml; 2598*0Sstevel@tonic-gate 2599*0Sstevel@tonic-gate d_ml = memlist_dup(s_ml); 2600*0Sstevel@tonic-gate if (d_ml == NULL) { 2601*0Sstevel@tonic-gate PR_MEM("%s: cant dup src brd memlist\n", f); 2602*0Sstevel@tonic-gate /* TODO: should abort */ 2603*0Sstevel@tonic-gate continue; 2604*0Sstevel@tonic-gate } 2605*0Sstevel@tonic-gate 2606*0Sstevel@tonic-gate /* calculate base pfn relative to target board */ 2607*0Sstevel@tonic-gate pfn = s_mp->sbm_basepfn & ~sm; 2608*0Sstevel@tonic-gate pfn += t_mp->sbm_slice_offset; 2609*0Sstevel@tonic-gate 2610*0Sstevel@tonic-gate /* 2611*0Sstevel@tonic-gate * cannot split dynamically added segment 2612*0Sstevel@tonic-gate */ 2613*0Sstevel@tonic-gate s_del_pa = _ptob64(pfn + t_mp->sbm_npages); 2614*0Sstevel@tonic-gate PR_MEM("%s: proposed src delete pa=0x%lx\n", f, 2615*0Sstevel@tonic-gate s_del_pa); 2616*0Sstevel@tonic-gate PR_MEM("%s: checking for split of dyn seg list:\n", f); 2617*0Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs); 2618*0Sstevel@tonic-gate for (ml = s_mp->sbm_dyn_segs; ml; ml = ml->next) { 2619*0Sstevel@tonic-gate if (s_del_pa > ml->address && 2620*0Sstevel@tonic-gate s_del_pa < ml->address + ml->size) { 2621*0Sstevel@tonic-gate s_del_pa = ml->address; 2622*0Sstevel@tonic-gate break; 2623*0Sstevel@tonic-gate } 2624*0Sstevel@tonic-gate } 2625*0Sstevel@tonic-gate 2626*0Sstevel@tonic-gate /* remove span that will reside on candidate board */ 2627*0Sstevel@tonic-gate d_ml = memlist_del_span(d_ml, _ptob64(pfn), 2628*0Sstevel@tonic-gate s_del_pa - _ptob64(pfn)); 2629*0Sstevel@tonic-gate 2630*0Sstevel@tonic-gate PR_MEM("%s: %s: reserving src brd memlist:\n", 2631*0Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 2632*0Sstevel@tonic-gate PR_MEMLIST_DUMP(d_ml); 2633*0Sstevel@tonic-gate 
2634*0Sstevel@tonic-gate /* reserve excess spans */ 2635*0Sstevel@tonic-gate if (dr_reserve_mem_spans( 2636*0Sstevel@tonic-gate &s_mp->sbm_memhandle, d_ml) != 0) { 2637*0Sstevel@tonic-gate 2638*0Sstevel@tonic-gate /* likely more non-reloc pages appeared */ 2639*0Sstevel@tonic-gate /* TODO: restart from top? */ 2640*0Sstevel@tonic-gate continue; 2641*0Sstevel@tonic-gate } 2642*0Sstevel@tonic-gate } else { 2643*0Sstevel@tonic-gate /* no excess source board memory */ 2644*0Sstevel@tonic-gate d_ml = NULL; 2645*0Sstevel@tonic-gate } 2646*0Sstevel@tonic-gate 2647*0Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_RESERVED; 2648*0Sstevel@tonic-gate 2649*0Sstevel@tonic-gate /* 2650*0Sstevel@tonic-gate * reserve all memory on target board. 2651*0Sstevel@tonic-gate * NOTE: source board's memhandle is used. 2652*0Sstevel@tonic-gate * 2653*0Sstevel@tonic-gate * If this succeeds (eq 0), then target selection is 2654*0Sstevel@tonic-gate * complete and all unwanted memory spans, both source and 2655*0Sstevel@tonic-gate * target, have been reserved. Loop is terminated. 2656*0Sstevel@tonic-gate */ 2657*0Sstevel@tonic-gate if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) { 2658*0Sstevel@tonic-gate PR_MEM("%s: %s: target board memory reserved\n", 2659*0Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 2660*0Sstevel@tonic-gate 2661*0Sstevel@tonic-gate /* a candidate target board is now reserved */ 2662*0Sstevel@tonic-gate t_mp->sbm_flags |= DR_MFLAG_RESERVED; 2663*0Sstevel@tonic-gate c_mp = t_mp; 2664*0Sstevel@tonic-gate 2665*0Sstevel@tonic-gate /* *** EXITING LOOP *** */ 2666*0Sstevel@tonic-gate break; 2667*0Sstevel@tonic-gate } 2668*0Sstevel@tonic-gate 2669*0Sstevel@tonic-gate /* did not successfully reserve the target board. 
*/ 2670*0Sstevel@tonic-gate PR_MEM("%s: could not reserve target %s\n", 2671*0Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 2672*0Sstevel@tonic-gate 2673*0Sstevel@tonic-gate /* 2674*0Sstevel@tonic-gate * NOTE: an undo of the dr_reserve_mem_span work 2675*0Sstevel@tonic-gate * will happen automatically when the memhandle 2676*0Sstevel@tonic-gate * (s_mp->sbm_memhandle) is kphysm_del_release'd. 2677*0Sstevel@tonic-gate */ 2678*0Sstevel@tonic-gate 2679*0Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_RESERVED; 2680*0Sstevel@tonic-gate } 2681*0Sstevel@tonic-gate 2682*0Sstevel@tonic-gate /* clean up after memlist editing logic */ 2683*0Sstevel@tonic-gate if (x_ml != NULL) 2684*0Sstevel@tonic-gate memlist_delete(x_ml); 2685*0Sstevel@tonic-gate 2686*0Sstevel@tonic-gate FREESTRUCT(sets, dr_mem_unit_t *, n_units_per_set * n_sets); 2687*0Sstevel@tonic-gate 2688*0Sstevel@tonic-gate /* 2689*0Sstevel@tonic-gate * c_mp will be NULL when the entire sets[] array 2690*0Sstevel@tonic-gate * has been searched without reserving a target board. 
2691*0Sstevel@tonic-gate */ 2692*0Sstevel@tonic-gate if (c_mp == NULL) { 2693*0Sstevel@tonic-gate PR_MEM("%s: %s: target selection failed.\n", 2694*0Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 2695*0Sstevel@tonic-gate 2696*0Sstevel@tonic-gate if (t_ml != NULL) 2697*0Sstevel@tonic-gate memlist_delete(t_ml); 2698*0Sstevel@tonic-gate 2699*0Sstevel@tonic-gate return (-1); 2700*0Sstevel@tonic-gate } 2701*0Sstevel@tonic-gate 2702*0Sstevel@tonic-gate PR_MEM("%s: found target %s for source %s\n", 2703*0Sstevel@tonic-gate f, 2704*0Sstevel@tonic-gate c_mp->sbm_cm.sbdev_path, 2705*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path); 2706*0Sstevel@tonic-gate 2707*0Sstevel@tonic-gate s_mp->sbm_peer = c_mp; 2708*0Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_SOURCE; 2709*0Sstevel@tonic-gate s_mp->sbm_del_mlist = d_ml; /* spans to be deleted, if any */ 2710*0Sstevel@tonic-gate s_mp->sbm_mlist = s_ml; 2711*0Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 1; 2712*0Sstevel@tonic-gate 2713*0Sstevel@tonic-gate c_mp->sbm_peer = s_mp; 2714*0Sstevel@tonic-gate c_mp->sbm_flags |= DR_MFLAG_TARGET; 2715*0Sstevel@tonic-gate c_mp->sbm_del_mlist = t_ml; /* spans to be deleted */ 2716*0Sstevel@tonic-gate c_mp->sbm_mlist = t_ml; 2717*0Sstevel@tonic-gate c_mp->sbm_cm.sbdev_busy = 1; 2718*0Sstevel@tonic-gate 2719*0Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_MEMRESIZE; 2720*0Sstevel@tonic-gate if (c_mp->sbm_npages > s_mp->sbm_npages) { 2721*0Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_MEMUPSIZE; 2722*0Sstevel@tonic-gate PR_MEM("%s: upsize detected (source=%d < target=%d)\n", 2723*0Sstevel@tonic-gate f, s_mp->sbm_npages, c_mp->sbm_npages); 2724*0Sstevel@tonic-gate } else if (c_mp->sbm_npages < s_mp->sbm_npages) { 2725*0Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_MEMDOWNSIZE; 2726*0Sstevel@tonic-gate PR_MEM("%s: downsize detected (source=%d > target=%d)\n", 2727*0Sstevel@tonic-gate f, s_mp->sbm_npages, c_mp->sbm_npages); 2728*0Sstevel@tonic-gate } 2729*0Sstevel@tonic-gate 
	return (0);
}

/*
 * Memlist support.
 *
 * These helpers maintain sorted, doubly-linked struct memlist chains
 * (address/size spans) using GETSTRUCT/FREESTRUCT for node allocation.
 */

/*
 * Return a newly allocated copy of mlist (NULL for an empty list).
 * Caller owns the copy and must release it (e.g. via memlist_delete).
 */
static struct memlist *
memlist_dup(struct memlist *mlist)
{
	struct memlist *hl = NULL, *tl, **mlp;

	if (mlist == NULL)
		return (NULL);

	/*
	 * mlp always points at the link to fill in next; tl trails
	 * one node behind so each new node's prev can be set.
	 */
	mlp = &hl;
	tl = *mlp;
	for (; mlist; mlist = mlist->next) {
		*mlp = GETSTRUCT(struct memlist, 1);
		(*mlp)->address = mlist->address;
		(*mlp)->size = mlist->size;
		(*mlp)->prev = tl;
		tl = *mlp;
		mlp = &((*mlp)->next);
	}
	/* terminate the duplicate list */
	*mlp = NULL;

	return (hl);
}

/*
 * Determine whether the source memlist (s_mlist) will
 * fit into the target memlist (t_mlist) in terms of
 * size and holes (i.e. based on same relative base address).
 *
 * Returns 1 if every source span lands inside some target span
 * once both lists are rebased to 0, otherwise 0.  The lists are
 * modified in place during the check but restored before return.
 */
static int
memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist)
{
	int rv = 0;
	uint64_t s_basepa, t_basepa;
	struct memlist *s_ml, *t_ml;

	if ((s_mlist == NULL) || (t_mlist == NULL))
		return (0);

	/*
	 * Base both memlists on common base address (0).
	 */
	s_basepa = s_mlist->address;
	t_basepa = t_mlist->address;

	for (s_ml = s_mlist; s_ml; s_ml = s_ml->next)
		s_ml->address -= s_basepa;

	for (t_ml = t_mlist; t_ml; t_ml = t_ml->next)
		t_ml->address -= t_basepa;

	/*
	 * Walk both lists in tandem: advance through source spans
	 * while each is fully contained in the current target span;
	 * on a misfit, try the next target span with the same source.
	 */
	s_ml = s_mlist;
	for (t_ml = t_mlist; t_ml && s_ml; t_ml = t_ml->next) {
		uint64_t s_start, s_end;
		uint64_t t_start, t_end;

		t_start = t_ml->address;
		t_end = t_start + t_ml->size;

		for (; s_ml; s_ml = s_ml->next) {
			s_start = s_ml->address;
			s_end = s_start + s_ml->size;

			if ((s_start < t_start) || (s_end > t_end))
				break;
		}
	}
	/*
	 * If we ran out of source memlist chunks, that means
	 * we found a home for all of them.
	 */
	if (s_ml == NULL)
		rv = 1;

	/*
	 * Need to add base addresses back since memlists
	 * are probably in use by caller.
	 */
	for (s_ml = s_mlist; s_ml; s_ml = s_ml->next)
		s_ml->address += s_basepa;

	for (t_ml = t_mlist; t_ml; t_ml = t_ml->next)
		t_ml->address += t_basepa;

	return (rv);
}

/*
 * Remove the span [base, base+len) from mlist.  Spans that become
 * empty are freed; a span straddling the deleted region is split in
 * two.  Returns the (possibly new) head of the list, or NULL if the
 * list becomes empty.
 */
static struct memlist *
memlist_del_span(struct memlist *mlist, uint64_t base, uint64_t len)
{
	uint64_t end;
	struct memlist *ml, *tl, *nlp;

	if (mlist == NULL)
		return (NULL);

	end = base + len;
	/* span empty or entirely below the first entry: nothing to do */
	if ((end <= mlist->address) || (base == end))
		return (mlist);

	/*
	 * tl trails ml so a freed node can be unlinked; nlp caches
	 * ml->next up front because ml may be freed inside the loop.
	 */
	for (tl = ml = mlist; ml; tl = ml, ml = nlp) {
		uint64_t mend;

		nlp = ml->next;

		/* list is sorted; past the span means we are done */
		if (end <= ml->address)
			break;

		mend = ml->address + ml->size;
		if (base < mend) {
			if (base <= ml->address) {
				/* span covers the front of this entry */
				ml->address = end;
				if (end >= mend)
					ml->size = 0ull;
				else
					ml->size = mend - ml->address;
			} else {
				/* span starts inside this entry */
				ml->size = base - ml->address;
				if (end < mend) {
					struct memlist *nl;
					/*
					 * splitting a memlist entry.
					 */
					nl = GETSTRUCT(struct memlist, 1);
					nl->address = end;
					nl->size = mend - nl->address;
					if ((nl->next = nlp) != NULL)
						nlp->prev = nl;
					nl->prev = ml;
					ml->next = nl;
					nlp = nl;
				}
			}
			if (ml->size == 0ull) {
				/* entry fully deleted: unlink and free */
				if (ml == mlist) {
					if ((mlist = nlp) != NULL)
						nlp->prev = NULL;
					FREESTRUCT(ml, struct memlist, 1);
					if (mlist == NULL)
						break;
					ml = nlp;
				} else {
					if ((tl->next = nlp) != NULL)
						nlp->prev = tl;
					FREESTRUCT(ml, struct memlist, 1);
					/* keep tl valid for next iteration */
					ml = tl;
				}
			}
		}
	}

	return (mlist);
}

/*
 * add span without merging
 *
 * Insert a new entry for [base, base+len) into mlist, keeping the
 * list sorted by address; adjacent spans are NOT coalesced.  Returns
 * the (possibly new) head of the list.
 *
 * NOTE(review): when len == 0 this returns NULL rather than mlist,
 * so a caller doing "ml = memlist_cat_span(ml, b, 0)" would lose its
 * list.  Presumably callers never pass len == 0 — verify before use.
 */
static struct memlist *
memlist_cat_span(struct memlist *mlist, uint64_t base, uint64_t len)
{
	struct memlist *ml, *tl, *nl;

	if (len == 0ull)
		return (NULL);

	if (mlist == NULL) {
		/* empty list: new entry becomes the whole list */
		mlist = GETSTRUCT(struct memlist, 1);
		mlist->address = base;
		mlist->size = len;
		mlist->next = mlist->prev = NULL;

		return (mlist);
	}

	/* find the first entry above base and insert before it */
	for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
		if (base < ml->address) {
			nl = GETSTRUCT(struct memlist, 1);
			nl->address = base;
			nl->size = len;
			nl->next = ml;
			if ((nl->prev = ml->prev) != NULL)
				nl->prev->next = nl;
			ml->prev = nl;
			if (mlist == ml)
				mlist = nl;
			break;
		}
	}

	/* base is beyond every entry: append at the tail (tl) */
	if (ml == NULL) {
		nl = GETSTRUCT(struct memlist, 1);
		nl->address = base;
		nl->size = len;
		nl->next = NULL;
		nl->prev = tl;
		tl->next = nl;
	}

	return (mlist);
}