/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DR memory support routines.
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/dditypes.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <vm/seg_kmem.h>
#include <sys/lgrp.h>
#include <sys/platform_module.h>

#include <vm/page.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

extern struct memlist	*phys_install;

/* TODO: push this reference below drmach line */
extern int		kcage_on;

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char	*dr_ie_fmt = "%M% %d";

static int		dr_post_detach_mem_unit(dr_mem_unit_t *mp);
static int		dr_reserve_mem_spans(memhandle_t *mhp,
				struct memlist *mlist);
static int		dr_select_mem_target(dr_handle_t *hp,
				dr_mem_unit_t *mp, struct memlist *ml);
static void		dr_init_mem_unit_data(dr_mem_unit_t *mp);

static int		memlist_canfit(struct memlist *s_mlist,
				struct memlist *t_mlist);

/*
 * dr_mem_unit_t.sbm_flags
 */
#define	DR_MFLAG_RESERVED	0x01	/* mem unit reserved for delete */
#define	DR_MFLAG_SOURCE		0x02	/* source brd of copy/rename op */
#define	DR_MFLAG_TARGET		0x04	/* target brd of copy/rename op */
#define	DR_MFLAG_MEMUPSIZE	0x08	/* move from big to small board */
#define	DR_MFLAG_MEMDOWNSIZE	0x10	/* move from small to big board */
#define	DR_MFLAG_MEMRESIZE	0x18	/* move to different size board */
#define	DR_MFLAG_RELOWNER	0x20	/* memory release (delete) owner */
#define	DR_MFLAG_RELDONE	0x40	/* memory release (delete) done */

/* helper macros */
#define	_ptob64(p) ((uint64_t)(p) << PAGESHIFT)
#define	_b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
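/*
 * Worked example of the helper macros above (illustrative only, assuming
 * the 8 KB base page size typical of these sun4u platforms, i.e.
 * PAGESHIFT == 13): _ptob64(0x200) yields the physical byte address
 * 0x400000, and _b64top(0x400000) gives back the page frame number 0x200.
 */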
static struct memlist *
dr_get_memlist(dr_mem_unit_t *mp)
{
	struct memlist	*mlist = NULL;
	sbd_error_t	*err;
	static fn_t	f = "dr_get_memlist";

	PR_MEM("%s for %s...\n", f, mp->sbm_cm.sbdev_path);

	/*
	 * Return cached memlist, if present.
	 * This memlist will be present following an
	 * unconfigure (a.k.a: detach) of this memunit.
	 * It should only be used in the case where a configure
	 * is bringing this memunit back in without going
	 * through the disconnect and connect states.
	 */
	if (mp->sbm_mlist) {
		PR_MEM("%s: found cached memlist\n", f);

		mlist = memlist_dup(mp->sbm_mlist);
	} else {
		uint64_t basepa = _ptob64(mp->sbm_basepfn);

		/* attempt to construct a memlist using phys_install */

		/* round down to slice base address */
		basepa &= ~(mp->sbm_slice_size - 1);

		/* get a copy of phys_install to edit */
		memlist_read_lock();
		mlist = memlist_dup(phys_install);
		memlist_read_unlock();

		/* trim lower irrelevant span */
		if (mlist)
			mlist = memlist_del_span(mlist, 0ull, basepa);

		/* trim upper irrelevant span */
		if (mlist) {
			uint64_t endpa;

			basepa += mp->sbm_slice_size;
			endpa = _ptob64(physmax + 1);
			if (endpa > basepa)
				mlist = memlist_del_span(
				    mlist,
				    basepa,
				    endpa - basepa);
		}

		if (mlist) {
			/* successfully built a memlist */
			PR_MEM("%s: derived memlist from phys_install\n", f);
		}

		/* if no mlist yet, try platform layer */
		if (!mlist) {
			err = drmach_mem_get_memlist(
			    mp->sbm_cm.sbdev_id, &mlist);
			if (err) {
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
				mlist = NULL; /* paranoia */
			}
		}
	}

	PR_MEM("%s: memlist for %s\n", f, mp->sbm_cm.sbdev_path);
	PR_MEMLIST_DUMP(mlist);

	return (mlist);
}

typedef struct {
	kcondvar_t cond;
	kmutex_t lock;
	int error;
	int done;
} dr_release_mem_sync_t;

/*
 * Memory has been logically removed by the time this routine is called.
 */
static void
dr_mem_del_done(void *arg, int error)
{
	dr_release_mem_sync_t *ds = arg;

	mutex_enter(&ds->lock);
	ds->error = error;
	ds->done = 1;
	cv_signal(&ds->cond);
	mutex_exit(&ds->lock);
}

/*
 * When we reach here the memory being drained should have
 * already been reserved in dr_pre_release_mem().
 * Our only task here is to kick off the "drain" and wait
 * for it to finish.
 */
void
dr_release_mem(dr_common_unit_t *cp)
{
	dr_mem_unit_t	*mp = (dr_mem_unit_t *)cp;
	int		err;
	dr_release_mem_sync_t rms;
	static fn_t	f = "dr_release_mem";

	/* check that this memory unit has been reserved */
	if (!(mp->sbm_flags & DR_MFLAG_RELOWNER)) {
		DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
		return;
	}

	bzero((void *) &rms, sizeof (rms));

	mutex_init(&rms.lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rms.cond, NULL, CV_DRIVER, NULL);

	mutex_enter(&rms.lock);
	err = kphysm_del_start(mp->sbm_memhandle,
	    dr_mem_del_done, (void *) &rms);
	if (err == KPHYSM_OK) {
		/* wait for completion or interrupt */
		while (!rms.done) {
			if (cv_wait_sig(&rms.cond, &rms.lock) == 0) {
				/* then there is a pending UNIX signal */
				(void) kphysm_del_cancel(mp->sbm_memhandle);

				/* wait for completion */
				while (!rms.done)
					cv_wait(&rms.cond, &rms.lock);
			}
		}
		/* get the result of the memory delete operation */
		err = rms.error;
	}
	mutex_exit(&rms.lock);

	cv_destroy(&rms.cond);
	mutex_destroy(&rms.lock);

	if (err != KPHYSM_OK) {
		int e_code;

		switch (err) {
		case KPHYSM_ENOWORK:
			e_code = ESBD_NOERROR;
			break;

		case KPHYSM_EHANDLE:
		case KPHYSM_ESEQUENCE:
			e_code = ESBD_INTERNAL;
			break;

		case KPHYSM_ENOTVIABLE:
			e_code = ESBD_MEM_NOTVIABLE;
			break;

		case KPHYSM_EREFUSED:
			e_code = ESBD_MEM_REFUSED;
			break;

		case KPHYSM_ENONRELOC:
			e_code = ESBD_MEM_NONRELOC;
			break;

		case KPHYSM_ECANCELLED:
			e_code = ESBD_MEM_CANCELLED;
			break;

		case KPHYSM_ERESOURCE:
			e_code = ESBD_MEMFAIL;
			break;

		default:
			cmn_err(CE_WARN,
			    "%s: unexpected kphysm error code %d,"
			    " id 0x%p",
			    f, err, mp->sbm_cm.sbdev_id);

			e_code = ESBD_IO;
			break;
		}

		if (e_code != ESBD_NOERROR) {
			dr_dev_err(CE_IGNORE, &mp->sbm_cm, e_code);
		}
	}
}

void
dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
{
	_NOTE(ARGUNUSED(hp))

	dr_mem_unit_t	*mp = (dr_mem_unit_t *)cp;
	struct memlist	*ml, *mc;
	sbd_error_t	*err;
	static fn_t	f = "dr_attach_mem";

	PR_MEM("%s...\n", f);

	dr_lock_status(hp->h_bd);
	err = drmach_configure(cp->sbdev_id, 0);
	dr_unlock_status(hp->h_bd);
	if (err) {
		DRERR_SET_C(&cp->sbdev_error, &err);
		return;
	}

	ml = dr_get_memlist(mp);
	for (mc = ml; mc; mc = mc->next) {
		int		rv;
		sbd_error_t	*err;

		rv = kphysm_add_memory_dynamic(
		    (pfn_t)(mc->address >> PAGESHIFT),
		    (pgcnt_t)(mc->size >> PAGESHIFT));
		if (rv != KPHYSM_OK) {
			/*
			 * translate kphysm error and
			 * store in devlist error
			 */
			switch (rv) {
			case KPHYSM_ERESOURCE:
				rv = ESBD_NOMEM;
				break;

			case KPHYSM_EFAULT:
				rv = ESBD_FAULT;
				break;

			default:
				rv = ESBD_INTERNAL;
				break;
			}

			if (rv == ESBD_INTERNAL) {
				DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
			} else
				dr_dev_err(CE_WARN, &mp->sbm_cm, rv);
			break;
		}

		err = drmach_mem_add_span(
		    mp->sbm_cm.sbdev_id, mc->address, mc->size);
		if (err) {
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
			break;
		}
	}

	memlist_delete(ml);

	/* back out if configure failed */
	if (mp->sbm_cm.sbdev_error != NULL) {
		dr_lock_status(hp->h_bd);
		err = drmach_unconfigure(cp->sbdev_id,
		    DEVI_BRANCH_DESTROY);
		if (err)
			sbd_err_clear(&err);
		dr_unlock_status(hp->h_bd);
	}
}

#define	DR_SCRUB_VALUE	0x0d0e0a0d0b0e0e0fULL

static void
dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist)
{
#ifdef DEBUG
	clock_t stime = lbolt;
#endif /* DEBUG */

	struct memlist	*ml;
	uint64_t	scrub_value = DR_SCRUB_VALUE;
	processorid_t	cpuid;
	static fn_t	f = "dr_mem_ecache_scrub";

	cpuid = drmach_mem_cpu_affinity(mp->sbm_cm.sbdev_id);
	affinity_set(cpuid);

	PR_MEM("%s: using proc %d, memlist...\n", f,
	    (cpuid == CPU_CURRENT) ? CPU->cpu_id : cpuid);
	PR_MEMLIST_DUMP(mlist);

	for (ml = mlist; ml; ml = ml->next) {
		uint64_t	dst_pa;
		uint64_t	nbytes;

		/* calculate the destination physical address */
		dst_pa = ml->address;
		if (ml->address & PAGEOFFSET)
			cmn_err(CE_WARN,
			    "%s: address (0x%lx) not on "
			    "page boundary", f, ml->address);

		nbytes = ml->size;
		if (ml->size & PAGEOFFSET)
			cmn_err(CE_WARN,
			    "%s: size (0x%lx) not on "
			    "page boundary", f, ml->size);

		/*LINTED*/
		while (nbytes > 0) {
			/* write 64 bits to dst_pa */
			stdphys(dst_pa, scrub_value);

			/* increment/decrement by cacheline sizes */
			dst_pa += DRMACH_COHERENCY_UNIT;
			nbytes -= DRMACH_COHERENCY_UNIT;
		}
	}

	/*
	 * flush this cpu's ecache and take care to ensure
	 * that all of its bus transactions have retired.
	 */
	drmach_cpu_flush_ecache_sync();

	affinity_clear();

#ifdef DEBUG
	stime = lbolt - stime;
	PR_MEM("%s: scrub ticks = %ld (%ld secs)\n", f, stime, stime / hz);
#endif /* DEBUG */
}

static int
dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
{
	time_t		copytime;
	drmachid_t	cr_id;
	dr_sr_handle_t	*srhp;
	struct memlist	*c_ml, *d_ml;
	sbd_error_t	*err;
	static fn_t	f = "dr_move_memory";

	PR_MEM("%s: (INLINE) moving memory from %s to %s\n",
	    f,
	    s_mp->sbm_cm.sbdev_path,
	    t_mp->sbm_cm.sbdev_path);

	ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE);
	ASSERT(s_mp->sbm_peer == t_mp);
	ASSERT(s_mp->sbm_mlist);

	ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
	ASSERT(t_mp->sbm_peer == s_mp);

	/*
	 * create a memlist of spans to copy by removing
	 * the spans that have been deleted, if any, from
	 * the full source board memlist.  s_mp->sbm_del_mlist
	 * will be NULL if there were no spans deleted from
	 * the source board.
	 */
	c_ml = memlist_dup(s_mp->sbm_mlist);
	d_ml = s_mp->sbm_del_mlist;
	while (d_ml != NULL) {
		c_ml = memlist_del_span(c_ml, d_ml->address, d_ml->size);
		d_ml = d_ml->next;
	}

	affinity_set(drmach_mem_cpu_affinity(t_mp->sbm_cm.sbdev_id));

	err = drmach_copy_rename_init(
	    t_mp->sbm_cm.sbdev_id, _ptob64(t_mp->sbm_slice_offset),
	    s_mp->sbm_cm.sbdev_id, c_ml, &cr_id);
	if (err) {
		DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
		affinity_clear();
		return (-1);
	}

	srhp = dr_get_sr_handle(hp);
	ASSERT(srhp);

	copytime = lbolt;

	/* Quiesce the OS. */
	if (dr_suspend(srhp)) {
		cmn_err(CE_WARN, "%s: failed to quiesce OS"
		    " for copy-rename", f);

		dr_release_sr_handle(srhp);
		err = drmach_copy_rename_fini(cr_id);
		if (err) {
			/*
			 * no error is expected since the program has
			 * not yet run.
			 */

			/* catch this in debug kernels */
			ASSERT(0);

			sbd_err_clear(&err);
		}

		/* suspend error reached via hp */
		s_mp->sbm_cm.sbdev_error = hp->h_err;
		hp->h_err = NULL;

		affinity_clear();
		return (-1);
	}

	/*
	 * Rename memory for lgroup.
	 * Source and target board numbers are packaged in arg.
	 */
	{
		dr_board_t	*t_bp, *s_bp;

		s_bp = s_mp->sbm_cm.sbdev_bp;
		t_bp = t_mp->sbm_cm.sbdev_bp;

		lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
		    (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16)));
	}

	drmach_copy_rename(cr_id);

	/* Resume the OS. */
	dr_resume(srhp);

	copytime = lbolt - copytime;

	dr_release_sr_handle(srhp);
	err = drmach_copy_rename_fini(cr_id);
	if (err)
		DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);

	affinity_clear();

	PR_MEM("%s: copy-rename elapsed time = %ld ticks (%ld secs)\n",
	    f, copytime, copytime / hz);

	/* return -1 if dr_suspend or copy/rename recorded an error */
	return (err == NULL ? 0 : -1);
}

/*
 * If the detaching node contains memory that is "non-permanent",
 * then the memory adr's are simply cleared.  If the memory
 * is non-relocatable, then do a copy-rename.
 */
void
dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
{
	int		rv = 0;
	dr_mem_unit_t	*s_mp = (dr_mem_unit_t *)cp;
	dr_mem_unit_t	*t_mp;
	dr_state_t	state;
	static fn_t	f = "dr_detach_mem";

	PR_MEM("%s...\n", f);

	/* lookup target mem unit and target board structure, if any */
	if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
		t_mp = s_mp->sbm_peer;
		ASSERT(t_mp != NULL);
		ASSERT(t_mp->sbm_peer == s_mp);
	} else {
		t_mp = NULL;
	}

	/* verify mem unit's state is UNREFERENCED */
	state = s_mp->sbm_cm.sbdev_state;
	if (state != DR_STATE_UNREFERENCED) {
		dr_dev_err(CE_IGNORE, &s_mp->sbm_cm, ESBD_STATE);
		return;
	}

	/* verify target mem unit's state is UNREFERENCED, if any */
	if (t_mp != NULL) {
		state = t_mp->sbm_cm.sbdev_state;
		if (state != DR_STATE_UNREFERENCED) {
			dr_dev_err(CE_IGNORE, &t_mp->sbm_cm, ESBD_STATE);
			return;
		}
	}

	/*
	 * Scrub deleted memory.  This will cause all cachelines
	 * referencing the memory to only be in the local cpu's
	 * ecache.
	 */
	if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
		/* no del mlist for src<=dst mem size copy/rename */
		if (s_mp->sbm_del_mlist)
			dr_mem_ecache_scrub(s_mp, s_mp->sbm_del_mlist);
	}
	if (t_mp != NULL && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) {
		ASSERT(t_mp->sbm_del_mlist);
		dr_mem_ecache_scrub(t_mp, t_mp->sbm_del_mlist);
	}

	/*
	 * If there is no target board (no copy/rename was needed), then
	 * we're done!
	 */
	if (t_mp == NULL) {
		sbd_error_t *err;
		/*
		 * Reprogram interconnect hardware and disable
		 * memory controllers for memory node that's going away.
		 */

		err = drmach_mem_disable(s_mp->sbm_cm.sbdev_id);
		if (err) {
			DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
			rv = -1;
		}
	} else {
		rv = dr_move_memory(hp, s_mp, t_mp);
		PR_MEM("%s: %s memory COPY-RENAME (board %d -> %d)\n",
		    f,
		    rv ? "FAILED" : "COMPLETED",
"FAILED" : "COMPLETED", 6140Sstevel@tonic-gate s_mp->sbm_cm.sbdev_bp->b_num, 6150Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num); 6160Sstevel@tonic-gate 6170Sstevel@tonic-gate if (rv != 0) 6180Sstevel@tonic-gate (void) dr_cancel_mem(s_mp); 6190Sstevel@tonic-gate } 6200Sstevel@tonic-gate 6210Sstevel@tonic-gate if (rv == 0) { 6220Sstevel@tonic-gate sbd_error_t *err; 6230Sstevel@tonic-gate 6240Sstevel@tonic-gate dr_lock_status(hp->h_bd); 6250Sstevel@tonic-gate err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id, 626*1772Sjl139090 DEVI_BRANCH_DESTROY); 6270Sstevel@tonic-gate dr_unlock_status(hp->h_bd); 6280Sstevel@tonic-gate if (err) 6290Sstevel@tonic-gate sbd_err_clear(&err); 6300Sstevel@tonic-gate } 6310Sstevel@tonic-gate } 6320Sstevel@tonic-gate 6330Sstevel@tonic-gate #ifndef _STARFIRE 6340Sstevel@tonic-gate /* 6350Sstevel@tonic-gate * XXX workaround for certain lab configurations (see also starcat drmach.c) 6360Sstevel@tonic-gate * Temporary code to get around observed incorrect results from 6370Sstevel@tonic-gate * kphysm_del_span_query when the queried span contains address spans 6380Sstevel@tonic-gate * not occupied by memory in between spans that do have memory. 6390Sstevel@tonic-gate * This routine acts as a wrapper to kphysm_del_span_query. It builds 6400Sstevel@tonic-gate * a memlist from phys_install of spans that exist between base and 6410Sstevel@tonic-gate * base + npages, inclusively. Kphysm_del_span_query is called for each 6420Sstevel@tonic-gate * node in the memlist with the results accumulated in *mp. 6430Sstevel@tonic-gate */ 6440Sstevel@tonic-gate static int 6450Sstevel@tonic-gate dr_del_span_query(pfn_t base, pgcnt_t npages, memquery_t *mp) 6460Sstevel@tonic-gate { 6470Sstevel@tonic-gate uint64_t pa = _ptob64(base); 6480Sstevel@tonic-gate uint64_t sm = ~ (137438953472ull - 1); 6490Sstevel@tonic-gate uint64_t sa = pa & sm; 6500Sstevel@tonic-gate struct memlist *mlist, *ml; 6510Sstevel@tonic-gate int rv; 6520Sstevel@tonic-gate 6530Sstevel@tonic-gate npages = npages; /* silence lint */ 6540Sstevel@tonic-gate memlist_read_lock(); 6550Sstevel@tonic-gate mlist = memlist_dup(phys_install); 6560Sstevel@tonic-gate memlist_read_unlock(); 6570Sstevel@tonic-gate 6580Sstevel@tonic-gate again: 6590Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) { 6600Sstevel@tonic-gate if ((ml->address & sm) != sa) { 661*1772Sjl139090 mlist = memlist_del_span(mlist, 662*1772Sjl139090 ml->address, ml->size); 6630Sstevel@tonic-gate goto again; 6640Sstevel@tonic-gate } 6650Sstevel@tonic-gate } 6660Sstevel@tonic-gate 6670Sstevel@tonic-gate mp->phys_pages = 0; 6680Sstevel@tonic-gate mp->managed = 0; 6690Sstevel@tonic-gate mp->nonrelocatable = 0; 6700Sstevel@tonic-gate mp->first_nonrelocatable = (pfn_t)-1; /* XXX */ 6710Sstevel@tonic-gate mp->last_nonrelocatable = 0; 6720Sstevel@tonic-gate 6730Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) { 6740Sstevel@tonic-gate memquery_t mq; 6750Sstevel@tonic-gate 6760Sstevel@tonic-gate rv = kphysm_del_span_query( 6770Sstevel@tonic-gate _b64top(ml->address), _b64top(ml->size), &mq); 6780Sstevel@tonic-gate if (rv) 6790Sstevel@tonic-gate break; 6800Sstevel@tonic-gate 6810Sstevel@tonic-gate mp->phys_pages += mq.phys_pages; 6820Sstevel@tonic-gate mp->managed += mq.managed; 6830Sstevel@tonic-gate mp->nonrelocatable += mq.nonrelocatable; 6840Sstevel@tonic-gate 6850Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 6860Sstevel@tonic-gate if (mq.first_nonrelocatable < mp->first_nonrelocatable) 6870Sstevel@tonic-gate mp->first_nonrelocatable = 6880Sstevel@tonic-gate 
				    mq.first_nonrelocatable;
			if (mq.last_nonrelocatable > mp->last_nonrelocatable)
				mp->last_nonrelocatable =
				    mq.last_nonrelocatable;
		}
	}

	if (mp->nonrelocatable == 0)
		mp->first_nonrelocatable = 0;	/* XXX */

	memlist_delete(mlist);
	return (rv);
}

#define	kphysm_del_span_query	dr_del_span_query
#endif /* _STARFIRE */

/*
 * NOTE: This routine is only partially smart about multiple
 *	 mem-units.  Need to make mem-status structure smart
 *	 about them also.
 */
int
dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		m, mix;
	memdelstat_t	mdst;
	memquery_t	mq;
	dr_board_t	*bp;
	dr_mem_unit_t	*mp;
	sbd_mem_stat_t	*msp;
	static fn_t	f = "dr_mem_status";

	bp = hp->h_bd;
	devset &= DR_DEVS_PRESENT(bp);

	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
		int		rv;
		sbd_error_t	*err;
		drmach_status_t	pstat;
		dr_mem_unit_t	*p_mp;

		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
			continue;

		mp = dr_get_mem_unit(bp, m);

		if (mp->sbm_cm.sbdev_state == DR_STATE_EMPTY) {
			/* present, but not fully initialized */
			continue;
		}

		if (mp->sbm_cm.sbdev_id == (drmachid_t)0)
			continue;

		/* fetch platform status */
		err = drmach_status(mp->sbm_cm.sbdev_id, &pstat);
		if (err) {
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
			continue;
		}

		msp = &dsp->d_mem;
		bzero((caddr_t)msp, sizeof (*msp));

		strncpy(msp->ms_cm.c_id.c_name, pstat.type,
		    sizeof (msp->ms_cm.c_id.c_name));
		msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type;
		msp->ms_cm.c_id.c_unit = SBD_NULL_UNIT;
		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy | pstat.busy;
		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
		msp->ms_cm.c_ostate = mp->sbm_cm.sbdev_ostate;

		msp->ms_totpages = mp->sbm_npages;
		msp->ms_basepfn = mp->sbm_basepfn;
		msp->ms_pageslost = mp->sbm_pageslost;
		msp->ms_cage_enabled = kcage_on;

		if (mp->sbm_flags & DR_MFLAG_RESERVED)
			p_mp = mp->sbm_peer;
		else
			p_mp = NULL;

		if (p_mp == NULL) {
			msp->ms_peer_is_target = 0;
			msp->ms_peer_ap_id[0] = '\0';
		} else if (p_mp->sbm_flags & DR_MFLAG_RESERVED) {
			char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
			char *minor;

			/*
			 * b_dip doesn't have to be held for ddi_pathname()
			 * because the board struct (dr_board_t) will be
			 * destroyed before b_dip detaches.
			 */
			(void) ddi_pathname(bp->b_dip, path);
			minor = strchr(p_mp->sbm_cm.sbdev_path, ':');

			snprintf(msp->ms_peer_ap_id,
			    sizeof (msp->ms_peer_ap_id), "%s%s",
			    path, (minor == NULL) ? "" : minor);

			kmem_free(path, MAXPATHLEN);

			if (p_mp->sbm_flags & DR_MFLAG_TARGET)
				msp->ms_peer_is_target = 1;
		}

		if (mp->sbm_flags & DR_MFLAG_RELOWNER)
			rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
		else
			rv = KPHYSM_EHANDLE;	/* force 'if' to fail */

		if (rv == KPHYSM_OK) {
			/*
			 * Any pages above managed are "free",
			 * i.e. they have been collected.
			 */
			msp->ms_detpages += (uint_t)(mdst.collected +
			    mdst.phys_pages - mdst.managed);
		} else {
			/*
			 * If we're UNREFERENCED or UNCONFIGURED,
			 * then the number of detached pages is
			 * however many pages are on the board.
			 * I.e. detached = not in use by OS.
			 */
			switch (msp->ms_cm.c_ostate) {
			/*
			 * changed to use cfgadm states
			 *
			 * was:
			 *	case DR_STATE_UNREFERENCED:
			 *	case DR_STATE_UNCONFIGURED:
			 */
			case SBD_STAT_UNCONFIGURED:
				msp->ms_detpages = msp->ms_totpages;
				break;

			default:
				break;
			}
		}

		/*
		 * kphysm_del_span_query can report non-reloc pages = total
		 * pages for memory that is not yet configured
		 */
		if (mp->sbm_cm.sbdev_state != DR_STATE_UNCONFIGURED) {

			rv = kphysm_del_span_query(mp->sbm_basepfn,
			    mp->sbm_npages, &mq);

			if (rv == KPHYSM_OK) {
				msp->ms_managed_pages = mq.managed;
				msp->ms_noreloc_pages = mq.nonrelocatable;
				msp->ms_noreloc_first =
				    mq.first_nonrelocatable;
				msp->ms_noreloc_last =
				    mq.last_nonrelocatable;
				msp->ms_cm.c_sflags = 0;
				if (mq.nonrelocatable) {
					SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
					    msp->ms_cm.c_sflags);
				}
			} else {
				PR_MEM("%s: kphysm_del_span_query() = %d\n",
				    f, rv);
			}
		}

		/*
		 * Check source unit state during copy-rename
		 */
		if ((mp->sbm_flags & DR_MFLAG_SOURCE) &&
		    (mp->sbm_cm.sbdev_state == DR_STATE_UNREFERENCED ||
		    mp->sbm_cm.sbdev_state == DR_STATE_RELEASE))
			msp->ms_cm.c_ostate = SBD_STAT_CONFIGURED;

		mix++;
		dsp++;
	}

	return (mix);
}

int
dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		err_flag = 0;
	int		d;
	sbd_error_t	*err;
	static fn_t	f = "dr_pre_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		dr_state_t	state;

		cmn_err(CE_CONT, "OS configure %s", mp->sbm_cm.sbdev_path);

		state = mp->sbm_cm.sbdev_state;
		switch (state) {
		case DR_STATE_UNCONFIGURED:
			PR_MEM("%s: recovering from UNCONFIG for %s\n",
			    f,
			    mp->sbm_cm.sbdev_path);

			/* use memlist cached by dr_post_detach_mem_unit */
			ASSERT(mp->sbm_mlist != NULL);
			PR_MEM("%s: re-configuring cached memlist for %s:\n",
			    f, mp->sbm_cm.sbdev_path);
			PR_MEMLIST_DUMP(mp->sbm_mlist);

			/* kphysm del handle should have been freed */
			ASSERT((mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

			/*FALLTHROUGH*/

		case DR_STATE_CONNECTED:
			PR_MEM("%s: reprogramming mem hardware on %s\n",
			    f, mp->sbm_cm.sbdev_bp->b_path);

			PR_MEM("%s: enabling %s\n",
			    f, mp->sbm_cm.sbdev_path);

			err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
			if (err) {
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
				err_flag = 1;
			}
			break;

		default:
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_STATE);
			err_flag = 1;
			break;
		}

		/* exit for loop if error encountered */
		if (err_flag)
			break;
	}

	return (err_flag ? -1 : 0);
}

int
dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d;
	static fn_t	f = "dr_post_attach_mem";

	PR_MEM("%s...\n", f);

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];
		struct memlist	*mlist, *ml;

		mlist = dr_get_memlist(mp);
		if (mlist == NULL) {
			dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_MEMFAIL);
			continue;
		}

		/*
		 * Verify the memory really did successfully attach
		 * by checking for its existence in phys_install.
		 */
		memlist_read_lock();
		if (memlist_intersect(phys_install, mlist) == 0) {
			memlist_read_unlock();

			DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);

			PR_MEM("%s: %s memlist not in phys_install",
			    f, mp->sbm_cm.sbdev_path);

			memlist_delete(mlist);
			continue;
		}
		memlist_read_unlock();

		for (ml = mlist; ml != NULL; ml = ml->next) {
			sbd_error_t *err;

			err = drmach_mem_add_span(
			    mp->sbm_cm.sbdev_id,
			    ml->address,
			    ml->size);
			if (err)
				DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
		}

		memlist_delete(mlist);

		/*
		 * Destroy cached memlist, if any.
		 * There will be a cached memlist in sbm_mlist if
		 * this board is being configured directly after
		 * an unconfigure.
		 * To support this transition, dr_post_detach_mem
		 * left a copy of the last known memlist in sbm_mlist.
		 * This memlist could differ from any derived from
		 * hardware if while this memunit was last configured
		 * the system detected and deleted bad pages from
		 * phys_install.  The location of those bad pages
		 * will be reflected in the cached memlist.
		 */
		if (mp->sbm_mlist) {
			memlist_delete(mp->sbm_mlist);
			mp->sbm_mlist = NULL;
		}

		/*
		 * TODO: why is this call to dr_init_mem_unit_data here?
		 * this has been done at discovery or connect time, so this is
		 * probably redundant and unnecessary.
		 */
		dr_init_mem_unit_data(mp);
	}

	return (0);
}

int
dr_pre_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d;

	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];

		cmn_err(CE_CONT, "OS unconfigure %s", mp->sbm_cm.sbdev_path);
	}

	return (0);
}


int
dr_post_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(hp))

	int		d, rv;
	static fn_t	f = "dr_post_detach_mem";

	PR_MEM("%s...\n", f);

	rv = 0;
	for (d = 0; d < devnum; d++) {
		dr_mem_unit_t	*mp = (dr_mem_unit_t *)devlist[d];

		ASSERT(mp->sbm_cm.sbdev_bp == hp->h_bd);

		if (dr_post_detach_mem_unit(mp))
			rv = -1;
	}

	return (rv);
}

static void
dr_add_memory_spans(dr_mem_unit_t *mp, struct memlist *ml)
{
	static fn_t	f = "dr_add_memory_spans";

	PR_MEM("%s...", f);
	PR_MEMLIST_DUMP(ml);

#ifdef DEBUG
	memlist_read_lock();
	if (memlist_intersect(phys_install, ml)) {
		PR_MEM("%s:WARNING: memlist intersects with phys_install\n", f);
	}
	memlist_read_unlock();
#endif

	for (; ml; ml = ml->next) {
		pfn_t		base;
		pgcnt_t		npgs;
		int		rv;
		sbd_error_t	*err;

		base = _b64top(ml->address);
		npgs = _b64top(ml->size);

		rv = kphysm_add_memory_dynamic(base, npgs);

		err = drmach_mem_add_span(
		    mp->sbm_cm.sbdev_id,
		    ml->address,
		    ml->size);

		if (err)
			DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);

		if (rv != KPHYSM_OK) {
			cmn_err(CE_WARN, "%s:"
			    " unexpected kphysm_add_memory_dynamic"
			    " return value %d;"
			    " basepfn=0x%lx, npages=%ld\n",
			    f, rv, base, npgs);

			continue;
		}
	}
}

static int
dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
{
	uint64_t	sz = s_mp->sbm_slice_size;
	uint64_t	sm = sz - 1;
	/* old and new below refer to PAs before and after copy-rename */
	uint64_t	s_old_basepa, s_new_basepa;
	uint64_t	t_old_basepa, t_new_basepa;
	uint64_t	t_new_smallsize = 0;
	dr_mem_unit_t	*t_mp, *x_mp;
	struct memlist	*ml;
	int		rv;
	sbd_error_t	*err;
	static fn_t	f = "dr_post_detach_mem_unit";

	PR_MEM("%s...\n", f);

	/* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
	PR_MEM("%s: %s: deleted memlist (EMPTY maybe okay):\n",
	    f, s_mp->sbm_cm.sbdev_path);
	PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);

	/* sanity check */
	ASSERT(s_mp->sbm_del_mlist == NULL ||
	    (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);

	if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
		t_mp = s_mp->sbm_peer;
		ASSERT(t_mp != NULL);
		ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
		ASSERT(t_mp->sbm_peer == s_mp);

		ASSERT(t_mp->sbm_flags & DR_MFLAG_RELDONE);
		ASSERT(t_mp->sbm_del_mlist);

		PR_MEM("%s: target %s: deleted memlist:\n",
		    f, t_mp->sbm_cm.sbdev_path);
		PR_MEMLIST_DUMP(t_mp->sbm_del_mlist);
	} else {
		/* there is no target unit */
		t_mp = NULL;
	}

	/*
	 * Verify the memory really did successfully detach
	 * by checking for its non-existence in phys_install.
	 */
	rv = 0;
	memlist_read_lock();
	if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
		x_mp = s_mp;
		rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
	}
	if (rv == 0 && t_mp && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) {
		x_mp = t_mp;
		rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
	}
	memlist_read_unlock();

	if (rv) {
		/* error: memlist still in phys_install */
		DR_DEV_INTERNAL_ERROR(&x_mp->sbm_cm);
	}

	/*
	 * clean mem unit state and bail out if an error has been recorded.
	 */
	rv = 0;
	if (s_mp->sbm_cm.sbdev_error) {
		PR_MEM("%s: %s flags=%x", f,
		    s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
		DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm);
		DR_DEV_CLR_RELEASED(&s_mp->sbm_cm);
		dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED);
		rv = -1;
	}
	if (t_mp != NULL && t_mp->sbm_cm.sbdev_error != NULL) {
		PR_MEM("%s: %s flags=%x", f,
		    s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
		DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
		DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
		dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
		rv = -1;
	}
	if (rv)
		goto cleanup;

	s_old_basepa = _ptob64(s_mp->sbm_basepfn);
	err = drmach_mem_get_base_physaddr(s_mp->sbm_cm.sbdev_id,
	    &s_new_basepa);
	ASSERT(err == NULL);

	PR_MEM("%s:s_old_basepa: 0x%lx\n", f, s_old_basepa);
	PR_MEM("%s:s_new_basepa: 0x%lx\n", f, s_new_basepa);

	if (t_mp != NULL) {
		struct memlist *s_copy_mlist;

		t_old_basepa = _ptob64(t_mp->sbm_basepfn);
		err = drmach_mem_get_base_physaddr(t_mp->sbm_cm.sbdev_id,
		    &t_new_basepa);
		ASSERT(err == NULL);

		PR_MEM("%s:t_old_basepa: 0x%lx\n", f, t_old_basepa);
		PR_MEM("%s:t_new_basepa: 0x%lx\n", f, t_new_basepa);

		/*
		 * Construct copy list with original source addresses.
		 * Used to add back excess target mem.
		 */
		s_copy_mlist = memlist_dup(s_mp->sbm_mlist);
		for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
			s_copy_mlist = memlist_del_span(s_copy_mlist,
			    ml->address, ml->size);
		}

		PR_MEM("%s: source copy list:\n:", f);
		PR_MEMLIST_DUMP(s_copy_mlist);

		/*
		 * We had to swap mem-units, so update
		 * memlists accordingly with new base
		 * addresses.
		 */
		for (ml = t_mp->sbm_mlist; ml; ml = ml->next) {
			ml->address -= t_old_basepa;
			ml->address += t_new_basepa;
		}

		/*
		 * There is no need to explicitly rename the target delete
		 * memlist, because sbm_del_mlist and sbm_mlist always
		 * point to the same memlist for a copy/rename operation.
		 */
		ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);

		PR_MEM("%s: renamed target memlist and delete memlist:\n", f);
		PR_MEMLIST_DUMP(t_mp->sbm_mlist);

		for (ml = s_mp->sbm_mlist; ml; ml = ml->next) {
			ml->address -= s_old_basepa;
			ml->address += s_new_basepa;
		}

		PR_MEM("%s: renamed source memlist:\n", f);
		PR_MEMLIST_DUMP(s_mp->sbm_mlist);

		/*
		 * Keep track of dynamically added segments
		 * since they cannot be split if we need to delete
		 * excess source memory later for this board.
		 */
		if (t_mp->sbm_dyn_segs)
			memlist_delete(t_mp->sbm_dyn_segs);
		t_mp->sbm_dyn_segs = s_mp->sbm_dyn_segs;
		s_mp->sbm_dyn_segs = NULL;

		/*
		 * If the target memory range with the new target base PA
		 * extends beyond the usable slice, prevent any "target excess"
		 * from being added back after this copy/rename and
		 * calculate the new smaller size of the target board
		 * to be set as part of target cleanup.  The base + npages
		 * must only include the range of memory up to the end of
		 * this slice.  This will only be used after a category 4
		 * large-to-small target type copy/rename - see comments
		 * in dr_select_mem_target.
		 */
		if (((t_new_basepa & sm) + _ptob64(t_mp->sbm_npages)) > sz) {
			t_new_smallsize = sz - (t_new_basepa & sm);
		}

		if (s_mp->sbm_flags & DR_MFLAG_MEMRESIZE &&
		    t_new_smallsize == 0) {
			struct memlist	*t_excess_mlist;

			/*
			 * Add back excess target memory.
			 * Subtract out the portion of the target memory
			 * node that was taken over by the source memory
			 * node.
12870Sstevel@tonic-gate */ 12880Sstevel@tonic-gate t_excess_mlist = memlist_dup(t_mp->sbm_mlist); 12890Sstevel@tonic-gate for (ml = s_copy_mlist; ml; ml = ml->next) { 12900Sstevel@tonic-gate t_excess_mlist = 12910Sstevel@tonic-gate memlist_del_span(t_excess_mlist, 12920Sstevel@tonic-gate ml->address, ml->size); 12930Sstevel@tonic-gate } 12940Sstevel@tonic-gate 12950Sstevel@tonic-gate /* 12960Sstevel@tonic-gate * Update dynamically added segs 12970Sstevel@tonic-gate */ 12980Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 12990Sstevel@tonic-gate t_mp->sbm_dyn_segs = 13000Sstevel@tonic-gate memlist_del_span(t_mp->sbm_dyn_segs, 13010Sstevel@tonic-gate ml->address, ml->size); 13020Sstevel@tonic-gate } 13030Sstevel@tonic-gate for (ml = t_excess_mlist; ml; ml = ml->next) { 13040Sstevel@tonic-gate t_mp->sbm_dyn_segs = 13050Sstevel@tonic-gate memlist_cat_span(t_mp->sbm_dyn_segs, 13060Sstevel@tonic-gate ml->address, ml->size); 13070Sstevel@tonic-gate } 13080Sstevel@tonic-gate PR_MEM("%s: %s: updated dynamic seg list:\n", 13090Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 13100Sstevel@tonic-gate PR_MEMLIST_DUMP(t_mp->sbm_dyn_segs); 13110Sstevel@tonic-gate 13120Sstevel@tonic-gate PR_MEM("%s: adding back remaining portion" 13130Sstevel@tonic-gate " of %s, memlist:\n", 13140Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 13150Sstevel@tonic-gate PR_MEMLIST_DUMP(t_excess_mlist); 13160Sstevel@tonic-gate 13170Sstevel@tonic-gate dr_add_memory_spans(s_mp, t_excess_mlist); 13180Sstevel@tonic-gate memlist_delete(t_excess_mlist); 13190Sstevel@tonic-gate } 13200Sstevel@tonic-gate memlist_delete(s_copy_mlist); 13210Sstevel@tonic-gate 13220Sstevel@tonic-gate #ifdef DEBUG 13230Sstevel@tonic-gate /* 13240Sstevel@tonic-gate * Renaming s_mp->sbm_del_mlist is not necessary. This 13250Sstevel@tonic-gate * list is not used beyond this point, and in fact, is 13260Sstevel@tonic-gate * disposed of at the end of this function. 13270Sstevel@tonic-gate */ 13280Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 13290Sstevel@tonic-gate ml->address -= s_old_basepa; 13300Sstevel@tonic-gate ml->address += s_new_basepa; 13310Sstevel@tonic-gate } 13320Sstevel@tonic-gate 13330Sstevel@tonic-gate PR_MEM("%s: renamed source delete memlist", f); 13340Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_del_mlist); 13350Sstevel@tonic-gate #endif 13360Sstevel@tonic-gate 13370Sstevel@tonic-gate } 13380Sstevel@tonic-gate 13390Sstevel@tonic-gate if (t_mp != NULL) { 13400Sstevel@tonic-gate /* delete target's entire address space */ 13410Sstevel@tonic-gate err = drmach_mem_del_span( 13420Sstevel@tonic-gate t_mp->sbm_cm.sbdev_id, t_old_basepa & ~ sm, sz); 13430Sstevel@tonic-gate if (err) 13440Sstevel@tonic-gate DRERR_SET_C(&t_mp->sbm_cm.sbdev_error, &err); 13450Sstevel@tonic-gate ASSERT(err == NULL); 13460Sstevel@tonic-gate 13470Sstevel@tonic-gate /* 13480Sstevel@tonic-gate * After the copy/rename, the original address space 13490Sstevel@tonic-gate * for the source board (which is now located on the 13500Sstevel@tonic-gate * target board) may now have some excess to be deleted. 13510Sstevel@tonic-gate * The amount is calculated by masking the slice 13520Sstevel@tonic-gate * info and keeping the slice offset from t_new_basepa. 
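 *
 * Worked example (hypothetical values): with a slice size sz of
 * 0x1000000000 bytes, sm is 0xfffffffff; if s_old_basepa is
 * 0x1000000000 and t_new_basepa is 0x1040000000, the call below
 * deletes the span [0x1000000000, 0x1040000000), i.e. the low
 * t_new_basepa & sm = 0x40000000 bytes of the source's old
 * slice that are no longer backed by memory after the rename.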
13530Sstevel@tonic-gate */ 13540Sstevel@tonic-gate err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id, 13550Sstevel@tonic-gate s_old_basepa & ~ sm, t_new_basepa & sm); 13560Sstevel@tonic-gate if (err) 13570Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 13580Sstevel@tonic-gate ASSERT(err == NULL); 13590Sstevel@tonic-gate 13600Sstevel@tonic-gate } else { 13610Sstevel@tonic-gate /* delete board's entire address space */ 13620Sstevel@tonic-gate err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id, 13630Sstevel@tonic-gate s_old_basepa & ~ sm, sz); 13640Sstevel@tonic-gate if (err) 13650Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 13660Sstevel@tonic-gate ASSERT(err == NULL); 13670Sstevel@tonic-gate } 13680Sstevel@tonic-gate 13690Sstevel@tonic-gate cleanup: 13700Sstevel@tonic-gate /* clean up target mem unit */ 13710Sstevel@tonic-gate if (t_mp != NULL) { 13720Sstevel@tonic-gate memlist_delete(t_mp->sbm_del_mlist); 13730Sstevel@tonic-gate /* no need to delete sbm_mlist, it shares sbm_del_mlist */ 13740Sstevel@tonic-gate 13750Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 13760Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 13770Sstevel@tonic-gate t_mp->sbm_peer = NULL; 13780Sstevel@tonic-gate t_mp->sbm_flags = 0; 13790Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 13800Sstevel@tonic-gate dr_init_mem_unit_data(t_mp); 13810Sstevel@tonic-gate 13820Sstevel@tonic-gate /* reduce target size if new PAs go past end of usable slice */ 13830Sstevel@tonic-gate if (t_new_smallsize > 0) { 13840Sstevel@tonic-gate t_mp->sbm_npages = _b64top(t_new_smallsize); 1385930Smathue PR_MEM("%s: target new size 0x%lx bytes\n", 13860Sstevel@tonic-gate f, t_new_smallsize); 13870Sstevel@tonic-gate } 13880Sstevel@tonic-gate } 13890Sstevel@tonic-gate if (t_mp != NULL && t_mp->sbm_cm.sbdev_error == NULL) { 13900Sstevel@tonic-gate /* 13910Sstevel@tonic-gate * now that copy/rename has completed, undo this 13920Sstevel@tonic-gate * work that was done in dr_release_mem_done. 13930Sstevel@tonic-gate */ 13940Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 13950Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 13960Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED); 13970Sstevel@tonic-gate } 13980Sstevel@tonic-gate 13990Sstevel@tonic-gate /* 14000Sstevel@tonic-gate * clean up (source) board's mem unit structure. 14010Sstevel@tonic-gate * NOTE: sbm_mlist is retained if no error has been record (in other 14020Sstevel@tonic-gate * words, when s_mp->sbm_cm.sbdev_error is NULL). This memlist is 14030Sstevel@tonic-gate * referred to elsewhere as the cached memlist. The cached memlist 14040Sstevel@tonic-gate * is used to re-attach (configure back in) this memunit from the 14050Sstevel@tonic-gate * unconfigured state. The memlist is retained because it may 14060Sstevel@tonic-gate * represent bad pages that were detected while the memory was 14070Sstevel@tonic-gate * configured into the OS. The OS deletes bad pages from phys_install. 14080Sstevel@tonic-gate * Those deletes, if any, will be represented in the cached mlist. 
14090Sstevel@tonic-gate */ 14100Sstevel@tonic-gate if (s_mp->sbm_del_mlist && s_mp->sbm_del_mlist != s_mp->sbm_mlist) 14110Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 14120Sstevel@tonic-gate 14130Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error && s_mp->sbm_mlist) { 14140Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 14150Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 14160Sstevel@tonic-gate } 14170Sstevel@tonic-gate 14180Sstevel@tonic-gate if (s_mp->sbm_dyn_segs != NULL && s_mp->sbm_cm.sbdev_error == 0) { 14190Sstevel@tonic-gate memlist_delete(s_mp->sbm_dyn_segs); 14200Sstevel@tonic-gate s_mp->sbm_dyn_segs = NULL; 14210Sstevel@tonic-gate } 14220Sstevel@tonic-gate 14230Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 14240Sstevel@tonic-gate s_mp->sbm_peer = NULL; 14250Sstevel@tonic-gate s_mp->sbm_flags = 0; 14260Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 14270Sstevel@tonic-gate dr_init_mem_unit_data(s_mp); 14280Sstevel@tonic-gate 14290Sstevel@tonic-gate PR_MEM("%s: cached memlist for %s:", f, s_mp->sbm_cm.sbdev_path); 14300Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_mlist); 14310Sstevel@tonic-gate 14320Sstevel@tonic-gate return (0); 14330Sstevel@tonic-gate } 14340Sstevel@tonic-gate 14350Sstevel@tonic-gate /* 14360Sstevel@tonic-gate * Successful return from this function will have the memory 14370Sstevel@tonic-gate * handle in bp->b_dev[..mem-unit...].sbm_memhandle allocated 14380Sstevel@tonic-gate * and waiting. This routine's job is to select the memory that 14390Sstevel@tonic-gate * actually has to be released (detached) which may not necessarily 14400Sstevel@tonic-gate * be the same memory node that came in in devlist[], 14410Sstevel@tonic-gate * i.e. a copy-rename is needed. 14420Sstevel@tonic-gate */ 14430Sstevel@tonic-gate int 14440Sstevel@tonic-gate dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum) 14450Sstevel@tonic-gate { 14460Sstevel@tonic-gate int d; 14470Sstevel@tonic-gate int err_flag = 0; 14480Sstevel@tonic-gate static fn_t f = "dr_pre_release_mem"; 14490Sstevel@tonic-gate 14500Sstevel@tonic-gate PR_MEM("%s...\n", f); 14510Sstevel@tonic-gate 14520Sstevel@tonic-gate for (d = 0; d < devnum; d++) { 14530Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d]; 14540Sstevel@tonic-gate int rv; 14550Sstevel@tonic-gate memquery_t mq; 14560Sstevel@tonic-gate struct memlist *ml; 14570Sstevel@tonic-gate 14580Sstevel@tonic-gate if (mp->sbm_cm.sbdev_error) { 14590Sstevel@tonic-gate err_flag = 1; 14600Sstevel@tonic-gate continue; 14610Sstevel@tonic-gate } else if (!kcage_on) { 14620Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_KCAGE_OFF); 14630Sstevel@tonic-gate err_flag = 1; 14640Sstevel@tonic-gate continue; 14650Sstevel@tonic-gate } 14660Sstevel@tonic-gate 14670Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_RESERVED) { 14680Sstevel@tonic-gate /* 14690Sstevel@tonic-gate * Board is currently involved in a delete 14700Sstevel@tonic-gate * memory operation. Can't detach this guy until 14710Sstevel@tonic-gate * that operation completes. 14720Sstevel@tonic-gate */ 14730Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_INVAL); 14740Sstevel@tonic-gate err_flag = 1; 14750Sstevel@tonic-gate break; 14760Sstevel@tonic-gate } 14770Sstevel@tonic-gate 14780Sstevel@tonic-gate /* 14790Sstevel@tonic-gate * Check whether the detaching memory requires a 14800Sstevel@tonic-gate * copy-rename. 
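 * kphysm_del_span_query() fills in the memquery_t below; a
 * non-zero mq.nonrelocatable count means the span holds pages
 * the OS cannot relocate, so the unit can only be detached by a
 * copy/rename onto a target board (and the caller must have
 * permitted a quiesce). A count of zero normally allows a plain
 * delete with no target.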
14810Sstevel@tonic-gate */ 14820Sstevel@tonic-gate ASSERT(mp->sbm_npages != 0); 14830Sstevel@tonic-gate rv = kphysm_del_span_query( 14840Sstevel@tonic-gate mp->sbm_basepfn, mp->sbm_npages, &mq); 14850Sstevel@tonic-gate if (rv != KPHYSM_OK) { 14860Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 14870Sstevel@tonic-gate err_flag = 1; 14880Sstevel@tonic-gate break; 14890Sstevel@tonic-gate } 14900Sstevel@tonic-gate 14910Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 14920Sstevel@tonic-gate if (!(dr_cmd_flags(hp) & 14930Sstevel@tonic-gate (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) { 14940Sstevel@tonic-gate /* caller wasn't prompted for a suspend */ 14950Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, 14960Sstevel@tonic-gate ESBD_QUIESCE_REQD); 14970Sstevel@tonic-gate err_flag = 1; 14980Sstevel@tonic-gate break; 14990Sstevel@tonic-gate } 15000Sstevel@tonic-gate } 15010Sstevel@tonic-gate 15020Sstevel@tonic-gate /* flags should be clean at this time */ 15030Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 15040Sstevel@tonic-gate 15050Sstevel@tonic-gate ASSERT(mp->sbm_mlist == NULL); /* should be null */ 15060Sstevel@tonic-gate ASSERT(mp->sbm_del_mlist == NULL); /* should be null */ 15070Sstevel@tonic-gate if (mp->sbm_mlist != NULL) { 15080Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 15090Sstevel@tonic-gate mp->sbm_mlist = NULL; 15100Sstevel@tonic-gate } 15110Sstevel@tonic-gate 15120Sstevel@tonic-gate ml = dr_get_memlist(mp); 15130Sstevel@tonic-gate if (ml == NULL) { 15140Sstevel@tonic-gate err_flag = 1; 15150Sstevel@tonic-gate PR_MEM("%s: no memlist found for %s\n", 15160Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 15170Sstevel@tonic-gate continue; 15180Sstevel@tonic-gate } 15190Sstevel@tonic-gate 15200Sstevel@tonic-gate /* allocate a kphysm handle */ 15210Sstevel@tonic-gate rv = kphysm_del_gethandle(&mp->sbm_memhandle); 15220Sstevel@tonic-gate if (rv != KPHYSM_OK) { 15230Sstevel@tonic-gate memlist_delete(ml); 15240Sstevel@tonic-gate 15250Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 15260Sstevel@tonic-gate err_flag = 1; 15270Sstevel@tonic-gate break; 15280Sstevel@tonic-gate } 15290Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RELOWNER; 15300Sstevel@tonic-gate 15310Sstevel@tonic-gate if ((mq.nonrelocatable != 0) || 15320Sstevel@tonic-gate dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) { 15330Sstevel@tonic-gate /* 15340Sstevel@tonic-gate * Either the detaching memory node contains 15350Sstevel@tonic-gate * non-reloc memory or we failed to reserve the 15360Sstevel@tonic-gate * detaching memory node (which did _not_ have 15370Sstevel@tonic-gate * any non-reloc memory, i.e. some non-reloc mem 15380Sstevel@tonic-gate * got onboard). 15390Sstevel@tonic-gate */ 15400Sstevel@tonic-gate 15410Sstevel@tonic-gate if (dr_select_mem_target(hp, mp, ml)) { 15420Sstevel@tonic-gate int rv; 15430Sstevel@tonic-gate 15440Sstevel@tonic-gate /* 15450Sstevel@tonic-gate * We had no luck locating a target 15460Sstevel@tonic-gate * memory node to be the recipient of 15470Sstevel@tonic-gate * the non-reloc memory on the node 15480Sstevel@tonic-gate * we're trying to detach. 15490Sstevel@tonic-gate * Clean up be disposing the mem handle 15500Sstevel@tonic-gate * and the mem list. 
15510Sstevel@tonic-gate */ 15520Sstevel@tonic-gate rv = kphysm_del_release(mp->sbm_memhandle); 15530Sstevel@tonic-gate if (rv != KPHYSM_OK) { 15540Sstevel@tonic-gate /* 15550Sstevel@tonic-gate * can do nothing but complain 15560Sstevel@tonic-gate * and hope helpful for debug 15570Sstevel@tonic-gate */ 15580Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected" 15590Sstevel@tonic-gate " kphysm_del_release return" 15600Sstevel@tonic-gate " value %d", 15610Sstevel@tonic-gate f, rv); 15620Sstevel@tonic-gate } 15630Sstevel@tonic-gate mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 15640Sstevel@tonic-gate 15650Sstevel@tonic-gate memlist_delete(ml); 15660Sstevel@tonic-gate 15670Sstevel@tonic-gate /* make sure sbm_flags is clean */ 15680Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 15690Sstevel@tonic-gate 15700Sstevel@tonic-gate dr_dev_err(CE_WARN, 15710Sstevel@tonic-gate &mp->sbm_cm, ESBD_NO_TARGET); 15720Sstevel@tonic-gate 15730Sstevel@tonic-gate err_flag = 1; 15740Sstevel@tonic-gate break; 15750Sstevel@tonic-gate } 15760Sstevel@tonic-gate 15770Sstevel@tonic-gate /* 15780Sstevel@tonic-gate * ml is not memlist_delete'd here because 15790Sstevel@tonic-gate * it has been assigned to mp->sbm_mlist 15800Sstevel@tonic-gate * by dr_select_mem_target. 15810Sstevel@tonic-gate */ 15820Sstevel@tonic-gate } else { 15830Sstevel@tonic-gate /* no target needed to detach this board */ 15840Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RESERVED; 15850Sstevel@tonic-gate mp->sbm_peer = NULL; 15860Sstevel@tonic-gate mp->sbm_del_mlist = ml; 15870Sstevel@tonic-gate mp->sbm_mlist = ml; 15880Sstevel@tonic-gate mp->sbm_cm.sbdev_busy = 1; 15890Sstevel@tonic-gate } 15900Sstevel@tonic-gate #ifdef DEBUG 15910Sstevel@tonic-gate ASSERT(mp->sbm_mlist != NULL); 15920Sstevel@tonic-gate 15930Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_SOURCE) { 15940Sstevel@tonic-gate PR_MEM("%s: release of %s requires copy/rename;" 15950Sstevel@tonic-gate " selected target board %s\n", 15960Sstevel@tonic-gate f, 15970Sstevel@tonic-gate mp->sbm_cm.sbdev_path, 15980Sstevel@tonic-gate mp->sbm_peer->sbm_cm.sbdev_path); 15990Sstevel@tonic-gate } else { 16000Sstevel@tonic-gate PR_MEM("%s: copy/rename not required to release %s\n", 16010Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 16020Sstevel@tonic-gate } 16030Sstevel@tonic-gate 16040Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RELOWNER); 16050Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RESERVED); 16060Sstevel@tonic-gate #endif 16070Sstevel@tonic-gate } 16080Sstevel@tonic-gate 16090Sstevel@tonic-gate return (err_flag ? -1 : 0); 16100Sstevel@tonic-gate } 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate void 16130Sstevel@tonic-gate dr_release_mem_done(dr_common_unit_t *cp) 16140Sstevel@tonic-gate { 16150Sstevel@tonic-gate dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp; 16160Sstevel@tonic-gate dr_mem_unit_t *t_mp, *mp; 16170Sstevel@tonic-gate int rv; 16180Sstevel@tonic-gate static fn_t f = "dr_release_mem_done"; 16190Sstevel@tonic-gate 16200Sstevel@tonic-gate /* 16210Sstevel@tonic-gate * This unit will be flagged with DR_MFLAG_SOURCE, if it 16220Sstevel@tonic-gate * has a target unit. 
16230Sstevel@tonic-gate */ 16240Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 16250Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 16260Sstevel@tonic-gate ASSERT(t_mp != NULL); 16270Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 16280Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET); 16290Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_RESERVED); 16300Sstevel@tonic-gate } else { 16310Sstevel@tonic-gate /* this is no target unit */ 16320Sstevel@tonic-gate t_mp = NULL; 16330Sstevel@tonic-gate } 16340Sstevel@tonic-gate 16350Sstevel@tonic-gate /* free delete handle */ 16360Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_RELOWNER); 16370Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_RESERVED); 16380Sstevel@tonic-gate rv = kphysm_del_release(s_mp->sbm_memhandle); 16390Sstevel@tonic-gate if (rv != KPHYSM_OK) { 16400Sstevel@tonic-gate /* 16410Sstevel@tonic-gate * can do nothing but complain 16420Sstevel@tonic-gate * and hope helpful for debug 16430Sstevel@tonic-gate */ 16440Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected kphysm_del_release" 16450Sstevel@tonic-gate " return value %d", f, rv); 16460Sstevel@tonic-gate } 16470Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 16480Sstevel@tonic-gate 16490Sstevel@tonic-gate /* 16500Sstevel@tonic-gate * If an error was encountered during release, clean up 16510Sstevel@tonic-gate * the source (and target, if present) unit data. 16520Sstevel@tonic-gate */ 16530Sstevel@tonic-gate /* XXX Can we know that sbdev_error was encountered during release? */ 16540Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error != NULL) { 16550Sstevel@tonic-gate PR_MEM("%s: %s: error %d noted\n", 16560Sstevel@tonic-gate f, 16570Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 16580Sstevel@tonic-gate s_mp->sbm_cm.sbdev_error->e_code); 16590Sstevel@tonic-gate 16600Sstevel@tonic-gate if (t_mp != NULL) { 16610Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 16620Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 16630Sstevel@tonic-gate 16640Sstevel@tonic-gate if (t_mp->sbm_mlist != NULL) { 16650Sstevel@tonic-gate memlist_delete(t_mp->sbm_mlist); 16660Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 16670Sstevel@tonic-gate } 16680Sstevel@tonic-gate 16690Sstevel@tonic-gate t_mp->sbm_peer = NULL; 16700Sstevel@tonic-gate t_mp->sbm_flags = 0; 16710Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 16720Sstevel@tonic-gate } 16730Sstevel@tonic-gate 16740Sstevel@tonic-gate if (s_mp->sbm_del_mlist != s_mp->sbm_mlist) 16750Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 16760Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 16770Sstevel@tonic-gate 16780Sstevel@tonic-gate if (s_mp->sbm_mlist != NULL) { 16790Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 16800Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 16810Sstevel@tonic-gate } 16820Sstevel@tonic-gate 16830Sstevel@tonic-gate s_mp->sbm_peer = NULL; 16840Sstevel@tonic-gate s_mp->sbm_flags = 0; 16850Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 16860Sstevel@tonic-gate 16870Sstevel@tonic-gate /* bail out */ 16880Sstevel@tonic-gate return; 16890Sstevel@tonic-gate } 16900Sstevel@tonic-gate 16910Sstevel@tonic-gate DR_DEV_SET_RELEASED(&s_mp->sbm_cm); 16920Sstevel@tonic-gate dr_device_transition(&s_mp->sbm_cm, DR_STATE_RELEASE); 16930Sstevel@tonic-gate 16940Sstevel@tonic-gate if (t_mp != NULL) { 16950Sstevel@tonic-gate /* 16960Sstevel@tonic-gate * the kphysm delete operation that drained the source 16970Sstevel@tonic-gate * board also drained this target board. 
Since the source 16980Sstevel@tonic-gate * board drain is now known to have succeeded, we know this 16990Sstevel@tonic-gate * target board is drained too. 17000Sstevel@tonic-gate * 17010Sstevel@tonic-gate * Because DR_DEV_SET_RELEASED and dr_device_transition 17020Sstevel@tonic-gate * are done here, the dr_release_dev_done call should not 17030Sstevel@tonic-gate * fail. 17040Sstevel@tonic-gate */ 17050Sstevel@tonic-gate DR_DEV_SET_RELEASED(&t_mp->sbm_cm); 17060Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_RELEASE); 17070Sstevel@tonic-gate 17080Sstevel@tonic-gate /* 17090Sstevel@tonic-gate * NOTE: do not transition target's board state, 17100Sstevel@tonic-gate * even if the mem-unit was the last configured 17110Sstevel@tonic-gate * unit of the board. When copy/rename completes 17120Sstevel@tonic-gate * this mem-unit will be transitioned back to 17130Sstevel@tonic-gate * the configured state. In the meantime, the 17140Sstevel@tonic-gate * board's state must remain as is. 17150Sstevel@tonic-gate */ 17160Sstevel@tonic-gate } 17170Sstevel@tonic-gate 17180Sstevel@tonic-gate /* if board(s) had deleted memory, verify it is gone */ 17190Sstevel@tonic-gate rv = 0; 17200Sstevel@tonic-gate memlist_read_lock(); 17210Sstevel@tonic-gate if (s_mp->sbm_del_mlist != NULL) { 17220Sstevel@tonic-gate mp = s_mp; 17230Sstevel@tonic-gate rv = memlist_intersect(phys_install, mp->sbm_del_mlist); 17240Sstevel@tonic-gate } 17250Sstevel@tonic-gate if (rv == 0 && t_mp && t_mp->sbm_del_mlist != NULL) { 17260Sstevel@tonic-gate mp = t_mp; 17270Sstevel@tonic-gate rv = memlist_intersect(phys_install, mp->sbm_del_mlist); 17280Sstevel@tonic-gate } 17290Sstevel@tonic-gate memlist_read_unlock(); 17300Sstevel@tonic-gate if (rv) { 17310Sstevel@tonic-gate cmn_err(CE_WARN, "%s: %smem-unit (%d.%d): " 17320Sstevel@tonic-gate "deleted memory still found in phys_install", 17330Sstevel@tonic-gate f, 17340Sstevel@tonic-gate (mp == t_mp ?
"target " : ""), 17350Sstevel@tonic-gate mp->sbm_cm.sbdev_bp->b_num, 17360Sstevel@tonic-gate mp->sbm_cm.sbdev_unum); 17370Sstevel@tonic-gate 17380Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm); 17390Sstevel@tonic-gate return; 17400Sstevel@tonic-gate } 17410Sstevel@tonic-gate 17420Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_RELDONE; 17430Sstevel@tonic-gate if (t_mp != NULL) 17440Sstevel@tonic-gate t_mp->sbm_flags |= DR_MFLAG_RELDONE; 17450Sstevel@tonic-gate 17460Sstevel@tonic-gate /* this should not fail */ 17470Sstevel@tonic-gate if (dr_release_dev_done(&s_mp->sbm_cm) != 0) { 17480Sstevel@tonic-gate /* catch this in debug kernels */ 17490Sstevel@tonic-gate ASSERT(0); 17500Sstevel@tonic-gate return; 17510Sstevel@tonic-gate } 17520Sstevel@tonic-gate 17530Sstevel@tonic-gate PR_MEM("%s: marking %s release DONE\n", 17540Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 17550Sstevel@tonic-gate 17560Sstevel@tonic-gate s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED; 17570Sstevel@tonic-gate 17580Sstevel@tonic-gate if (t_mp != NULL) { 17590Sstevel@tonic-gate /* should not fail */ 17600Sstevel@tonic-gate rv = dr_release_dev_done(&t_mp->sbm_cm); 17610Sstevel@tonic-gate if (rv != 0) { 17620Sstevel@tonic-gate /* catch this in debug kernels */ 17630Sstevel@tonic-gate ASSERT(0); 17640Sstevel@tonic-gate return; 17650Sstevel@tonic-gate } 17660Sstevel@tonic-gate 17670Sstevel@tonic-gate PR_MEM("%s: marking %s release DONE\n", 17680Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 17690Sstevel@tonic-gate 17700Sstevel@tonic-gate t_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED; 17710Sstevel@tonic-gate } 17720Sstevel@tonic-gate } 17730Sstevel@tonic-gate 17740Sstevel@tonic-gate /*ARGSUSED*/ 17750Sstevel@tonic-gate int 17760Sstevel@tonic-gate dr_disconnect_mem(dr_mem_unit_t *mp) 17770Sstevel@tonic-gate { 17780Sstevel@tonic-gate static fn_t f = "dr_disconnect_mem"; 17790Sstevel@tonic-gate update_membounds_t umb; 17800Sstevel@tonic-gate 17810Sstevel@tonic-gate #ifdef DEBUG 17820Sstevel@tonic-gate int state = mp->sbm_cm.sbdev_state; 17830Sstevel@tonic-gate ASSERT(state == DR_STATE_CONNECTED || 17840Sstevel@tonic-gate state == DR_STATE_UNCONFIGURED); 17850Sstevel@tonic-gate #endif 17860Sstevel@tonic-gate 17870Sstevel@tonic-gate PR_MEM("%s...\n", f); 17880Sstevel@tonic-gate 17890Sstevel@tonic-gate if (mp->sbm_del_mlist && mp->sbm_del_mlist != mp->sbm_mlist) 17900Sstevel@tonic-gate memlist_delete(mp->sbm_del_mlist); 17910Sstevel@tonic-gate mp->sbm_del_mlist = NULL; 17920Sstevel@tonic-gate 17930Sstevel@tonic-gate if (mp->sbm_mlist) { 17940Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 17950Sstevel@tonic-gate mp->sbm_mlist = NULL; 17960Sstevel@tonic-gate } 17970Sstevel@tonic-gate 17980Sstevel@tonic-gate /* 17990Sstevel@tonic-gate * Remove memory from lgroup 18000Sstevel@tonic-gate * For now, only board info is required. 
18010Sstevel@tonic-gate */ 18020Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 18030Sstevel@tonic-gate umb.u_base = (uint64_t)-1; 18040Sstevel@tonic-gate umb.u_len = (uint64_t)-1; 18050Sstevel@tonic-gate 18060Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_DEL, (uintptr_t)&umb); 18070Sstevel@tonic-gate 18080Sstevel@tonic-gate return (0); 18090Sstevel@tonic-gate } 18100Sstevel@tonic-gate 18110Sstevel@tonic-gate int 18120Sstevel@tonic-gate dr_cancel_mem(dr_mem_unit_t *s_mp) 18130Sstevel@tonic-gate { 18140Sstevel@tonic-gate dr_mem_unit_t *t_mp; 18150Sstevel@tonic-gate dr_state_t state; 18160Sstevel@tonic-gate static fn_t f = "dr_cancel_mem"; 18170Sstevel@tonic-gate 18180Sstevel@tonic-gate state = s_mp->sbm_cm.sbdev_state; 18190Sstevel@tonic-gate 18200Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_TARGET) { 18210Sstevel@tonic-gate /* must cancel source board, not target board */ 18220Sstevel@tonic-gate /* TODO: set error */ 18230Sstevel@tonic-gate return (-1); 18240Sstevel@tonic-gate } else if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 18250Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 18260Sstevel@tonic-gate ASSERT(t_mp != NULL); 18270Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 18280Sstevel@tonic-gate 18290Sstevel@tonic-gate /* must always match the source board's state */ 18300Sstevel@tonic-gate /* TODO: is this assertion correct? */ 18310Sstevel@tonic-gate ASSERT(t_mp->sbm_cm.sbdev_state == state); 18320Sstevel@tonic-gate } else { 18330Sstevel@tonic-gate /* this is no target unit */ 18340Sstevel@tonic-gate t_mp = NULL; 18350Sstevel@tonic-gate } 18360Sstevel@tonic-gate 18370Sstevel@tonic-gate switch (state) { 18380Sstevel@tonic-gate case DR_STATE_UNREFERENCED: /* state set by dr_release_dev_done */ 18390Sstevel@tonic-gate ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0); 18400Sstevel@tonic-gate 18410Sstevel@tonic-gate if (t_mp != NULL && t_mp->sbm_del_mlist != NULL) { 18420Sstevel@tonic-gate PR_MEM("%s: undoing target %s memory delete\n", 18430Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 18440Sstevel@tonic-gate dr_add_memory_spans(t_mp, t_mp->sbm_del_mlist); 18450Sstevel@tonic-gate 18460Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 18470Sstevel@tonic-gate } 18480Sstevel@tonic-gate 18490Sstevel@tonic-gate if (s_mp->sbm_del_mlist != NULL) { 18500Sstevel@tonic-gate PR_MEM("%s: undoing %s memory delete\n", 18510Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 18520Sstevel@tonic-gate 18530Sstevel@tonic-gate dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist); 18540Sstevel@tonic-gate } 18550Sstevel@tonic-gate 18560Sstevel@tonic-gate /*FALLTHROUGH*/ 18570Sstevel@tonic-gate 18580Sstevel@tonic-gate /* TODO: should no longer be possible to see the release state here */ 18590Sstevel@tonic-gate case DR_STATE_RELEASE: /* state set by dr_release_mem_done */ 18600Sstevel@tonic-gate 18610Sstevel@tonic-gate ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0); 18620Sstevel@tonic-gate 18630Sstevel@tonic-gate if (t_mp != NULL) { 18640Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 18650Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 18660Sstevel@tonic-gate 18670Sstevel@tonic-gate if (t_mp->sbm_mlist != NULL) { 18680Sstevel@tonic-gate memlist_delete(t_mp->sbm_mlist); 18690Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 18700Sstevel@tonic-gate } 18710Sstevel@tonic-gate 18720Sstevel@tonic-gate t_mp->sbm_peer = NULL; 18730Sstevel@tonic-gate t_mp->sbm_flags = 0; 18740Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 18750Sstevel@tonic-gate dr_init_mem_unit_data(t_mp); 
18760Sstevel@tonic-gate 18770Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 18780Sstevel@tonic-gate 18790Sstevel@tonic-gate dr_device_transition( 18800Sstevel@tonic-gate &t_mp->sbm_cm, DR_STATE_CONFIGURED); 18810Sstevel@tonic-gate } 18820Sstevel@tonic-gate 18830Sstevel@tonic-gate if (s_mp->sbm_del_mlist != s_mp->sbm_mlist) 18840Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 18850Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 18860Sstevel@tonic-gate 18870Sstevel@tonic-gate if (s_mp->sbm_mlist != NULL) { 18880Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 18890Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 18900Sstevel@tonic-gate } 18910Sstevel@tonic-gate 18920Sstevel@tonic-gate s_mp->sbm_peer = NULL; 18930Sstevel@tonic-gate s_mp->sbm_flags = 0; 18940Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 18950Sstevel@tonic-gate dr_init_mem_unit_data(s_mp); 18960Sstevel@tonic-gate 18970Sstevel@tonic-gate return (0); 18980Sstevel@tonic-gate 18990Sstevel@tonic-gate default: 19000Sstevel@tonic-gate PR_MEM("%s: WARNING unexpected state (%d) for %s\n", 19010Sstevel@tonic-gate f, (int)state, s_mp->sbm_cm.sbdev_path); 19020Sstevel@tonic-gate 19030Sstevel@tonic-gate return (-1); 19040Sstevel@tonic-gate } 19050Sstevel@tonic-gate /*NOTREACHED*/ 19060Sstevel@tonic-gate } 19070Sstevel@tonic-gate 19080Sstevel@tonic-gate void 19090Sstevel@tonic-gate dr_init_mem_unit(dr_mem_unit_t *mp) 19100Sstevel@tonic-gate { 19110Sstevel@tonic-gate dr_state_t new_state; 19120Sstevel@tonic-gate 19130Sstevel@tonic-gate 19140Sstevel@tonic-gate if (DR_DEV_IS_ATTACHED(&mp->sbm_cm)) { 19150Sstevel@tonic-gate new_state = DR_STATE_CONFIGURED; 19160Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 19170Sstevel@tonic-gate } else if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) { 19180Sstevel@tonic-gate new_state = DR_STATE_CONNECTED; 19190Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 19200Sstevel@tonic-gate } else if (mp->sbm_cm.sbdev_id != (drmachid_t)0) { 19210Sstevel@tonic-gate new_state = DR_STATE_OCCUPIED; 19220Sstevel@tonic-gate } else { 19230Sstevel@tonic-gate new_state = DR_STATE_EMPTY; 19240Sstevel@tonic-gate } 19250Sstevel@tonic-gate 19260Sstevel@tonic-gate if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) 19270Sstevel@tonic-gate dr_init_mem_unit_data(mp); 19280Sstevel@tonic-gate 19290Sstevel@tonic-gate /* delay transition until fully initialized */ 19300Sstevel@tonic-gate dr_device_transition(&mp->sbm_cm, new_state); 19310Sstevel@tonic-gate } 19320Sstevel@tonic-gate 19330Sstevel@tonic-gate static void 19340Sstevel@tonic-gate dr_init_mem_unit_data(dr_mem_unit_t *mp) 19350Sstevel@tonic-gate { 19360Sstevel@tonic-gate drmachid_t id = mp->sbm_cm.sbdev_id; 19370Sstevel@tonic-gate uint64_t bytes; 19380Sstevel@tonic-gate sbd_error_t *err; 19390Sstevel@tonic-gate static fn_t f = "dr_init_mem_unit_data"; 19400Sstevel@tonic-gate update_membounds_t umb; 19410Sstevel@tonic-gate 19420Sstevel@tonic-gate PR_MEM("%s...\n", f); 19430Sstevel@tonic-gate 19440Sstevel@tonic-gate /* a little sanity checking */ 19450Sstevel@tonic-gate ASSERT(mp->sbm_peer == NULL); 19460Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 19470Sstevel@tonic-gate 19480Sstevel@tonic-gate /* get basepfn of mem unit */ 19490Sstevel@tonic-gate err = drmach_mem_get_base_physaddr(id, &bytes); 19500Sstevel@tonic-gate if (err) { 19510Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19520Sstevel@tonic-gate mp->sbm_basepfn = (pfn_t)-1; 19530Sstevel@tonic-gate } else 19540Sstevel@tonic-gate mp->sbm_basepfn = _b64top(bytes); 19550Sstevel@tonic-gate 
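 /*
 * For illustration (hypothetical numbers): with 8 KB pages
 * (PAGESHIFT of 13), a base physical address of 0x1000000000
 * bytes from drmach gives sbm_basepfn = _b64top(0x1000000000)
 * = 0x800000.
 */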
19560Sstevel@tonic-gate /* attempt to get number of pages from PDA */ 19570Sstevel@tonic-gate err = drmach_mem_get_size(id, &bytes); 19580Sstevel@tonic-gate if (err) { 19590Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19600Sstevel@tonic-gate mp->sbm_npages = 0; 19610Sstevel@tonic-gate } else 19620Sstevel@tonic-gate mp->sbm_npages = _b64top(bytes); 19630Sstevel@tonic-gate 19640Sstevel@tonic-gate /* if didn't work, calculate using memlist */ 19650Sstevel@tonic-gate if (mp->sbm_npages == 0) { 19660Sstevel@tonic-gate struct memlist *ml, *mlist; 19670Sstevel@tonic-gate /* 19680Sstevel@tonic-gate * Either we couldn't open the PDA or our 19690Sstevel@tonic-gate * PDA has garbage in it. We must have the 19700Sstevel@tonic-gate * page count consistent and whatever the 19710Sstevel@tonic-gate * OS states has precedence over the PDA 19720Sstevel@tonic-gate * so let's check the kernel. 19730Sstevel@tonic-gate */ 19740Sstevel@tonic-gate /* TODO: curious comment. it suggests pda query should happen if this fails */ 19750Sstevel@tonic-gate PR_MEM("%s: PDA query failed for npages." 19760Sstevel@tonic-gate " Checking memlist for %s\n", 19770Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 19780Sstevel@tonic-gate 19790Sstevel@tonic-gate mlist = dr_get_memlist(mp); 19800Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) 19810Sstevel@tonic-gate mp->sbm_npages += btop(ml->size); 19820Sstevel@tonic-gate memlist_delete(mlist); 19830Sstevel@tonic-gate } 19840Sstevel@tonic-gate 19850Sstevel@tonic-gate err = drmach_mem_get_alignment(id, &bytes); 19860Sstevel@tonic-gate if (err) { 19870Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19880Sstevel@tonic-gate mp->sbm_alignment_mask = 0; 19890Sstevel@tonic-gate } else 19900Sstevel@tonic-gate mp->sbm_alignment_mask = _b64top(bytes); 19910Sstevel@tonic-gate 19920Sstevel@tonic-gate err = drmach_mem_get_slice_size(id, &bytes); 19930Sstevel@tonic-gate if (err) { 19940Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19950Sstevel@tonic-gate mp->sbm_slice_size = 0; /* paranoia */ 19960Sstevel@tonic-gate } else 19970Sstevel@tonic-gate mp->sbm_slice_size = bytes; 19980Sstevel@tonic-gate 19990Sstevel@tonic-gate /* 20000Sstevel@tonic-gate * Add memory to lgroup 20010Sstevel@tonic-gate */ 20020Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 20030Sstevel@tonic-gate umb.u_base = (uint64_t)mp->sbm_basepfn << MMU_PAGESHIFT; 20040Sstevel@tonic-gate umb.u_len = (uint64_t)mp->sbm_npages << MMU_PAGESHIFT; 20050Sstevel@tonic-gate 20060Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb); 20070Sstevel@tonic-gate 2008930Smathue PR_MEM("%s: %s (basepfn = 0x%lx, npgs = %ld)\n", 20090Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages); 20100Sstevel@tonic-gate } 20110Sstevel@tonic-gate 20120Sstevel@tonic-gate static int 20130Sstevel@tonic-gate dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *ml) 20140Sstevel@tonic-gate { 20150Sstevel@tonic-gate int err; 20160Sstevel@tonic-gate pfn_t base; 20170Sstevel@tonic-gate pgcnt_t npgs; 20180Sstevel@tonic-gate struct memlist *mc; 20190Sstevel@tonic-gate static fn_t f = "dr_reserve_mem_spans"; 20200Sstevel@tonic-gate 20210Sstevel@tonic-gate PR_MEM("%s...\n", f); 20220Sstevel@tonic-gate 20230Sstevel@tonic-gate /* 20240Sstevel@tonic-gate * Walk the supplied memlist scheduling each span for removal 20250Sstevel@tonic-gate * with kphysm_del_span. It is possible that a span may intersect 20260Sstevel@tonic-gate * an area occupied by the cage. 
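 * If a span is rejected (for example because it overlaps the
 * cage or is otherwise busy), kphysm_del_span() returns a value
 * other than KPHYSM_OK and this routine gives up with -1;
 * callers respond by falling back to copy/rename target
 * selection or by trying another candidate board.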
20270Sstevel@tonic-gate */ 20280Sstevel@tonic-gate for (mc = ml; mc != NULL; mc = mc->next) { 20290Sstevel@tonic-gate base = _b64top(mc->address); 20300Sstevel@tonic-gate npgs = _b64top(mc->size); 20310Sstevel@tonic-gate 20320Sstevel@tonic-gate err = kphysm_del_span(*mhp, base, npgs); 20330Sstevel@tonic-gate if (err != KPHYSM_OK) { 20340Sstevel@tonic-gate cmn_err(CE_WARN, "%s memory reserve failed." 20350Sstevel@tonic-gate " unexpected kphysm_del_span return value %d;" 20360Sstevel@tonic-gate " basepfn=0x%lx npages=%ld", 20370Sstevel@tonic-gate f, err, base, npgs); 20380Sstevel@tonic-gate 20390Sstevel@tonic-gate return (-1); 20400Sstevel@tonic-gate } 20410Sstevel@tonic-gate } 20420Sstevel@tonic-gate 20430Sstevel@tonic-gate return (0); 20440Sstevel@tonic-gate } 20450Sstevel@tonic-gate 20460Sstevel@tonic-gate /* debug counters */ 20470Sstevel@tonic-gate int dr_smt_realigned; 20480Sstevel@tonic-gate int dr_smt_preference[4]; 20490Sstevel@tonic-gate 20500Sstevel@tonic-gate #ifdef DEBUG 20510Sstevel@tonic-gate uint_t dr_ignore_board; /* if bit[bnum-1] set, board won't be candidate */ 20520Sstevel@tonic-gate #endif 20530Sstevel@tonic-gate 20540Sstevel@tonic-gate /* 20550Sstevel@tonic-gate * Find and reserve a copy/rename target board suitable for the 20560Sstevel@tonic-gate * given source board. 20570Sstevel@tonic-gate * All boards in the system are examined and categorized in relation to 20580Sstevel@tonic-gate * their memory size versus the source board's memory size. Order of 20590Sstevel@tonic-gate * preference is: 20600Sstevel@tonic-gate * 1st: board has same memory size 20610Sstevel@tonic-gate * 2nd: board has larger memory size 20620Sstevel@tonic-gate * 3rd: board has smaller memory size 20630Sstevel@tonic-gate * 4th: board has smaller memory size, available memory will be reduced. 20640Sstevel@tonic-gate * Boards in categories 3 and 4 will have their MCs reprogrammed so that the 20650Sstevel@tonic-gate * address span to which the MC responds appropriately covers 20660Sstevel@tonic-gate * the nonrelocatable span of the source board.
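 *
 * As an illustration (board sizes are hypothetical): when
 * detaching a 16 GB source board, another 16 GB board with the
 * same in-slice alignment is a 1st-choice candidate, a 32 GB
 * board is a 2nd choice, and an 8 GB board whose MC can be
 * re-based over the source's nonrelocatable span is a 3rd
 * choice, dropping to 4th if part of its memory would then fall
 * past the end of the slice.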
20670Sstevel@tonic-gate */ 20680Sstevel@tonic-gate static int 20690Sstevel@tonic-gate dr_select_mem_target(dr_handle_t *hp, 20700Sstevel@tonic-gate dr_mem_unit_t *s_mp, struct memlist *s_ml) 20710Sstevel@tonic-gate { 20720Sstevel@tonic-gate pgcnt_t sz = _b64top(s_mp->sbm_slice_size); 20730Sstevel@tonic-gate pgcnt_t sm = sz - 1; /* mem_slice_mask */ 20740Sstevel@tonic-gate pfn_t s_phi, t_phi; 20750Sstevel@tonic-gate 20760Sstevel@tonic-gate int n_sets = 4; /* same, larger, smaller, clipped */ 20770Sstevel@tonic-gate int preference; /* lower value is higher preference */ 20780Sstevel@tonic-gate int n_units_per_set; 20790Sstevel@tonic-gate int idx; 20800Sstevel@tonic-gate dr_mem_unit_t **sets; 20810Sstevel@tonic-gate 20820Sstevel@tonic-gate int t_bd; 20830Sstevel@tonic-gate int t_unit; 20840Sstevel@tonic-gate int rv; 20850Sstevel@tonic-gate int allow_src_memrange_modify; 20860Sstevel@tonic-gate int allow_targ_memrange_modify; 20870Sstevel@tonic-gate drmachid_t t_id; 20880Sstevel@tonic-gate dr_board_t *s_bp, *t_bp; 20890Sstevel@tonic-gate dr_mem_unit_t *t_mp, *c_mp; 20900Sstevel@tonic-gate struct memlist *d_ml, *t_ml, *x_ml; 20910Sstevel@tonic-gate memquery_t s_mq = {0}; 20920Sstevel@tonic-gate static fn_t f = "dr_select_mem_target"; 20930Sstevel@tonic-gate 20940Sstevel@tonic-gate PR_MEM("%s...\n", f); 20950Sstevel@tonic-gate 20960Sstevel@tonic-gate ASSERT(s_ml != NULL); 20970Sstevel@tonic-gate 20980Sstevel@tonic-gate n_units_per_set = MAX_BOARDS * MAX_MEM_UNITS_PER_BOARD; 20990Sstevel@tonic-gate sets = GETSTRUCT(dr_mem_unit_t *, n_units_per_set * n_sets); 21000Sstevel@tonic-gate 21010Sstevel@tonic-gate s_bp = hp->h_bd; 21020Sstevel@tonic-gate /* calculate the offset into the slice of the last source board pfn */ 21030Sstevel@tonic-gate ASSERT(s_mp->sbm_npages != 0); 21040Sstevel@tonic-gate s_phi = (s_mp->sbm_basepfn + s_mp->sbm_npages - 1) & sm; 21050Sstevel@tonic-gate 21060Sstevel@tonic-gate allow_src_memrange_modify = drmach_allow_memrange_modify(s_bp->b_id); 21070Sstevel@tonic-gate 21080Sstevel@tonic-gate /* 21090Sstevel@tonic-gate * Make one pass through all memory units on all boards 21100Sstevel@tonic-gate * and categorize them with respect to the source board. 21110Sstevel@tonic-gate */ 21120Sstevel@tonic-gate for (t_bd = 0; t_bd < MAX_BOARDS; t_bd++) { 21130Sstevel@tonic-gate /* 21140Sstevel@tonic-gate * The board structs are a contiguous array 21150Sstevel@tonic-gate * so we take advantage of that to find the 21160Sstevel@tonic-gate * correct board struct pointer for a given 21170Sstevel@tonic-gate * board number. 
21180Sstevel@tonic-gate */ 21190Sstevel@tonic-gate t_bp = dr_lookup_board(t_bd); 21200Sstevel@tonic-gate 21210Sstevel@tonic-gate /* source board can not be its own target */ 21220Sstevel@tonic-gate if (s_bp->b_num == t_bp->b_num) 21230Sstevel@tonic-gate continue; 21240Sstevel@tonic-gate 21250Sstevel@tonic-gate for (t_unit = 0; t_unit < MAX_MEM_UNITS_PER_BOARD; t_unit++) { 21260Sstevel@tonic-gate 21270Sstevel@tonic-gate t_mp = dr_get_mem_unit(t_bp, t_unit); 21280Sstevel@tonic-gate 21290Sstevel@tonic-gate /* this memory node must be attached */ 21300Sstevel@tonic-gate if (!DR_DEV_IS_ATTACHED(&t_mp->sbm_cm)) 21310Sstevel@tonic-gate continue; 21320Sstevel@tonic-gate 21330Sstevel@tonic-gate /* source unit can not be its own target */ 21340Sstevel@tonic-gate if (s_mp == t_mp) { 21350Sstevel@tonic-gate /* catch this is debug kernels */ 21360Sstevel@tonic-gate ASSERT(0); 21370Sstevel@tonic-gate continue; 21380Sstevel@tonic-gate } 21390Sstevel@tonic-gate 21400Sstevel@tonic-gate /* 21410Sstevel@tonic-gate * this memory node must not already be reserved 21420Sstevel@tonic-gate * by some other memory delete operation. 21430Sstevel@tonic-gate */ 21440Sstevel@tonic-gate if (t_mp->sbm_flags & DR_MFLAG_RESERVED) 21450Sstevel@tonic-gate continue; 21460Sstevel@tonic-gate 21470Sstevel@tonic-gate /* 21480Sstevel@tonic-gate * categorize the memory node 21490Sstevel@tonic-gate * If this is a smaller memory node, create a 21500Sstevel@tonic-gate * temporary, edited copy of the source board's 21510Sstevel@tonic-gate * memlist containing only the span of the non- 21520Sstevel@tonic-gate * relocatable pages. 21530Sstevel@tonic-gate */ 21540Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 21550Sstevel@tonic-gate t_id = t_mp->sbm_cm.sbdev_bp->b_id; 21560Sstevel@tonic-gate allow_targ_memrange_modify = 21570Sstevel@tonic-gate drmach_allow_memrange_modify(t_id); 21580Sstevel@tonic-gate if (t_mp->sbm_npages == s_mp->sbm_npages && 21590Sstevel@tonic-gate t_phi == s_phi) { 21600Sstevel@tonic-gate preference = 0; 21610Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 21620Sstevel@tonic-gate } else if (t_mp->sbm_npages > s_mp->sbm_npages && 21630Sstevel@tonic-gate t_phi > s_phi) { 21640Sstevel@tonic-gate /* 21650Sstevel@tonic-gate * Selecting this target will require modifying 21660Sstevel@tonic-gate * the source and/or target physical address 21670Sstevel@tonic-gate * ranges. Skip if not supported by platform. 21680Sstevel@tonic-gate */ 21690Sstevel@tonic-gate if (!allow_src_memrange_modify || 21700Sstevel@tonic-gate !allow_targ_memrange_modify) { 21710Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 21720Sstevel@tonic-gate "range relocation not supported " 21730Sstevel@tonic-gate "by platform\n", f, 21740Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 21750Sstevel@tonic-gate continue; 21760Sstevel@tonic-gate } 21770Sstevel@tonic-gate preference = 1; 21780Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 21790Sstevel@tonic-gate } else { 21800Sstevel@tonic-gate pfn_t pfn = 0; 21810Sstevel@tonic-gate 21820Sstevel@tonic-gate /* 21830Sstevel@tonic-gate * Selecting this target will require modifying 21840Sstevel@tonic-gate * the source and/or target physical address 21850Sstevel@tonic-gate * ranges. Skip if not supported by platform. 
21860Sstevel@tonic-gate */ 21870Sstevel@tonic-gate if (!allow_src_memrange_modify || 21880Sstevel@tonic-gate !allow_targ_memrange_modify) { 21890Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 21900Sstevel@tonic-gate "range relocation not supported " 21910Sstevel@tonic-gate "by platform\n", f, 21920Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 21930Sstevel@tonic-gate continue; 21940Sstevel@tonic-gate } 21950Sstevel@tonic-gate 21960Sstevel@tonic-gate /* 21970Sstevel@tonic-gate * Check if its mc can be programmed to relocate 21980Sstevel@tonic-gate * the active address range to match the 21990Sstevel@tonic-gate * nonrelocatable span of the source board. 22000Sstevel@tonic-gate */ 22010Sstevel@tonic-gate preference = 2; 22020Sstevel@tonic-gate 22030Sstevel@tonic-gate if (s_mq.phys_pages == 0) { 22040Sstevel@tonic-gate /* 22050Sstevel@tonic-gate * find non-relocatable span on 22060Sstevel@tonic-gate * source board. 22070Sstevel@tonic-gate */ 22080Sstevel@tonic-gate rv = kphysm_del_span_query( 22090Sstevel@tonic-gate s_mp->sbm_basepfn, 22100Sstevel@tonic-gate s_mp->sbm_npages, &s_mq); 22110Sstevel@tonic-gate if (rv != KPHYSM_OK) { 22120Sstevel@tonic-gate PR_MEM("%s: %s: unexpected" 22130Sstevel@tonic-gate " kphysm_del_span_query" 22140Sstevel@tonic-gate " return value %d;" 22150Sstevel@tonic-gate " basepfn 0x%lx, npages %ld\n", 22160Sstevel@tonic-gate f, 22170Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 22180Sstevel@tonic-gate rv, 22190Sstevel@tonic-gate s_mp->sbm_basepfn, 22200Sstevel@tonic-gate s_mp->sbm_npages); 22210Sstevel@tonic-gate 22220Sstevel@tonic-gate /* paranoia */ 22230Sstevel@tonic-gate s_mq.phys_pages = 0; 22240Sstevel@tonic-gate 22250Sstevel@tonic-gate continue; 22260Sstevel@tonic-gate } 22270Sstevel@tonic-gate 22280Sstevel@tonic-gate /* more paranoia */ 22290Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 22300Sstevel@tonic-gate ASSERT(s_mq.nonrelocatable != 0); 22310Sstevel@tonic-gate 22320Sstevel@tonic-gate /* 22330Sstevel@tonic-gate * this should not happen 22340Sstevel@tonic-gate * if it does, it simply means that 22350Sstevel@tonic-gate * we can not proceed with qualifying 22360Sstevel@tonic-gate * this target candidate. 22370Sstevel@tonic-gate */ 22380Sstevel@tonic-gate if (s_mq.nonrelocatable == 0) 22390Sstevel@tonic-gate continue; 22400Sstevel@tonic-gate 22410Sstevel@tonic-gate PR_MEM("%s: %s: nonrelocatable" 22420Sstevel@tonic-gate " span (0x%lx..0x%lx)\n", 22430Sstevel@tonic-gate f, 22440Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 22450Sstevel@tonic-gate s_mq.first_nonrelocatable, 22460Sstevel@tonic-gate s_mq.last_nonrelocatable); 22470Sstevel@tonic-gate } 22480Sstevel@tonic-gate 22490Sstevel@tonic-gate /* 22500Sstevel@tonic-gate * Round down the starting pfn of the 22510Sstevel@tonic-gate * nonrelocatable span on the source board 22520Sstevel@tonic-gate * to nearest programmable boundary possible 22530Sstevel@tonic-gate * with this target candidate. 
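 *
 * For illustration (mask value is hypothetical): if
 * sbm_alignment_mask covers the low 16 page bits, i.e. a
 * 0x10000-page programming granularity, then a
 * first_nonrelocatable pfn of 0x123456 is rounded down by the
 * statement below to pfn 0x120000.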
22540Sstevel@tonic-gate */ 22550Sstevel@tonic-gate pfn = s_mq.first_nonrelocatable & 22560Sstevel@tonic-gate ~t_mp->sbm_alignment_mask; 22570Sstevel@tonic-gate 22580Sstevel@tonic-gate /* skip candidate if memory is too small */ 22590Sstevel@tonic-gate if (pfn + t_mp->sbm_npages < 22600Sstevel@tonic-gate s_mq.last_nonrelocatable) 22610Sstevel@tonic-gate continue; 22620Sstevel@tonic-gate 22630Sstevel@tonic-gate /* 22640Sstevel@tonic-gate * reprogramming an mc to relocate its 22650Sstevel@tonic-gate * active address range means the beginning 22660Sstevel@tonic-gate * address to which the DIMMs respond will 22670Sstevel@tonic-gate * be somewhere above the slice boundary 22680Sstevel@tonic-gate * address. The larger the size of memory 22690Sstevel@tonic-gate * on this unit, the more likely part of it 22700Sstevel@tonic-gate * will exist beyond the end of the slice. 22710Sstevel@tonic-gate * The portion of the memory that does is 22720Sstevel@tonic-gate * unavailable to the system until the mc is 22730Sstevel@tonic-gate * reprogrammed to a more favorable base 22740Sstevel@tonic-gate * address. 22750Sstevel@tonic-gate * An attempt is made to avoid the loss by 22760Sstevel@tonic-gate * recalculating the mc base address relative 22770Sstevel@tonic-gate * to the end of the slice. This may produce 22780Sstevel@tonic-gate * a more favorable result. If not, we lower 22790Sstevel@tonic-gate * the board's preference rating so that it 22800Sstevel@tonic-gate * is one of the last candidate boards to be 22810Sstevel@tonic-gate * considered. 22820Sstevel@tonic-gate */ 22830Sstevel@tonic-gate if ((pfn + t_mp->sbm_npages) & ~sm) { 22840Sstevel@tonic-gate pfn_t p; 22850Sstevel@tonic-gate 22860Sstevel@tonic-gate ASSERT(sz >= t_mp->sbm_npages); 22870Sstevel@tonic-gate 22880Sstevel@tonic-gate /* 22890Sstevel@tonic-gate * calculate an alternative starting 22900Sstevel@tonic-gate * address relative to the end of the 22910Sstevel@tonic-gate * slice's address space. 22920Sstevel@tonic-gate */ 22930Sstevel@tonic-gate p = pfn & ~sm; 22940Sstevel@tonic-gate p = p + (sz - t_mp->sbm_npages); 22950Sstevel@tonic-gate p = p & ~t_mp->sbm_alignment_mask; 22960Sstevel@tonic-gate 22970Sstevel@tonic-gate if ((p > s_mq.first_nonrelocatable) || 22980Sstevel@tonic-gate (p + t_mp->sbm_npages < 22990Sstevel@tonic-gate s_mq.last_nonrelocatable)) { 23000Sstevel@tonic-gate 23010Sstevel@tonic-gate /* 23020Sstevel@tonic-gate * alternative starting addr 23030Sstevel@tonic-gate * won't work. Lower preference 23040Sstevel@tonic-gate * rating of this board, since 23050Sstevel@tonic-gate * some number of pages will be 23060Sstevel@tonic-gate * unavailable for use. 23070Sstevel@tonic-gate */ 23080Sstevel@tonic-gate preference = 3; 23090Sstevel@tonic-gate } else { 23100Sstevel@tonic-gate dr_smt_realigned++; 23110Sstevel@tonic-gate pfn = p; 23120Sstevel@tonic-gate } 23130Sstevel@tonic-gate } 23140Sstevel@tonic-gate 23150Sstevel@tonic-gate /* 23160Sstevel@tonic-gate * translate calculated pfn to an offset 23170Sstevel@tonic-gate * relative to the slice boundary. If the 23180Sstevel@tonic-gate * candidate board is selected, this offset 23190Sstevel@tonic-gate * will be used to calculate the values 23200Sstevel@tonic-gate * programmed into the mc.
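 *
 * For illustration (hypothetical values): with a slice of
 * sz = 0x200000 pages, sm is 0x1fffff; a calculated pfn of
 * 0x320000 yields sbm_slice_offset = 0x320000 & 0x1fffff =
 * 0x120000, i.e. the offset within the slice at which the
 * candidate's memory will be made to respond if it is selected.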
23210Sstevel@tonic-gate */ 23220Sstevel@tonic-gate t_mp->sbm_slice_offset = pfn & sm; 23230Sstevel@tonic-gate PR_MEM("%s: %s:" 23240Sstevel@tonic-gate " proposed mc offset 0x%lx\n", 23250Sstevel@tonic-gate f, 23260Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 23270Sstevel@tonic-gate t_mp->sbm_slice_offset); 23280Sstevel@tonic-gate } 23290Sstevel@tonic-gate 23300Sstevel@tonic-gate dr_smt_preference[preference]++; 23310Sstevel@tonic-gate 23320Sstevel@tonic-gate /* calculate index to start of preference set */ 23330Sstevel@tonic-gate idx = n_units_per_set * preference; 23340Sstevel@tonic-gate /* calculate offset to respective element */ 23350Sstevel@tonic-gate idx += t_bd * MAX_MEM_UNITS_PER_BOARD + t_unit; 23360Sstevel@tonic-gate 23370Sstevel@tonic-gate ASSERT(idx < n_units_per_set * n_sets); 23380Sstevel@tonic-gate sets[idx] = t_mp; 23390Sstevel@tonic-gate } 23400Sstevel@tonic-gate } 23410Sstevel@tonic-gate 23420Sstevel@tonic-gate /* 23430Sstevel@tonic-gate * NOTE: this would be a good place to sort each candidate 23440Sstevel@tonic-gate * set in to some desired order, e.g. memory size in ascending 23450Sstevel@tonic-gate * order. Without an additional sorting step here, the order 23460Sstevel@tonic-gate * within a set is ascending board number order. 23470Sstevel@tonic-gate */ 23480Sstevel@tonic-gate 23490Sstevel@tonic-gate c_mp = NULL; 23500Sstevel@tonic-gate x_ml = NULL; 23510Sstevel@tonic-gate t_ml = NULL; 23520Sstevel@tonic-gate for (idx = 0; idx < n_units_per_set * n_sets; idx++) { 23530Sstevel@tonic-gate memquery_t mq; 23540Sstevel@tonic-gate 23550Sstevel@tonic-gate /* cleanup t_ml after previous pass */ 23560Sstevel@tonic-gate if (t_ml != NULL) { 23570Sstevel@tonic-gate memlist_delete(t_ml); 23580Sstevel@tonic-gate t_ml = NULL; 23590Sstevel@tonic-gate } 23600Sstevel@tonic-gate 23610Sstevel@tonic-gate /* get candidate target board mem unit */ 23620Sstevel@tonic-gate t_mp = sets[idx]; 23630Sstevel@tonic-gate if (t_mp == NULL) 23640Sstevel@tonic-gate continue; 23650Sstevel@tonic-gate 23660Sstevel@tonic-gate /* get target board memlist */ 23670Sstevel@tonic-gate t_ml = dr_get_memlist(t_mp); 23680Sstevel@tonic-gate if (t_ml == NULL) { 23690Sstevel@tonic-gate cmn_err(CE_WARN, "%s: no memlist for" 23700Sstevel@tonic-gate " mem-unit %d, board %d", 23710Sstevel@tonic-gate f, 23720Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num, 23730Sstevel@tonic-gate t_mp->sbm_cm.sbdev_unum); 23740Sstevel@tonic-gate 23750Sstevel@tonic-gate continue; 23760Sstevel@tonic-gate } 23770Sstevel@tonic-gate 23780Sstevel@tonic-gate /* get appropriate source board memlist */ 23790Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 23800Sstevel@tonic-gate if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) { 23810Sstevel@tonic-gate spgcnt_t excess; 23820Sstevel@tonic-gate 23830Sstevel@tonic-gate /* 23840Sstevel@tonic-gate * make a copy of the source board memlist 23850Sstevel@tonic-gate * then edit it to remove the spans that 23860Sstevel@tonic-gate * are outside the calculated span of 23870Sstevel@tonic-gate * [pfn..s_mq.last_nonrelocatable]. 
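 * Two trims are applied below: the low end loses "excess" pages
 * whenever the target's slice offset starts above the source
 * base, and the high end loses everything from
 * last_nonrelocatable + 1 up to the end of the source span.
 * What survives in x_ml is the portion that actually has to be
 * copied to the candidate board.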
23880Sstevel@tonic-gate */ 23890Sstevel@tonic-gate if (x_ml != NULL) 23900Sstevel@tonic-gate memlist_delete(x_ml); 23910Sstevel@tonic-gate 23920Sstevel@tonic-gate x_ml = memlist_dup(s_ml); 23930Sstevel@tonic-gate if (x_ml == NULL) { 23940Sstevel@tonic-gate PR_MEM("%s: memlist_dup failed\n", f); 23950Sstevel@tonic-gate /* TODO: should abort */ 23960Sstevel@tonic-gate continue; 23970Sstevel@tonic-gate } 23980Sstevel@tonic-gate 23990Sstevel@tonic-gate /* trim off lower portion */ 24000Sstevel@tonic-gate excess = t_mp->sbm_slice_offset - 24010Sstevel@tonic-gate (s_mp->sbm_basepfn & sm); 24020Sstevel@tonic-gate 24030Sstevel@tonic-gate if (excess > 0) { 24040Sstevel@tonic-gate x_ml = memlist_del_span( 24050Sstevel@tonic-gate x_ml, 24060Sstevel@tonic-gate _ptob64(s_mp->sbm_basepfn), 24070Sstevel@tonic-gate _ptob64(excess)); 24080Sstevel@tonic-gate } 24090Sstevel@tonic-gate ASSERT(x_ml); 24100Sstevel@tonic-gate 24110Sstevel@tonic-gate /* 24120Sstevel@tonic-gate * Since this candidate target board is smaller 24130Sstevel@tonic-gate * than the source board, s_mq must have been 24140Sstevel@tonic-gate * initialized in previous loop while processing 24150Sstevel@tonic-gate * this or some other candidate board. 24160Sstevel@tonic-gate * FIXME: this is weak. 24170Sstevel@tonic-gate */ 24180Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 24190Sstevel@tonic-gate 24200Sstevel@tonic-gate /* trim off upper portion */ 24210Sstevel@tonic-gate excess = (s_mp->sbm_basepfn + s_mp->sbm_npages) 24220Sstevel@tonic-gate - (s_mq.last_nonrelocatable + 1); 24230Sstevel@tonic-gate if (excess > 0) { 24240Sstevel@tonic-gate pfn_t p; 24250Sstevel@tonic-gate 24260Sstevel@tonic-gate p = s_mq.last_nonrelocatable + 1; 24270Sstevel@tonic-gate x_ml = memlist_del_span( 24280Sstevel@tonic-gate x_ml, 24290Sstevel@tonic-gate _ptob64(p), 24300Sstevel@tonic-gate _ptob64(excess)); 24310Sstevel@tonic-gate } 24320Sstevel@tonic-gate 24330Sstevel@tonic-gate PR_MEM("%s: %s: edited source memlist:\n", 24340Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 24350Sstevel@tonic-gate PR_MEMLIST_DUMP(x_ml); 24360Sstevel@tonic-gate 24370Sstevel@tonic-gate #ifdef DEBUG 24380Sstevel@tonic-gate /* sanity check memlist */ 24390Sstevel@tonic-gate d_ml = x_ml; 24400Sstevel@tonic-gate while (d_ml->next != NULL) 24410Sstevel@tonic-gate d_ml = d_ml->next; 24420Sstevel@tonic-gate 24430Sstevel@tonic-gate ASSERT(d_ml->address + d_ml->size == 24440Sstevel@tonic-gate _ptob64(s_mq.last_nonrelocatable + 1)); 24450Sstevel@tonic-gate #endif 24460Sstevel@tonic-gate 24470Sstevel@tonic-gate /* 24480Sstevel@tonic-gate * x_ml now describes only the portion of the 24490Sstevel@tonic-gate * source board that will be moved during the 24500Sstevel@tonic-gate * copy/rename operation. 24510Sstevel@tonic-gate */ 24520Sstevel@tonic-gate d_ml = x_ml; 24530Sstevel@tonic-gate } else { 24540Sstevel@tonic-gate /* use original memlist; all spans will be moved */ 24550Sstevel@tonic-gate d_ml = s_ml; 24560Sstevel@tonic-gate } 24570Sstevel@tonic-gate 24580Sstevel@tonic-gate /* verify target can support source memory spans. 
*/ 24590Sstevel@tonic-gate if (memlist_canfit(d_ml, t_ml) == 0) { 24600Sstevel@tonic-gate PR_MEM("%s: source memlist won't" 24610Sstevel@tonic-gate " fit in target memlist\n", f); 24620Sstevel@tonic-gate PR_MEM("%s: source memlist:\n", f); 24630Sstevel@tonic-gate PR_MEMLIST_DUMP(d_ml); 24640Sstevel@tonic-gate PR_MEM("%s: target memlist:\n", f); 24650Sstevel@tonic-gate PR_MEMLIST_DUMP(t_ml); 24660Sstevel@tonic-gate 24670Sstevel@tonic-gate continue; 24680Sstevel@tonic-gate } 24690Sstevel@tonic-gate 24700Sstevel@tonic-gate /* NOTE: the value of d_ml is not used beyond this point */ 24710Sstevel@tonic-gate 24720Sstevel@tonic-gate PR_MEM("%s: checking for no-reloc in %s, " 24730Sstevel@tonic-gate " basepfn=0x%lx, npages=%ld\n", 24740Sstevel@tonic-gate f, 24750Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 24760Sstevel@tonic-gate t_mp->sbm_basepfn, 24770Sstevel@tonic-gate t_mp->sbm_npages); 24780Sstevel@tonic-gate 24790Sstevel@tonic-gate rv = kphysm_del_span_query( 24800Sstevel@tonic-gate t_mp->sbm_basepfn, t_mp->sbm_npages, &mq); 24810Sstevel@tonic-gate if (rv != KPHYSM_OK) { 24820Sstevel@tonic-gate PR_MEM("%s: kphysm_del_span_query:" 24830Sstevel@tonic-gate " unexpected return value %d\n", f, rv); 24840Sstevel@tonic-gate 24850Sstevel@tonic-gate continue; 24860Sstevel@tonic-gate } 24870Sstevel@tonic-gate 24880Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 24890Sstevel@tonic-gate PR_MEM("%s: candidate %s has" 24900Sstevel@tonic-gate " nonrelocatable span [0x%lx..0x%lx]\n", 24910Sstevel@tonic-gate f, 24920Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 24930Sstevel@tonic-gate mq.first_nonrelocatable, 24940Sstevel@tonic-gate mq.last_nonrelocatable); 24950Sstevel@tonic-gate 24960Sstevel@tonic-gate continue; 24970Sstevel@tonic-gate } 24980Sstevel@tonic-gate 24990Sstevel@tonic-gate #ifdef DEBUG 25000Sstevel@tonic-gate /* 25010Sstevel@tonic-gate * This is a debug tool for excluding certain boards 25020Sstevel@tonic-gate * from being selected as a target board candidate. 25030Sstevel@tonic-gate * dr_ignore_board is only tested by this driver. 25040Sstevel@tonic-gate * It must be set with adb, obp, /etc/system or your 25050Sstevel@tonic-gate * favorite debugger. 25060Sstevel@tonic-gate */ 25070Sstevel@tonic-gate if (dr_ignore_board & 25080Sstevel@tonic-gate (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) { 25090Sstevel@tonic-gate PR_MEM("%s: dr_ignore_board flag set," 25100Sstevel@tonic-gate " ignoring %s as candidate\n", 25110Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 25120Sstevel@tonic-gate continue; 25130Sstevel@tonic-gate } 25140Sstevel@tonic-gate #endif 25150Sstevel@tonic-gate 25160Sstevel@tonic-gate /* 25170Sstevel@tonic-gate * Reserve excess source board memory, if any. 25180Sstevel@tonic-gate * 25190Sstevel@tonic-gate * When the number of pages on the candidate target 25200Sstevel@tonic-gate * board is less than the number of pages on the source, 25210Sstevel@tonic-gate * then some spans (clearly) of the source board's address 25220Sstevel@tonic-gate * space will not be covered by physical memory after the 25230Sstevel@tonic-gate * copy/rename completes. The following code block 25240Sstevel@tonic-gate * schedules those spans to be deleted. 
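 *
 * For illustration (sizes are hypothetical): copying a 32 GB
 * source onto a 16 GB candidate leaves the upper half of the
 * source's address range with nothing behind it after the
 * rename; that remainder is the "excess" reserved for deletion
 * below, using the source unit's memhandle.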
		if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) {
			pfn_t pfn;
			uint64_t s_del_pa;
			struct memlist *ml;

			d_ml = memlist_dup(s_ml);
			if (d_ml == NULL) {
				PR_MEM("%s: cant dup src brd memlist\n", f);
				/* TODO: should abort */
				continue;
			}

			/* calculate base pfn relative to target board */
			pfn = s_mp->sbm_basepfn & ~sm;
			pfn += t_mp->sbm_slice_offset;

			/*
			 * cannot split dynamically added segment
			 */
			s_del_pa = _ptob64(pfn + t_mp->sbm_npages);
			PR_MEM("%s: proposed src delete pa=0x%lx\n", f,
			    s_del_pa);
			PR_MEM("%s: checking for split of dyn seg list:\n", f);
			PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs);
			for (ml = s_mp->sbm_dyn_segs; ml; ml = ml->next) {
				if (s_del_pa > ml->address &&
				    s_del_pa < ml->address + ml->size) {
					s_del_pa = ml->address;
					break;
				}
			}

			/* remove span that will reside on candidate board */
			d_ml = memlist_del_span(d_ml, _ptob64(pfn),
			    s_del_pa - _ptob64(pfn));

			PR_MEM("%s: %s: reserving src brd memlist:\n",
			    f, s_mp->sbm_cm.sbdev_path);
			PR_MEMLIST_DUMP(d_ml);

			/* reserve excess spans */
			if (dr_reserve_mem_spans(
			    &s_mp->sbm_memhandle, d_ml) != 0) {
				/* likely more non-reloc pages appeared */
				/* TODO: restart from top? */
				continue;
			}
		} else {
			/* no excess source board memory */
			d_ml = NULL;
		}

		s_mp->sbm_flags |= DR_MFLAG_RESERVED;

		/*
		 * reserve all memory on target board.
		 * NOTE: source board's memhandle is used.
		 *
		 * If this succeeds (eq 0), then target selection is
		 * complete and all unwanted memory spans, both source and
		 * target, have been reserved.  Loop is terminated.
		 */
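		/*
		 * Both the excess source spans reserved above and the
		 * target spans reserved below accumulate on the source
		 * board's memhandle, so a single kphysm_del_release() of
		 * s_mp->sbm_memhandle is expected to back out every
		 * reservation if this candidate is abandoned.
		 */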
		if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) {
			PR_MEM("%s: %s: target board memory reserved\n",
			    f, t_mp->sbm_cm.sbdev_path);

			/* a candidate target board is now reserved */
			t_mp->sbm_flags |= DR_MFLAG_RESERVED;
			c_mp = t_mp;

			/* *** EXITING LOOP *** */
			break;
		}

		/* did not successfully reserve the target board. */
		PR_MEM("%s: could not reserve target %s\n",
		    f, t_mp->sbm_cm.sbdev_path);

		/*
		 * NOTE: an undo of the dr_reserve_mem_span work
		 * will happen automatically when the memhandle
		 * (s_mp->sbm_memhandle) is kphysm_del_release'd.
		 */

		s_mp->sbm_flags &= ~DR_MFLAG_RESERVED;
	}

	/* clean up after memlist editing logic */
	if (x_ml != NULL)
		memlist_delete(x_ml);

	FREESTRUCT(sets, dr_mem_unit_t *, n_units_per_set * n_sets);

	/*
	 * c_mp will be NULL when the entire sets[] array
	 * has been searched without reserving a target board.
	 */
	if (c_mp == NULL) {
		PR_MEM("%s: %s: target selection failed.\n",
		    f, s_mp->sbm_cm.sbdev_path);

		if (t_ml != NULL)
			memlist_delete(t_ml);

		return (-1);
	}

	PR_MEM("%s: found target %s for source %s\n",
	    f,
	    c_mp->sbm_cm.sbdev_path,
	    s_mp->sbm_cm.sbdev_path);

	s_mp->sbm_peer = c_mp;
	s_mp->sbm_flags |= DR_MFLAG_SOURCE;
	s_mp->sbm_del_mlist = d_ml;	/* spans to be deleted, if any */
	s_mp->sbm_mlist = s_ml;
	s_mp->sbm_cm.sbdev_busy = 1;

	c_mp->sbm_peer = s_mp;
	c_mp->sbm_flags |= DR_MFLAG_TARGET;
	c_mp->sbm_del_mlist = t_ml;	/* spans to be deleted */
	c_mp->sbm_mlist = t_ml;
	c_mp->sbm_cm.sbdev_busy = 1;

	s_mp->sbm_flags &= ~DR_MFLAG_MEMRESIZE;
	if (c_mp->sbm_npages > s_mp->sbm_npages) {
		s_mp->sbm_flags |= DR_MFLAG_MEMUPSIZE;
		PR_MEM("%s: upsize detected (source=%ld < target=%ld)\n",
		    f, s_mp->sbm_npages, c_mp->sbm_npages);
	} else if (c_mp->sbm_npages < s_mp->sbm_npages) {
		s_mp->sbm_flags |= DR_MFLAG_MEMDOWNSIZE;
		PR_MEM("%s: downsize detected (source=%ld > target=%ld)\n",
		    f, s_mp->sbm_npages,
		    c_mp->sbm_npages);
	}

	return (0);
}

/*
 * Memlist support.
 */

/*
 * Determine whether the source memlist (s_mlist) will
 * fit into the target memlist (t_mlist) in terms of
 * size and holes (i.e., based on the same relative base address).
 */
static int
memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist)
{
	int		rv = 0;
	uint64_t	s_basepa, t_basepa;
	struct memlist	*s_ml, *t_ml;

	if ((s_mlist == NULL) || (t_mlist == NULL))
		return (0);

	/*
	 * Base both memlists on common base address (0).
	 */
	s_basepa = s_mlist->address;
	t_basepa = t_mlist->address;

	for (s_ml = s_mlist; s_ml; s_ml = s_ml->next)
		s_ml->address -= s_basepa;

	for (t_ml = t_mlist; t_ml; t_ml = t_ml->next)
		t_ml->address -= t_basepa;

	s_ml = s_mlist;
	for (t_ml = t_mlist; t_ml && s_ml; t_ml = t_ml->next) {
		uint64_t	s_start, s_end;
		uint64_t	t_start, t_end;

		t_start = t_ml->address;
		t_end = t_start + t_ml->size;

		for (; s_ml; s_ml = s_ml->next) {
			s_start = s_ml->address;
			s_end = s_start + s_ml->size;

			if ((s_start < t_start) || (s_end > t_end))
				break;
		}
	}
	/*
	 * If we ran out of source memlist chunks, that means
	 * we found a home for all of them.
	 */
	if (s_ml == NULL)
		rv = 1;

	/*
	 * Need to add base addresses back since memlists
	 * are probably in use by caller.
	 */
	for (s_ml = s_mlist; s_ml; s_ml = s_ml->next)
		s_ml->address += s_basepa;

	for (t_ml = t_mlist; t_ml; t_ml = t_ml->next)
		t_ml->address += t_basepa;

	return (rv);
}
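/*
 * Worked example with hypothetical addresses: a source memlist of
 * { [0x1000, 0x3000), [0x5000, 0x6000) } and a target memlist of
 * { [0x100000, 0x103000), [0x104000, 0x107000) } rebase to
 * { [0x0, 0x2000), [0x4000, 0x5000) } and { [0x0, 0x3000),
 * [0x4000, 0x7000) } respectively.  Every rebased source span lies
 * within some rebased target span, so memlist_canfit() returns 1.
 * If the second source span were [0x5000, 0x9000) instead, its rebased
 * end (0x8000) would exceed the matching target span and the result
 * would be 0.
 */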