/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I% %E% SMI"

/*
 * DR memory support routines.
 */

#include <sys/note.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/dditypes.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ddi_impldefs.h>
#include <sys/ndi_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/spitregs.h>
#include <sys/cpuvar.h>
#include <sys/promif.h>
#include <vm/seg_kmem.h>
#include <sys/lgrp.h>
#include <sys/platform_module.h>

#include <vm/page.h>

#include <sys/dr.h>
#include <sys/dr_util.h>

extern struct memlist *phys_install;

/* TODO: push this reference below drmach line */
extern int kcage_on;

/* for the DR*INTERNAL_ERROR macros.  see sys/dr.h. */
static char *dr_ie_fmt = "%M% %d";

static int dr_post_detach_mem_unit(dr_mem_unit_t *mp);
static int dr_reserve_mem_spans(memhandle_t *mhp,
    struct memlist *mlist);
static int dr_select_mem_target(dr_handle_t *hp,
    dr_mem_unit_t *mp, struct memlist *ml);
static void dr_init_mem_unit_data(dr_mem_unit_t *mp);

static struct memlist *memlist_dup(struct memlist *);
static int memlist_canfit(struct memlist *s_mlist,
    struct memlist *t_mlist);
static struct memlist *memlist_del_span(struct memlist *mlist,
    uint64_t base, uint64_t len);
static struct memlist *memlist_cat_span(struct memlist *mlist,
    uint64_t base, uint64_t len);

/*
 * dr_mem_unit_t.sbm_flags
 */
#define DR_MFLAG_RESERVED       0x01    /* mem unit reserved for delete */
#define DR_MFLAG_SOURCE         0x02    /* source brd of copy/rename op */
#define DR_MFLAG_TARGET         0x04    /* target brd of copy/rename op */
#define DR_MFLAG_MEMUPSIZE      0x08    /* move from big to small board */
#define DR_MFLAG_MEMDOWNSIZE    0x10    /* move from small to big board */
#define DR_MFLAG_MEMRESIZE      0x18    /* move to different size board */
#define DR_MFLAG_RELOWNER       0x20    /* memory release (delete) owner */
#define DR_MFLAG_RELDONE        0x40    /* memory release (delete) done */

/* helper macros */
#define _ptob64(p) ((uint64_t)(p) << PAGESHIFT)
#define _b64top(b) ((pgcnt_t)((b) >> PAGESHIFT))
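
/*
 * For example, with an 8 KB base page size (PAGESHIFT == 13),
 * _ptob64(0x100) == 0x200000 (2 MB) and _b64top(0x200000) == 0x100.
 */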

static struct memlist *
dr_get_memlist(dr_mem_unit_t *mp)
{
    struct memlist *mlist = NULL;
    sbd_error_t *err;
    static fn_t f = "dr_get_memlist";

    PR_MEM("%s for %s...\n", f, mp->sbm_cm.sbdev_path);

    /*
     * Return cached memlist, if present.
     * This memlist will be present following an
     * unconfigure (a.k.a: detach) of this memunit.
     * It should only be used in the case where a configure
     * is bringing this memunit back in without going
     * through the disconnect and connect states.
     */
    if (mp->sbm_mlist) {
        PR_MEM("%s: found cached memlist\n", f);

        mlist = memlist_dup(mp->sbm_mlist);
    } else {
        uint64_t basepa = _ptob64(mp->sbm_basepfn);

        /* attempt to construct a memlist using phys_install */

        /* round down to slice base address */
        basepa &= ~(mp->sbm_slice_size - 1);

        /* get a copy of phys_install to edit */
        memlist_read_lock();
        mlist = memlist_dup(phys_install);
        memlist_read_unlock();

        /* trim lower irrelevant span */
        if (mlist)
            mlist = memlist_del_span(mlist, 0ull, basepa);

        /* trim upper irrelevant span */
        if (mlist) {
            uint64_t endpa;

            basepa += mp->sbm_slice_size;
            endpa = _ptob64(physmax + 1);
            if (endpa > basepa)
                mlist = memlist_del_span(
                    mlist,
                    basepa,
                    endpa - basepa);
        }

        if (mlist) {
            /* successfully built a memlist */
            PR_MEM("%s: derived memlist from phys_install\n", f);
        }

        /* if no mlist yet, try platform layer */
        if (!mlist) {
            err = drmach_mem_get_memlist(
                mp->sbm_cm.sbdev_id, &mlist);
            if (err) {
                DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
                mlist = NULL;   /* paranoia */
            }
        }
    }

    PR_MEM("%s: memlist for %s\n", f, mp->sbm_cm.sbdev_path);
    PR_MEMLIST_DUMP(mlist);

    return (mlist);
}

/* synchronization state shared with the dr_mem_del_done() callback below */
typedef struct {
    kcondvar_t cond;
    kmutex_t lock;
    int error;
    int done;
} dr_release_mem_sync_t;

/*
 * Memory has been logically removed by the time this routine is called.
 */
static void
dr_mem_del_done(void *arg, int error)
{
    dr_release_mem_sync_t *ds = arg;

    mutex_enter(&ds->lock);
    ds->error = error;
    ds->done = 1;
    cv_signal(&ds->cond);
    mutex_exit(&ds->lock);
}

/*
 * When we reach here the memory being drained should have
 * already been reserved in dr_pre_release_mem().
 * Our only task here is to kick off the "drain" and wait
 * for it to finish.
 */
void
dr_release_mem(dr_common_unit_t *cp)
{
    dr_mem_unit_t *mp = (dr_mem_unit_t *)cp;
    int err;
    dr_release_mem_sync_t rms;
    static fn_t f = "dr_release_mem";

    /* check that this memory unit has been reserved */
    if (!(mp->sbm_flags & DR_MFLAG_RELOWNER)) {
        DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
        return;
    }

    bzero((void *) &rms, sizeof (rms));

    mutex_init(&rms.lock, NULL, MUTEX_DRIVER, NULL);
    cv_init(&rms.cond, NULL, CV_DRIVER, NULL);

    mutex_enter(&rms.lock);
    err = kphysm_del_start(mp->sbm_memhandle,
        dr_mem_del_done, (void *) &rms);
    if (err == KPHYSM_OK) {
        /* wait for completion or interrupt */
        while (!rms.done) {
            if (cv_wait_sig(&rms.cond, &rms.lock) == 0) {
                /* then there is a pending UNIX signal */
                (void) kphysm_del_cancel(mp->sbm_memhandle);

                /* wait for completion */
                while (!rms.done)
                    cv_wait(&rms.cond, &rms.lock);
            }
        }
        /* get the result of the memory delete operation */
        err = rms.error;
    }
    mutex_exit(&rms.lock);

    cv_destroy(&rms.cond);
    mutex_destroy(&rms.lock);

    if (err != KPHYSM_OK) {
        int e_code;

        switch (err) {
        case KPHYSM_ENOWORK:
            e_code = ESBD_NOERROR;
            break;

        case KPHYSM_EHANDLE:
        case KPHYSM_ESEQUENCE:
            e_code = ESBD_INTERNAL;
            break;

        case KPHYSM_ENOTVIABLE:
            e_code = ESBD_MEM_NOTVIABLE;
            break;

        case KPHYSM_EREFUSED:
            e_code = ESBD_MEM_REFUSED;
            break;

        case KPHYSM_ENONRELOC:
            e_code = ESBD_MEM_NONRELOC;
            break;

        case KPHYSM_ECANCELLED:
            e_code = ESBD_MEM_CANCELLED;
            break;

        case KPHYSM_ERESOURCE:
            e_code = ESBD_MEMFAIL;
            break;

        default:
            cmn_err(CE_WARN,
                "%s: unexpected kphysm error code %d,"
                " id 0x%p",
                f, err, mp->sbm_cm.sbdev_id);

            e_code = ESBD_IO;
            break;
        }

        if (e_code != ESBD_NOERROR) {
            dr_dev_err(CE_IGNORE, &mp->sbm_cm, e_code);
        }
    }
}

void
dr_attach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
{
    _NOTE(ARGUNUSED(hp))

    dr_mem_unit_t *mp = (dr_mem_unit_t *)cp;
    struct memlist *ml, *mc;
    sbd_error_t *err;
    static fn_t f = "dr_attach_mem";

    PR_MEM("%s...\n", f);

    dr_lock_status(hp->h_bd);
    err = drmach_configure(cp->sbdev_id, 0);
    dr_unlock_status(hp->h_bd);
    if (err) {
        DRERR_SET_C(&cp->sbdev_error, &err);
        return;
    }

    ml = dr_get_memlist(mp);
    for (mc = ml; mc; mc = mc->next) {
        int rv;
        sbd_error_t *err;

        rv = kphysm_add_memory_dynamic(
            (pfn_t)(mc->address >> PAGESHIFT),
            (pgcnt_t)(mc->size >> PAGESHIFT));
        if (rv != KPHYSM_OK) {
            /*
             * translate kphysm error and
             * store in devlist error
             */
            switch (rv) {
            case KPHYSM_ERESOURCE:
                rv = ESBD_NOMEM;
                break;

            case KPHYSM_EFAULT:
                rv = ESBD_FAULT;
                break;

            default:
                rv = ESBD_INTERNAL;
                break;
            }

            if (rv == ESBD_INTERNAL) {
                DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);
            } else
                dr_dev_err(CE_WARN, &mp->sbm_cm, rv);
            break;
        }

        err = drmach_mem_add_span(
            mp->sbm_cm.sbdev_id, mc->address, mc->size);
        if (err) {
            DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
            break;
        }
    }

    memlist_delete(ml);

    /* back out if configure failed */
    if (mp->sbm_cm.sbdev_error != NULL) {
        dr_lock_status(hp->h_bd);
        err = drmach_unconfigure(cp->sbdev_id, DRMACH_DEVI_REMOVE);
        if (err)
            sbd_err_clear(&err);
        dr_unlock_status(hp->h_bd);
    }
}
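
/*
 * The scrub below writes DR_SCRUB_VALUE through every coherency unit
 * (DRMACH_COHERENCY_UNIT, typically one 64-byte E$ line) of the spans
 * passed in, while bound to a CPU close to that memory.  This pulls every
 * line of the departing memory into that CPU's ecache, which is then
 * flushed by drmach_cpu_flush_ecache_sync() before the binding is dropped.
 */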

#define DR_SCRUB_VALUE 0x0d0e0a0d0b0e0e0fULL

static void
dr_mem_ecache_scrub(dr_mem_unit_t *mp, struct memlist *mlist)
{
#ifdef DEBUG
    clock_t stime = lbolt;
#endif /* DEBUG */

    struct memlist *ml;
    uint64_t scrub_value = DR_SCRUB_VALUE;
    processorid_t cpuid;
    static fn_t f = "dr_mem_ecache_scrub";

    cpuid = drmach_mem_cpu_affinity(mp->sbm_cm.sbdev_id);
    affinity_set(cpuid);

    PR_MEM("%s: using proc %d, memlist...\n", f,
        (cpuid == CPU_CURRENT) ? CPU->cpu_id : cpuid);
    PR_MEMLIST_DUMP(mlist);

    for (ml = mlist; ml; ml = ml->next) {
        uint64_t dst_pa;
        uint64_t nbytes;

        /* calculate the destination physical address */
        dst_pa = ml->address;
        if (ml->address & PAGEOFFSET)
            cmn_err(CE_WARN,
                "%s: address (0x%llx) not on "
                "page boundary", f, ml->address);

        nbytes = ml->size;
        if (ml->size & PAGEOFFSET)
            cmn_err(CE_WARN,
                "%s: size (0x%llx) not on "
                "page boundary", f, ml->size);

        /*LINTED*/
        while (nbytes > 0) {
            /* write 64 bits to dst_pa */
            stdphys(dst_pa, scrub_value);

            /* increment/decrement by cacheline sizes */
            dst_pa += DRMACH_COHERENCY_UNIT;
            nbytes -= DRMACH_COHERENCY_UNIT;
        }
    }

    /*
     * flush this cpu's ecache and take care to ensure
     * that all of its bus transactions have retired.
     */
    drmach_cpu_flush_ecache_sync();

    affinity_clear();

#ifdef DEBUG
    stime = lbolt - stime;
    PR_MEM("%s: scrub ticks = %ld (%ld secs)\n", f, stime, stime / hz);
#endif /* DEBUG */
}

static int
dr_move_memory(dr_handle_t *hp, dr_mem_unit_t *s_mp, dr_mem_unit_t *t_mp)
{
    time_t copytime;
    drmachid_t cr_id;
    dr_sr_handle_t *srhp;
    struct memlist *c_ml, *d_ml;
    sbd_error_t *err;
    static fn_t f = "dr_move_memory";

    PR_MEM("%s: (INLINE) moving memory from %s to %s\n",
        f,
        s_mp->sbm_cm.sbdev_path,
        t_mp->sbm_cm.sbdev_path);

    ASSERT(s_mp->sbm_flags & DR_MFLAG_SOURCE);
    ASSERT(s_mp->sbm_peer == t_mp);
    ASSERT(s_mp->sbm_mlist);

    ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
    ASSERT(t_mp->sbm_peer == s_mp);

    /*
     * create a memlist of spans to copy by removing
     * the spans that have been deleted, if any, from
     * the full source board memlist.  s_mp->sbm_del_mlist
     * will be NULL if there were no spans deleted from
     * the source board.
     */
    c_ml = memlist_dup(s_mp->sbm_mlist);
    d_ml = s_mp->sbm_del_mlist;
    while (d_ml != NULL) {
        c_ml = memlist_del_span(c_ml, d_ml->address, d_ml->size);
        d_ml = d_ml->next;
    }

    affinity_set(drmach_mem_cpu_affinity(t_mp->sbm_cm.sbdev_id));

    err = drmach_copy_rename_init(
        t_mp->sbm_cm.sbdev_id, _ptob64(t_mp->sbm_slice_offset),
        s_mp->sbm_cm.sbdev_id, c_ml, &cr_id);
    if (err) {
        DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
        affinity_clear();
        return (-1);
    }

    srhp = dr_get_sr_handle(hp);
    ASSERT(srhp);

    copytime = lbolt;

    /* Quiesce the OS. */
    if (dr_suspend(srhp)) {
        cmn_err(CE_WARN, "%s: failed to quiesce OS"
            " for copy-rename", f);

        dr_release_sr_handle(srhp);
        err = drmach_copy_rename_fini(cr_id);
        if (err) {
            /*
             * no error is expected since the program has
             * not yet run.
             */

            /* catch this in debug kernels */
            ASSERT(0);

            sbd_err_clear(&err);
        }

        /* suspend error reached via hp */
        s_mp->sbm_cm.sbdev_error = hp->h_err;
        hp->h_err = NULL;

        affinity_clear();
        return (-1);
    }

    /*
     * Rename memory for lgroup.
     * Source and target board numbers are packaged in arg.
     */
    {
        dr_board_t *t_bp, *s_bp;

        s_bp = s_mp->sbm_cm.sbdev_bp;
        t_bp = t_mp->sbm_cm.sbdev_bp;

        lgrp_plat_config(LGRP_CONFIG_MEM_RENAME,
            (uintptr_t)(s_bp->b_num | (t_bp->b_num << 16)));
    }

    drmach_copy_rename(cr_id);

    /* Resume the OS. */
    dr_resume(srhp);

    copytime = lbolt - copytime;

    dr_release_sr_handle(srhp);
    err = drmach_copy_rename_fini(cr_id);
    if (err)
        DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);

    affinity_clear();

    PR_MEM("%s: copy-rename elapsed time = %ld ticks (%ld secs)\n",
        f, copytime, copytime / hz);

    /* return -1 if dr_suspend or copy/rename recorded an error */
    return (err == NULL ? 0 : -1);
}
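
/*
 * Outline of the copy-rename performed by dr_move_memory() above:
 *   1. build the copy list: the full source memlist minus any deleted spans
 *   2. drmach_copy_rename_init() prepares the platform copy-rename program
 *   3. dr_suspend() quiesces the OS
 *   4. lgrp_plat_config(LGRP_CONFIG_MEM_RENAME, ...) passes the source and
 *      target board numbers to the lgroup code
 *   5. drmach_copy_rename() copies memory and performs the rename
 *   6. dr_resume() restarts the OS and drmach_copy_rename_fini() reports
 *      any error from the copy-rename program
 */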

/*
 * If the detaching node contains memory that is "non-permanent"
 * then the memory addresses are simply cleared.  If the memory
 * is non-relocatable, then do a copy-rename.
 */
void
dr_detach_mem(dr_handle_t *hp, dr_common_unit_t *cp)
{
    int rv = 0;
    dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp;
    dr_mem_unit_t *t_mp;
    dr_state_t state;
    static fn_t f = "dr_detach_mem";

    PR_MEM("%s...\n", f);

    /* lookup target mem unit and target board structure, if any */
    if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
        t_mp = s_mp->sbm_peer;
        ASSERT(t_mp != NULL);
        ASSERT(t_mp->sbm_peer == s_mp);
    } else {
        t_mp = NULL;
    }

    /* verify mem unit's state is UNREFERENCED */
    state = s_mp->sbm_cm.sbdev_state;
    if (state != DR_STATE_UNREFERENCED) {
        dr_dev_err(CE_IGNORE, &s_mp->sbm_cm, ESBD_STATE);
        return;
    }

    /* verify target mem unit's state is UNREFERENCED, if any */
    if (t_mp != NULL) {
        state = t_mp->sbm_cm.sbdev_state;
        if (state != DR_STATE_UNREFERENCED) {
            dr_dev_err(CE_IGNORE, &t_mp->sbm_cm, ESBD_STATE);
            return;
        }
    }

    /*
     * Scrub deleted memory.  This will cause all cachelines
     * referencing the memory to only be in the local cpu's
     * ecache.
     */
    if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
        /* no del mlist for src<=dst mem size copy/rename */
        if (s_mp->sbm_del_mlist)
            dr_mem_ecache_scrub(s_mp, s_mp->sbm_del_mlist);
    }
    if (t_mp != NULL && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) {
        ASSERT(t_mp->sbm_del_mlist);
        dr_mem_ecache_scrub(t_mp, t_mp->sbm_del_mlist);
    }

    /*
     * If there is no target board (no copy/rename was needed), then
     * we're done!
     */
    if (t_mp == NULL) {
        sbd_error_t *err;
        /*
         * Reprogram interconnect hardware and disable
         * memory controllers for memory node that's going away.
         */

        err = drmach_mem_disable(s_mp->sbm_cm.sbdev_id);
        if (err) {
            DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err);
            rv = -1;
        }
    } else {
        rv = dr_move_memory(hp, s_mp, t_mp);
        PR_MEM("%s: %s memory COPY-RENAME (board %d -> %d)\n",
            f,
            rv ? "FAILED" : "COMPLETED",
            s_mp->sbm_cm.sbdev_bp->b_num,
            t_mp->sbm_cm.sbdev_bp->b_num);

        if (rv != 0)
            (void) dr_cancel_mem(s_mp);
    }

    if (rv == 0) {
        sbd_error_t *err;

        dr_lock_status(hp->h_bd);
        err = drmach_unconfigure(s_mp->sbm_cm.sbdev_id,
            DRMACH_DEVI_REMOVE);
        dr_unlock_status(hp->h_bd);
        if (err)
            sbd_err_clear(&err);
    }
}

#ifndef _STARFIRE
/*
 * XXX workaround for certain lab configurations (see also starcat drmach.c)
 * Temporary code to get around observed incorrect results from
 * kphysm_del_span_query when the queried span contains address spans
 * not occupied by memory in between spans that do have memory.
 * This routine acts as a wrapper to kphysm_del_span_query.  It builds
 * a memlist from phys_install of spans that exist between base and
 * base + npages, inclusively.  Kphysm_del_span_query is called for each
 * node in the memlist with the results accumulated in *mp.
 */
static int
dr_del_span_query(pfn_t base, pgcnt_t npages, memquery_t *mp)
{
    uint64_t pa = _ptob64(base);
    uint64_t sm = ~ (137438953472ull - 1);  /* 2^37 == 128 GB */
    uint64_t sa = pa & sm;
    struct memlist *mlist, *ml;
    int rv;

    npages = npages; /* silence lint */
    memlist_read_lock();
    mlist = memlist_dup(phys_install);
    memlist_read_unlock();

again:
    for (ml = mlist; ml; ml = ml->next) {
        if ((ml->address & sm) != sa) {
            mlist = memlist_del_span(mlist, ml->address, ml->size);
            goto again;
        }
    }

    mp->phys_pages = 0;
    mp->managed = 0;
    mp->nonrelocatable = 0;
    mp->first_nonrelocatable = (pfn_t)-1;   /* XXX */
    mp->last_nonrelocatable = 0;

    for (ml = mlist; ml; ml = ml->next) {
        memquery_t mq;

        rv = kphysm_del_span_query(
            _b64top(ml->address), _b64top(ml->size), &mq);
        if (rv)
            break;

        mp->phys_pages += mq.phys_pages;
        mp->managed += mq.managed;
        mp->nonrelocatable += mq.nonrelocatable;

        if (mq.nonrelocatable != 0) {
            if (mq.first_nonrelocatable < mp->first_nonrelocatable)
                mp->first_nonrelocatable =
                    mq.first_nonrelocatable;
            if (mq.last_nonrelocatable > mp->last_nonrelocatable)
                mp->last_nonrelocatable =
                    mq.last_nonrelocatable;
        }
    }

    if (mp->nonrelocatable == 0)
        mp->first_nonrelocatable = 0;   /* XXX */

    memlist_delete(mlist);
    return (rv);
}

/*
 * In non-Starfire builds, calls to kphysm_del_span_query() below this
 * point resolve to the dr_del_span_query() wrapper above.
 */
#define kphysm_del_span_query dr_del_span_query
#endif /* _STARFIRE */

/*
 * NOTE: This routine is only partially smart about multiple
 *	mem-units.  Need to make mem-status structure smart
 *	about them also.
 */
int
dr_mem_status(dr_handle_t *hp, dr_devset_t devset, sbd_dev_stat_t *dsp)
{
    int m, mix;
    memdelstat_t mdst;
    memquery_t mq;
    dr_board_t *bp;
    dr_mem_unit_t *mp;
    sbd_mem_stat_t *msp;
    static fn_t f = "dr_mem_status";

    bp = hp->h_bd;
    devset &= DR_DEVS_PRESENT(bp);

    for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
        int rv;
        sbd_error_t *err;
        drmach_status_t pstat;
        dr_mem_unit_t *p_mp;

        if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
            continue;

        mp = dr_get_mem_unit(bp, m);

        if (mp->sbm_cm.sbdev_state == DR_STATE_EMPTY) {
            /* present, but not fully initialized */
            continue;
        }

        if (mp->sbm_cm.sbdev_id == (drmachid_t)0)
            continue;

        /* fetch platform status */
        err = drmach_status(mp->sbm_cm.sbdev_id, &pstat);
        if (err) {
            DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
            continue;
        }

        msp = &dsp->d_mem;
        bzero((caddr_t)msp, sizeof (*msp));

        strncpy(msp->ms_cm.c_id.c_name, pstat.type,
            sizeof (msp->ms_cm.c_id.c_name));
        msp->ms_cm.c_id.c_type = mp->sbm_cm.sbdev_type;
        msp->ms_cm.c_id.c_unit = SBD_NULL_UNIT;
        msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
        msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy | pstat.busy;
        msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;
        msp->ms_cm.c_ostate = mp->sbm_cm.sbdev_ostate;

        msp->ms_totpages = mp->sbm_npages;
        msp->ms_basepfn = mp->sbm_basepfn;
        msp->ms_pageslost = mp->sbm_pageslost;
        msp->ms_cage_enabled = kcage_on;

        if (mp->sbm_flags & DR_MFLAG_RESERVED)
            p_mp = mp->sbm_peer;
        else
            p_mp = NULL;

        if (p_mp == NULL) {
            msp->ms_peer_is_target = 0;
            msp->ms_peer_ap_id[0] = '\0';
        } else if (p_mp->sbm_flags & DR_MFLAG_RESERVED) {
            char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
            char *minor;

            /*
             * b_dip doesn't have to be held for ddi_pathname()
             * because the board struct (dr_board_t) will be
             * destroyed before b_dip detaches.
             */
            (void) ddi_pathname(bp->b_dip, path);
            minor = strchr(p_mp->sbm_cm.sbdev_path, ':');

            snprintf(msp->ms_peer_ap_id,
                sizeof (msp->ms_peer_ap_id), "%s%s",
                path, (minor == NULL) ? "" : minor);

            kmem_free(path, MAXPATHLEN);

            if (p_mp->sbm_flags & DR_MFLAG_TARGET)
                msp->ms_peer_is_target = 1;
        }

        if (mp->sbm_flags & DR_MFLAG_RELOWNER)
            rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
        else
            rv = KPHYSM_EHANDLE;    /* force 'if' to fail */

        if (rv == KPHYSM_OK) {
            /*
             * Any pages above managed are "free",
             * i.e. they have been collected.
             */
            msp->ms_detpages += (uint_t)(mdst.collected +
                mdst.phys_pages - mdst.managed);
        } else {
            /*
             * If we're UNREFERENCED or UNCONFIGURED,
             * then the number of detached pages is
             * however many pages are on the board.
             * I.e. detached = not in use by OS.
             */
            switch (msp->ms_cm.c_ostate) {
            /*
             * changed to use cfgadm states
             *
             * was:
             *	case DR_STATE_UNREFERENCED:
             *	case DR_STATE_UNCONFIGURED:
             */
            case SBD_STAT_UNCONFIGURED:
                msp->ms_detpages = msp->ms_totpages;
                break;

            default:
                break;
            }
        }

        /*
         * kphysm_del_span_query can report non-reloc pages = total
         * pages for memory that is not yet configured
         */
        if (mp->sbm_cm.sbdev_state != DR_STATE_UNCONFIGURED) {

            rv = kphysm_del_span_query(mp->sbm_basepfn,
                mp->sbm_npages, &mq);

            if (rv == KPHYSM_OK) {
                msp->ms_managed_pages = mq.managed;
                msp->ms_noreloc_pages = mq.nonrelocatable;
                msp->ms_noreloc_first =
                    mq.first_nonrelocatable;
                msp->ms_noreloc_last =
                    mq.last_nonrelocatable;
                msp->ms_cm.c_sflags = 0;
                if (mq.nonrelocatable) {
                    SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
                        msp->ms_cm.c_sflags);
                }
            } else {
                PR_MEM("%s: kphysm_del_span_query() = %d\n",
                    f, rv);
            }
        }

        /*
         * Check source unit state during copy-rename
         */
        if ((mp->sbm_flags & DR_MFLAG_SOURCE) &&
            (mp->sbm_cm.sbdev_state == DR_STATE_UNREFERENCED ||
            mp->sbm_cm.sbdev_state == DR_STATE_RELEASE))
            msp->ms_cm.c_ostate = SBD_STAT_CONFIGURED;

        mix++;
        dsp++;
    }

    return (mix);
}

int
dr_pre_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
    _NOTE(ARGUNUSED(hp))

    int err_flag = 0;
    int d;
    sbd_error_t *err;
    static fn_t f = "dr_pre_attach_mem";

    PR_MEM("%s...\n", f);

    for (d = 0; d < devnum; d++) {
        dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];
        dr_state_t state;

        cmn_err(CE_CONT, "OS configure %s", mp->sbm_cm.sbdev_path);

        state = mp->sbm_cm.sbdev_state;
        switch (state) {
        case DR_STATE_UNCONFIGURED:
            PR_MEM("%s: recovering from UNCONFIG for %s\n",
                f,
                mp->sbm_cm.sbdev_path);

            /* use memlist cached by dr_post_detach_mem_unit */
            ASSERT(mp->sbm_mlist != NULL);
            PR_MEM("%s: re-configuring cached memlist for %s:\n",
                f, mp->sbm_cm.sbdev_path);
            PR_MEMLIST_DUMP(mp->sbm_mlist);

            /* kphysm del handle should have been freed */
            ASSERT((mp->sbm_flags & DR_MFLAG_RELOWNER) == 0);

            /*FALLTHROUGH*/

        case DR_STATE_CONNECTED:
            PR_MEM("%s: reprogramming mem hardware on %s\n",
                f, mp->sbm_cm.sbdev_bp->b_path);

            PR_MEM("%s: enabling %s\n",
                f, mp->sbm_cm.sbdev_path);

            err = drmach_mem_enable(mp->sbm_cm.sbdev_id);
            if (err) {
                DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
                err_flag = 1;
            }
            break;

        default:
            dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_STATE);
            err_flag = 1;
            break;
        }

        /* exit for loop if error encountered */
        if (err_flag)
            break;
    }

    return (err_flag ? -1 : 0);
}

int
dr_post_attach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
    _NOTE(ARGUNUSED(hp))

    int d;
    static fn_t f = "dr_post_attach_mem";

    PR_MEM("%s...\n", f);

    for (d = 0; d < devnum; d++) {
        dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];
        struct memlist *mlist, *ml;

        mlist = dr_get_memlist(mp);
        if (mlist == NULL) {
            dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_MEMFAIL);
            continue;
        }

        /*
         * Verify the memory really did successfully attach
         * by checking for its existence in phys_install.
         */
        memlist_read_lock();
        if (memlist_intersect(phys_install, mlist) == 0) {
            memlist_read_unlock();

            DR_DEV_INTERNAL_ERROR(&mp->sbm_cm);

            PR_MEM("%s: %s memlist not in phys_install",
                f, mp->sbm_cm.sbdev_path);

            memlist_delete(mlist);
            continue;
        }
        memlist_read_unlock();

        for (ml = mlist; ml != NULL; ml = ml->next) {
            sbd_error_t *err;

            err = drmach_mem_add_span(
                mp->sbm_cm.sbdev_id,
                ml->address,
                ml->size);
            if (err)
                DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);
        }

        memlist_delete(mlist);

        /*
         * Destroy cached memlist, if any.
         * There will be a cached memlist in sbm_mlist if
         * this board is being configured directly after
         * an unconfigure.
         * To support this transition, dr_post_detach_mem
         * left a copy of the last known memlist in sbm_mlist.
         * This memlist could differ from any derived from
         * hardware if while this memunit was last configured
         * the system detected and deleted bad pages from
         * phys_install.  The location of those bad pages
         * will be reflected in the cached memlist.
         */
        if (mp->sbm_mlist) {
            memlist_delete(mp->sbm_mlist);
            mp->sbm_mlist = NULL;
        }

        /*
         * TODO: why is this call to dr_init_mem_unit_data here?
         * this has been done at discovery or connect time, so this is
         * probably redundant and unnecessary.
         */
        dr_init_mem_unit_data(mp);
    }

    return (0);
}
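
/*
 * Pre-detach only announces the unconfigure of each mem unit; the actual
 * deletion happens through dr_release_mem() and dr_post_detach_mem().
 */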
int
dr_pre_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
    _NOTE(ARGUNUSED(hp))

    int d;

    for (d = 0; d < devnum; d++) {
        dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];

        cmn_err(CE_CONT, "OS unconfigure %s", mp->sbm_cm.sbdev_path);
    }

    return (0);
}


int
dr_post_detach_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
{
    _NOTE(ARGUNUSED(hp))

    int d, rv;
    static fn_t f = "dr_post_detach_mem";

    PR_MEM("%s...\n", f);

    rv = 0;
    for (d = 0; d < devnum; d++) {
        dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d];

        ASSERT(mp->sbm_cm.sbdev_bp == hp->h_bd);

        if (dr_post_detach_mem_unit(mp))
            rv = -1;
    }

    return (rv);
}
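
/*
 * Add the given spans back to the running OS (kphysm_add_memory_dynamic)
 * and to the platform's view of this mem unit (drmach_mem_add_span), e.g.
 * when returning excess target memory after a copy-rename.
 */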
static void
dr_add_memory_spans(dr_mem_unit_t *mp, struct memlist *ml)
{
    static fn_t f = "dr_add_memory_spans";

    PR_MEM("%s...", f);
    PR_MEMLIST_DUMP(ml);

#ifdef DEBUG
    memlist_read_lock();
    if (memlist_intersect(phys_install, ml)) {
        PR_MEM("%s:WARNING: memlist intersects with phys_install\n", f);
    }
    memlist_read_unlock();
#endif

    for (; ml; ml = ml->next) {
        pfn_t base;
        pgcnt_t npgs;
        int rv;
        sbd_error_t *err;

        base = _b64top(ml->address);
        npgs = _b64top(ml->size);

        rv = kphysm_add_memory_dynamic(base, npgs);

        err = drmach_mem_add_span(
            mp->sbm_cm.sbdev_id,
            ml->address,
            ml->size);

        if (err)
            DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err);

        if (rv != KPHYSM_OK) {
            cmn_err(CE_WARN, "%s:"
                " unexpected kphysm_add_memory_dynamic"
                " return value %d;"
                " basepfn=0x%lx, npages=%ld\n",
                f, rv, base, npgs);

            continue;
        }
    }
}

static int
dr_post_detach_mem_unit(dr_mem_unit_t *s_mp)
{
    uint64_t sz = s_mp->sbm_slice_size;
    uint64_t sm = sz - 1;
    /* old and new below refer to PAs before and after copy-rename */
    uint64_t s_old_basepa, s_new_basepa;
    uint64_t t_old_basepa, t_new_basepa;
    uint64_t t_new_smallsize = 0;
    dr_mem_unit_t *t_mp, *x_mp;
    struct memlist *ml;
    int rv;
    sbd_error_t *err;
    static fn_t f = "dr_post_detach_mem_unit";

    PR_MEM("%s...\n", f);

    /* s_mp->sbm_del_mlist could be NULL, meaning no deleted spans */
    PR_MEM("%s: %s: deleted memlist (EMPTY maybe okay):\n",
        f, s_mp->sbm_cm.sbdev_path);
    PR_MEMLIST_DUMP(s_mp->sbm_del_mlist);

    /* sanity check */
    ASSERT(s_mp->sbm_del_mlist == NULL ||
        (s_mp->sbm_flags & DR_MFLAG_RELDONE) != 0);

    if (s_mp->sbm_flags & DR_MFLAG_SOURCE) {
        t_mp = s_mp->sbm_peer;
        ASSERT(t_mp != NULL);
        ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET);
        ASSERT(t_mp->sbm_peer == s_mp);

        ASSERT(t_mp->sbm_flags & DR_MFLAG_RELDONE);
        ASSERT(t_mp->sbm_del_mlist);

        PR_MEM("%s: target %s: deleted memlist:\n",
            f, t_mp->sbm_cm.sbdev_path);
        PR_MEMLIST_DUMP(t_mp->sbm_del_mlist);
    } else {
        /* there is no target unit */
        t_mp = NULL;
    }

    /*
     * Verify the memory really did successfully detach
     * by checking for its non-existence in phys_install.
     */
    rv = 0;
    memlist_read_lock();
    if (s_mp->sbm_flags & DR_MFLAG_RELDONE) {
        x_mp = s_mp;
        rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
    }
    if (rv == 0 && t_mp && (t_mp->sbm_flags & DR_MFLAG_RELDONE)) {
        x_mp = t_mp;
        rv = memlist_intersect(phys_install, x_mp->sbm_del_mlist);
    }
    memlist_read_unlock();

    if (rv) {
        /* error: memlist still in phys_install */
        DR_DEV_INTERNAL_ERROR(&x_mp->sbm_cm);
    }

    /*
     * clean mem unit state and bail out if an error has been recorded.
     */
    rv = 0;
    if (s_mp->sbm_cm.sbdev_error) {
        PR_MEM("%s: %s flags=%x", f,
            s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
        DR_DEV_CLR_UNREFERENCED(&s_mp->sbm_cm);
        DR_DEV_CLR_RELEASED(&s_mp->sbm_cm);
        dr_device_transition(&s_mp->sbm_cm, DR_STATE_CONFIGURED);
        rv = -1;
    }
    if (t_mp != NULL && t_mp->sbm_cm.sbdev_error != NULL) {
        PR_MEM("%s: %s flags=%x", f,
            s_mp->sbm_cm.sbdev_path, s_mp->sbm_flags);
        DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm);
        DR_DEV_CLR_RELEASED(&t_mp->sbm_cm);
        dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED);
        rv = -1;
    }
    if (rv)
        goto cleanup;

    s_old_basepa = _ptob64(s_mp->sbm_basepfn);
    err = drmach_mem_get_base_physaddr(s_mp->sbm_cm.sbdev_id,
        &s_new_basepa);
    ASSERT(err == NULL);

    PR_MEM("%s:s_old_basepa: 0x%llx\n", f, s_old_basepa);
    PR_MEM("%s:s_new_basepa: 0x%llx\n", f, s_new_basepa);

    if (t_mp != NULL) {
        struct memlist *s_copy_mlist;

        t_old_basepa = _ptob64(t_mp->sbm_basepfn);
        err = drmach_mem_get_base_physaddr(t_mp->sbm_cm.sbdev_id,
            &t_new_basepa);
        ASSERT(err == NULL);

        PR_MEM("%s:t_old_basepa: 0x%llx\n", f, t_old_basepa);
        PR_MEM("%s:t_new_basepa: 0x%llx\n", f, t_new_basepa);

        /*
         * Construct copy list with original source addresses.
         * Used to add back excess target mem.
         */
        s_copy_mlist = memlist_dup(s_mp->sbm_mlist);
        for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) {
            s_copy_mlist = memlist_del_span(s_copy_mlist,
                ml->address, ml->size);
        }

        PR_MEM("%s: source copy list:\n:", f);
        PR_MEMLIST_DUMP(s_copy_mlist);

        /*
         * We had to swap mem-units, so update
         * memlists accordingly with new base
         * addresses.
         */
        for (ml = t_mp->sbm_mlist; ml; ml = ml->next) {
            ml->address -= t_old_basepa;
            ml->address += t_new_basepa;
        }

        /*
         * There is no need to explicitly rename the target delete
         * memlist, because sbm_del_mlist and sbm_mlist always
         * point to the same memlist for a copy/rename operation.
         */
        ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist);

        PR_MEM("%s: renamed target memlist and delete memlist:\n", f);
        PR_MEMLIST_DUMP(t_mp->sbm_mlist);

        for (ml = s_mp->sbm_mlist; ml; ml = ml->next) {
            ml->address -= s_old_basepa;
            ml->address += s_new_basepa;
        }

        PR_MEM("%s: renamed source memlist:\n", f);
        PR_MEMLIST_DUMP(s_mp->sbm_mlist);

        /*
         * Keep track of dynamically added segments
         * since they cannot be split if we need to delete
         * excess source memory later for this board.
         */
        if (t_mp->sbm_dyn_segs)
            memlist_delete(t_mp->sbm_dyn_segs);
        t_mp->sbm_dyn_segs = s_mp->sbm_dyn_segs;
        s_mp->sbm_dyn_segs = NULL;

        /*
         * If the target memory range with the new target base PA
         * extends beyond the usable slice, prevent any "target excess"
         * from being added back after this copy/rename and
         * calculate the new smaller size of the target board
         * to be set as part of target cleanup.  The base + npages
         * must only include the range of memory up to the end of
         * this slice.  This will only be used after a category 4
         * large-to-small target type copy/rename - see comments
         * in dr_select_mem_target.
         */
        if (((t_new_basepa & sm) + _ptob64(t_mp->sbm_npages)) > sz) {
            t_new_smallsize = sz - (t_new_basepa & sm);
        }

        if (s_mp->sbm_flags & DR_MFLAG_MEMRESIZE &&
            t_new_smallsize == 0) {
            struct memlist *t_excess_mlist;

            /*
             * Add back excess target memory.
             * Subtract out the portion of the target memory
             * node that was taken over by the source memory
             * node.
             */
12910Sstevel@tonic-gate */ 12920Sstevel@tonic-gate t_excess_mlist = memlist_dup(t_mp->sbm_mlist); 12930Sstevel@tonic-gate for (ml = s_copy_mlist; ml; ml = ml->next) { 12940Sstevel@tonic-gate t_excess_mlist = 12950Sstevel@tonic-gate memlist_del_span(t_excess_mlist, 12960Sstevel@tonic-gate ml->address, ml->size); 12970Sstevel@tonic-gate } 12980Sstevel@tonic-gate 12990Sstevel@tonic-gate /* 13000Sstevel@tonic-gate * Update dynamically added segs 13010Sstevel@tonic-gate */ 13020Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 13030Sstevel@tonic-gate t_mp->sbm_dyn_segs = 13040Sstevel@tonic-gate memlist_del_span(t_mp->sbm_dyn_segs, 13050Sstevel@tonic-gate ml->address, ml->size); 13060Sstevel@tonic-gate } 13070Sstevel@tonic-gate for (ml = t_excess_mlist; ml; ml = ml->next) { 13080Sstevel@tonic-gate t_mp->sbm_dyn_segs = 13090Sstevel@tonic-gate memlist_cat_span(t_mp->sbm_dyn_segs, 13100Sstevel@tonic-gate ml->address, ml->size); 13110Sstevel@tonic-gate } 13120Sstevel@tonic-gate PR_MEM("%s: %s: updated dynamic seg list:\n", 13130Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 13140Sstevel@tonic-gate PR_MEMLIST_DUMP(t_mp->sbm_dyn_segs); 13150Sstevel@tonic-gate 13160Sstevel@tonic-gate PR_MEM("%s: adding back remaining portion" 13170Sstevel@tonic-gate " of %s, memlist:\n", 13180Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 13190Sstevel@tonic-gate PR_MEMLIST_DUMP(t_excess_mlist); 13200Sstevel@tonic-gate 13210Sstevel@tonic-gate dr_add_memory_spans(s_mp, t_excess_mlist); 13220Sstevel@tonic-gate memlist_delete(t_excess_mlist); 13230Sstevel@tonic-gate } 13240Sstevel@tonic-gate memlist_delete(s_copy_mlist); 13250Sstevel@tonic-gate 13260Sstevel@tonic-gate #ifdef DEBUG 13270Sstevel@tonic-gate /* 13280Sstevel@tonic-gate * Renaming s_mp->sbm_del_mlist is not necessary. This 13290Sstevel@tonic-gate * list is not used beyond this point, and in fact, is 13300Sstevel@tonic-gate * disposed of at the end of this function. 13310Sstevel@tonic-gate */ 13320Sstevel@tonic-gate for (ml = s_mp->sbm_del_mlist; ml; ml = ml->next) { 13330Sstevel@tonic-gate ml->address -= s_old_basepa; 13340Sstevel@tonic-gate ml->address += s_new_basepa; 13350Sstevel@tonic-gate } 13360Sstevel@tonic-gate 13370Sstevel@tonic-gate PR_MEM("%s: renamed source delete memlist", f); 13380Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_del_mlist); 13390Sstevel@tonic-gate #endif 13400Sstevel@tonic-gate 13410Sstevel@tonic-gate } 13420Sstevel@tonic-gate 13430Sstevel@tonic-gate if (t_mp != NULL) { 13440Sstevel@tonic-gate /* delete target's entire address space */ 13450Sstevel@tonic-gate err = drmach_mem_del_span( 13460Sstevel@tonic-gate t_mp->sbm_cm.sbdev_id, t_old_basepa & ~ sm, sz); 13470Sstevel@tonic-gate if (err) 13480Sstevel@tonic-gate DRERR_SET_C(&t_mp->sbm_cm.sbdev_error, &err); 13490Sstevel@tonic-gate ASSERT(err == NULL); 13500Sstevel@tonic-gate 13510Sstevel@tonic-gate /* 13520Sstevel@tonic-gate * After the copy/rename, the original address space 13530Sstevel@tonic-gate * for the source board (which is now located on the 13540Sstevel@tonic-gate * target board) may now have some excess to be deleted. 13550Sstevel@tonic-gate * The amount is calculated by masking the slice 13560Sstevel@tonic-gate * info and keeping the slice offset from t_new_basepa. 
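 *
 * Illustration only (sizes assumed): with a 16GB slice, sm is
 * 0x3ffffffff.  If s_old_basepa is 0x400000000 and t_new_basepa is
 * 0x420000000, then s_old_basepa & ~sm == 0x400000000 (start of the
 * old source slice) and t_new_basepa & sm == 0x20000000, so the first
 * 512MB of that slice is the excess deleted below.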
13570Sstevel@tonic-gate */ 13580Sstevel@tonic-gate err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id, 13590Sstevel@tonic-gate s_old_basepa & ~ sm, t_new_basepa & sm); 13600Sstevel@tonic-gate if (err) 13610Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 13620Sstevel@tonic-gate ASSERT(err == NULL); 13630Sstevel@tonic-gate 13640Sstevel@tonic-gate } else { 13650Sstevel@tonic-gate /* delete board's entire address space */ 13660Sstevel@tonic-gate err = drmach_mem_del_span(s_mp->sbm_cm.sbdev_id, 13670Sstevel@tonic-gate s_old_basepa & ~ sm, sz); 13680Sstevel@tonic-gate if (err) 13690Sstevel@tonic-gate DRERR_SET_C(&s_mp->sbm_cm.sbdev_error, &err); 13700Sstevel@tonic-gate ASSERT(err == NULL); 13710Sstevel@tonic-gate } 13720Sstevel@tonic-gate 13730Sstevel@tonic-gate cleanup: 13740Sstevel@tonic-gate /* clean up target mem unit */ 13750Sstevel@tonic-gate if (t_mp != NULL) { 13760Sstevel@tonic-gate memlist_delete(t_mp->sbm_del_mlist); 13770Sstevel@tonic-gate /* no need to delete sbm_mlist, it shares sbm_del_mlist */ 13780Sstevel@tonic-gate 13790Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 13800Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 13810Sstevel@tonic-gate t_mp->sbm_peer = NULL; 13820Sstevel@tonic-gate t_mp->sbm_flags = 0; 13830Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 13840Sstevel@tonic-gate dr_init_mem_unit_data(t_mp); 13850Sstevel@tonic-gate 13860Sstevel@tonic-gate /* reduce target size if new PAs go past end of usable slice */ 13870Sstevel@tonic-gate if (t_new_smallsize > 0) { 13880Sstevel@tonic-gate t_mp->sbm_npages = _b64top(t_new_smallsize); 13890Sstevel@tonic-gate PR_MEM("%s: target new size 0x%llx bytes\n", 13900Sstevel@tonic-gate f, t_new_smallsize); 13910Sstevel@tonic-gate } 13920Sstevel@tonic-gate } 13930Sstevel@tonic-gate if (t_mp != NULL && t_mp->sbm_cm.sbdev_error == NULL) { 13940Sstevel@tonic-gate /* 13950Sstevel@tonic-gate * now that copy/rename has completed, undo this 13960Sstevel@tonic-gate * work that was done in dr_release_mem_done. 13970Sstevel@tonic-gate */ 13980Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 13990Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 14000Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_CONFIGURED); 14010Sstevel@tonic-gate } 14020Sstevel@tonic-gate 14030Sstevel@tonic-gate /* 14040Sstevel@tonic-gate * clean up (source) board's mem unit structure. 14050Sstevel@tonic-gate * NOTE: sbm_mlist is retained if no error has been record (in other 14060Sstevel@tonic-gate * words, when s_mp->sbm_cm.sbdev_error is NULL). This memlist is 14070Sstevel@tonic-gate * referred to elsewhere as the cached memlist. The cached memlist 14080Sstevel@tonic-gate * is used to re-attach (configure back in) this memunit from the 14090Sstevel@tonic-gate * unconfigured state. The memlist is retained because it may 14100Sstevel@tonic-gate * represent bad pages that were detected while the memory was 14110Sstevel@tonic-gate * configured into the OS. The OS deletes bad pages from phys_install. 14120Sstevel@tonic-gate * Those deletes, if any, will be represented in the cached mlist. 
14130Sstevel@tonic-gate */ 14140Sstevel@tonic-gate if (s_mp->sbm_del_mlist && s_mp->sbm_del_mlist != s_mp->sbm_mlist) 14150Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 14160Sstevel@tonic-gate 14170Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error && s_mp->sbm_mlist) { 14180Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 14190Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 14200Sstevel@tonic-gate } 14210Sstevel@tonic-gate 14220Sstevel@tonic-gate if (s_mp->sbm_dyn_segs != NULL && s_mp->sbm_cm.sbdev_error == 0) { 14230Sstevel@tonic-gate memlist_delete(s_mp->sbm_dyn_segs); 14240Sstevel@tonic-gate s_mp->sbm_dyn_segs = NULL; 14250Sstevel@tonic-gate } 14260Sstevel@tonic-gate 14270Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 14280Sstevel@tonic-gate s_mp->sbm_peer = NULL; 14290Sstevel@tonic-gate s_mp->sbm_flags = 0; 14300Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 14310Sstevel@tonic-gate dr_init_mem_unit_data(s_mp); 14320Sstevel@tonic-gate 14330Sstevel@tonic-gate PR_MEM("%s: cached memlist for %s:", f, s_mp->sbm_cm.sbdev_path); 14340Sstevel@tonic-gate PR_MEMLIST_DUMP(s_mp->sbm_mlist); 14350Sstevel@tonic-gate 14360Sstevel@tonic-gate return (0); 14370Sstevel@tonic-gate } 14380Sstevel@tonic-gate 14390Sstevel@tonic-gate /* 14400Sstevel@tonic-gate * Successful return from this function will have the memory 14410Sstevel@tonic-gate * handle in bp->b_dev[..mem-unit...].sbm_memhandle allocated 14420Sstevel@tonic-gate * and waiting. This routine's job is to select the memory that 14430Sstevel@tonic-gate * actually has to be released (detached) which may not necessarily 14440Sstevel@tonic-gate * be the same memory node that came in in devlist[], 14450Sstevel@tonic-gate * i.e. a copy-rename is needed. 14460Sstevel@tonic-gate */ 14470Sstevel@tonic-gate int 14480Sstevel@tonic-gate dr_pre_release_mem(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum) 14490Sstevel@tonic-gate { 14500Sstevel@tonic-gate int d; 14510Sstevel@tonic-gate int err_flag = 0; 14520Sstevel@tonic-gate static fn_t f = "dr_pre_release_mem"; 14530Sstevel@tonic-gate 14540Sstevel@tonic-gate PR_MEM("%s...\n", f); 14550Sstevel@tonic-gate 14560Sstevel@tonic-gate for (d = 0; d < devnum; d++) { 14570Sstevel@tonic-gate dr_mem_unit_t *mp = (dr_mem_unit_t *)devlist[d]; 14580Sstevel@tonic-gate int rv; 14590Sstevel@tonic-gate memquery_t mq; 14600Sstevel@tonic-gate struct memlist *ml; 14610Sstevel@tonic-gate 14620Sstevel@tonic-gate if (mp->sbm_cm.sbdev_error) { 14630Sstevel@tonic-gate err_flag = 1; 14640Sstevel@tonic-gate continue; 14650Sstevel@tonic-gate } else if (!kcage_on) { 14660Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_KCAGE_OFF); 14670Sstevel@tonic-gate err_flag = 1; 14680Sstevel@tonic-gate continue; 14690Sstevel@tonic-gate } 14700Sstevel@tonic-gate 14710Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_RESERVED) { 14720Sstevel@tonic-gate /* 14730Sstevel@tonic-gate * Board is currently involved in a delete 14740Sstevel@tonic-gate * memory operation. Can't detach this guy until 14750Sstevel@tonic-gate * that operation completes. 14760Sstevel@tonic-gate */ 14770Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, ESBD_INVAL); 14780Sstevel@tonic-gate err_flag = 1; 14790Sstevel@tonic-gate break; 14800Sstevel@tonic-gate } 14810Sstevel@tonic-gate 14820Sstevel@tonic-gate /* 14830Sstevel@tonic-gate * Check whether the detaching memory requires a 14840Sstevel@tonic-gate * copy-rename. 
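 *
 * A minimal, hypothetical restatement of that test follows; the helper
 * name is invented for illustration and the block is kept under
 * "#if 0" so it is never compiled.
 */
#if 0	/* illustrative sketch only; not part of this driver */
static int
dr_span_needs_copy_rename(pfn_t basepfn, pgcnt_t npages)
{
	memquery_t mq;

	/* query the span; any nonrelocatable pages force a copy-rename */
	if (kphysm_del_span_query(basepfn, npages, &mq) != KPHYSM_OK)
		return (-1);
	return (mq.nonrelocatable != 0);
}
#endif
/*
 * The code below performs the same query inline and also uses the
 * result to decide whether a quiesce (suspend) is required.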
14850Sstevel@tonic-gate */ 14860Sstevel@tonic-gate ASSERT(mp->sbm_npages != 0); 14870Sstevel@tonic-gate rv = kphysm_del_span_query( 14880Sstevel@tonic-gate mp->sbm_basepfn, mp->sbm_npages, &mq); 14890Sstevel@tonic-gate if (rv != KPHYSM_OK) { 14900Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 14910Sstevel@tonic-gate err_flag = 1; 14920Sstevel@tonic-gate break; 14930Sstevel@tonic-gate } 14940Sstevel@tonic-gate 14950Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 14960Sstevel@tonic-gate if (!(dr_cmd_flags(hp) & 14970Sstevel@tonic-gate (SBD_FLAG_FORCE | SBD_FLAG_QUIESCE_OKAY))) { 14980Sstevel@tonic-gate /* caller wasn't prompted for a suspend */ 14990Sstevel@tonic-gate dr_dev_err(CE_WARN, &mp->sbm_cm, 15000Sstevel@tonic-gate ESBD_QUIESCE_REQD); 15010Sstevel@tonic-gate err_flag = 1; 15020Sstevel@tonic-gate break; 15030Sstevel@tonic-gate } 15040Sstevel@tonic-gate } 15050Sstevel@tonic-gate 15060Sstevel@tonic-gate /* flags should be clean at this time */ 15070Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 15080Sstevel@tonic-gate 15090Sstevel@tonic-gate ASSERT(mp->sbm_mlist == NULL); /* should be null */ 15100Sstevel@tonic-gate ASSERT(mp->sbm_del_mlist == NULL); /* should be null */ 15110Sstevel@tonic-gate if (mp->sbm_mlist != NULL) { 15120Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 15130Sstevel@tonic-gate mp->sbm_mlist = NULL; 15140Sstevel@tonic-gate } 15150Sstevel@tonic-gate 15160Sstevel@tonic-gate ml = dr_get_memlist(mp); 15170Sstevel@tonic-gate if (ml == NULL) { 15180Sstevel@tonic-gate err_flag = 1; 15190Sstevel@tonic-gate PR_MEM("%s: no memlist found for %s\n", 15200Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 15210Sstevel@tonic-gate continue; 15220Sstevel@tonic-gate } 15230Sstevel@tonic-gate 15240Sstevel@tonic-gate /* allocate a kphysm handle */ 15250Sstevel@tonic-gate rv = kphysm_del_gethandle(&mp->sbm_memhandle); 15260Sstevel@tonic-gate if (rv != KPHYSM_OK) { 15270Sstevel@tonic-gate memlist_delete(ml); 15280Sstevel@tonic-gate 15290Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&mp->sbm_cm); 15300Sstevel@tonic-gate err_flag = 1; 15310Sstevel@tonic-gate break; 15320Sstevel@tonic-gate } 15330Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RELOWNER; 15340Sstevel@tonic-gate 15350Sstevel@tonic-gate if ((mq.nonrelocatable != 0) || 15360Sstevel@tonic-gate dr_reserve_mem_spans(&mp->sbm_memhandle, ml)) { 15370Sstevel@tonic-gate /* 15380Sstevel@tonic-gate * Either the detaching memory node contains 15390Sstevel@tonic-gate * non-reloc memory or we failed to reserve the 15400Sstevel@tonic-gate * detaching memory node (which did _not_ have 15410Sstevel@tonic-gate * any non-reloc memory, i.e. some non-reloc mem 15420Sstevel@tonic-gate * got onboard). 15430Sstevel@tonic-gate */ 15440Sstevel@tonic-gate 15450Sstevel@tonic-gate if (dr_select_mem_target(hp, mp, ml)) { 15460Sstevel@tonic-gate int rv; 15470Sstevel@tonic-gate 15480Sstevel@tonic-gate /* 15490Sstevel@tonic-gate * We had no luck locating a target 15500Sstevel@tonic-gate * memory node to be the recipient of 15510Sstevel@tonic-gate * the non-reloc memory on the node 15520Sstevel@tonic-gate * we're trying to detach. 15530Sstevel@tonic-gate * Clean up be disposing the mem handle 15540Sstevel@tonic-gate * and the mem list. 
15550Sstevel@tonic-gate */ 15560Sstevel@tonic-gate rv = kphysm_del_release(mp->sbm_memhandle); 15570Sstevel@tonic-gate if (rv != KPHYSM_OK) { 15580Sstevel@tonic-gate /* 15590Sstevel@tonic-gate * can do nothing but complain 15600Sstevel@tonic-gate * and hope helpful for debug 15610Sstevel@tonic-gate */ 15620Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected" 15630Sstevel@tonic-gate " kphysm_del_release return" 15640Sstevel@tonic-gate " value %d", 15650Sstevel@tonic-gate f, rv); 15660Sstevel@tonic-gate } 15670Sstevel@tonic-gate mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 15680Sstevel@tonic-gate 15690Sstevel@tonic-gate memlist_delete(ml); 15700Sstevel@tonic-gate 15710Sstevel@tonic-gate /* make sure sbm_flags is clean */ 15720Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 15730Sstevel@tonic-gate 15740Sstevel@tonic-gate dr_dev_err(CE_WARN, 15750Sstevel@tonic-gate &mp->sbm_cm, ESBD_NO_TARGET); 15760Sstevel@tonic-gate 15770Sstevel@tonic-gate err_flag = 1; 15780Sstevel@tonic-gate break; 15790Sstevel@tonic-gate } 15800Sstevel@tonic-gate 15810Sstevel@tonic-gate /* 15820Sstevel@tonic-gate * ml is not memlist_delete'd here because 15830Sstevel@tonic-gate * it has been assigned to mp->sbm_mlist 15840Sstevel@tonic-gate * by dr_select_mem_target. 15850Sstevel@tonic-gate */ 15860Sstevel@tonic-gate } else { 15870Sstevel@tonic-gate /* no target needed to detach this board */ 15880Sstevel@tonic-gate mp->sbm_flags |= DR_MFLAG_RESERVED; 15890Sstevel@tonic-gate mp->sbm_peer = NULL; 15900Sstevel@tonic-gate mp->sbm_del_mlist = ml; 15910Sstevel@tonic-gate mp->sbm_mlist = ml; 15920Sstevel@tonic-gate mp->sbm_cm.sbdev_busy = 1; 15930Sstevel@tonic-gate } 15940Sstevel@tonic-gate #ifdef DEBUG 15950Sstevel@tonic-gate ASSERT(mp->sbm_mlist != NULL); 15960Sstevel@tonic-gate 15970Sstevel@tonic-gate if (mp->sbm_flags & DR_MFLAG_SOURCE) { 15980Sstevel@tonic-gate PR_MEM("%s: release of %s requires copy/rename;" 15990Sstevel@tonic-gate " selected target board %s\n", 16000Sstevel@tonic-gate f, 16010Sstevel@tonic-gate mp->sbm_cm.sbdev_path, 16020Sstevel@tonic-gate mp->sbm_peer->sbm_cm.sbdev_path); 16030Sstevel@tonic-gate } else { 16040Sstevel@tonic-gate PR_MEM("%s: copy/rename not required to release %s\n", 16050Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 16060Sstevel@tonic-gate } 16070Sstevel@tonic-gate 16080Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RELOWNER); 16090Sstevel@tonic-gate ASSERT(mp->sbm_flags & DR_MFLAG_RESERVED); 16100Sstevel@tonic-gate #endif 16110Sstevel@tonic-gate } 16120Sstevel@tonic-gate 16130Sstevel@tonic-gate return (err_flag ? -1 : 0); 16140Sstevel@tonic-gate } 16150Sstevel@tonic-gate 16160Sstevel@tonic-gate void 16170Sstevel@tonic-gate dr_release_mem_done(dr_common_unit_t *cp) 16180Sstevel@tonic-gate { 16190Sstevel@tonic-gate dr_mem_unit_t *s_mp = (dr_mem_unit_t *)cp; 16200Sstevel@tonic-gate dr_mem_unit_t *t_mp, *mp; 16210Sstevel@tonic-gate int rv; 16220Sstevel@tonic-gate static fn_t f = "dr_release_mem_done"; 16230Sstevel@tonic-gate 16240Sstevel@tonic-gate /* 16250Sstevel@tonic-gate * This unit will be flagged with DR_MFLAG_SOURCE, if it 16260Sstevel@tonic-gate * has a target unit. 
16270Sstevel@tonic-gate */ 16280Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 16290Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 16300Sstevel@tonic-gate ASSERT(t_mp != NULL); 16310Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 16320Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_TARGET); 16330Sstevel@tonic-gate ASSERT(t_mp->sbm_flags & DR_MFLAG_RESERVED); 16340Sstevel@tonic-gate } else { 16350Sstevel@tonic-gate /* this is no target unit */ 16360Sstevel@tonic-gate t_mp = NULL; 16370Sstevel@tonic-gate } 16380Sstevel@tonic-gate 16390Sstevel@tonic-gate /* free delete handle */ 16400Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_RELOWNER); 16410Sstevel@tonic-gate ASSERT(s_mp->sbm_flags & DR_MFLAG_RESERVED); 16420Sstevel@tonic-gate rv = kphysm_del_release(s_mp->sbm_memhandle); 16430Sstevel@tonic-gate if (rv != KPHYSM_OK) { 16440Sstevel@tonic-gate /* 16450Sstevel@tonic-gate * can do nothing but complain 16460Sstevel@tonic-gate * and hope helpful for debug 16470Sstevel@tonic-gate */ 16480Sstevel@tonic-gate cmn_err(CE_WARN, "%s: unexpected kphysm_del_release" 16490Sstevel@tonic-gate " return value %d", f, rv); 16500Sstevel@tonic-gate } 16510Sstevel@tonic-gate s_mp->sbm_flags &= ~DR_MFLAG_RELOWNER; 16520Sstevel@tonic-gate 16530Sstevel@tonic-gate /* 16540Sstevel@tonic-gate * If an error was encountered during release, clean up 16550Sstevel@tonic-gate * the source (and target, if present) unit data. 16560Sstevel@tonic-gate */ 16570Sstevel@tonic-gate /* XXX Can we know that sbdev_error was encountered during release? */ 16580Sstevel@tonic-gate if (s_mp->sbm_cm.sbdev_error != NULL) { 16590Sstevel@tonic-gate PR_MEM("%s: %s: error %d noted\n", 16600Sstevel@tonic-gate f, 16610Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 16620Sstevel@tonic-gate s_mp->sbm_cm.sbdev_error->e_code); 16630Sstevel@tonic-gate 16640Sstevel@tonic-gate if (t_mp != NULL) { 16650Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 16660Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 16670Sstevel@tonic-gate 16680Sstevel@tonic-gate if (t_mp->sbm_mlist != NULL) { 16690Sstevel@tonic-gate memlist_delete(t_mp->sbm_mlist); 16700Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 16710Sstevel@tonic-gate } 16720Sstevel@tonic-gate 16730Sstevel@tonic-gate t_mp->sbm_peer = NULL; 16740Sstevel@tonic-gate t_mp->sbm_flags = 0; 16750Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 16760Sstevel@tonic-gate } 16770Sstevel@tonic-gate 16780Sstevel@tonic-gate if (s_mp->sbm_del_mlist != s_mp->sbm_mlist) 16790Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 16800Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 16810Sstevel@tonic-gate 16820Sstevel@tonic-gate if (s_mp->sbm_mlist != NULL) { 16830Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 16840Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 16850Sstevel@tonic-gate } 16860Sstevel@tonic-gate 16870Sstevel@tonic-gate s_mp->sbm_peer = NULL; 16880Sstevel@tonic-gate s_mp->sbm_flags = 0; 16890Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 16900Sstevel@tonic-gate 16910Sstevel@tonic-gate /* bail out */ 16920Sstevel@tonic-gate return; 16930Sstevel@tonic-gate } 16940Sstevel@tonic-gate 16950Sstevel@tonic-gate DR_DEV_SET_RELEASED(&s_mp->sbm_cm); 16960Sstevel@tonic-gate dr_device_transition(&s_mp->sbm_cm, DR_STATE_RELEASE); 16970Sstevel@tonic-gate 16980Sstevel@tonic-gate if (t_mp != NULL) { 16990Sstevel@tonic-gate /* 17000Sstevel@tonic-gate * the kphysm delete operation that drained the source 17010Sstevel@tonic-gate * board also drained this target board. 
Since the source 17020Sstevel@tonic-gate * board drain is now known to have succeeded, we know this 17030Sstevel@tonic-gate * target board is drained too. 17040Sstevel@tonic-gate * 17050Sstevel@tonic-gate * because DR_DEV_SET_RELEASED and dr_device_transition 17060Sstevel@tonic-gate * are done here, dr_release_dev_done should not 17070Sstevel@tonic-gate * fail. 17080Sstevel@tonic-gate */ 17090Sstevel@tonic-gate DR_DEV_SET_RELEASED(&t_mp->sbm_cm); 17100Sstevel@tonic-gate dr_device_transition(&t_mp->sbm_cm, DR_STATE_RELEASE); 17110Sstevel@tonic-gate 17120Sstevel@tonic-gate /* 17130Sstevel@tonic-gate * NOTE: do not transition target's board state, 17140Sstevel@tonic-gate * even if the mem-unit was the last configured 17150Sstevel@tonic-gate * unit of the board. When copy/rename completes 17160Sstevel@tonic-gate * this mem-unit will be transitioned back to 17170Sstevel@tonic-gate * the configured state. In the meantime, the 17180Sstevel@tonic-gate * board's state must remain as is. 17190Sstevel@tonic-gate */ 17200Sstevel@tonic-gate } 17210Sstevel@tonic-gate 17220Sstevel@tonic-gate /* if board(s) had deleted memory, verify it is gone */ 17230Sstevel@tonic-gate rv = 0; 17240Sstevel@tonic-gate memlist_read_lock(); 17250Sstevel@tonic-gate if (s_mp->sbm_del_mlist != NULL) { 17260Sstevel@tonic-gate mp = s_mp; 17270Sstevel@tonic-gate rv = memlist_intersect(phys_install, mp->sbm_del_mlist); 17280Sstevel@tonic-gate } 17290Sstevel@tonic-gate if (rv == 0 && t_mp && t_mp->sbm_del_mlist != NULL) { 17300Sstevel@tonic-gate mp = t_mp; 17310Sstevel@tonic-gate rv = memlist_intersect(phys_install, mp->sbm_del_mlist); 17320Sstevel@tonic-gate } 17330Sstevel@tonic-gate memlist_read_unlock(); 17340Sstevel@tonic-gate if (rv) { 17350Sstevel@tonic-gate cmn_err(CE_WARN, "%s: %smem-unit (%d.%d): " 17360Sstevel@tonic-gate "deleted memory still found in phys_install", 17370Sstevel@tonic-gate f, 17380Sstevel@tonic-gate (mp == t_mp ? 
"target " : ""), 17390Sstevel@tonic-gate mp->sbm_cm.sbdev_bp->b_num, 17400Sstevel@tonic-gate mp->sbm_cm.sbdev_unum); 17410Sstevel@tonic-gate 17420Sstevel@tonic-gate DR_DEV_INTERNAL_ERROR(&s_mp->sbm_cm); 17430Sstevel@tonic-gate return; 17440Sstevel@tonic-gate } 17450Sstevel@tonic-gate 17460Sstevel@tonic-gate s_mp->sbm_flags |= DR_MFLAG_RELDONE; 17470Sstevel@tonic-gate if (t_mp != NULL) 17480Sstevel@tonic-gate t_mp->sbm_flags |= DR_MFLAG_RELDONE; 17490Sstevel@tonic-gate 17500Sstevel@tonic-gate /* this should not fail */ 17510Sstevel@tonic-gate if (dr_release_dev_done(&s_mp->sbm_cm) != 0) { 17520Sstevel@tonic-gate /* catch this in debug kernels */ 17530Sstevel@tonic-gate ASSERT(0); 17540Sstevel@tonic-gate return; 17550Sstevel@tonic-gate } 17560Sstevel@tonic-gate 17570Sstevel@tonic-gate PR_MEM("%s: marking %s release DONE\n", 17580Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 17590Sstevel@tonic-gate 17600Sstevel@tonic-gate s_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED; 17610Sstevel@tonic-gate 17620Sstevel@tonic-gate if (t_mp != NULL) { 17630Sstevel@tonic-gate /* should not fail */ 17640Sstevel@tonic-gate rv = dr_release_dev_done(&t_mp->sbm_cm); 17650Sstevel@tonic-gate if (rv != 0) { 17660Sstevel@tonic-gate /* catch this in debug kernels */ 17670Sstevel@tonic-gate ASSERT(0); 17680Sstevel@tonic-gate return; 17690Sstevel@tonic-gate } 17700Sstevel@tonic-gate 17710Sstevel@tonic-gate PR_MEM("%s: marking %s release DONE\n", 17720Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 17730Sstevel@tonic-gate 17740Sstevel@tonic-gate t_mp->sbm_cm.sbdev_ostate = SBD_STAT_UNCONFIGURED; 17750Sstevel@tonic-gate } 17760Sstevel@tonic-gate } 17770Sstevel@tonic-gate 17780Sstevel@tonic-gate /*ARGSUSED*/ 17790Sstevel@tonic-gate int 17800Sstevel@tonic-gate dr_disconnect_mem(dr_mem_unit_t *mp) 17810Sstevel@tonic-gate { 17820Sstevel@tonic-gate static fn_t f = "dr_disconnect_mem"; 17830Sstevel@tonic-gate update_membounds_t umb; 17840Sstevel@tonic-gate 17850Sstevel@tonic-gate #ifdef DEBUG 17860Sstevel@tonic-gate int state = mp->sbm_cm.sbdev_state; 17870Sstevel@tonic-gate ASSERT(state == DR_STATE_CONNECTED || 17880Sstevel@tonic-gate state == DR_STATE_UNCONFIGURED); 17890Sstevel@tonic-gate #endif 17900Sstevel@tonic-gate 17910Sstevel@tonic-gate PR_MEM("%s...\n", f); 17920Sstevel@tonic-gate 17930Sstevel@tonic-gate if (mp->sbm_del_mlist && mp->sbm_del_mlist != mp->sbm_mlist) 17940Sstevel@tonic-gate memlist_delete(mp->sbm_del_mlist); 17950Sstevel@tonic-gate mp->sbm_del_mlist = NULL; 17960Sstevel@tonic-gate 17970Sstevel@tonic-gate if (mp->sbm_mlist) { 17980Sstevel@tonic-gate memlist_delete(mp->sbm_mlist); 17990Sstevel@tonic-gate mp->sbm_mlist = NULL; 18000Sstevel@tonic-gate } 18010Sstevel@tonic-gate 18020Sstevel@tonic-gate /* 18030Sstevel@tonic-gate * Remove memory from lgroup 18040Sstevel@tonic-gate * For now, only board info is required. 
18050Sstevel@tonic-gate */ 18060Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 18070Sstevel@tonic-gate umb.u_base = (uint64_t)-1; 18080Sstevel@tonic-gate umb.u_len = (uint64_t)-1; 18090Sstevel@tonic-gate 18100Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_DEL, (uintptr_t)&umb); 18110Sstevel@tonic-gate 18120Sstevel@tonic-gate return (0); 18130Sstevel@tonic-gate } 18140Sstevel@tonic-gate 18150Sstevel@tonic-gate int 18160Sstevel@tonic-gate dr_cancel_mem(dr_mem_unit_t *s_mp) 18170Sstevel@tonic-gate { 18180Sstevel@tonic-gate dr_mem_unit_t *t_mp; 18190Sstevel@tonic-gate dr_state_t state; 18200Sstevel@tonic-gate static fn_t f = "dr_cancel_mem"; 18210Sstevel@tonic-gate 18220Sstevel@tonic-gate state = s_mp->sbm_cm.sbdev_state; 18230Sstevel@tonic-gate 18240Sstevel@tonic-gate if (s_mp->sbm_flags & DR_MFLAG_TARGET) { 18250Sstevel@tonic-gate /* must cancel source board, not target board */ 18260Sstevel@tonic-gate /* TODO: set error */ 18270Sstevel@tonic-gate return (-1); 18280Sstevel@tonic-gate } else if (s_mp->sbm_flags & DR_MFLAG_SOURCE) { 18290Sstevel@tonic-gate t_mp = s_mp->sbm_peer; 18300Sstevel@tonic-gate ASSERT(t_mp != NULL); 18310Sstevel@tonic-gate ASSERT(t_mp->sbm_peer == s_mp); 18320Sstevel@tonic-gate 18330Sstevel@tonic-gate /* must always match the source board's state */ 18340Sstevel@tonic-gate /* TODO: is this assertion correct? */ 18350Sstevel@tonic-gate ASSERT(t_mp->sbm_cm.sbdev_state == state); 18360Sstevel@tonic-gate } else { 18370Sstevel@tonic-gate /* this is no target unit */ 18380Sstevel@tonic-gate t_mp = NULL; 18390Sstevel@tonic-gate } 18400Sstevel@tonic-gate 18410Sstevel@tonic-gate switch (state) { 18420Sstevel@tonic-gate case DR_STATE_UNREFERENCED: /* state set by dr_release_dev_done */ 18430Sstevel@tonic-gate ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0); 18440Sstevel@tonic-gate 18450Sstevel@tonic-gate if (t_mp != NULL && t_mp->sbm_del_mlist != NULL) { 18460Sstevel@tonic-gate PR_MEM("%s: undoing target %s memory delete\n", 18470Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 18480Sstevel@tonic-gate dr_add_memory_spans(t_mp, t_mp->sbm_del_mlist); 18490Sstevel@tonic-gate 18500Sstevel@tonic-gate DR_DEV_CLR_UNREFERENCED(&t_mp->sbm_cm); 18510Sstevel@tonic-gate } 18520Sstevel@tonic-gate 18530Sstevel@tonic-gate if (s_mp->sbm_del_mlist != NULL) { 18540Sstevel@tonic-gate PR_MEM("%s: undoing %s memory delete\n", 18550Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 18560Sstevel@tonic-gate 18570Sstevel@tonic-gate dr_add_memory_spans(s_mp, s_mp->sbm_del_mlist); 18580Sstevel@tonic-gate } 18590Sstevel@tonic-gate 18600Sstevel@tonic-gate /*FALLTHROUGH*/ 18610Sstevel@tonic-gate 18620Sstevel@tonic-gate /* TODO: should no longer be possible to see the release state here */ 18630Sstevel@tonic-gate case DR_STATE_RELEASE: /* state set by dr_release_mem_done */ 18640Sstevel@tonic-gate 18650Sstevel@tonic-gate ASSERT((s_mp->sbm_flags & DR_MFLAG_RELOWNER) == 0); 18660Sstevel@tonic-gate 18670Sstevel@tonic-gate if (t_mp != NULL) { 18680Sstevel@tonic-gate ASSERT(t_mp->sbm_del_mlist == t_mp->sbm_mlist); 18690Sstevel@tonic-gate t_mp->sbm_del_mlist = NULL; 18700Sstevel@tonic-gate 18710Sstevel@tonic-gate if (t_mp->sbm_mlist != NULL) { 18720Sstevel@tonic-gate memlist_delete(t_mp->sbm_mlist); 18730Sstevel@tonic-gate t_mp->sbm_mlist = NULL; 18740Sstevel@tonic-gate } 18750Sstevel@tonic-gate 18760Sstevel@tonic-gate t_mp->sbm_peer = NULL; 18770Sstevel@tonic-gate t_mp->sbm_flags = 0; 18780Sstevel@tonic-gate t_mp->sbm_cm.sbdev_busy = 0; 18790Sstevel@tonic-gate dr_init_mem_unit_data(t_mp); 
18800Sstevel@tonic-gate 18810Sstevel@tonic-gate DR_DEV_CLR_RELEASED(&t_mp->sbm_cm); 18820Sstevel@tonic-gate 18830Sstevel@tonic-gate dr_device_transition( 18840Sstevel@tonic-gate &t_mp->sbm_cm, DR_STATE_CONFIGURED); 18850Sstevel@tonic-gate } 18860Sstevel@tonic-gate 18870Sstevel@tonic-gate if (s_mp->sbm_del_mlist != s_mp->sbm_mlist) 18880Sstevel@tonic-gate memlist_delete(s_mp->sbm_del_mlist); 18890Sstevel@tonic-gate s_mp->sbm_del_mlist = NULL; 18900Sstevel@tonic-gate 18910Sstevel@tonic-gate if (s_mp->sbm_mlist != NULL) { 18920Sstevel@tonic-gate memlist_delete(s_mp->sbm_mlist); 18930Sstevel@tonic-gate s_mp->sbm_mlist = NULL; 18940Sstevel@tonic-gate } 18950Sstevel@tonic-gate 18960Sstevel@tonic-gate s_mp->sbm_peer = NULL; 18970Sstevel@tonic-gate s_mp->sbm_flags = 0; 18980Sstevel@tonic-gate s_mp->sbm_cm.sbdev_busy = 0; 18990Sstevel@tonic-gate dr_init_mem_unit_data(s_mp); 19000Sstevel@tonic-gate 19010Sstevel@tonic-gate return (0); 19020Sstevel@tonic-gate 19030Sstevel@tonic-gate default: 19040Sstevel@tonic-gate PR_MEM("%s: WARNING unexpected state (%d) for %s\n", 19050Sstevel@tonic-gate f, (int)state, s_mp->sbm_cm.sbdev_path); 19060Sstevel@tonic-gate 19070Sstevel@tonic-gate return (-1); 19080Sstevel@tonic-gate } 19090Sstevel@tonic-gate /*NOTREACHED*/ 19100Sstevel@tonic-gate } 19110Sstevel@tonic-gate 19120Sstevel@tonic-gate void 19130Sstevel@tonic-gate dr_init_mem_unit(dr_mem_unit_t *mp) 19140Sstevel@tonic-gate { 19150Sstevel@tonic-gate dr_state_t new_state; 19160Sstevel@tonic-gate 19170Sstevel@tonic-gate 19180Sstevel@tonic-gate if (DR_DEV_IS_ATTACHED(&mp->sbm_cm)) { 19190Sstevel@tonic-gate new_state = DR_STATE_CONFIGURED; 19200Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 19210Sstevel@tonic-gate } else if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) { 19220Sstevel@tonic-gate new_state = DR_STATE_CONNECTED; 19230Sstevel@tonic-gate mp->sbm_cm.sbdev_cond = SBD_COND_OK; 19240Sstevel@tonic-gate } else if (mp->sbm_cm.sbdev_id != (drmachid_t)0) { 19250Sstevel@tonic-gate new_state = DR_STATE_OCCUPIED; 19260Sstevel@tonic-gate } else { 19270Sstevel@tonic-gate new_state = DR_STATE_EMPTY; 19280Sstevel@tonic-gate } 19290Sstevel@tonic-gate 19300Sstevel@tonic-gate if (DR_DEV_IS_PRESENT(&mp->sbm_cm)) 19310Sstevel@tonic-gate dr_init_mem_unit_data(mp); 19320Sstevel@tonic-gate 19330Sstevel@tonic-gate /* delay transition until fully initialized */ 19340Sstevel@tonic-gate dr_device_transition(&mp->sbm_cm, new_state); 19350Sstevel@tonic-gate } 19360Sstevel@tonic-gate 19370Sstevel@tonic-gate static void 19380Sstevel@tonic-gate dr_init_mem_unit_data(dr_mem_unit_t *mp) 19390Sstevel@tonic-gate { 19400Sstevel@tonic-gate drmachid_t id = mp->sbm_cm.sbdev_id; 19410Sstevel@tonic-gate uint64_t bytes; 19420Sstevel@tonic-gate sbd_error_t *err; 19430Sstevel@tonic-gate static fn_t f = "dr_init_mem_unit_data"; 19440Sstevel@tonic-gate update_membounds_t umb; 19450Sstevel@tonic-gate 19460Sstevel@tonic-gate PR_MEM("%s...\n", f); 19470Sstevel@tonic-gate 19480Sstevel@tonic-gate /* a little sanity checking */ 19490Sstevel@tonic-gate ASSERT(mp->sbm_peer == NULL); 19500Sstevel@tonic-gate ASSERT(mp->sbm_flags == 0); 19510Sstevel@tonic-gate 19520Sstevel@tonic-gate /* get basepfn of mem unit */ 19530Sstevel@tonic-gate err = drmach_mem_get_base_physaddr(id, &bytes); 19540Sstevel@tonic-gate if (err) { 19550Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19560Sstevel@tonic-gate mp->sbm_basepfn = (pfn_t)-1; 19570Sstevel@tonic-gate } else 19580Sstevel@tonic-gate mp->sbm_basepfn = _b64top(bytes); 19590Sstevel@tonic-gate 
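	/*
	 * Illustration only (values assumed): with 8KB pages
	 * (PAGESHIFT == 13), a base physical address of 0x400000000
	 * returned above converts to sbm_basepfn == 0x200000.
	 */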
19600Sstevel@tonic-gate /* attempt to get number of pages from PDA */ 19610Sstevel@tonic-gate err = drmach_mem_get_size(id, &bytes); 19620Sstevel@tonic-gate if (err) { 19630Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19640Sstevel@tonic-gate mp->sbm_npages = 0; 19650Sstevel@tonic-gate } else 19660Sstevel@tonic-gate mp->sbm_npages = _b64top(bytes); 19670Sstevel@tonic-gate 19680Sstevel@tonic-gate /* if didn't work, calculate using memlist */ 19690Sstevel@tonic-gate if (mp->sbm_npages == 0) { 19700Sstevel@tonic-gate struct memlist *ml, *mlist; 19710Sstevel@tonic-gate /* 19720Sstevel@tonic-gate * Either we couldn't open the PDA or our 19730Sstevel@tonic-gate * PDA has garbage in it. We must have the 19740Sstevel@tonic-gate * page count consistent and whatever the 19750Sstevel@tonic-gate * OS states has precedence over the PDA 19760Sstevel@tonic-gate * so let's check the kernel. 19770Sstevel@tonic-gate */ 19780Sstevel@tonic-gate /* TODO: curious comment. it suggests pda query should happen if this fails */ 19790Sstevel@tonic-gate PR_MEM("%s: PDA query failed for npages." 19800Sstevel@tonic-gate " Checking memlist for %s\n", 19810Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path); 19820Sstevel@tonic-gate 19830Sstevel@tonic-gate mlist = dr_get_memlist(mp); 19840Sstevel@tonic-gate for (ml = mlist; ml; ml = ml->next) 19850Sstevel@tonic-gate mp->sbm_npages += btop(ml->size); 19860Sstevel@tonic-gate memlist_delete(mlist); 19870Sstevel@tonic-gate } 19880Sstevel@tonic-gate 19890Sstevel@tonic-gate err = drmach_mem_get_alignment(id, &bytes); 19900Sstevel@tonic-gate if (err) { 19910Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19920Sstevel@tonic-gate mp->sbm_alignment_mask = 0; 19930Sstevel@tonic-gate } else 19940Sstevel@tonic-gate mp->sbm_alignment_mask = _b64top(bytes); 19950Sstevel@tonic-gate 19960Sstevel@tonic-gate err = drmach_mem_get_slice_size(id, &bytes); 19970Sstevel@tonic-gate if (err) { 19980Sstevel@tonic-gate DRERR_SET_C(&mp->sbm_cm.sbdev_error, &err); 19990Sstevel@tonic-gate mp->sbm_slice_size = 0; /* paranoia */ 20000Sstevel@tonic-gate } else 20010Sstevel@tonic-gate mp->sbm_slice_size = bytes; 20020Sstevel@tonic-gate 20030Sstevel@tonic-gate /* 20040Sstevel@tonic-gate * Add memory to lgroup 20050Sstevel@tonic-gate */ 20060Sstevel@tonic-gate umb.u_board = mp->sbm_cm.sbdev_bp->b_num; 20070Sstevel@tonic-gate umb.u_base = (uint64_t)mp->sbm_basepfn << MMU_PAGESHIFT; 20080Sstevel@tonic-gate umb.u_len = (uint64_t)mp->sbm_npages << MMU_PAGESHIFT; 20090Sstevel@tonic-gate 20100Sstevel@tonic-gate lgrp_plat_config(LGRP_CONFIG_MEM_ADD, (uintptr_t)&umb); 20110Sstevel@tonic-gate 20120Sstevel@tonic-gate PR_MEM("%s: %s (basepfn = 0x%x, npgs = %d)\n", 20130Sstevel@tonic-gate f, mp->sbm_cm.sbdev_path, mp->sbm_basepfn, mp->sbm_npages); 20140Sstevel@tonic-gate } 20150Sstevel@tonic-gate 20160Sstevel@tonic-gate static int 20170Sstevel@tonic-gate dr_reserve_mem_spans(memhandle_t *mhp, struct memlist *ml) 20180Sstevel@tonic-gate { 20190Sstevel@tonic-gate int err; 20200Sstevel@tonic-gate pfn_t base; 20210Sstevel@tonic-gate pgcnt_t npgs; 20220Sstevel@tonic-gate struct memlist *mc; 20230Sstevel@tonic-gate static fn_t f = "dr_reserve_mem_spans"; 20240Sstevel@tonic-gate 20250Sstevel@tonic-gate PR_MEM("%s...\n", f); 20260Sstevel@tonic-gate 20270Sstevel@tonic-gate /* 20280Sstevel@tonic-gate * Walk the supplied memlist scheduling each span for removal 20290Sstevel@tonic-gate * with kphysm_del_span. It is possible that a span may intersect 20300Sstevel@tonic-gate * an area occupied by the cage. 
20310Sstevel@tonic-gate  */
20320Sstevel@tonic-gate 	for (mc = ml; mc != NULL; mc = mc->next) {
20330Sstevel@tonic-gate 		base = _b64top(mc->address);
20340Sstevel@tonic-gate 		npgs = _b64top(mc->size);
20350Sstevel@tonic-gate 
20360Sstevel@tonic-gate 		err = kphysm_del_span(*mhp, base, npgs);
20370Sstevel@tonic-gate 		if (err != KPHYSM_OK) {
20380Sstevel@tonic-gate 			cmn_err(CE_WARN, "%s memory reserve failed."
20390Sstevel@tonic-gate 			    " unexpected kphysm_del_span return value %d;"
20400Sstevel@tonic-gate 			    " basepfn=0x%lx npages=%ld",
20410Sstevel@tonic-gate 			    f, err, base, npgs);
20420Sstevel@tonic-gate 
20430Sstevel@tonic-gate 			return (-1);
20440Sstevel@tonic-gate 		}
20450Sstevel@tonic-gate 	}
20460Sstevel@tonic-gate 
20470Sstevel@tonic-gate 	return (0);
20480Sstevel@tonic-gate }
20490Sstevel@tonic-gate 
20500Sstevel@tonic-gate /* debug counters */
20510Sstevel@tonic-gate int dr_smt_realigned;
20520Sstevel@tonic-gate int dr_smt_preference[4];
20530Sstevel@tonic-gate 
20540Sstevel@tonic-gate #ifdef DEBUG
20550Sstevel@tonic-gate uint_t dr_ignore_board; /* if bit[bnum-1] set, board won't be candidate */
20560Sstevel@tonic-gate #endif
20570Sstevel@tonic-gate 
20580Sstevel@tonic-gate /*
20590Sstevel@tonic-gate  * Find and reserve a copy/rename target board suitable for the
20600Sstevel@tonic-gate  * given source board.
20610Sstevel@tonic-gate  * All boards in the system are examined and categorized in relation to
20620Sstevel@tonic-gate  * their memory size versus the source board's memory size. Order of
20630Sstevel@tonic-gate  * preference is:
20640Sstevel@tonic-gate  *	1st: board has same memory size
20650Sstevel@tonic-gate  *	2nd: board has larger memory size
20660Sstevel@tonic-gate  *	3rd: board has smaller memory size
20670Sstevel@tonic-gate  *	4th: board has smaller memory size, available memory will be reduced.
20680Sstevel@tonic-gate  * Boards in categories 3 and 4 will have their MCs reprogrammed so that the
20690Sstevel@tonic-gate  * address span to which the MC responds appropriately covers the
20700Sstevel@tonic-gate  * nonrelocatable span of the source board.
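 *
 * The categorization below compares page counts and the offset of each
 * unit's last pfn within its slice (s_phi and t_phi).  A minimal,
 * hypothetical restatement of that rule follows (helper name invented
 * for illustration; the smaller-board case is later demoted one level
 * when pages would land past the end of the slice); it is kept under
 * "#if 0" so it is never compiled.
 */
#if 0	/* illustrative sketch only; not part of this driver */
static int
dr_candidate_preference(pgcnt_t s_npages, pfn_t s_phi,
    pgcnt_t t_npages, pfn_t t_phi)
{
	if (t_npages == s_npages && t_phi == s_phi)
		return (0);		/* 1st choice: same size */
	if (t_npages > s_npages && t_phi > s_phi)
		return (1);		/* 2nd choice: larger */
	return (2);			/* 3rd/4th choice: smaller */
}
#endif
/*
 * The preference value computed below indexes the candidate sets from
 * which a target is ultimately chosen.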
20710Sstevel@tonic-gate */ 20720Sstevel@tonic-gate static int 20730Sstevel@tonic-gate dr_select_mem_target(dr_handle_t *hp, 20740Sstevel@tonic-gate dr_mem_unit_t *s_mp, struct memlist *s_ml) 20750Sstevel@tonic-gate { 20760Sstevel@tonic-gate pgcnt_t sz = _b64top(s_mp->sbm_slice_size); 20770Sstevel@tonic-gate pgcnt_t sm = sz - 1; /* mem_slice_mask */ 20780Sstevel@tonic-gate pfn_t s_phi, t_phi; 20790Sstevel@tonic-gate 20800Sstevel@tonic-gate int n_sets = 4; /* same, larger, smaller, clipped */ 20810Sstevel@tonic-gate int preference; /* lower value is higher preference */ 20820Sstevel@tonic-gate int n_units_per_set; 20830Sstevel@tonic-gate int idx; 20840Sstevel@tonic-gate dr_mem_unit_t **sets; 20850Sstevel@tonic-gate 20860Sstevel@tonic-gate int t_bd; 20870Sstevel@tonic-gate int t_unit; 20880Sstevel@tonic-gate int rv; 20890Sstevel@tonic-gate int allow_src_memrange_modify; 20900Sstevel@tonic-gate int allow_targ_memrange_modify; 20910Sstevel@tonic-gate drmachid_t t_id; 20920Sstevel@tonic-gate dr_board_t *s_bp, *t_bp; 20930Sstevel@tonic-gate dr_mem_unit_t *t_mp, *c_mp; 20940Sstevel@tonic-gate struct memlist *d_ml, *t_ml, *x_ml; 20950Sstevel@tonic-gate memquery_t s_mq = {0}; 20960Sstevel@tonic-gate static fn_t f = "dr_select_mem_target"; 20970Sstevel@tonic-gate 20980Sstevel@tonic-gate PR_MEM("%s...\n", f); 20990Sstevel@tonic-gate 21000Sstevel@tonic-gate ASSERT(s_ml != NULL); 21010Sstevel@tonic-gate 21020Sstevel@tonic-gate n_units_per_set = MAX_BOARDS * MAX_MEM_UNITS_PER_BOARD; 21030Sstevel@tonic-gate sets = GETSTRUCT(dr_mem_unit_t *, n_units_per_set * n_sets); 21040Sstevel@tonic-gate 21050Sstevel@tonic-gate s_bp = hp->h_bd; 21060Sstevel@tonic-gate /* calculate the offset into the slice of the last source board pfn */ 21070Sstevel@tonic-gate ASSERT(s_mp->sbm_npages != 0); 21080Sstevel@tonic-gate s_phi = (s_mp->sbm_basepfn + s_mp->sbm_npages - 1) & sm; 21090Sstevel@tonic-gate 21100Sstevel@tonic-gate allow_src_memrange_modify = drmach_allow_memrange_modify(s_bp->b_id); 21110Sstevel@tonic-gate 21120Sstevel@tonic-gate /* 21130Sstevel@tonic-gate * Make one pass through all memory units on all boards 21140Sstevel@tonic-gate * and categorize them with respect to the source board. 21150Sstevel@tonic-gate */ 21160Sstevel@tonic-gate for (t_bd = 0; t_bd < MAX_BOARDS; t_bd++) { 21170Sstevel@tonic-gate /* 21180Sstevel@tonic-gate * The board structs are a contiguous array 21190Sstevel@tonic-gate * so we take advantage of that to find the 21200Sstevel@tonic-gate * correct board struct pointer for a given 21210Sstevel@tonic-gate * board number. 
21220Sstevel@tonic-gate */ 21230Sstevel@tonic-gate t_bp = dr_lookup_board(t_bd); 21240Sstevel@tonic-gate 21250Sstevel@tonic-gate /* source board can not be its own target */ 21260Sstevel@tonic-gate if (s_bp->b_num == t_bp->b_num) 21270Sstevel@tonic-gate continue; 21280Sstevel@tonic-gate 21290Sstevel@tonic-gate for (t_unit = 0; t_unit < MAX_MEM_UNITS_PER_BOARD; t_unit++) { 21300Sstevel@tonic-gate 21310Sstevel@tonic-gate t_mp = dr_get_mem_unit(t_bp, t_unit); 21320Sstevel@tonic-gate 21330Sstevel@tonic-gate /* this memory node must be attached */ 21340Sstevel@tonic-gate if (!DR_DEV_IS_ATTACHED(&t_mp->sbm_cm)) 21350Sstevel@tonic-gate continue; 21360Sstevel@tonic-gate 21370Sstevel@tonic-gate /* source unit can not be its own target */ 21380Sstevel@tonic-gate if (s_mp == t_mp) { 21390Sstevel@tonic-gate /* catch this is debug kernels */ 21400Sstevel@tonic-gate ASSERT(0); 21410Sstevel@tonic-gate continue; 21420Sstevel@tonic-gate } 21430Sstevel@tonic-gate 21440Sstevel@tonic-gate /* 21450Sstevel@tonic-gate * this memory node must not already be reserved 21460Sstevel@tonic-gate * by some other memory delete operation. 21470Sstevel@tonic-gate */ 21480Sstevel@tonic-gate if (t_mp->sbm_flags & DR_MFLAG_RESERVED) 21490Sstevel@tonic-gate continue; 21500Sstevel@tonic-gate 21510Sstevel@tonic-gate /* 21520Sstevel@tonic-gate * categorize the memory node 21530Sstevel@tonic-gate * If this is a smaller memory node, create a 21540Sstevel@tonic-gate * temporary, edited copy of the source board's 21550Sstevel@tonic-gate * memlist containing only the span of the non- 21560Sstevel@tonic-gate * relocatable pages. 21570Sstevel@tonic-gate */ 21580Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 21590Sstevel@tonic-gate t_id = t_mp->sbm_cm.sbdev_bp->b_id; 21600Sstevel@tonic-gate allow_targ_memrange_modify = 21610Sstevel@tonic-gate drmach_allow_memrange_modify(t_id); 21620Sstevel@tonic-gate if (t_mp->sbm_npages == s_mp->sbm_npages && 21630Sstevel@tonic-gate t_phi == s_phi) { 21640Sstevel@tonic-gate preference = 0; 21650Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 21660Sstevel@tonic-gate } else if (t_mp->sbm_npages > s_mp->sbm_npages && 21670Sstevel@tonic-gate t_phi > s_phi) { 21680Sstevel@tonic-gate /* 21690Sstevel@tonic-gate * Selecting this target will require modifying 21700Sstevel@tonic-gate * the source and/or target physical address 21710Sstevel@tonic-gate * ranges. Skip if not supported by platform. 21720Sstevel@tonic-gate */ 21730Sstevel@tonic-gate if (!allow_src_memrange_modify || 21740Sstevel@tonic-gate !allow_targ_memrange_modify) { 21750Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 21760Sstevel@tonic-gate "range relocation not supported " 21770Sstevel@tonic-gate "by platform\n", f, 21780Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 21790Sstevel@tonic-gate continue; 21800Sstevel@tonic-gate } 21810Sstevel@tonic-gate preference = 1; 21820Sstevel@tonic-gate t_mp->sbm_slice_offset = 0; 21830Sstevel@tonic-gate } else { 21840Sstevel@tonic-gate pfn_t pfn = 0; 21850Sstevel@tonic-gate 21860Sstevel@tonic-gate /* 21870Sstevel@tonic-gate * Selecting this target will require modifying 21880Sstevel@tonic-gate * the source and/or target physical address 21890Sstevel@tonic-gate * ranges. Skip if not supported by platform. 
21900Sstevel@tonic-gate */ 21910Sstevel@tonic-gate if (!allow_src_memrange_modify || 21920Sstevel@tonic-gate !allow_targ_memrange_modify) { 21930Sstevel@tonic-gate PR_MEM("%s: skip target %s, memory " 21940Sstevel@tonic-gate "range relocation not supported " 21950Sstevel@tonic-gate "by platform\n", f, 21960Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path); 21970Sstevel@tonic-gate continue; 21980Sstevel@tonic-gate } 21990Sstevel@tonic-gate 22000Sstevel@tonic-gate /* 22010Sstevel@tonic-gate * Check if its mc can be programmed to relocate 22020Sstevel@tonic-gate * the active address range to match the 22030Sstevel@tonic-gate * nonrelocatable span of the source board. 22040Sstevel@tonic-gate */ 22050Sstevel@tonic-gate preference = 2; 22060Sstevel@tonic-gate 22070Sstevel@tonic-gate if (s_mq.phys_pages == 0) { 22080Sstevel@tonic-gate /* 22090Sstevel@tonic-gate * find non-relocatable span on 22100Sstevel@tonic-gate * source board. 22110Sstevel@tonic-gate */ 22120Sstevel@tonic-gate rv = kphysm_del_span_query( 22130Sstevel@tonic-gate s_mp->sbm_basepfn, 22140Sstevel@tonic-gate s_mp->sbm_npages, &s_mq); 22150Sstevel@tonic-gate if (rv != KPHYSM_OK) { 22160Sstevel@tonic-gate PR_MEM("%s: %s: unexpected" 22170Sstevel@tonic-gate " kphysm_del_span_query" 22180Sstevel@tonic-gate " return value %d;" 22190Sstevel@tonic-gate " basepfn 0x%lx, npages %ld\n", 22200Sstevel@tonic-gate f, 22210Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 22220Sstevel@tonic-gate rv, 22230Sstevel@tonic-gate s_mp->sbm_basepfn, 22240Sstevel@tonic-gate s_mp->sbm_npages); 22250Sstevel@tonic-gate 22260Sstevel@tonic-gate /* paranoia */ 22270Sstevel@tonic-gate s_mq.phys_pages = 0; 22280Sstevel@tonic-gate 22290Sstevel@tonic-gate continue; 22300Sstevel@tonic-gate } 22310Sstevel@tonic-gate 22320Sstevel@tonic-gate /* more paranoia */ 22330Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 22340Sstevel@tonic-gate ASSERT(s_mq.nonrelocatable != 0); 22350Sstevel@tonic-gate 22360Sstevel@tonic-gate /* 22370Sstevel@tonic-gate * this should not happen 22380Sstevel@tonic-gate * if it does, it simply means that 22390Sstevel@tonic-gate * we can not proceed with qualifying 22400Sstevel@tonic-gate * this target candidate. 22410Sstevel@tonic-gate */ 22420Sstevel@tonic-gate if (s_mq.nonrelocatable == 0) 22430Sstevel@tonic-gate continue; 22440Sstevel@tonic-gate 22450Sstevel@tonic-gate PR_MEM("%s: %s: nonrelocatable" 22460Sstevel@tonic-gate " span (0x%lx..0x%lx)\n", 22470Sstevel@tonic-gate f, 22480Sstevel@tonic-gate s_mp->sbm_cm.sbdev_path, 22490Sstevel@tonic-gate s_mq.first_nonrelocatable, 22500Sstevel@tonic-gate s_mq.last_nonrelocatable); 22510Sstevel@tonic-gate } 22520Sstevel@tonic-gate 22530Sstevel@tonic-gate /* 22540Sstevel@tonic-gate * Round down the starting pfn of the 22550Sstevel@tonic-gate * nonrelocatable span on the source board 22560Sstevel@tonic-gate * to nearest programmable boundary possible 22570Sstevel@tonic-gate * with this target candidate. 
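 *
 * Illustration only (values assumed): if sbm_alignment_mask were 0xfff
 * and s_mq.first_nonrelocatable were 0x12345, the rounded-down pfn
 * computed below would be 0x12345 & ~0xfff == 0x12000.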
22580Sstevel@tonic-gate */ 22590Sstevel@tonic-gate pfn = s_mq.first_nonrelocatable & 22600Sstevel@tonic-gate ~t_mp->sbm_alignment_mask; 22610Sstevel@tonic-gate 22620Sstevel@tonic-gate /* skip candidate if memory is too small */ 22630Sstevel@tonic-gate if (pfn + t_mp->sbm_npages < 22640Sstevel@tonic-gate s_mq.last_nonrelocatable) 22650Sstevel@tonic-gate continue; 22660Sstevel@tonic-gate 22670Sstevel@tonic-gate /* 22680Sstevel@tonic-gate * reprogramming an mc to relocate its 22690Sstevel@tonic-gate * active address range means the beginning 22700Sstevel@tonic-gate * address to which the DIMMS respond will 22710Sstevel@tonic-gate * be somewhere above the slice boundary 22720Sstevel@tonic-gate * address. The larger the size of memory 22730Sstevel@tonic-gate * on this unit, the more likely part of it 22740Sstevel@tonic-gate * will exist beyond the end of the slice. 22750Sstevel@tonic-gate * The portion of the memory that does is 22760Sstevel@tonic-gate * unavailable to the system until the mc 22770Sstevel@tonic-gate * reprogrammed to a more favorable base 22780Sstevel@tonic-gate * address. 22790Sstevel@tonic-gate * An attempt is made to avoid the loss by 22800Sstevel@tonic-gate * recalculating the mc base address relative 22810Sstevel@tonic-gate * to the end of the slice. This may produce 22820Sstevel@tonic-gate * a more favorable result. If not, we lower 22830Sstevel@tonic-gate * the board's preference rating so that it 22840Sstevel@tonic-gate * is one the last candidate boards to be 22850Sstevel@tonic-gate * considered. 22860Sstevel@tonic-gate */ 22870Sstevel@tonic-gate if ((pfn + t_mp->sbm_npages) & ~sm) { 22880Sstevel@tonic-gate pfn_t p; 22890Sstevel@tonic-gate 22900Sstevel@tonic-gate ASSERT(sz >= t_mp->sbm_npages); 22910Sstevel@tonic-gate 22920Sstevel@tonic-gate /* 22930Sstevel@tonic-gate * calculate an alternative starting 22940Sstevel@tonic-gate * address relative to the end of the 22950Sstevel@tonic-gate * slice's address space. 22960Sstevel@tonic-gate */ 22970Sstevel@tonic-gate p = pfn & ~sm; 22980Sstevel@tonic-gate p = p + (sz - t_mp->sbm_npages); 22990Sstevel@tonic-gate p = p & ~t_mp->sbm_alignment_mask; 23000Sstevel@tonic-gate 23010Sstevel@tonic-gate if ((p > s_mq.first_nonrelocatable) || 23020Sstevel@tonic-gate (p + t_mp->sbm_npages < 23030Sstevel@tonic-gate s_mq.last_nonrelocatable)) { 23040Sstevel@tonic-gate 23050Sstevel@tonic-gate /* 23060Sstevel@tonic-gate * alternative starting addr 23070Sstevel@tonic-gate * won't work. Lower preference 23080Sstevel@tonic-gate * rating of this board, since 23090Sstevel@tonic-gate * some number of pages will 23100Sstevel@tonic-gate * unavailable for use. 23110Sstevel@tonic-gate */ 23120Sstevel@tonic-gate preference = 3; 23130Sstevel@tonic-gate } else { 23140Sstevel@tonic-gate dr_smt_realigned++; 23150Sstevel@tonic-gate pfn = p; 23160Sstevel@tonic-gate } 23170Sstevel@tonic-gate } 23180Sstevel@tonic-gate 23190Sstevel@tonic-gate /* 23200Sstevel@tonic-gate * translate calculated pfn to an offset 23210Sstevel@tonic-gate * relative to the slice boundary. If the 23220Sstevel@tonic-gate * candidate board is selected, this offset 23230Sstevel@tonic-gate * will be used to calculate the values 23240Sstevel@tonic-gate * programmed into the mc. 
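 *
 * Illustration only (sizes assumed): with a 16GB slice and 8KB pages,
 * sz == 0x200000 pages and sm == 0x1fffff, so a pfn of 0x4234ab yields
 * a slice offset of 0x234ab below.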
23250Sstevel@tonic-gate */ 23260Sstevel@tonic-gate t_mp->sbm_slice_offset = pfn & sm; 23270Sstevel@tonic-gate PR_MEM("%s: %s:" 23280Sstevel@tonic-gate " proposed mc offset 0x%lx\n", 23290Sstevel@tonic-gate f, 23300Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 23310Sstevel@tonic-gate t_mp->sbm_slice_offset); 23320Sstevel@tonic-gate } 23330Sstevel@tonic-gate 23340Sstevel@tonic-gate dr_smt_preference[preference]++; 23350Sstevel@tonic-gate 23360Sstevel@tonic-gate /* calculate index to start of preference set */ 23370Sstevel@tonic-gate idx = n_units_per_set * preference; 23380Sstevel@tonic-gate /* calculate offset to respective element */ 23390Sstevel@tonic-gate idx += t_bd * MAX_MEM_UNITS_PER_BOARD + t_unit; 23400Sstevel@tonic-gate 23410Sstevel@tonic-gate ASSERT(idx < n_units_per_set * n_sets); 23420Sstevel@tonic-gate sets[idx] = t_mp; 23430Sstevel@tonic-gate } 23440Sstevel@tonic-gate } 23450Sstevel@tonic-gate 23460Sstevel@tonic-gate /* 23470Sstevel@tonic-gate * NOTE: this would be a good place to sort each candidate 23480Sstevel@tonic-gate * set in to some desired order, e.g. memory size in ascending 23490Sstevel@tonic-gate * order. Without an additional sorting step here, the order 23500Sstevel@tonic-gate * within a set is ascending board number order. 23510Sstevel@tonic-gate */ 23520Sstevel@tonic-gate 23530Sstevel@tonic-gate c_mp = NULL; 23540Sstevel@tonic-gate x_ml = NULL; 23550Sstevel@tonic-gate t_ml = NULL; 23560Sstevel@tonic-gate for (idx = 0; idx < n_units_per_set * n_sets; idx++) { 23570Sstevel@tonic-gate memquery_t mq; 23580Sstevel@tonic-gate 23590Sstevel@tonic-gate /* cleanup t_ml after previous pass */ 23600Sstevel@tonic-gate if (t_ml != NULL) { 23610Sstevel@tonic-gate memlist_delete(t_ml); 23620Sstevel@tonic-gate t_ml = NULL; 23630Sstevel@tonic-gate } 23640Sstevel@tonic-gate 23650Sstevel@tonic-gate /* get candidate target board mem unit */ 23660Sstevel@tonic-gate t_mp = sets[idx]; 23670Sstevel@tonic-gate if (t_mp == NULL) 23680Sstevel@tonic-gate continue; 23690Sstevel@tonic-gate 23700Sstevel@tonic-gate /* get target board memlist */ 23710Sstevel@tonic-gate t_ml = dr_get_memlist(t_mp); 23720Sstevel@tonic-gate if (t_ml == NULL) { 23730Sstevel@tonic-gate cmn_err(CE_WARN, "%s: no memlist for" 23740Sstevel@tonic-gate " mem-unit %d, board %d", 23750Sstevel@tonic-gate f, 23760Sstevel@tonic-gate t_mp->sbm_cm.sbdev_bp->b_num, 23770Sstevel@tonic-gate t_mp->sbm_cm.sbdev_unum); 23780Sstevel@tonic-gate 23790Sstevel@tonic-gate continue; 23800Sstevel@tonic-gate } 23810Sstevel@tonic-gate 23820Sstevel@tonic-gate /* get appropriate source board memlist */ 23830Sstevel@tonic-gate t_phi = (t_mp->sbm_basepfn + t_mp->sbm_npages - 1) & sm; 23840Sstevel@tonic-gate if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) { 23850Sstevel@tonic-gate spgcnt_t excess; 23860Sstevel@tonic-gate 23870Sstevel@tonic-gate /* 23880Sstevel@tonic-gate * make a copy of the source board memlist 23890Sstevel@tonic-gate * then edit it to remove the spans that 23900Sstevel@tonic-gate * are outside the calculated span of 23910Sstevel@tonic-gate * [pfn..s_mq.last_nonrelocatable]. 
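 *
 * Illustration only (offsets assumed): if the source base pfn sits at
 * slice offset 0 and the chosen candidate's sbm_slice_offset is 0x1000,
 * then "excess" below is 0x1000 pages and that many pages are trimmed
 * from the front of the copy; a second trim removes anything above
 * s_mq.last_nonrelocatable at the tail.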
23920Sstevel@tonic-gate */ 23930Sstevel@tonic-gate if (x_ml != NULL) 23940Sstevel@tonic-gate memlist_delete(x_ml); 23950Sstevel@tonic-gate 23960Sstevel@tonic-gate x_ml = memlist_dup(s_ml); 23970Sstevel@tonic-gate if (x_ml == NULL) { 23980Sstevel@tonic-gate PR_MEM("%s: memlist_dup failed\n", f); 23990Sstevel@tonic-gate /* TODO: should abort */ 24000Sstevel@tonic-gate continue; 24010Sstevel@tonic-gate } 24020Sstevel@tonic-gate 24030Sstevel@tonic-gate /* trim off lower portion */ 24040Sstevel@tonic-gate excess = t_mp->sbm_slice_offset - 24050Sstevel@tonic-gate (s_mp->sbm_basepfn & sm); 24060Sstevel@tonic-gate 24070Sstevel@tonic-gate if (excess > 0) { 24080Sstevel@tonic-gate x_ml = memlist_del_span( 24090Sstevel@tonic-gate x_ml, 24100Sstevel@tonic-gate _ptob64(s_mp->sbm_basepfn), 24110Sstevel@tonic-gate _ptob64(excess)); 24120Sstevel@tonic-gate } 24130Sstevel@tonic-gate ASSERT(x_ml); 24140Sstevel@tonic-gate 24150Sstevel@tonic-gate /* 24160Sstevel@tonic-gate * Since this candidate target board is smaller 24170Sstevel@tonic-gate * than the source board, s_mq must have been 24180Sstevel@tonic-gate * initialized in previous loop while processing 24190Sstevel@tonic-gate * this or some other candidate board. 24200Sstevel@tonic-gate * FIXME: this is weak. 24210Sstevel@tonic-gate */ 24220Sstevel@tonic-gate ASSERT(s_mq.phys_pages != 0); 24230Sstevel@tonic-gate 24240Sstevel@tonic-gate /* trim off upper portion */ 24250Sstevel@tonic-gate excess = (s_mp->sbm_basepfn + s_mp->sbm_npages) 24260Sstevel@tonic-gate - (s_mq.last_nonrelocatable + 1); 24270Sstevel@tonic-gate if (excess > 0) { 24280Sstevel@tonic-gate pfn_t p; 24290Sstevel@tonic-gate 24300Sstevel@tonic-gate p = s_mq.last_nonrelocatable + 1; 24310Sstevel@tonic-gate x_ml = memlist_del_span( 24320Sstevel@tonic-gate x_ml, 24330Sstevel@tonic-gate _ptob64(p), 24340Sstevel@tonic-gate _ptob64(excess)); 24350Sstevel@tonic-gate } 24360Sstevel@tonic-gate 24370Sstevel@tonic-gate PR_MEM("%s: %s: edited source memlist:\n", 24380Sstevel@tonic-gate f, s_mp->sbm_cm.sbdev_path); 24390Sstevel@tonic-gate PR_MEMLIST_DUMP(x_ml); 24400Sstevel@tonic-gate 24410Sstevel@tonic-gate #ifdef DEBUG 24420Sstevel@tonic-gate /* sanity check memlist */ 24430Sstevel@tonic-gate d_ml = x_ml; 24440Sstevel@tonic-gate while (d_ml->next != NULL) 24450Sstevel@tonic-gate d_ml = d_ml->next; 24460Sstevel@tonic-gate 24470Sstevel@tonic-gate ASSERT(d_ml->address + d_ml->size == 24480Sstevel@tonic-gate _ptob64(s_mq.last_nonrelocatable + 1)); 24490Sstevel@tonic-gate #endif 24500Sstevel@tonic-gate 24510Sstevel@tonic-gate /* 24520Sstevel@tonic-gate * x_ml now describes only the portion of the 24530Sstevel@tonic-gate * source board that will be moved during the 24540Sstevel@tonic-gate * copy/rename operation. 24550Sstevel@tonic-gate */ 24560Sstevel@tonic-gate d_ml = x_ml; 24570Sstevel@tonic-gate } else { 24580Sstevel@tonic-gate /* use original memlist; all spans will be moved */ 24590Sstevel@tonic-gate d_ml = s_ml; 24600Sstevel@tonic-gate } 24610Sstevel@tonic-gate 24620Sstevel@tonic-gate /* verify target can support source memory spans. 
*/ 24630Sstevel@tonic-gate if (memlist_canfit(d_ml, t_ml) == 0) { 24640Sstevel@tonic-gate PR_MEM("%s: source memlist won't" 24650Sstevel@tonic-gate " fit in target memlist\n", f); 24660Sstevel@tonic-gate PR_MEM("%s: source memlist:\n", f); 24670Sstevel@tonic-gate PR_MEMLIST_DUMP(d_ml); 24680Sstevel@tonic-gate PR_MEM("%s: target memlist:\n", f); 24690Sstevel@tonic-gate PR_MEMLIST_DUMP(t_ml); 24700Sstevel@tonic-gate 24710Sstevel@tonic-gate continue; 24720Sstevel@tonic-gate } 24730Sstevel@tonic-gate 24740Sstevel@tonic-gate /* NOTE: the value of d_ml is not used beyond this point */ 24750Sstevel@tonic-gate 24760Sstevel@tonic-gate PR_MEM("%s: checking for no-reloc in %s, " 24770Sstevel@tonic-gate " basepfn=0x%lx, npages=%ld\n", 24780Sstevel@tonic-gate f, 24790Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 24800Sstevel@tonic-gate t_mp->sbm_basepfn, 24810Sstevel@tonic-gate t_mp->sbm_npages); 24820Sstevel@tonic-gate 24830Sstevel@tonic-gate rv = kphysm_del_span_query( 24840Sstevel@tonic-gate t_mp->sbm_basepfn, t_mp->sbm_npages, &mq); 24850Sstevel@tonic-gate if (rv != KPHYSM_OK) { 24860Sstevel@tonic-gate PR_MEM("%s: kphysm_del_span_query:" 24870Sstevel@tonic-gate " unexpected return value %d\n", f, rv); 24880Sstevel@tonic-gate 24890Sstevel@tonic-gate continue; 24900Sstevel@tonic-gate } 24910Sstevel@tonic-gate 24920Sstevel@tonic-gate if (mq.nonrelocatable != 0) { 24930Sstevel@tonic-gate PR_MEM("%s: candidate %s has" 24940Sstevel@tonic-gate " nonrelocatable span [0x%lx..0x%lx]\n", 24950Sstevel@tonic-gate f, 24960Sstevel@tonic-gate t_mp->sbm_cm.sbdev_path, 24970Sstevel@tonic-gate mq.first_nonrelocatable, 24980Sstevel@tonic-gate mq.last_nonrelocatable); 24990Sstevel@tonic-gate 25000Sstevel@tonic-gate continue; 25010Sstevel@tonic-gate } 25020Sstevel@tonic-gate 25030Sstevel@tonic-gate #ifdef DEBUG 25040Sstevel@tonic-gate /* 25050Sstevel@tonic-gate * This is a debug tool for excluding certain boards 25060Sstevel@tonic-gate * from being selected as a target board candidate. 25070Sstevel@tonic-gate * dr_ignore_board is only tested by this driver. 25080Sstevel@tonic-gate * It must be set with adb, obp, /etc/system or your 25090Sstevel@tonic-gate * favorite debugger. 25100Sstevel@tonic-gate */ 25110Sstevel@tonic-gate if (dr_ignore_board & 25120Sstevel@tonic-gate (1 << (t_mp->sbm_cm.sbdev_bp->b_num - 1))) { 25130Sstevel@tonic-gate PR_MEM("%s: dr_ignore_board flag set," 25140Sstevel@tonic-gate " ignoring %s as candidate\n", 25150Sstevel@tonic-gate f, t_mp->sbm_cm.sbdev_path); 25160Sstevel@tonic-gate continue; 25170Sstevel@tonic-gate } 25180Sstevel@tonic-gate #endif 25190Sstevel@tonic-gate 25200Sstevel@tonic-gate /* 25210Sstevel@tonic-gate * Reserve excess source board memory, if any. 25220Sstevel@tonic-gate * 25230Sstevel@tonic-gate * When the number of pages on the candidate target 25240Sstevel@tonic-gate * board is less than the number of pages on the source, 25250Sstevel@tonic-gate * then some spans (clearly) of the source board's address 25260Sstevel@tonic-gate * space will not be covered by physical memory after the 25270Sstevel@tonic-gate * copy/rename completes. The following code block 25280Sstevel@tonic-gate * schedules those spans to be deleted. 
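 *
 * Illustration only (sizes assumed): renaming a 16GB source onto an
 * 8GB candidate leaves the upper 8GB of the source's renamed address
 * range with no backing memory; those are the spans reserved for
 * deletion here.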
        /*
         * Reserve excess source board memory, if any.
         *
         * When the number of pages on the candidate target
         * board is less than the number of pages on the source,
         * then some spans of the source board's address space
         * will clearly not be covered by physical memory after
         * the copy/rename completes.  The following code block
         * schedules those spans to be deleted.
         */
        if (t_mp->sbm_npages < s_mp->sbm_npages || t_phi < s_phi) {
            pfn_t pfn;
            uint64_t s_del_pa;
            struct memlist *ml;

            d_ml = memlist_dup(s_ml);
            if (d_ml == NULL) {
                PR_MEM("%s: can't dup src brd memlist\n", f);
                /* TODO: should abort */
                continue;
            }

            /* calculate base pfn relative to target board */
            pfn = s_mp->sbm_basepfn & ~sm;
            pfn += t_mp->sbm_slice_offset;

            /*
             * cannot split dynamically added segment
             */
            s_del_pa = _ptob64(pfn + t_mp->sbm_npages);
            PR_MEM("%s: proposed src delete pa=0x%lx\n", f,
                s_del_pa);
            PR_MEM("%s: checking for split of dyn seg list:\n", f);
            PR_MEMLIST_DUMP(s_mp->sbm_dyn_segs);
            for (ml = s_mp->sbm_dyn_segs; ml; ml = ml->next) {
                if (s_del_pa > ml->address &&
                    s_del_pa < ml->address + ml->size) {
                    s_del_pa = ml->address;
                    break;
                }
            }

            /* remove span that will reside on candidate board */
            d_ml = memlist_del_span(d_ml, _ptob64(pfn),
                s_del_pa - _ptob64(pfn));

            PR_MEM("%s: %s: reserving src brd memlist:\n",
                f, s_mp->sbm_cm.sbdev_path);
            PR_MEMLIST_DUMP(d_ml);

            /* reserve excess spans */
            if (dr_reserve_mem_spans(
                &s_mp->sbm_memhandle, d_ml) != 0) {

                /* likely more non-reloc pages appeared */
                /* TODO: restart from top? */
                continue;
            }
        } else {
            /* no excess source board memory */
            d_ml = NULL;
        }

        s_mp->sbm_flags |= DR_MFLAG_RESERVED;
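        /*
         * Note on the excess-reservation path above (hypothetical
         * numbers): if the proposed delete boundary s_del_pa falls at
         * 0x82000000 while a dynamically added segment occupies
         * [0x80000000, 0x90000000), the boundary is pulled back to
         * 0x80000000 so that segment is never split; the excess spans
         * scheduled for deletion then begin at that lower address.
         */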
        /*
         * reserve all memory on target board.
         * NOTE: source board's memhandle is used.
         *
         * If this succeeds (eq 0), then target selection is
         * complete and all unwanted memory spans, both source and
         * target, have been reserved.  Loop is terminated.
         */
        if (dr_reserve_mem_spans(&s_mp->sbm_memhandle, t_ml) == 0) {
            PR_MEM("%s: %s: target board memory reserved\n",
                f, t_mp->sbm_cm.sbdev_path);

            /* a candidate target board is now reserved */
            t_mp->sbm_flags |= DR_MFLAG_RESERVED;
            c_mp = t_mp;

            /* *** EXITING LOOP *** */
            break;
        }

        /* did not successfully reserve the target board. */
        PR_MEM("%s: could not reserve target %s\n",
            f, t_mp->sbm_cm.sbdev_path);

        /*
         * NOTE: an undo of the dr_reserve_mem_spans work
         * will happen automatically when the memhandle
         * (s_mp->sbm_memhandle) is kphysm_del_release'd.
         */

        s_mp->sbm_flags &= ~DR_MFLAG_RESERVED;
    }

    /* clean up after memlist editing logic */
    if (x_ml != NULL)
        memlist_delete(x_ml);

    FREESTRUCT(sets, dr_mem_unit_t *, n_units_per_set * n_sets);

    /*
     * c_mp will be NULL when the entire sets[] array
     * has been searched without reserving a target board.
     */
    if (c_mp == NULL) {
        PR_MEM("%s: %s: target selection failed.\n",
            f, s_mp->sbm_cm.sbdev_path);

        if (t_ml != NULL)
            memlist_delete(t_ml);

        return (-1);
    }

    PR_MEM("%s: found target %s for source %s\n",
        f,
        c_mp->sbm_cm.sbdev_path,
        s_mp->sbm_cm.sbdev_path);

    s_mp->sbm_peer = c_mp;
    s_mp->sbm_flags |= DR_MFLAG_SOURCE;
    s_mp->sbm_del_mlist = d_ml;    /* spans to be deleted, if any */
    s_mp->sbm_mlist = s_ml;
    s_mp->sbm_cm.sbdev_busy = 1;

    c_mp->sbm_peer = s_mp;
    c_mp->sbm_flags |= DR_MFLAG_TARGET;
    c_mp->sbm_del_mlist = t_ml;    /* spans to be deleted */
    c_mp->sbm_mlist = t_ml;
    c_mp->sbm_cm.sbdev_busy = 1;
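    /*
     * Bookkeeping summary (descriptive only): the source unit now
     * carries DR_MFLAG_SOURCE, with sbm_del_mlist naming any spans
     * that must still be deleted (NULL when the target covers the
     * source exactly), while the reserved candidate carries
     * DR_MFLAG_TARGET with its entire memlist scheduled for deletion.
     * The resize flags set below record whether the move is to a
     * larger or a smaller board; neither is set for an exact fit.
     */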
    s_mp->sbm_flags &= ~DR_MFLAG_MEMRESIZE;
    if (c_mp->sbm_npages > s_mp->sbm_npages) {
        s_mp->sbm_flags |= DR_MFLAG_MEMUPSIZE;
        PR_MEM("%s: upsize detected (source=%ld < target=%ld)\n",
            f, s_mp->sbm_npages, c_mp->sbm_npages);
    } else if (c_mp->sbm_npages < s_mp->sbm_npages) {
        s_mp->sbm_flags |= DR_MFLAG_MEMDOWNSIZE;
        PR_MEM("%s: downsize detected (source=%ld > target=%ld)\n",
            f, s_mp->sbm_npages, c_mp->sbm_npages);
    }

    return (0);
}

/*
 * Memlist support.
 */
static struct memlist *
memlist_dup(struct memlist *mlist)
{
    struct memlist *hl = NULL, *tl, **mlp;

    if (mlist == NULL)
        return (NULL);

    mlp = &hl;
    tl = *mlp;
    for (; mlist; mlist = mlist->next) {
        *mlp = GETSTRUCT(struct memlist, 1);
        (*mlp)->address = mlist->address;
        (*mlp)->size = mlist->size;
        (*mlp)->prev = tl;
        tl = *mlp;
        mlp = &((*mlp)->next);
    }
    *mlp = NULL;

    return (hl);
}
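/*
 * Usage sketch for memlist_dup() (illustrative only): duplicates are
 * built with GETSTRUCT and must be released with memlist_delete()
 * once the caller has finished editing them, e.g.
 *
 *	copy = memlist_dup(orig);
 *	if (copy != NULL) {
 *		copy = memlist_del_span(copy, base, len);
 *		...
 *		memlist_delete(copy);
 *	}
 *
 * (copy, orig, base and len above are hypothetical names.)
 */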
/*
 * Determine whether the source memlist (s_mlist) will
 * fit into the target memlist (t_mlist) in terms of
 * size and holes (i.e., based on the same relative base address).
 */
static int
memlist_canfit(struct memlist *s_mlist, struct memlist *t_mlist)
{
    int rv = 0;
    uint64_t s_basepa, t_basepa;
    struct memlist *s_ml, *t_ml;

    if ((s_mlist == NULL) || (t_mlist == NULL))
        return (0);

    /*
     * Base both memlists on a common base address (0).
     */
    s_basepa = s_mlist->address;
    t_basepa = t_mlist->address;

    for (s_ml = s_mlist; s_ml; s_ml = s_ml->next)
        s_ml->address -= s_basepa;

    for (t_ml = t_mlist; t_ml; t_ml = t_ml->next)
        t_ml->address -= t_basepa;

    s_ml = s_mlist;
    for (t_ml = t_mlist; t_ml && s_ml; t_ml = t_ml->next) {
        uint64_t s_start, s_end;
        uint64_t t_start, t_end;

        t_start = t_ml->address;
        t_end = t_start + t_ml->size;

        for (; s_ml; s_ml = s_ml->next) {
            s_start = s_ml->address;
            s_end = s_start + s_ml->size;

            if ((s_start < t_start) || (s_end > t_end))
                break;
        }
    }
    /*
     * If we ran out of source memlist chunks, that means
     * we found a home for all of them.
     */
    if (s_ml == NULL)
        rv = 1;

    /*
     * Need to add the base addresses back since the memlists
     * are still in use by the caller.
     */
    for (s_ml = s_mlist; s_ml; s_ml = s_ml->next)
        s_ml->address += s_basepa;

    for (t_ml = t_mlist; t_ml; t_ml = t_ml->next)
        t_ml->address += t_basepa;

    return (rv);
}
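/*
 * A worked example of memlist_del_span() below (hypothetical values):
 * deleting the span [0x1000, 0x2000) from a list whose only entry is
 * [0x0, 0x4000) shrinks that entry to [0x0, 0x1000) and inserts a new
 * entry [0x2000, 0x4000), i.e. the entry is split around the deleted
 * span.  Entries whose size drops to zero are unlinked and freed.
 */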
static struct memlist *
memlist_del_span(struct memlist *mlist, uint64_t base, uint64_t len)
{
    uint64_t end;
    struct memlist *ml, *tl, *nlp;

    if (mlist == NULL)
        return (NULL);

    end = base + len;
    if ((end <= mlist->address) || (base == end))
        return (mlist);

    for (tl = ml = mlist; ml; tl = ml, ml = nlp) {
        uint64_t mend;

        nlp = ml->next;

        if (end <= ml->address)
            break;

        mend = ml->address + ml->size;
        if (base < mend) {
            if (base <= ml->address) {
                ml->address = end;
                if (end >= mend)
                    ml->size = 0ull;
                else
                    ml->size = mend - ml->address;
            } else {
                ml->size = base - ml->address;
                if (end < mend) {
                    struct memlist *nl;
                    /*
                     * splitting a memlist entry.
                     */
                    nl = GETSTRUCT(struct memlist, 1);
                    nl->address = end;
                    nl->size = mend - nl->address;
                    if ((nl->next = nlp) != NULL)
                        nlp->prev = nl;
                    nl->prev = ml;
                    ml->next = nl;
                    nlp = nl;
                }
            }
            if (ml->size == 0ull) {
                if (ml == mlist) {
                    if ((mlist = nlp) != NULL)
                        nlp->prev = NULL;
                    FREESTRUCT(ml, struct memlist, 1);
                    if (mlist == NULL)
                        break;
                    ml = nlp;
                } else {
                    if ((tl->next = nlp) != NULL)
                        nlp->prev = tl;
                    FREESTRUCT(ml, struct memlist, 1);
                    ml = tl;
                }
            }
        }
    }

    return (mlist);
}

/*
 * add span without merging
 */
static struct memlist *
memlist_cat_span(struct memlist *mlist, uint64_t base, uint64_t len)
{
    struct memlist *ml, *tl, *nl;

    if (len == 0ull)
        return (NULL);

    if (mlist == NULL) {
        mlist = GETSTRUCT(struct memlist, 1);
        mlist->address = base;
        mlist->size = len;
        mlist->next = mlist->prev = NULL;

        return (mlist);
    }

    for (tl = ml = mlist; ml; tl = ml, ml = ml->next) {
        if (base < ml->address) {
            nl = GETSTRUCT(struct memlist, 1);
            nl->address = base;
            nl->size = len;
            nl->next = ml;
            if ((nl->prev = ml->prev) != NULL)
                nl->prev->next = nl;
            ml->prev = nl;
            if (mlist == ml)
                mlist = nl;
            break;
        }
    }

    if (ml == NULL) {
        nl = GETSTRUCT(struct memlist, 1);
        nl->address = base;
        nl->size = len;
        nl->next = NULL;
        nl->prev = tl;
        tl->next = nl;
    }

    return (mlist);
}
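/*
 * Note on memlist_cat_span() above (illustrative example): the new
 * span is linked into the list in address order but never coalesced
 * with its neighbors.  For instance, adding [0x1000, 0x2000) to a
 * list holding only [0x0, 0x1000) yields two adjacent entries rather
 * than a single merged [0x0, 0x2000) entry.
 */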