/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libproc.h>
#include <limits.h>
#include <procfs.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include "rcapd.h"
#include "rcapd_rfd.h"
#include "rcapd_mapping.h"
#include "utils.h"

static int lpc_xmap_update(lprocess_t *);
#ifdef DEBUG
extern int lmapping_dump_diff(lmapping_t *lm1, lmapping_t *lm2);
#endif /* DEBUG */

/*
 * The number of file descriptors required to grab a process and create an
 * agent in it.
 */
#define	PGRAB_FD_COUNT 10
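
/*
 * A pagedata snapshot, as read from /proc/<pid>/pagedata, consists of a
 * prpageheader_t followed by one record per mapping: a prasmap_t, then one
 * byte of PG_REFERENCED/PG_MODIFIED flags for each page in the mapping, with
 * each record aligned on an 8-byte boundary.  The cursor type below tracks a
 * position within such a snapshot as it is walked.
 */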

/*
 * Record a position in an address space as it corresponds to a prpageheader_t
 * and affiliated structures.
 */
typedef struct prpageheader_cur {
	int pr_nmap;		/* number of mappings in address space */
	int pr_map;		/* number of this mapping */
	uint64_t pr_pgoff;	/* page offset into mapping */
	uint64_t pr_npage;	/* number of pages in mapping */
	uint64_t pr_pagesize;	/* page size of mapping */
	uintptr_t pr_addr;	/* base of mapping */
	prpageheader_t *pr_prpageheader; /* associated page header */
	void *pr_pdaddr;	/* address of page's byte in pagedata */
	prxmap_t *pr_xmap;	/* array containing per-segment information */
	int pr_nxmap;		/* number of xmaps in array */
	int64_t pr_rss;		/* number of resident pages in mapping, */
				/* or -1 if xmap is out of sync */
	int64_t pr_pg_rss;	/* number of pageable pages in mapping, or -1 */
} prpageheader_cur_t;

static struct ps_prochandle *scan_pr; /* currently-scanned process's handle */

typedef enum {
	STDL_NORMAL,
	STDL_HIGH
} st_debug_level_t;

/*
 * Output a scanning-related debug message.
 */
/*PRINTFLIKE3*/ /*ARGSUSED*/
static void
st_debug(st_debug_level_t level, lcollection_t *lcol, char *msg, ...)
{
#ifdef DEBUG_MSG
	va_list alist;
	char *buf;
	size_t len;

	if (get_message_priority() < ((level == STDL_HIGH) ? RCM_DEBUG_HIGH
	    : RCM_DEBUG))
		return;

	len = strlen(msg) + LINELEN;
	buf = malloc(len);
	if (buf == NULL)
		return;
	(void) snprintf(buf, len, "%s %s scanner %s", rcfg.rcfg_mode_name,
	    lcol->lcol_name, msg);

	va_start(alist, msg);
	vdprintfe(RCM_DEBUG, buf, alist);
	va_end(alist);

	free(buf);
#endif /* DEBUG_MSG */
}

/*
 * Determine the collection's current victim, based on its last victim.  The
 * last victim is returned if it is still valid; otherwise any other valid
 * process in the collection is returned, if one exists.
 */
static lprocess_t *
get_valid_victim(lcollection_t *lcol, lprocess_t *lpc)
{
	if (lpc == NULL || !lcollection_member(lcol, lpc))
		lpc = lcol->lcol_lprocess;

	/*
	 * Find the next scannable process, and make it the victim.
	 */
	while (lpc != NULL && lpc->lpc_unscannable != 0)
		lpc = lpc->lpc_next;

	return (lpc);
}

/*
 * Get a process's combined current pagedata (per-page referenced and modified
 * bits) and set the supplied pointer to it.  The caller is responsible for
 * freeing the data.  If the pagedata is unreadable, a nonzero value is
 * returned, and errno is set.  Otherwise, 0 is returned.
 */
static int
get_pagedata(prpageheader_t **pghpp, int fd)
{
	int res;
	struct stat st;

redo:
	errno = 0;
	if (fstat(fd, &st) != 0) {
		debug("cannot stat pagedata\n");
		return (-1);
	}

	errno = 0;
	*pghpp = malloc(st.st_size);
	if (*pghpp == NULL) {
		debug("cannot malloc() %ld bytes for pagedata", st.st_size);
		return (-1);
	}
	(void) bzero(*pghpp, st.st_size);

	errno = 0;
	if ((res = read(fd, *pghpp, st.st_size)) != st.st_size) {
		free(*pghpp);
		*pghpp = NULL;
		if (res > 0 || errno == E2BIG) {
			debug("pagedata changed size, retrying\n");
			goto redo;
		} else {
			debug("cannot read pagedata");
			return (-1);
		}
	}

	return (0);
}

/*
 * Return the count of kilobytes of pages represented by the given pagedata
 * that are in all of the states specified by mask and in none of the states
 * in notmask.  If the CP_CLEAR flag is set, the pagedata will also be
 * cleared.
 */
#define	CP_CLEAR	1
static uint64_t
count_pages(prpageheader_t *pghp, int flags, int mask, int notmask)
{
	int map;
	caddr_t cur, end;
	prpageheader_t pgh = *pghp;
	prasmap_t *asmapp;
	uint64_t count = 0;

	cur = (caddr_t)pghp + sizeof (*pghp);
	for (map = 0; map < pgh.pr_nmap; map++) {
		asmapp = (prasmap_t *)(uintptr_t)cur;
		cur += sizeof (*asmapp);
		end = cur + asmapp->pr_npage;
		while (cur < end) {
			if ((*cur & mask) == mask && (*cur & notmask) == 0)
				count += asmapp->pr_pagesize / 1024;
			if ((flags & CP_CLEAR) != 0)
				*cur = 0;
			cur++;
		}

		/*
		 * Skip to next 64-bit-aligned address to get the next
		 * prasmap_t.
		 */
		cur = (caddr_t)((intptr_t)(cur + 7) & ~7);
	}

	return (count);
}

/*
 * Return the amount of memory (in kilobytes) that hasn't been referenced or
 * modified, which is the memory that will be paged out first.  Should be
 * written to exclude nonresident pages when sufficient interfaces exist.
 */
static uint64_t
unrm_size(lprocess_t *lpc)
{
	return (count_pages(lpc->lpc_prpageheader, CP_CLEAR,
	    0, PG_MODIFIED | PG_REFERENCED));
}

/*
 * Advance a prpageheader_cur_t to the address space's next mapping, returning
 * its address, or NULL if there is none.  Any known nonpageable or nonresident
 * mappings will be skipped over.
 */
static uintptr_t
advance_prpageheader_cur_nextmapping(prpageheader_cur_t *pcp)
{
	prasmap_t *pap;
	int i;

next:
	ASSERT(pcp->pr_map < pcp->pr_nmap);
	if ((pcp->pr_map + 1) == pcp->pr_nmap)
		return (NULL);
	pcp->pr_map++;
	if (pcp->pr_pgoff < pcp->pr_npage) {
		pcp->pr_pdaddr = (caddr_t)((uintptr_t)pcp->pr_pdaddr +
		    (pcp->pr_npage - pcp->pr_pgoff));
		pcp->pr_pgoff = pcp->pr_npage;
	}
	/*
	 * Skip to next 64-bit-aligned address to get the next prasmap_t.
	 */
	pcp->pr_pdaddr = (caddr_t)(((uintptr_t)pcp->pr_pdaddr + 7) & ~7);
	pap = (prasmap_t *)pcp->pr_pdaddr;
	pcp->pr_pgoff = 0;
	pcp->pr_npage = pap->pr_npage;
	pcp->pr_pagesize = pap->pr_pagesize;
	pcp->pr_addr = pap->pr_vaddr;
	pcp->pr_pdaddr = pap + 1;

	/*
	 * Skip any known nonpageable mappings.  Currently, the only one
	 * detected is the schedctl page.
	 */
	if ((pap->pr_mflags ^ (MA_SHARED | MA_READ | MA_WRITE | MA_EXEC |
	    MA_ANON)) == 0 && pap->pr_npage == 1) {
		debug("identified nonpageable schedctl mapping at %p\n",
		    (void *)pcp->pr_addr);
		goto next;
	}

	/*
	 * Skip mappings with no resident pages.  If the xmap does not
	 * correspond to the pagedata for any reason, it will be ignored.
	 */
	pcp->pr_rss = -1;
	pcp->pr_pg_rss = -1;
	for (i = 0; i < pcp->pr_nxmap; i++) {
		prxmap_t *xmap = &pcp->pr_xmap[i];

		if (pcp->pr_addr == xmap->pr_vaddr && xmap->pr_size ==
		    (pcp->pr_npage * pcp->pr_pagesize)) {
			pcp->pr_rss = xmap->pr_rss;
			/*
			 * Remove COW pages from the pageable RSS count.
			 */
			if ((xmap->pr_mflags & MA_SHARED) == 0)
				pcp->pr_pg_rss = xmap->pr_anon;
			break;
		}
	}
	if (pcp->pr_rss == 0) {
		debug("identified nonresident mapping at 0x%p\n",
		    (void *)pcp->pr_addr);
		goto next;
	} else if (pcp->pr_pg_rss == 0) {
		debug("identified unpageable mapping at 0x%p\n",
		    (void *)pcp->pr_addr);
		goto next;
	}

	return (pcp->pr_addr);
}

/*
 * Advance a prpageheader_cur_t to the mapping's next page, returning its
 * address, or NULL if there is none.
 */
static void *
advance_prpageheader_cur(prpageheader_cur_t *pcp)
{
	ASSERT(pcp->pr_pgoff < pcp->pr_npage);
	if ((pcp->pr_pgoff + 1) == pcp->pr_npage)
		return (NULL);
	pcp->pr_pdaddr = (caddr_t)pcp->pr_pdaddr + 1;
	pcp->pr_pgoff++;

	ASSERT((*(char *)pcp->pr_pdaddr & ~(PG_MODIFIED | PG_REFERENCED)) == 0);
	return ((caddr_t)pcp->pr_addr + pcp->pr_pgoff * pcp->pr_pagesize);
}

/*
 * Initialize a prpageheader_cur_t, positioned at the first page of the first
 * mapping of an address space.
 */
static void *
set_prpageheader_cur(prpageheader_cur_t *pcp, prpageheader_t *php,
    prxmap_t *xmap, int nxmap)
{
	bzero(pcp, sizeof (*pcp));
	pcp->pr_nmap = php->pr_nmap;
	pcp->pr_map = -1;
	pcp->pr_prpageheader = php;
	pcp->pr_xmap = xmap;
	pcp->pr_nxmap = nxmap;
	pcp->pr_pdaddr = (prpageheader_t *)php + 1;

	return ((void *)advance_prpageheader_cur_nextmapping(pcp));
}

/*
 * Position a prpageheader_cur_t to the mapped address greater than or equal
 * to the given value.
 */
static void *
set_prpageheader_cur_addr(prpageheader_cur_t *pcp, prpageheader_t *php,
    prxmap_t *xmap, int nxmap, void *naddr)
{
	void *addr = set_prpageheader_cur(pcp, php, xmap, nxmap);

	while (addr != NULL && addr <= naddr)
		if (naddr < (void *)((caddr_t)pcp->pr_addr +
		    pcp->pr_pagesize * pcp->pr_npage)) {
			uint64_t pgdiff = ((uintptr_t)naddr -
			    (uintptr_t)pcp->pr_addr) / pcp->pr_pagesize;
			pcp->pr_pgoff += pgdiff;
			pcp->pr_pdaddr = (caddr_t)pcp->pr_pdaddr + pgdiff;
			addr = (caddr_t)pcp->pr_addr + pcp->pr_pagesize *
			    pcp->pr_pgoff;
			break;
		} else
			addr =
			    (void *)advance_prpageheader_cur_nextmapping(pcp);

	return (addr);
}

static void
revoke_pagedata(rfd_t *rfd)
{
	lprocess_t *lpc = rfd->rfd_data;

	st_debug(STDL_NORMAL, lpc->lpc_collection, "revoking pagedata for"
	    " process %d\n", (int)lpc->lpc_pid);
	ASSERT(lpc->lpc_pgdata_fd != -1);
	lpc->lpc_pgdata_fd = -1;
}

#ifdef DEBUG
static void
mklmapping(lmapping_t **lm, prpageheader_t *pgh)
{
	prpageheader_cur_t cur;
	void *addr;

	addr = set_prpageheader_cur(&cur, pgh, NULL, -1);
	ASSERT(*lm == NULL);
	while (addr != NULL) {
		(void) lmapping_insert(lm, cur.pr_addr, cur.pr_npage *
		    cur.pr_pagesize);
		addr = (void *)advance_prpageheader_cur_nextmapping(&cur);
	}
}

static void
lmapping_dump(lmapping_t *lm)
{
	debug("lm: %p\n", (void *)lm);
	while (lm != NULL) {
		debug("\t(%p, %llx\n", (void *)lm->lm_addr,
		    (unsigned long long)lm->lm_size);
		lm = lm->lm_next;
	}
}
#endif /* DEBUG */
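
/*
 * Each read of the pagedata file yields reference/modify bits accumulated
 * only since the previous read, so successive snapshots are OR'd together
 * below: a page counts as referenced or modified if it was touched at any
 * time since the scanner last cleared the bits itself.
 */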

/*
 * OR two prpageheader_t which are supposedly snapshots of the same address
 * space.  Intersecting mappings with different page sizes are tolerated but
 * not normalized (the result is not accurate).  If the mappings of the two
 * snapshots differ in any regard, the supplied mappings_changed flag will be
 * set.
 */
static void
OR_pagedata(prpageheader_t *src, prpageheader_t *dst, int *mappings_changedp)
{
	prpageheader_cur_t src_cur;
	prpageheader_cur_t dst_cur;
	uintptr_t src_addr;
	uintptr_t dst_addr;
	int mappings_changed = 0;

	/*
	 * OR source pagedata with the destination, for pages of intersecting
	 * mappings.
	 */
	src_addr = (uintptr_t)set_prpageheader_cur(&src_cur, src, NULL, -1);
	dst_addr = (uintptr_t)set_prpageheader_cur(&dst_cur, dst, NULL, -1);
	while (src_addr != NULL && dst_addr != NULL) {
		while (src_addr == dst_addr && src_addr != NULL) {
			*(char *)dst_cur.pr_pdaddr |=
			    *(char *)src_cur.pr_pdaddr;
			src_addr = (uintptr_t)advance_prpageheader_cur(
			    &src_cur);
			dst_addr = (uintptr_t)advance_prpageheader_cur(
			    &dst_cur);
		}
		if (src_addr != dst_addr)
			mappings_changed = 1;
		src_addr = advance_prpageheader_cur_nextmapping(&src_cur);
		dst_addr = advance_prpageheader_cur_nextmapping(&dst_cur);
		while (src_addr != dst_addr && src_addr != NULL && dst_addr !=
		    NULL) {
			mappings_changed = 1;
			if (src_addr < dst_addr)
				src_addr = advance_prpageheader_cur_nextmapping(
				    &src_cur);
			else
				dst_addr = advance_prpageheader_cur_nextmapping(
				    &dst_cur);
		}
	}

	*mappings_changedp = mappings_changed;
}

/*
 * Merge the current pagedata with that on hand.  If the pagedata is
 * unretrievable for any reason, such as the process having exited or being a
 * zombie, a nonzero value is returned, the process should be marked
 * unscannable, and future attempts to scan it should be avoided, since the
 * symptom is probably permanent.  If the mappings of the two pagedata
 * snapshots differ in any respect, the supplied callback will be invoked once.
 */
static int
merge_current_pagedata(lprocess_t *lpc,
    void (*mappings_changed_cb)(lprocess_t *))
{
	prpageheader_t *pghp;
	int mappings_changed = 0;

	if (lpc->lpc_pgdata_fd < 0 || get_pagedata(&pghp, lpc->lpc_pgdata_fd) !=
	    0) {
		char pathbuf[PROC_PATH_MAX];

		(void) snprintf(pathbuf, sizeof (pathbuf), "/proc/%d/pagedata",
		    (int)lpc->lpc_pid);
		if ((lpc->lpc_pgdata_fd = rfd_open(pathbuf, 1, RFD_PAGEDATA,
		    revoke_pagedata, lpc, O_RDONLY, 0)) < 0 ||
		    get_pagedata(&pghp, lpc->lpc_pgdata_fd) != 0)
			return (-1);
		debug("starting/resuming pagedata collection for %d\n",
		    (int)lpc->lpc_pid);
	}
	debug("process %d: %llu/%llukB r/m'd since last read\n",
	    (int)lpc->lpc_pid, (unsigned long long)count_pages(pghp, 0,
	    PG_MODIFIED | PG_REFERENCED, 0), (unsigned long long)lpc->lpc_rss);
	if (lpc->lpc_prpageheader != NULL) {
		/*
		 * OR the two snapshots.
		 */
#ifdef DEBUG
		lmapping_t *old = NULL;
		lmapping_t *new = NULL;

		mklmapping(&new, pghp);
		mklmapping(&old, lpc->lpc_prpageheader);
#endif /* DEBUG */
		OR_pagedata(lpc->lpc_prpageheader, pghp, &mappings_changed);
#ifdef DEBUG
		if (((mappings_changed != 0) ^
		    (lmapping_dump_diff(old, new) != 0))) {
			debug("lmapping_changed inconsistent with lmapping\n");
			debug("old\n");
			lmapping_dump(old);
			debug("new\n");
			lmapping_dump(new);
			debug("ignored\n");
			lmapping_dump(lpc->lpc_ignore);
			ASSERT(0);
		}
		lmapping_free(&new);
		lmapping_free(&old);
#endif /* DEBUG */
		free(lpc->lpc_prpageheader);
	} else
		mappings_changed = 1;
	lpc->lpc_prpageheader = pghp;
	debug("process %d: %llu/%llukB r/m'd since hand swept\n",
	    (int)lpc->lpc_pid, (unsigned long long)count_pages(pghp, 0,
	    PG_MODIFIED | PG_REFERENCED, 0),
	    (unsigned long long)lpc->lpc_rss);
	if (mappings_changed != 0) {
		debug("process %d: mappings changed\n", (int)lpc->lpc_pid);
		if (mappings_changed_cb != NULL)
			mappings_changed_cb(lpc);
	}
	return (0);
}
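
/*
 * Paging out is performed from within the victim itself: the process is
 * grabbed and an agent LWP is created in it (see scan() below), and
 * pr_memcntl() has the agent apply memcntl(MC_SYNC) with
 * MS_ASYNC | MS_INVALIDATE to the chosen range, asking the kernel to
 * invalidate the resident copies of those pages after any modified pages
 * have been queued for write-back to their backing store.
 */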

/*
 * Attempt to page out a region of the given process's address space.  May
 * return nonzero if not all of the pages are pageable, for any reason.
 */
static int
pageout(pid_t pid, struct ps_prochandle *Pr, caddr_t start, caddr_t end)
{
	int res;

	if (end <= start)
		return (0);

	errno = 0;
	res = pr_memcntl(Pr, start, (end - start), MC_SYNC,
	    (caddr_t)(MS_ASYNC | MS_INVALIDATE), 0, 0);
	debug_high("pr_memcntl [%p-%p): %d", (void *)start, (void *)end, res);

	/*
	 * EBUSY indicates that none of the pages have backing store
	 * allocated, or that some pages were locked; these conditions are
	 * less interesting than others, which are noted.
	 */
	if (res != 0)
		if (errno == EBUSY)
			res = 0;
		else
			debug("%d: can't pageout %p+%llx (errno %d)", (int)pid,
			    (void *)start, (long long)(end - start), errno);

	return (res);
}

/*
 * Compute the delta of the victim process's RSS since the last call.  If the
 * psinfo cannot be obtained, no work is done, and no error is returned; it is
 * up to the caller to detect the process's termination via other means.
 */
static int64_t
rss_delta(psinfo_t *new_psinfo, psinfo_t *old_psinfo, lprocess_t *vic)
{
	int64_t d_rss = 0;

	if (get_psinfo(vic->lpc_pid, new_psinfo, vic->lpc_psinfo_fd,
	    lprocess_update_psinfo_fd_cb, vic, vic) == 0) {
		d_rss = (int64_t)new_psinfo->pr_rssize -
		    (int64_t)old_psinfo->pr_rssize;
		if (d_rss < 0)
			vic->lpc_collection->lcol_stat.lcols_pg_eff +=
			    (- d_rss);
		*old_psinfo = *new_psinfo;
	}

	return (d_rss);
}

static void
unignore_mappings(lprocess_t *lpc)
{
	debug("clearing ignored set\n");
	lmapping_free(&lpc->lpc_ignore);
}

static void
unignore_referenced_mappings(lprocess_t *lpc)
{
	prpageheader_cur_t cur;
	void *vicaddr;

	vicaddr = set_prpageheader_cur(&cur, lpc->lpc_prpageheader, NULL, -1);
	while (vicaddr != NULL) {
		if (((*(char *)cur.pr_pdaddr) & (PG_REFERENCED | PG_MODIFIED))
		    != 0) {
			if (lmapping_remove(&lpc->lpc_ignore, cur.pr_addr,
			    cur.pr_npage * cur.pr_pagesize) == 0)
				debug("removed mapping 0x%p+0t%llukB from"
				    " ignored set\n", (void *)cur.pr_addr,
				    (unsigned long long)(cur.pr_npage *
				    cur.pr_pagesize / 1024));
			vicaddr = (void *)advance_prpageheader_cur_nextmapping(
			    &cur);
		} else if ((vicaddr = advance_prpageheader_cur(&cur)) == NULL)
			vicaddr = (void *)advance_prpageheader_cur_nextmapping(
			    &cur);
	}
}
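
/*
 * The scan proceeds in two passes over the collection.  First, each
 * candidate's current pagedata is merged with what is already on hand, and,
 * if the total of unreferenced/unmodified pages cannot cover the excess by
 * itself, all reference/modify bits are cleared so that working-set pages
 * may be paged out as well.  Second, each victim is grabbed, an agent LWP is
 * created in it, and runs of unreferenced pages are paged out until the
 * excess is gone or the collection has been fully scanned.
 */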

/*
 * Resume scanning, starting with the last victim, if it is still valid, or any
 * other one, otherwise.
 */
void
scan(lcollection_t *lcol, int64_t excess)
{
	lprocess_t *vic, *lpc;
	void *vicaddr, *endaddr, *nvicaddr;
	prpageheader_cur_t cur;
	psinfo_t old_psinfo, new_psinfo;
	hrtime_t scan_start;
	int res, resumed;
	uint64_t col_unrm_size;

	st_debug(STDL_NORMAL, lcol, "starting to scan, excess %lldk\n",
	    (long long)excess);

	/*
	 * Determine the address to start scanning at, depending on whether
	 * scanning can be resumed.
	 */
	endaddr = NULL;
	if ((vic = get_valid_victim(lcol, lcol->lcol_victim)) ==
	    lcol->lcol_victim && lcol->lcol_resaddr != NULL) {
		vicaddr = lcol->lcol_resaddr;
		st_debug(STDL_NORMAL, lcol, "resuming process %d\n",
		    (int)vic->lpc_pid);
		resumed = 1;
	} else {
		vicaddr = NULL;
		resumed = 0;
	}

	scan_start = gethrtime();
	/*
	 * Obtain the most current pagedata for the processes that might be
	 * scanned, and remove from the ignored set any mappings which have
	 * referenced or modified pages (in the hope that the pageability of
	 * the mapping's pages may have changed).  Determine if the
	 * unreferenced and unmodified portion is too small to reduce the
	 * excess completely.  If so, ignore these bits so that even the
	 * working set will be paged out.
	 */
	col_unrm_size = 0;
	lpc = vic;
	while (lpc != NULL && should_run) {
		if (merge_current_pagedata(lpc, unignore_mappings) != 0) {
			st_debug(STDL_NORMAL, lcol, "process %d:"
			    " exited/temporarily unscannable",
			    (int)lpc->lpc_pid);
			goto next;
		}
		debug("process %d: %llu/%llukB scannable\n", (int)lpc->lpc_pid,
		    (unsigned long long)(lpc->lpc_unrm = unrm_size(lpc)),
		    (unsigned long long)lpc->lpc_size);
		col_unrm_size += lpc->lpc_unrm = unrm_size(lpc);

		if ((lcol->lcol_stat.lcols_scan_count %
		    RCAPD_IGNORED_SET_FLUSH_IVAL) == 0) {
			/*
			 * Periodically clear the set of ignored mappings.
			 * This will allow processes whose ignored segments'
			 * pageability have changed (without a corresponding
			 * reference or modification to a page) to be
			 * recognized.
			 */
			if (lcol->lcol_stat.lcols_scan_count > 0)
				unignore_mappings(lpc);
		} else {
			/*
			 * Ensure mappings with referenced or modified pages
			 * are not in the ignored set.  Their usage might mean
			 * the condition which made them unpageable is gone.
			 */
			unignore_referenced_mappings(lpc);
		}
next:
		lpc = lpc->lpc_next != NULL ? get_valid_victim(lcol,
		    lpc->lpc_next) : NULL;
	}
	if (col_unrm_size < excess) {
		lpc = vic;
		debug("will not reduce excess with only unreferenced pages\n");
		while (lpc != NULL && should_run) {
			if (lpc->lpc_prpageheader != NULL) {
				(void) count_pages(lpc->lpc_prpageheader,
				    CP_CLEAR, 0, 0);
				if (lpc->lpc_pgdata_fd >= 0) {
					if (rfd_close(lpc->lpc_pgdata_fd) != 0)
						debug("could not close %d"
						    " lpc_pgdata_fd %d",
						    (int)lpc->lpc_pid,
						    lpc->lpc_pgdata_fd);
					lpc->lpc_pgdata_fd = -1;
				}
			}
			lpc = lpc->lpc_next != NULL ? get_valid_victim(lcol,
			    lpc->lpc_next) : NULL;
		}
	}

	/*
	 * Examine each process for pages to remove until the excess is
	 * reduced.
	 */
	while (vic != NULL && excess > 0 && should_run) {
		/*
		 * Skip processes whose death was reported when the merging of
		 * pagedata was attempted.
		 */
		if (vic->lpc_prpageheader == NULL)
			goto nextproc;

		/*
		 * Obtain optional segment residency information.
		 */
		if (lpc_xmap_update(vic) != 0)
			st_debug(STDL_NORMAL, lcol, "process %d: xmap"
			    " unreadable; ignoring", (int)vic->lpc_pid);

#ifdef DEBUG_MSG
		{
			void *ovicaddr = vicaddr;
#endif /* DEBUG_MSG */
		vicaddr = set_prpageheader_cur_addr(&cur, vic->lpc_prpageheader,
		    vic->lpc_xmap, vic->lpc_nxmap, vicaddr);
#ifdef DEBUG_MSG
			st_debug(STDL_NORMAL, lcol, "trying to resume from"
			    " 0x%p, next 0x%p\n", ovicaddr, vicaddr);
		}
#endif /* DEBUG_MSG */

		/*
		 * Take control of the victim.
		 */
		if (get_psinfo(vic->lpc_pid, &old_psinfo,
		    vic->lpc_psinfo_fd, lprocess_update_psinfo_fd_cb,
		    vic, vic) != 0) {
			st_debug(STDL_NORMAL, lcol, "cannot get %d psinfo",
			    (int)vic->lpc_pid);
			goto nextproc;
		}
		(void) rfd_reserve(PGRAB_FD_COUNT);
		if ((scan_pr = Pgrab(vic->lpc_pid, 0, &res)) == NULL) {
			st_debug(STDL_NORMAL, lcol, "cannot grab %d (%d)",
			    (int)vic->lpc_pid, res);
			goto nextproc;
		}
		if (Pcreate_agent(scan_pr) != 0) {
			st_debug(STDL_NORMAL, lcol, "cannot control %d",
			    (int)vic->lpc_pid);
			goto nextproc;
		}
		/*
		 * Be very pessimistic about the state of the agent LWP --
		 * verify it's actually stopped.
		 */
		errno = 0;
		while (Pstate(scan_pr) == PS_RUN)
			(void) Pwait(scan_pr, 0);
		if (Pstate(scan_pr) != PS_STOP) {
			st_debug(STDL_NORMAL, lcol, "agent not in expected"
			    " state (%d)", Pstate(scan_pr));
			goto nextproc;
		}

		/*
		 * Within the victim's address space, find contiguous ranges of
		 * unreferenced pages to page out.
		 */
		st_debug(STDL_NORMAL, lcol, "paging out process %d\n",
		    (int)vic->lpc_pid);
		while (excess > 0 && vicaddr != NULL && should_run) {
			/*
			 * Skip mappings in the ignored set.  Mappings get
			 * placed in the ignored set when all their resident
			 * pages are unreferenced and unmodified, yet
			 * unpageable -- such as when they are locked, or
			 * involved in asynchronous I/O.
			 * They will be scanned again when some page is
			 * referenced or modified.
			 */
			if (lmapping_contains(vic->lpc_ignore, cur.pr_addr,
			    cur.pr_npage * cur.pr_pagesize)) {
				debug("ignored mapping at 0x%p\n",
				    (void *)cur.pr_addr);
				/*
				 * Update statistics.
				 */
				lcol->lcol_stat.lcols_pg_att +=
				    cur.pr_npage * cur.pr_pagesize / 1024;

				vicaddr = (void *)
				    advance_prpageheader_cur_nextmapping(&cur);
				continue;
			}

			/*
			 * Determine a range of unreferenced pages to page out,
			 * and clear the R/M bits in the preceding referenced
			 * range.
			 */
			st_debug(STDL_HIGH, lcol, "start from mapping at 0x%p,"
			    " npage %llu\n", vicaddr,
			    (unsigned long long)cur.pr_npage);
			while (vicaddr != NULL &&
			    *(caddr_t)cur.pr_pdaddr != 0) {
				*(caddr_t)cur.pr_pdaddr = 0;
				vicaddr = advance_prpageheader_cur(&cur);
			}
			st_debug(STDL_HIGH, lcol, "advance, vicaddr %p, pdaddr"
			    " %p\n", vicaddr, cur.pr_pdaddr);
			if (vicaddr == NULL) {
				/*
				 * The end of mapping was reached before any
				 * unreferenced pages were seen.
				 */
				vicaddr = (void *)
				    advance_prpageheader_cur_nextmapping(&cur);
				continue;
			}
			do
				endaddr = advance_prpageheader_cur(&cur);
			while (endaddr != NULL &&
			    *(caddr_t)cur.pr_pdaddr == 0 &&
			    (((intptr_t)endaddr - (intptr_t)vicaddr) /
			    1024) < excess);
			st_debug(STDL_HIGH, lcol, "endaddr %p, *cur %d\n",
			    endaddr, *(caddr_t)cur.pr_pdaddr);

			/*
			 * Page out from vicaddr to the end of the mapping, or
			 * endaddr if set, then continue scanning after
			 * endaddr, or the next mapping, if not set.
			 */
			nvicaddr = endaddr;
			if (endaddr == NULL)
				endaddr = (caddr_t)cur.pr_addr +
				    cur.pr_pagesize * cur.pr_npage;
			if (pageout(vic->lpc_pid, scan_pr, vicaddr, endaddr) ==
			    0) {
				int64_t d_rss, att;
				int willignore = 0;

				excess += (d_rss = rss_delta(
				    &new_psinfo, &old_psinfo, vic));

				/*
				 * If this pageout attempt was unsuccessful
				 * (the resident portion was not affected), and
				 * was for the whole mapping, put it in the
				 * ignored set, so it will not be scanned again
				 * until some page is referenced or modified.
				 */
				if (d_rss >= 0 && (void *)cur.pr_addr ==
				    vicaddr && (cur.pr_pagesize * cur.pr_npage)
				    == ((uintptr_t)endaddr -
				    (uintptr_t)vicaddr)) {
					if (lmapping_insert(
					    &vic->lpc_ignore,
					    cur.pr_addr,
					    cur.pr_pagesize *
					    cur.pr_npage) != 0)
						debug("not enough memory to add"
						    " mapping at %p to ignored"
						    " set\n",
						    (void *)cur.pr_addr);
					willignore = 1;
				}

				/*
				 * Update statistics.
				 */
				lcol->lcol_stat.lcols_pg_att += (att =
				    ((intptr_t)endaddr - (intptr_t)vicaddr) /
				    1024);
				st_debug(STDL_NORMAL, lcol, "paged out 0x%p"
				    "+0t(%llu/%llu)kB%s\n", vicaddr,
				    (unsigned long long)((d_rss <
				    0) ? - d_rss : 0), (unsigned long long)att,
				    willignore ? " (will ignore)" : "");
			} else {
				st_debug(STDL_NORMAL, lcol,
				    "process %d: exited/unscannable\n",
				    (int)vic->lpc_pid);
				vic->lpc_unscannable = 1;
				goto nextproc;
			}

			/*
			 * Update the statistics file, if it's time.
			 */
			check_update_statistics();

			vicaddr = (nvicaddr != NULL) ? nvicaddr : (void *)
			    advance_prpageheader_cur_nextmapping(&cur);
		}
		excess += rss_delta(&new_psinfo, &old_psinfo, vic);
		st_debug(STDL_NORMAL, lcol, "done, excess %lld\n",
		    (long long)excess);
nextproc:
		/*
		 * If a process was grabbed, release it, destroying its agent.
		 */
		if (scan_pr != NULL) {
			(void) Prelease(scan_pr, 0);
			scan_pr = NULL;
		}
		lcol->lcol_victim = vic;
		/*
		 * Scan the collection at most once.  Only if scanning was not
		 * aborted for any reason, and the end of the process list has
		 * not been reached, determine the next victim and scan it.
		 */
		if (vic != NULL) {
			if (vic->lpc_next != NULL) {
				/*
				 * Determine the next process to be scanned.
				 */
				if (excess > 0) {
					vic = get_valid_victim(lcol,
					    vic->lpc_next);
					vicaddr = 0;
				}
			} else {
				/*
				 * A complete scan of the collection was made,
				 * so tick the scan counter and stop scanning
				 * until the next request.
				 */
				lcol->lcol_stat.lcols_scan_count++;
				lcol->lcol_stat.lcols_scan_time_complete
				    = lcol->lcol_stat.lcols_scan_time;
				/*
				 * If an excess still exists, tick the
				 * "ineffective scan" counter, signalling that
				 * the cap may be unenforceable.
				 */
				if (resumed == 0 && excess > 0)
					lcol->lcol_stat
					    .lcols_scan_ineffective++;
				/*
				 * Scanning should start at the beginning of
				 * the process list at the next request.
				 */
				if (excess > 0)
					vic = NULL;
			}
		}
	}
	lcol->lcol_stat.lcols_scan_time += (gethrtime() - scan_start);
	st_debug(STDL_HIGH, lcol, "done scanning; excess %lld\n",
	    (long long)excess);

	lcol->lcol_resaddr = vicaddr;
	if (lcol->lcol_resaddr == NULL && lcol->lcol_victim != NULL) {
		lcol->lcol_victim = get_valid_victim(lcol,
		    lcol->lcol_victim->lpc_next);
	}
}

/*
 * Abort the scan in progress, and destroy the agent LWP of any grabbed
 * processes.
 */
void
scan_abort(void)
{
	if (scan_pr != NULL)
		(void) Prelease(scan_pr, NULL);
}

static void
revoke_xmap(rfd_t *rfd)
{
	lprocess_t *lpc = rfd->rfd_data;

	debug("revoking xmap for process %d\n", (int)lpc->lpc_pid);
	ASSERT(lpc->lpc_xmap_fd != -1);
	lpc->lpc_xmap_fd = -1;
}

/*
 * Retrieve the process's current xmap, which is used to determine the size of
 * the resident portion of its segments.  Return zero if successful.
 */
static int
lpc_xmap_update(lprocess_t *lpc)
{
	int res;
	struct stat st;

	free(lpc->lpc_xmap);
	lpc->lpc_xmap = NULL;
	lpc->lpc_nxmap = -1;

	if (lpc->lpc_xmap_fd == -1) {
		char pathbuf[PROC_PATH_MAX];

		(void) snprintf(pathbuf, sizeof (pathbuf), "/proc/%d/xmap",
		    (int)lpc->lpc_pid);
		if ((lpc->lpc_xmap_fd = rfd_open(pathbuf, 1, RFD_XMAP,
		    revoke_xmap, lpc, O_RDONLY, 0)) < 0)
			return (-1);
	}

redo:
	errno = 0;
	if (fstat(lpc->lpc_xmap_fd, &st) != 0) {
		debug("cannot stat xmap\n");
		(void) rfd_close(lpc->lpc_xmap_fd);
		lpc->lpc_xmap_fd = -1;
		return (-1);
	}

	if ((st.st_size % sizeof (*lpc->lpc_xmap)) != 0) {
		debug("xmap wrong size\n");
		(void) rfd_close(lpc->lpc_xmap_fd);
		lpc->lpc_xmap_fd = -1;
		return (-1);
	}

	lpc->lpc_xmap = malloc(st.st_size);
	if (lpc->lpc_xmap == NULL) {
		debug("cannot malloc() %ld bytes for xmap", st.st_size);
		(void) rfd_close(lpc->lpc_xmap_fd);
		lpc->lpc_xmap_fd = -1;
		return (-1);
	}

	if ((res = pread(lpc->lpc_xmap_fd, lpc->lpc_xmap, st.st_size, 0)) !=
	    st.st_size) {
		free(lpc->lpc_xmap);
		lpc->lpc_xmap = NULL;
		if (res > 0) {
			debug("xmap changed size, retrying\n");
			goto redo;
		} else {
			debug("cannot read xmap");
			return (-1);
		}
	}
	lpc->lpc_nxmap = st.st_size / sizeof (*lpc->lpc_xmap);

	return (0);
}