/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/cred.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/regset.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/prsystm.h>
#include <sys/buf.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/cpuvar.h>

#include <sys/fault.h>
#include <sys/syscall.h>
#include <sys/procfs.h>
#include <sys/cmn_err.h>
#include <sys/stack.h>
#include <sys/watchpoint.h>
#include <sys/copyops.h>
#include <sys/schedctl.h>

#include <sys/mman.h>
#include <vm/as.h>
#include <vm/seg.h>

/*
 * Copy ops vector for watchpoints.
 */
static int	watch_copyin(const void *, void *, size_t);
static int	watch_xcopyin(const void *, void *, size_t);
static int	watch_copyout(const void *, void *, size_t);
static int	watch_xcopyout(const void *, void *, size_t);
static int	watch_copyinstr(const char *, char *, size_t, size_t *);
static int	watch_copyoutstr(const char *, char *, size_t, size_t *);
static int	watch_fuword8(const void *, uint8_t *);
static int	watch_fuword16(const void *, uint16_t *);
static int	watch_fuword32(const void *, uint32_t *);
static int	watch_suword8(void *, uint8_t);
static int	watch_suword16(void *, uint16_t);
static int	watch_suword32(void *, uint32_t);
static int	watch_physio(int (*)(struct buf *), struct buf *,
    dev_t, int, void (*)(struct buf *), struct uio *);
#ifdef _LP64
static int	watch_fuword64(const void *, uint64_t *);
static int	watch_suword64(void *, uint64_t);
#endif

struct copyops watch_copyops = {
	watch_copyin,
	watch_xcopyin,
	watch_copyout,
	watch_xcopyout,
	watch_copyinstr,
	watch_copyoutstr,
	watch_fuword8,
	watch_fuword16,
	watch_fuword32,
#ifdef _LP64
	watch_fuword64,
#else
	NULL,
#endif
	watch_suword8,
	watch_suword16,
	watch_suword32,
#ifdef _LP64
	watch_suword64,
#else
	NULL,
#endif
	watch_physio
};

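/*
 * This vector (see the struct copyops definition in <sys/copyops.h>) routes
 * copyin()/copyout() and friends through the watchpoint-aware wrappers below
 * for threads in a process with active watched areas.  It is presumably
 * installed on, and removed from, a thread's copy-ops pointer by the /proc
 * watchpoint code elsewhere in the kernel; nothing in this file installs it.
 */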
/*
 * Map the 'rw' argument to a protection flag.
 */
static int
rw_to_prot(enum seg_rw rw)
{
	switch (rw) {
	case S_EXEC:
		return (PROT_EXEC);
	case S_READ:
		return (PROT_READ);
	case S_WRITE:
		return (PROT_WRITE);
	default:
		return (PROT_NONE);	/* can't happen */
	}
}

/*
 * Map the 'rw' argument to an index into an array of exec/write/read things.
 * The index follows the precedence order:  exec .. write .. read
 */
static int
rw_to_index(enum seg_rw rw)
{
	switch (rw) {
	default:	/* default case "can't happen" */
	case S_EXEC:
		return (0);
	case S_WRITE:
		return (1);
	case S_READ:
		return (2);
	}
}

/*
 * Map an index back to a seg_rw.
 */
static enum seg_rw S_rw[4] = {
	S_EXEC,
	S_WRITE,
	S_READ,
	S_READ,
};

#define	X	0
#define	W	1
#define	R	2
#define	sum(a)	(a[X] + a[W] + a[R])

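/*
 * For example, a watched page that pr_mappage() has mapped in twice for
 * kernel reads and once for a user-level write has wp_kmap[R] == 2 and
 * wp_umap[W] == 1, so sum(wp_kmap) + sum(wp_umap) == 3; WP_NOWATCH is
 * cleared again only after all three matching pr_unmappage() calls have
 * been made (see pr_do_mappage() below).
 */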
/*
 * Common code for pr_mappage() and pr_unmappage().
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here.  Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.  Note that if an
	 * as_fault() is servicing a fault to a watched page on behalf of an
	 * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
	 * will be set to wp_oprot).  Since this is done while holding as writer
	 * lock, we need to grab as lock (reader lock is good enough).
	 *
	 * p_maplock prevents simultaneous execution of this function.  Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed.  But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once.  See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
		pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}


		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			SEGOP_GETPROT(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}

/*
 * Restore the original page protections on an address range.
 * If 'kernel' is non-zero, just do it for the kernel.
 * pr_mappage() returns non-zero if it actually changed anything.
 *
 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
 * but pairs may be nested within other pairs.  The reference counts
 * sort it all out.  See pr_do_mappage(), above.
 */
static int
pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	return (pr_do_mappage(addr, size, 1, rw, kernel));
}

/*
 * Set the modified page protections on a watched page.
 * Inverse of pr_mappage().
 * Needs to be called only if pr_mappage() returned non-zero.
 */
static void
pr_unmappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	(void) pr_do_mappage(addr, size, 0, rw, kernel);
}

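/*
 * Typical usage, as in the copyin()/copyout() wrappers further below:
 *
 *	mapped = pr_mappage(uaddr, len, S_READ, 1);
 *	... access the user page without triggering the watchpoint ...
 *	if (mapped)
 *		pr_unmappage(uaddr, len, S_READ, 1);
 */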
/*
 * Function called by an lwp after it resumes from stop().
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}



/* Must be called with as lock held */
int
pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
{
	register struct watched_page *pwp;
	struct watched_page tpw;
	uint_t prot;
	int rv = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		return (0);
	}

	/*
	 * as->a_wpage can only be modified while the process is totally
	 * stopped.  We need, and should use, no locks here.
	 */
	if (as != &kas && avl_numnodes(&as->a_wpage) != 0) {
		tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		pwp = avl_find(&as->a_wpage, &tpw, NULL);
		if (pwp != NULL) {
			ASSERT(addr >= pwp->wp_vaddr &&
			    addr < pwp->wp_vaddr + PAGESIZE);
			if (pwp->wp_oprot != 0) {
				prot = pwp->wp_prot;
				switch (rw) {
				case S_READ:
					rv = ((prot & (PROT_USER|PROT_READ))
						!= (PROT_USER|PROT_READ));
					break;
				case S_WRITE:
					rv = ((prot & (PROT_USER|PROT_WRITE))
						!= (PROT_USER|PROT_WRITE));
					break;
				case S_EXEC:
					rv = ((prot & (PROT_USER|PROT_EXEC))
						!= (PROT_USER|PROT_EXEC));
					break;
				default:
					/* can't happen! */
					break;
				}
			}
		}
	}

	return (rv);
}


/*
 * trap() calls here to determine if a fault is in a watched page.
 * We return nonzero if this is true and the load/store would fail.
 */
int
pr_is_watchpage(caddr_t addr, enum seg_rw rw)
{
	struct as *as = curproc->p_as;
	int rv;

	if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/* Grab the lock because of XHAT (see comment in pr_mappage()) */
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	rv = pr_is_watchpage_as(addr, rw, as);
	AS_LOCK_EXIT(as, &as->a_lock);

	return (rv);
}



/*
 * trap() calls here to determine if a fault is a watchpoint.
 */
int
pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
	enum seg_rw rw)
{
	proc_t *p = curproc;
	caddr_t addr = *paddr;
	caddr_t eaddr = addr + size;
	register struct watched_area *pwa;
	struct watched_area twa;
	int rv = 0;
	int ta = 0;
	size_t len = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		*pta = 0;
		return (0);
	}

	/*
	 * p->p_warea is protected by p->p_lock.
	 */
	mutex_enter(&p->p_lock);

	/* BEGIN CSTYLED */
	/*
	 * This loop is somewhat complicated because the fault region can span
	 * multiple watched areas.  For example:
	 *
	 *            addr              eaddr
	 * 		+-----------------+
	 * 		| fault region    |
	 * 	+-------+--------+----+---+------------+
	 *      | prot not right |    | prot correct   |
	 *      +----------------+    +----------------+
	 *    wa_vaddr	      wa_eaddr
	 *    		      wa_vaddr		wa_eaddr
	 *
	 * We start at the area greater than or equal to the starting address.
	 * As long as some portion of the fault region overlaps the current
	 * area, we continue checking permissions until we find an appropriate
	 * match.
	 */
	/* END CSTYLED */
	twa.wa_vaddr = addr;
	twa.wa_eaddr = eaddr;

	for (pwa = pr_find_watched_area(p, &twa, NULL);
	    pwa != NULL && eaddr > pwa->wa_vaddr && addr < pwa->wa_eaddr;
	    pwa = AVL_NEXT(&p->p_warea, pwa)) {

		switch (rw) {
		case S_READ:
			if (pwa->wa_flags & WA_READ)
				rv = TRAP_RWATCH;
			break;
		case S_WRITE:
			if (pwa->wa_flags & WA_WRITE)
				rv = TRAP_WWATCH;
			break;
		case S_EXEC:
			if (pwa->wa_flags & WA_EXEC)
				rv = TRAP_XWATCH;
			break;
		default:
			/* can't happen */
			break;
		}

		/*
		 * If protections didn't match, check the next watched
		 * area
		 */
		if (rv != 0) {
			if (addr < pwa->wa_vaddr)
				addr = pwa->wa_vaddr;
			len = pwa->wa_eaddr - addr;
			if (pwa->wa_flags & WA_TRAPAFTER)
				ta = 1;
			break;
		}
	}

	mutex_exit(&p->p_lock);

	*paddr = addr;
	*pta = ta;
	if (plen != NULL)
		*plen = len;
	return (rv);
}

/*
 * Set up to perform a single-step at user level for the
 * case of a trapafter watchpoint.  Called from trap().
 */
void
do_watch_step(caddr_t vaddr, size_t sz, enum seg_rw rw,
	int watchcode, greg_t pc)
{
	register klwp_t *lwp = ttolwp(curthread);
	struct lwp_watch *pw = &lwp->lwp_watch[rw_to_index(rw)];

	/*
	 * Check to see if we are already performing this special
	 * watchpoint single-step.  We must not do pr_mappage() twice.
	 */

	/* special check for two read traps on the same instruction */
	if (rw == S_READ && pw->wpaddr != NULL &&
	    !(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize)) {
		ASSERT(lwp->lwp_watchtrap != 0);
		pw++;	/* use the extra S_READ struct */
	}

	if (pw->wpaddr != NULL) {
		ASSERT(lwp->lwp_watchtrap != 0);
		ASSERT(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize);
		if (pw->wpcode == 0) {
			pw->wpcode = watchcode;
			pw->wppc = pc;
		}
	} else {
		int mapped = pr_mappage(vaddr, sz, rw, 0);
		prstep(lwp, 1);
		lwp->lwp_watchtrap = 1;
		pw->wpaddr = vaddr;
		pw->wpsize = sz;
		pw->wpcode = watchcode;
		pw->wpmapped = mapped;
		pw->wppc = pc;
	}
}

/*
 * Undo the effects of do_watch_step().
 * Called from trap() after the single-step is finished.
 * Also called from issig_forreal() and stop() with a NULL
 * argument to avoid having these things set more than once.
 */
int
undo_watch_step(k_siginfo_t *sip)
{
	register klwp_t *lwp = ttolwp(curthread);
	int fault = 0;

	if (lwp->lwp_watchtrap) {
		struct lwp_watch *pw = lwp->lwp_watch;
		int i;

		for (i = 0; i < 4; i++, pw++) {
			if (pw->wpaddr == NULL)
				continue;
			if (pw->wpmapped)
				pr_unmappage(pw->wpaddr, pw->wpsize, S_rw[i],
				    0);
			if (pw->wpcode != 0) {
				if (sip != NULL) {
					sip->si_signo = SIGTRAP;
					sip->si_code = pw->wpcode;
					sip->si_addr = pw->wpaddr;
					sip->si_trapafter = 1;
					sip->si_pc = (caddr_t)pw->wppc;
				}
				fault = FLTWATCH;
				pw->wpcode = 0;
			}
			pw->wpaddr = NULL;
			pw->wpsize = 0;
			pw->wpmapped = 0;
		}
		lwp->lwp_watchtrap = 0;
	}

	return (fault);
}

/*
 * Handle a watchpoint that occurs while doing copyin()
 * or copyout() in a system call.
 * Return non-zero if the fault or signal is cleared
 * by a debugger while the lwp is stopped.
 */
static int
sys_watchpoint(caddr_t addr, int watchcode, int ta)
{
	extern greg_t getuserpc(void);	/* XXX header file */
	k_sigset_t smask;
	register proc_t *p = ttoproc(curthread);
	register klwp_t *lwp = ttolwp(curthread);
	register sigqueue_t *sqp;
	int rval;

	/* assert no locks are held */
	/* ASSERT(curthread->t_nlocks == 0); */

	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = watchcode;
	sqp->sq_info.si_addr = addr;
	sqp->sq_info.si_trapafter = ta;
	sqp->sq_info.si_pc = (caddr_t)getuserpc();

	mutex_enter(&p->p_lock);

	/* this will be tested and cleared by the caller */
	lwp->lwp_sysabort = 0;

	if (prismember(&p->p_fltmask, FLTWATCH)) {
		lwp->lwp_curflt = (uchar_t)FLTWATCH;
		lwp->lwp_siginfo = sqp->sq_info;
		stop(PR_FAULTED, FLTWATCH);
		if (lwp->lwp_curflt == 0) {
			mutex_exit(&p->p_lock);
			kmem_free(sqp, sizeof (sigqueue_t));
			return (1);
		}
		lwp->lwp_curflt = 0;
	}

	/*
	 * post the SIGTRAP signal.
	 * Block all other signals so we only stop showing SIGTRAP.
	 */
	if (signal_is_blocked(curthread, SIGTRAP) ||
	    sigismember(&p->p_ignore, SIGTRAP)) {
		/* SIGTRAP is blocked or ignored, forget the rest. */
		mutex_exit(&p->p_lock);
		kmem_free(sqp, sizeof (sigqueue_t));
		return (0);
	}
	sigdelq(p, curthread, SIGTRAP);
	sigaddqa(p, curthread, sqp);
	schedctl_finish_sigblock(curthread);
	smask = curthread->t_hold;
	sigfillset(&curthread->t_hold);
	sigdiffset(&curthread->t_hold, &cantmask);
	sigdelset(&curthread->t_hold, SIGTRAP);
	mutex_exit(&p->p_lock);

	rval = ((ISSIG_FAST(curthread, lwp, p, FORREAL))? 0 : 1);

	/* restore the original signal mask */
	mutex_enter(&p->p_lock);
	curthread->t_hold = smask;
	mutex_exit(&p->p_lock);

	return (rval);
}

/*
 * Wrappers for the copyin()/copyout() functions to deal
 * with watchpoints that fire while in system calls.
 */

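/*
 * Each wrapper splits the user buffer at page boundaries, checks whether the
 * page is watched, temporarily maps it in around the copy, and then runs the
 * common watchpoint logic:
 *
 *	if (watchcode &&
 *	    (!sys_watchpoint(vaddr, watchcode, ta) || lwp->lwp_sysabort)) {
 *		lwp->lwp_sysabort = 0;
 *		error = EFAULT;
 *	}
 *
 * That is, the copy fails with EFAULT unless a debugger cleared the fault or
 * signal while the lwp was stopped and did not request a system call abort.
 */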
779*0Sstevel@tonic-gate static int
watch_xcopyin(const void * uaddr,void * kaddr,size_t count)780*0Sstevel@tonic-gate watch_xcopyin(const void *uaddr, void *kaddr, size_t count)
781*0Sstevel@tonic-gate {
782*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
783*0Sstevel@tonic-gate 	caddr_t watch_uaddr = (caddr_t)uaddr;
784*0Sstevel@tonic-gate 	caddr_t watch_kaddr = (caddr_t)kaddr;
785*0Sstevel@tonic-gate 	int error = 0;
786*0Sstevel@tonic-gate 	label_t ljb;
787*0Sstevel@tonic-gate 	size_t part;
788*0Sstevel@tonic-gate 	int mapped;
789*0Sstevel@tonic-gate 
790*0Sstevel@tonic-gate 	while (count && error == 0) {
791*0Sstevel@tonic-gate 		int watchcode;
792*0Sstevel@tonic-gate 		caddr_t vaddr;
793*0Sstevel@tonic-gate 		size_t len;
794*0Sstevel@tonic-gate 		int ta;
795*0Sstevel@tonic-gate 
796*0Sstevel@tonic-gate 		if ((part = PAGESIZE -
797*0Sstevel@tonic-gate 		    (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
798*0Sstevel@tonic-gate 			part = count;
799*0Sstevel@tonic-gate 
800*0Sstevel@tonic-gate 		if (!pr_is_watchpage(watch_uaddr, S_READ))
801*0Sstevel@tonic-gate 			watchcode = 0;
802*0Sstevel@tonic-gate 		else {
803*0Sstevel@tonic-gate 			vaddr = watch_uaddr;
804*0Sstevel@tonic-gate 			watchcode = pr_is_watchpoint(&vaddr, &ta,
805*0Sstevel@tonic-gate 			    part, &len, S_READ);
806*0Sstevel@tonic-gate 			if (watchcode && ta == 0)
807*0Sstevel@tonic-gate 				part = vaddr - watch_uaddr;
808*0Sstevel@tonic-gate 		}
809*0Sstevel@tonic-gate 
810*0Sstevel@tonic-gate 		/*
811*0Sstevel@tonic-gate 		 * Copy the initial part, up to a watched address, if any.
812*0Sstevel@tonic-gate 		 */
813*0Sstevel@tonic-gate 		if (part != 0) {
814*0Sstevel@tonic-gate 			mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
815*0Sstevel@tonic-gate 			if (on_fault(&ljb))
816*0Sstevel@tonic-gate 				error = EFAULT;
817*0Sstevel@tonic-gate 			else
818*0Sstevel@tonic-gate 				copyin_noerr(watch_uaddr, watch_kaddr, part);
819*0Sstevel@tonic-gate 			no_fault();
820*0Sstevel@tonic-gate 			if (mapped)
821*0Sstevel@tonic-gate 				pr_unmappage(watch_uaddr, part, S_READ, 1);
822*0Sstevel@tonic-gate 			watch_uaddr += part;
823*0Sstevel@tonic-gate 			watch_kaddr += part;
824*0Sstevel@tonic-gate 			count -= part;
825*0Sstevel@tonic-gate 		}
826*0Sstevel@tonic-gate 		/*
827*0Sstevel@tonic-gate 		 * If trapafter was specified, then copy through the
828*0Sstevel@tonic-gate 		 * watched area before taking the watchpoint trap.
829*0Sstevel@tonic-gate 		 */
830*0Sstevel@tonic-gate 		while (count && watchcode && ta && len > part && error == 0) {
831*0Sstevel@tonic-gate 			len -= part;
832*0Sstevel@tonic-gate 			if ((part = PAGESIZE) > count)
833*0Sstevel@tonic-gate 				part = count;
834*0Sstevel@tonic-gate 			if (part > len)
835*0Sstevel@tonic-gate 				part = len;
836*0Sstevel@tonic-gate 			mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
837*0Sstevel@tonic-gate 			if (on_fault(&ljb))
838*0Sstevel@tonic-gate 				error = EFAULT;
839*0Sstevel@tonic-gate 			else
840*0Sstevel@tonic-gate 				copyin_noerr(watch_uaddr, watch_kaddr, part);
841*0Sstevel@tonic-gate 			no_fault();
842*0Sstevel@tonic-gate 			if (mapped)
843*0Sstevel@tonic-gate 				pr_unmappage(watch_uaddr, part, S_READ, 1);
844*0Sstevel@tonic-gate 			watch_uaddr += part;
845*0Sstevel@tonic-gate 			watch_kaddr += part;
846*0Sstevel@tonic-gate 			count -= part;
847*0Sstevel@tonic-gate 		}
848*0Sstevel@tonic-gate 
849*0Sstevel@tonic-gate error:
850*0Sstevel@tonic-gate 		/* if we hit a watched address, do the watchpoint logic */
851*0Sstevel@tonic-gate 		if (watchcode &&
852*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
853*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
854*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
855*0Sstevel@tonic-gate 			error = EFAULT;
856*0Sstevel@tonic-gate 			break;
857*0Sstevel@tonic-gate 		}
858*0Sstevel@tonic-gate 	}
859*0Sstevel@tonic-gate 
860*0Sstevel@tonic-gate 	return (error);
861*0Sstevel@tonic-gate }
862*0Sstevel@tonic-gate 
863*0Sstevel@tonic-gate static int
watch_copyin(const void * kaddr,void * uaddr,size_t count)864*0Sstevel@tonic-gate watch_copyin(const void *kaddr, void *uaddr, size_t count)
865*0Sstevel@tonic-gate {
866*0Sstevel@tonic-gate 	return (watch_xcopyin(kaddr, uaddr, count) ? -1 : 0);
867*0Sstevel@tonic-gate }
868*0Sstevel@tonic-gate 
869*0Sstevel@tonic-gate 
870*0Sstevel@tonic-gate static int
watch_xcopyout(const void * kaddr,void * uaddr,size_t count)871*0Sstevel@tonic-gate watch_xcopyout(const void *kaddr, void *uaddr, size_t count)
872*0Sstevel@tonic-gate {
873*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
874*0Sstevel@tonic-gate 	caddr_t watch_uaddr = (caddr_t)uaddr;
875*0Sstevel@tonic-gate 	caddr_t watch_kaddr = (caddr_t)kaddr;
876*0Sstevel@tonic-gate 	int error = 0;
877*0Sstevel@tonic-gate 	label_t ljb;
878*0Sstevel@tonic-gate 
879*0Sstevel@tonic-gate 	while (count && error == 0) {
880*0Sstevel@tonic-gate 		int watchcode;
881*0Sstevel@tonic-gate 		caddr_t vaddr;
882*0Sstevel@tonic-gate 		size_t part;
883*0Sstevel@tonic-gate 		size_t len;
884*0Sstevel@tonic-gate 		int ta;
885*0Sstevel@tonic-gate 		int mapped;
886*0Sstevel@tonic-gate 
887*0Sstevel@tonic-gate 		if ((part = PAGESIZE -
888*0Sstevel@tonic-gate 		    (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
889*0Sstevel@tonic-gate 			part = count;
890*0Sstevel@tonic-gate 
891*0Sstevel@tonic-gate 		if (!pr_is_watchpage(watch_uaddr, S_WRITE))
892*0Sstevel@tonic-gate 			watchcode = 0;
893*0Sstevel@tonic-gate 		else {
894*0Sstevel@tonic-gate 			vaddr = watch_uaddr;
895*0Sstevel@tonic-gate 			watchcode = pr_is_watchpoint(&vaddr, &ta,
896*0Sstevel@tonic-gate 			    part, &len, S_WRITE);
897*0Sstevel@tonic-gate 			if (watchcode) {
898*0Sstevel@tonic-gate 				if (ta == 0)
899*0Sstevel@tonic-gate 					part = vaddr - watch_uaddr;
900*0Sstevel@tonic-gate 				else {
901*0Sstevel@tonic-gate 					len += vaddr - watch_uaddr;
902*0Sstevel@tonic-gate 					if (part > len)
903*0Sstevel@tonic-gate 						part = len;
904*0Sstevel@tonic-gate 				}
905*0Sstevel@tonic-gate 			}
906*0Sstevel@tonic-gate 		}
907*0Sstevel@tonic-gate 
908*0Sstevel@tonic-gate 		/*
909*0Sstevel@tonic-gate 		 * Copy the initial part, up to a watched address, if any.
910*0Sstevel@tonic-gate 		 */
911*0Sstevel@tonic-gate 		if (part != 0) {
912*0Sstevel@tonic-gate 			mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
913*0Sstevel@tonic-gate 			if (on_fault(&ljb))
914*0Sstevel@tonic-gate 				error = EFAULT;
915*0Sstevel@tonic-gate 			else
916*0Sstevel@tonic-gate 				copyout_noerr(watch_kaddr, watch_uaddr, part);
917*0Sstevel@tonic-gate 			no_fault();
918*0Sstevel@tonic-gate 			if (mapped)
919*0Sstevel@tonic-gate 				pr_unmappage(watch_uaddr, part, S_WRITE, 1);
920*0Sstevel@tonic-gate 			watch_uaddr += part;
921*0Sstevel@tonic-gate 			watch_kaddr += part;
922*0Sstevel@tonic-gate 			count -= part;
923*0Sstevel@tonic-gate 		}
924*0Sstevel@tonic-gate 
925*0Sstevel@tonic-gate 		/*
926*0Sstevel@tonic-gate 		 * If trapafter was specified, then copy through the
927*0Sstevel@tonic-gate 		 * watched area before taking the watchpoint trap.
928*0Sstevel@tonic-gate 		 */
929*0Sstevel@tonic-gate 		while (count && watchcode && ta && len > part && error == 0) {
930*0Sstevel@tonic-gate 			len -= part;
931*0Sstevel@tonic-gate 			if ((part = PAGESIZE) > count)
932*0Sstevel@tonic-gate 				part = count;
933*0Sstevel@tonic-gate 			if (part > len)
934*0Sstevel@tonic-gate 				part = len;
935*0Sstevel@tonic-gate 			mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
936*0Sstevel@tonic-gate 			if (on_fault(&ljb))
937*0Sstevel@tonic-gate 				error = EFAULT;
938*0Sstevel@tonic-gate 			else
939*0Sstevel@tonic-gate 				copyout_noerr(watch_kaddr, watch_uaddr, part);
940*0Sstevel@tonic-gate 			no_fault();
941*0Sstevel@tonic-gate 			if (mapped)
942*0Sstevel@tonic-gate 				pr_unmappage(watch_uaddr, part, S_WRITE, 1);
943*0Sstevel@tonic-gate 			watch_uaddr += part;
944*0Sstevel@tonic-gate 			watch_kaddr += part;
945*0Sstevel@tonic-gate 			count -= part;
946*0Sstevel@tonic-gate 		}
947*0Sstevel@tonic-gate 
948*0Sstevel@tonic-gate 		/* if we hit a watched address, do the watchpoint logic */
949*0Sstevel@tonic-gate 		if (watchcode &&
950*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
951*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
952*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
953*0Sstevel@tonic-gate 			error = EFAULT;
954*0Sstevel@tonic-gate 			break;
955*0Sstevel@tonic-gate 		}
956*0Sstevel@tonic-gate 	}
957*0Sstevel@tonic-gate 
958*0Sstevel@tonic-gate 	return (error);
959*0Sstevel@tonic-gate }
960*0Sstevel@tonic-gate 
961*0Sstevel@tonic-gate static int
watch_copyout(const void * kaddr,void * uaddr,size_t count)962*0Sstevel@tonic-gate watch_copyout(const void *kaddr, void *uaddr, size_t count)
963*0Sstevel@tonic-gate {
964*0Sstevel@tonic-gate 	return (watch_xcopyout(kaddr, uaddr, count) ? -1 : 0);
965*0Sstevel@tonic-gate }
966*0Sstevel@tonic-gate 
967*0Sstevel@tonic-gate static int
watch_copyinstr(const char * uaddr,char * kaddr,size_t maxlength,size_t * lencopied)968*0Sstevel@tonic-gate watch_copyinstr(
969*0Sstevel@tonic-gate 	const char *uaddr,
970*0Sstevel@tonic-gate 	char *kaddr,
971*0Sstevel@tonic-gate 	size_t maxlength,
972*0Sstevel@tonic-gate 	size_t *lencopied)
973*0Sstevel@tonic-gate {
974*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
975*0Sstevel@tonic-gate 	size_t resid;
976*0Sstevel@tonic-gate 	int error = 0;
977*0Sstevel@tonic-gate 	label_t ljb;
978*0Sstevel@tonic-gate 
979*0Sstevel@tonic-gate 	if ((resid = maxlength) == 0)
980*0Sstevel@tonic-gate 		return (ENAMETOOLONG);
981*0Sstevel@tonic-gate 
982*0Sstevel@tonic-gate 	while (resid && error == 0) {
983*0Sstevel@tonic-gate 		int watchcode;
984*0Sstevel@tonic-gate 		caddr_t vaddr;
985*0Sstevel@tonic-gate 		size_t part;
986*0Sstevel@tonic-gate 		size_t len;
987*0Sstevel@tonic-gate 		size_t size;
988*0Sstevel@tonic-gate 		int ta;
989*0Sstevel@tonic-gate 		int mapped;
990*0Sstevel@tonic-gate 
991*0Sstevel@tonic-gate 		if ((part = PAGESIZE -
992*0Sstevel@tonic-gate 		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
993*0Sstevel@tonic-gate 			part = resid;
994*0Sstevel@tonic-gate 
995*0Sstevel@tonic-gate 		if (!pr_is_watchpage((caddr_t)uaddr, S_READ))
996*0Sstevel@tonic-gate 			watchcode = 0;
997*0Sstevel@tonic-gate 		else {
998*0Sstevel@tonic-gate 			vaddr = (caddr_t)uaddr;
999*0Sstevel@tonic-gate 			watchcode = pr_is_watchpoint(&vaddr, &ta,
1000*0Sstevel@tonic-gate 			    part, &len, S_READ);
1001*0Sstevel@tonic-gate 			if (watchcode) {
1002*0Sstevel@tonic-gate 				if (ta == 0)
1003*0Sstevel@tonic-gate 					part = vaddr - uaddr;
1004*0Sstevel@tonic-gate 				else {
1005*0Sstevel@tonic-gate 					len += vaddr - uaddr;
1006*0Sstevel@tonic-gate 					if (part > len)
1007*0Sstevel@tonic-gate 						part = len;
1008*0Sstevel@tonic-gate 				}
1009*0Sstevel@tonic-gate 			}
1010*0Sstevel@tonic-gate 		}
1011*0Sstevel@tonic-gate 
1012*0Sstevel@tonic-gate 		/*
1013*0Sstevel@tonic-gate 		 * Copy the initial part, up to a watched address, if any.
1014*0Sstevel@tonic-gate 		 */
1015*0Sstevel@tonic-gate 		if (part != 0) {
1016*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
1017*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1018*0Sstevel@tonic-gate 				error = EFAULT;
1019*0Sstevel@tonic-gate 			else
1020*0Sstevel@tonic-gate 				error = copyinstr_noerr(uaddr, kaddr, part,
1021*0Sstevel@tonic-gate 				    &size);
1022*0Sstevel@tonic-gate 			no_fault();
1023*0Sstevel@tonic-gate 			if (mapped)
1024*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
1025*0Sstevel@tonic-gate 			uaddr += size;
1026*0Sstevel@tonic-gate 			kaddr += size;
1027*0Sstevel@tonic-gate 			resid -= size;
1028*0Sstevel@tonic-gate 			if (error == ENAMETOOLONG && resid > 0)
1029*0Sstevel@tonic-gate 			    error = 0;
1030*0Sstevel@tonic-gate 			if (error != 0 || (watchcode &&
1031*0Sstevel@tonic-gate 			    (uaddr < vaddr || kaddr[-1] == '\0')))
1032*0Sstevel@tonic-gate 				break;	/* didn't reach the watched area */
1033*0Sstevel@tonic-gate 		}
1034*0Sstevel@tonic-gate 
1035*0Sstevel@tonic-gate 		/*
1036*0Sstevel@tonic-gate 		 * If trapafter was specified, then copy through the
1037*0Sstevel@tonic-gate 		 * watched area before taking the watchpoint trap.
1038*0Sstevel@tonic-gate 		 */
1039*0Sstevel@tonic-gate 		while (resid && watchcode && ta && len > part && error == 0 &&
1040*0Sstevel@tonic-gate 		    size == part && kaddr[-1] != '\0') {
1041*0Sstevel@tonic-gate 			len -= part;
1042*0Sstevel@tonic-gate 			if ((part = PAGESIZE) > resid)
1043*0Sstevel@tonic-gate 				part = resid;
1044*0Sstevel@tonic-gate 			if (part > len)
1045*0Sstevel@tonic-gate 				part = len;
1046*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
1047*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1048*0Sstevel@tonic-gate 				error = EFAULT;
1049*0Sstevel@tonic-gate 			else
1050*0Sstevel@tonic-gate 				error = copyinstr_noerr(uaddr, kaddr, part,
1051*0Sstevel@tonic-gate 				    &size);
1052*0Sstevel@tonic-gate 			no_fault();
1053*0Sstevel@tonic-gate 			if (mapped)
1054*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
1055*0Sstevel@tonic-gate 			uaddr += size;
1056*0Sstevel@tonic-gate 			kaddr += size;
1057*0Sstevel@tonic-gate 			resid -= size;
1058*0Sstevel@tonic-gate 			if (error == ENAMETOOLONG && resid > 0)
1059*0Sstevel@tonic-gate 			    error = 0;
1060*0Sstevel@tonic-gate 		}
1061*0Sstevel@tonic-gate 
1062*0Sstevel@tonic-gate 		/* if we hit a watched address, do the watchpoint logic */
1063*0Sstevel@tonic-gate 		if (watchcode &&
1064*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1065*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1066*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1067*0Sstevel@tonic-gate 			error = EFAULT;
1068*0Sstevel@tonic-gate 			break;
1069*0Sstevel@tonic-gate 		}
1070*0Sstevel@tonic-gate 
1071*0Sstevel@tonic-gate 		if (error == 0 && part != 0 &&
1072*0Sstevel@tonic-gate 		    (size < part || kaddr[-1] == '\0'))
1073*0Sstevel@tonic-gate 			break;
1074*0Sstevel@tonic-gate 	}
1075*0Sstevel@tonic-gate 
1076*0Sstevel@tonic-gate 	if (error != EFAULT && lencopied)
1077*0Sstevel@tonic-gate 		*lencopied = maxlength - resid;
1078*0Sstevel@tonic-gate 	return (error);
1079*0Sstevel@tonic-gate }
1080*0Sstevel@tonic-gate 
1081*0Sstevel@tonic-gate static int
watch_copyoutstr(const char * kaddr,char * uaddr,size_t maxlength,size_t * lencopied)1082*0Sstevel@tonic-gate watch_copyoutstr(
1083*0Sstevel@tonic-gate 	const char *kaddr,
1084*0Sstevel@tonic-gate 	char *uaddr,
1085*0Sstevel@tonic-gate 	size_t maxlength,
1086*0Sstevel@tonic-gate 	size_t *lencopied)
1087*0Sstevel@tonic-gate {
1088*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
1089*0Sstevel@tonic-gate 	size_t resid;
1090*0Sstevel@tonic-gate 	int error = 0;
1091*0Sstevel@tonic-gate 	label_t ljb;
1092*0Sstevel@tonic-gate 
1093*0Sstevel@tonic-gate 	if ((resid = maxlength) == 0)
1094*0Sstevel@tonic-gate 		return (ENAMETOOLONG);
1095*0Sstevel@tonic-gate 
1096*0Sstevel@tonic-gate 	while (resid && error == 0) {
1097*0Sstevel@tonic-gate 		int watchcode;
1098*0Sstevel@tonic-gate 		caddr_t vaddr;
1099*0Sstevel@tonic-gate 		size_t part;
1100*0Sstevel@tonic-gate 		size_t len;
1101*0Sstevel@tonic-gate 		size_t size;
1102*0Sstevel@tonic-gate 		int ta;
1103*0Sstevel@tonic-gate 		int mapped;
1104*0Sstevel@tonic-gate 
1105*0Sstevel@tonic-gate 		if ((part = PAGESIZE -
1106*0Sstevel@tonic-gate 		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
1107*0Sstevel@tonic-gate 			part = resid;
1108*0Sstevel@tonic-gate 
1109*0Sstevel@tonic-gate 		if (!pr_is_watchpage(uaddr, S_WRITE)) {
1110*0Sstevel@tonic-gate 			watchcode = 0;
1111*0Sstevel@tonic-gate 		} else {
1112*0Sstevel@tonic-gate 			vaddr = uaddr;
1113*0Sstevel@tonic-gate 			watchcode = pr_is_watchpoint(&vaddr, &ta,
1114*0Sstevel@tonic-gate 			    part, &len, S_WRITE);
1115*0Sstevel@tonic-gate 			if (watchcode && ta == 0)
1116*0Sstevel@tonic-gate 				part = vaddr - uaddr;
1117*0Sstevel@tonic-gate 		}
1118*0Sstevel@tonic-gate 
1119*0Sstevel@tonic-gate 		/*
1120*0Sstevel@tonic-gate 		 * Copy the initial part, up to a watched address, if any.
1121*0Sstevel@tonic-gate 		 */
1122*0Sstevel@tonic-gate 		if (part != 0) {
1123*0Sstevel@tonic-gate 			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
1124*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1125*0Sstevel@tonic-gate 				error = EFAULT;
1126*0Sstevel@tonic-gate 			else
1127*0Sstevel@tonic-gate 				error = copyoutstr_noerr(kaddr, uaddr, part,
1128*0Sstevel@tonic-gate 				    &size);
1129*0Sstevel@tonic-gate 			no_fault();
1130*0Sstevel@tonic-gate 			if (mapped)
1131*0Sstevel@tonic-gate 				pr_unmappage(uaddr, part, S_WRITE, 1);
1132*0Sstevel@tonic-gate 			uaddr += size;
1133*0Sstevel@tonic-gate 			kaddr += size;
1134*0Sstevel@tonic-gate 			resid -= size;
1135*0Sstevel@tonic-gate 			if (error == ENAMETOOLONG && resid > 0)
1136*0Sstevel@tonic-gate 			    error = 0;
1137*0Sstevel@tonic-gate 			if (error != 0 || (watchcode &&
1138*0Sstevel@tonic-gate 			    (uaddr < vaddr || kaddr[-1] == '\0')))
1139*0Sstevel@tonic-gate 				break;	/* didn't reach the watched area */
1140*0Sstevel@tonic-gate 		}
1141*0Sstevel@tonic-gate 
1142*0Sstevel@tonic-gate 		/*
1143*0Sstevel@tonic-gate 		 * If trapafter was specified, then copy through the
1144*0Sstevel@tonic-gate 		 * watched area before taking the watchpoint trap.
1145*0Sstevel@tonic-gate 		 */
1146*0Sstevel@tonic-gate 		while (resid && watchcode && ta && len > part && error == 0 &&
1147*0Sstevel@tonic-gate 		    size == part && kaddr[-1] != '\0') {
1148*0Sstevel@tonic-gate 			len -= part;
1149*0Sstevel@tonic-gate 			if ((part = PAGESIZE) > resid)
1150*0Sstevel@tonic-gate 				part = resid;
1151*0Sstevel@tonic-gate 			if (part > len)
1152*0Sstevel@tonic-gate 				part = len;
1153*0Sstevel@tonic-gate 			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
1154*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1155*0Sstevel@tonic-gate 				error = EFAULT;
1156*0Sstevel@tonic-gate 			else
1157*0Sstevel@tonic-gate 				error = copyoutstr_noerr(kaddr, uaddr, part,
1158*0Sstevel@tonic-gate 				    &size);
1159*0Sstevel@tonic-gate 			no_fault();
1160*0Sstevel@tonic-gate 			if (mapped)
1161*0Sstevel@tonic-gate 				pr_unmappage(uaddr, part, S_WRITE, 1);
1162*0Sstevel@tonic-gate 			uaddr += size;
1163*0Sstevel@tonic-gate 			kaddr += size;
1164*0Sstevel@tonic-gate 			resid -= size;
1165*0Sstevel@tonic-gate 			if (error == ENAMETOOLONG && resid > 0)
1166*0Sstevel@tonic-gate 			    error = 0;
1167*0Sstevel@tonic-gate 		}
1168*0Sstevel@tonic-gate 
1169*0Sstevel@tonic-gate 		/* if we hit a watched address, do the watchpoint logic */
1170*0Sstevel@tonic-gate 		if (watchcode &&
1171*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1172*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1173*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1174*0Sstevel@tonic-gate 			error = EFAULT;
1175*0Sstevel@tonic-gate 			break;
1176*0Sstevel@tonic-gate 		}
1177*0Sstevel@tonic-gate 
1178*0Sstevel@tonic-gate 		if (error == 0 && part != 0 &&
1179*0Sstevel@tonic-gate 		    (size < part || kaddr[-1] == '\0'))
1180*0Sstevel@tonic-gate 			break;
1181*0Sstevel@tonic-gate 	}
1182*0Sstevel@tonic-gate 
1183*0Sstevel@tonic-gate 	if (error != EFAULT && lencopied)
1184*0Sstevel@tonic-gate 		*lencopied = maxlength - resid;
1185*0Sstevel@tonic-gate 	return (error);
1186*0Sstevel@tonic-gate }
1187*0Sstevel@tonic-gate 
1188*0Sstevel@tonic-gate typedef int (*fuword_func)(const void *, void *);
1189*0Sstevel@tonic-gate 
1190*0Sstevel@tonic-gate /*
1191*0Sstevel@tonic-gate  * Generic form of watch_fuword8(), watch_fuword16(), etc.
1192*0Sstevel@tonic-gate  */
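/*
 * If the address is watched and the watchpoint is not trap-after, the access
 * is skipped and sys_watchpoint() reports the trap first; when the access is
 * allowed to proceed (and the system call was not aborted), the loop retries,
 * re-evaluating the watchpoint in case the tracer has since removed it or
 * made it trap-after.
 */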
1193*0Sstevel@tonic-gate static int
1194*0Sstevel@tonic-gate watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
1195*0Sstevel@tonic-gate {
1196*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
1197*0Sstevel@tonic-gate 	int watchcode;
1198*0Sstevel@tonic-gate 	caddr_t vaddr;
1199*0Sstevel@tonic-gate 	int mapped;
1200*0Sstevel@tonic-gate 	int rv = 0;
1201*0Sstevel@tonic-gate 	int ta;
1202*0Sstevel@tonic-gate 	label_t ljb;
1203*0Sstevel@tonic-gate 
1204*0Sstevel@tonic-gate 	for (;;) {
1205*0Sstevel@tonic-gate 
1206*0Sstevel@tonic-gate 		vaddr = (caddr_t)addr;
1207*0Sstevel@tonic-gate 		watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
1208*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0) {
1209*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
1210*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1211*0Sstevel@tonic-gate 				rv = -1;
1212*0Sstevel@tonic-gate 			else
1213*0Sstevel@tonic-gate 				(*func)(addr, dst);
1214*0Sstevel@tonic-gate 			no_fault();
1215*0Sstevel@tonic-gate 			if (mapped)
1216*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)addr, size, S_READ, 1);
1217*0Sstevel@tonic-gate 		}
1218*0Sstevel@tonic-gate 		if (watchcode &&
1219*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1220*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1221*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1222*0Sstevel@tonic-gate 			rv = -1;
1223*0Sstevel@tonic-gate 			break;
1224*0Sstevel@tonic-gate 		}
1225*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0)
1226*0Sstevel@tonic-gate 			break;
1227*0Sstevel@tonic-gate 	}
1228*0Sstevel@tonic-gate 
1229*0Sstevel@tonic-gate 	return (rv);
1230*0Sstevel@tonic-gate }
1231*0Sstevel@tonic-gate 
1232*0Sstevel@tonic-gate static int
1233*0Sstevel@tonic-gate watch_fuword8(const void *addr, uint8_t *dst)
1234*0Sstevel@tonic-gate {
1235*0Sstevel@tonic-gate 	return (watch_fuword(addr, dst, (fuword_func)fuword8_noerr,
1236*0Sstevel@tonic-gate 	    sizeof (*dst)));
1237*0Sstevel@tonic-gate }
1238*0Sstevel@tonic-gate 
1239*0Sstevel@tonic-gate static int
1240*0Sstevel@tonic-gate watch_fuword16(const void *addr, uint16_t *dst)
1241*0Sstevel@tonic-gate {
1242*0Sstevel@tonic-gate 	return (watch_fuword(addr, dst, (fuword_func)fuword16_noerr,
1243*0Sstevel@tonic-gate 	    sizeof (*dst)));
1244*0Sstevel@tonic-gate }
1245*0Sstevel@tonic-gate 
1246*0Sstevel@tonic-gate static int
1247*0Sstevel@tonic-gate watch_fuword32(const void *addr, uint32_t *dst)
1248*0Sstevel@tonic-gate {
1249*0Sstevel@tonic-gate 	return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
1250*0Sstevel@tonic-gate 	    sizeof (*dst)));
1251*0Sstevel@tonic-gate }
1252*0Sstevel@tonic-gate 
1253*0Sstevel@tonic-gate #ifdef _LP64
1254*0Sstevel@tonic-gate static int
1255*0Sstevel@tonic-gate watch_fuword64(const void *addr, uint64_t *dst)
1256*0Sstevel@tonic-gate {
1257*0Sstevel@tonic-gate 	return (watch_fuword(addr, dst, (fuword_func)fuword64_noerr,
1258*0Sstevel@tonic-gate 	    sizeof (*dst)));
1259*0Sstevel@tonic-gate }
1260*0Sstevel@tonic-gate #endif
1261*0Sstevel@tonic-gate 
1262*0Sstevel@tonic-gate 
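/*
 * watch_suword8() and the 16-, 32- and 64-bit variants below follow the same
 * pattern as watch_fuword(), but for stores (S_WRITE) instead of loads.
 */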
1263*0Sstevel@tonic-gate static int
1264*0Sstevel@tonic-gate watch_suword8(void *addr, uint8_t value)
1265*0Sstevel@tonic-gate {
1266*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
1267*0Sstevel@tonic-gate 	int watchcode;
1268*0Sstevel@tonic-gate 	caddr_t vaddr;
1269*0Sstevel@tonic-gate 	int mapped;
1270*0Sstevel@tonic-gate 	int rv = 0;
1271*0Sstevel@tonic-gate 	int ta;
1272*0Sstevel@tonic-gate 	label_t ljb;
1273*0Sstevel@tonic-gate 
1274*0Sstevel@tonic-gate 	for (;;) {
1275*0Sstevel@tonic-gate 
1276*0Sstevel@tonic-gate 		vaddr = (caddr_t)addr;
1277*0Sstevel@tonic-gate 		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1278*0Sstevel@tonic-gate 		    S_WRITE);
1279*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0) {
1280*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)addr, sizeof (value),
1281*0Sstevel@tonic-gate 			    S_WRITE, 1);
1282*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1283*0Sstevel@tonic-gate 				rv = -1;
1284*0Sstevel@tonic-gate 			else
1285*0Sstevel@tonic-gate 				suword8_noerr(addr, value);
1286*0Sstevel@tonic-gate 			no_fault();
1287*0Sstevel@tonic-gate 			if (mapped)
1288*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)addr, sizeof (value),
1289*0Sstevel@tonic-gate 				    S_WRITE, 1);
1290*0Sstevel@tonic-gate 		}
1291*0Sstevel@tonic-gate 		if (watchcode &&
1292*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1293*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1294*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1295*0Sstevel@tonic-gate 			rv = -1;
1296*0Sstevel@tonic-gate 			break;
1297*0Sstevel@tonic-gate 		}
1298*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0)
1299*0Sstevel@tonic-gate 			break;
1300*0Sstevel@tonic-gate 	}
1301*0Sstevel@tonic-gate 
1302*0Sstevel@tonic-gate 	return (rv);
1303*0Sstevel@tonic-gate }
1304*0Sstevel@tonic-gate 
1305*0Sstevel@tonic-gate static int
1306*0Sstevel@tonic-gate watch_suword16(void *addr, uint16_t value)
1307*0Sstevel@tonic-gate {
1308*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
1309*0Sstevel@tonic-gate 	int watchcode;
1310*0Sstevel@tonic-gate 	caddr_t vaddr;
1311*0Sstevel@tonic-gate 	int mapped;
1312*0Sstevel@tonic-gate 	int rv = 0;
1313*0Sstevel@tonic-gate 	int ta;
1314*0Sstevel@tonic-gate 	label_t ljb;
1315*0Sstevel@tonic-gate 
1316*0Sstevel@tonic-gate 	for (;;) {
1317*0Sstevel@tonic-gate 
1318*0Sstevel@tonic-gate 		vaddr = (caddr_t)addr;
1319*0Sstevel@tonic-gate 		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1320*0Sstevel@tonic-gate 		    S_WRITE);
1321*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0) {
1322*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)addr, sizeof (value),
1323*0Sstevel@tonic-gate 			    S_WRITE, 1);
1324*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1325*0Sstevel@tonic-gate 				rv = -1;
1326*0Sstevel@tonic-gate 			else
1327*0Sstevel@tonic-gate 				suword16_noerr(addr, value);
1328*0Sstevel@tonic-gate 			no_fault();
1329*0Sstevel@tonic-gate 			if (mapped)
1330*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)addr, sizeof (value),
1331*0Sstevel@tonic-gate 				    S_WRITE, 1);
1332*0Sstevel@tonic-gate 		}
1333*0Sstevel@tonic-gate 		if (watchcode &&
1334*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1335*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1336*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1337*0Sstevel@tonic-gate 			rv = -1;
1338*0Sstevel@tonic-gate 			break;
1339*0Sstevel@tonic-gate 		}
1340*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0)
1341*0Sstevel@tonic-gate 			break;
1342*0Sstevel@tonic-gate 	}
1343*0Sstevel@tonic-gate 
1344*0Sstevel@tonic-gate 	return (rv);
1345*0Sstevel@tonic-gate }
1346*0Sstevel@tonic-gate 
1347*0Sstevel@tonic-gate static int
1348*0Sstevel@tonic-gate watch_suword32(void *addr, uint32_t value)
1349*0Sstevel@tonic-gate {
1350*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
1351*0Sstevel@tonic-gate 	int watchcode;
1352*0Sstevel@tonic-gate 	caddr_t vaddr;
1353*0Sstevel@tonic-gate 	int mapped;
1354*0Sstevel@tonic-gate 	int rv = 0;
1355*0Sstevel@tonic-gate 	int ta;
1356*0Sstevel@tonic-gate 	label_t ljb;
1357*0Sstevel@tonic-gate 
1358*0Sstevel@tonic-gate 	for (;;) {
1359*0Sstevel@tonic-gate 
1360*0Sstevel@tonic-gate 		vaddr = (caddr_t)addr;
1361*0Sstevel@tonic-gate 		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1362*0Sstevel@tonic-gate 		    S_WRITE);
1363*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0) {
1364*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)addr, sizeof (value),
1365*0Sstevel@tonic-gate 			    S_WRITE, 1);
1366*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1367*0Sstevel@tonic-gate 				rv = -1;
1368*0Sstevel@tonic-gate 			else
1369*0Sstevel@tonic-gate 				suword32_noerr(addr, value);
1370*0Sstevel@tonic-gate 			no_fault();
1371*0Sstevel@tonic-gate 			if (mapped)
1372*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)addr, sizeof (value),
1373*0Sstevel@tonic-gate 				    S_WRITE, 1);
1374*0Sstevel@tonic-gate 		}
1375*0Sstevel@tonic-gate 		if (watchcode &&
1376*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1377*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1378*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1379*0Sstevel@tonic-gate 			rv = -1;
1380*0Sstevel@tonic-gate 			break;
1381*0Sstevel@tonic-gate 		}
1382*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0)
1383*0Sstevel@tonic-gate 			break;
1384*0Sstevel@tonic-gate 	}
1385*0Sstevel@tonic-gate 
1386*0Sstevel@tonic-gate 	return (rv);
1387*0Sstevel@tonic-gate }
1388*0Sstevel@tonic-gate 
1389*0Sstevel@tonic-gate #ifdef _LP64
1390*0Sstevel@tonic-gate static int
1391*0Sstevel@tonic-gate watch_suword64(void *addr, uint64_t value)
1392*0Sstevel@tonic-gate {
1393*0Sstevel@tonic-gate 	klwp_t *lwp = ttolwp(curthread);
1394*0Sstevel@tonic-gate 	int watchcode;
1395*0Sstevel@tonic-gate 	caddr_t vaddr;
1396*0Sstevel@tonic-gate 	int mapped;
1397*0Sstevel@tonic-gate 	int rv = 0;
1398*0Sstevel@tonic-gate 	int ta;
1399*0Sstevel@tonic-gate 	label_t ljb;
1400*0Sstevel@tonic-gate 
1401*0Sstevel@tonic-gate 	for (;;) {
1402*0Sstevel@tonic-gate 
1403*0Sstevel@tonic-gate 		vaddr = (caddr_t)addr;
1404*0Sstevel@tonic-gate 		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1405*0Sstevel@tonic-gate 		    S_WRITE);
1406*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0) {
1407*0Sstevel@tonic-gate 			mapped = pr_mappage((caddr_t)addr, sizeof (value),
1408*0Sstevel@tonic-gate 			    S_WRITE, 1);
1409*0Sstevel@tonic-gate 			if (on_fault(&ljb))
1410*0Sstevel@tonic-gate 				rv = -1;
1411*0Sstevel@tonic-gate 			else
1412*0Sstevel@tonic-gate 				suword64_noerr(addr, value);
1413*0Sstevel@tonic-gate 			no_fault();
1414*0Sstevel@tonic-gate 			if (mapped)
1415*0Sstevel@tonic-gate 				pr_unmappage((caddr_t)addr, sizeof (value),
1416*0Sstevel@tonic-gate 				    S_WRITE, 1);
1417*0Sstevel@tonic-gate 		}
1418*0Sstevel@tonic-gate 		if (watchcode &&
1419*0Sstevel@tonic-gate 		    (!sys_watchpoint(vaddr, watchcode, ta) ||
1420*0Sstevel@tonic-gate 		    lwp->lwp_sysabort)) {
1421*0Sstevel@tonic-gate 			lwp->lwp_sysabort = 0;
1422*0Sstevel@tonic-gate 			rv = -1;
1423*0Sstevel@tonic-gate 			break;
1424*0Sstevel@tonic-gate 		}
1425*0Sstevel@tonic-gate 		if (watchcode == 0 || ta != 0)
1426*0Sstevel@tonic-gate 			break;
1427*0Sstevel@tonic-gate 	}
1428*0Sstevel@tonic-gate 
1429*0Sstevel@tonic-gate 	return (rv);
1430*0Sstevel@tonic-gate }
1431*0Sstevel@tonic-gate #endif /* _LP64 */
1432*0Sstevel@tonic-gate 
1433*0Sstevel@tonic-gate /*
1434*0Sstevel@tonic-gate  * Check whether the given address range touches any watched pages.
1435*0Sstevel@tonic-gate  * Return 1 if it does, otherwise 0.
1436*0Sstevel@tonic-gate  */
1437*0Sstevel@tonic-gate static int
1438*0Sstevel@tonic-gate pr_is_watched(caddr_t base, size_t len, int rw)
1439*0Sstevel@tonic-gate {
1440*0Sstevel@tonic-gate 	caddr_t saddr = (caddr_t)((uintptr_t)base & (uintptr_t)PAGEMASK);
1441*0Sstevel@tonic-gate 	caddr_t eaddr = base + len;
1442*0Sstevel@tonic-gate 	caddr_t paddr;
1443*0Sstevel@tonic-gate 
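	/*
	 * Watched status is tracked per page, so round the start down to a
	 * page boundary and walk the range one page at a time.
	 */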
1444*0Sstevel@tonic-gate 	for (paddr = saddr; paddr < eaddr; paddr += PAGESIZE) {
1445*0Sstevel@tonic-gate 		if (pr_is_watchpage(paddr, rw))
1446*0Sstevel@tonic-gate 			return (1);
1447*0Sstevel@tonic-gate 	}
1448*0Sstevel@tonic-gate 
1449*0Sstevel@tonic-gate 	return (0);
1450*0Sstevel@tonic-gate }
1451*0Sstevel@tonic-gate 
1452*0Sstevel@tonic-gate /*
1453*0Sstevel@tonic-gate  * Wrapper for the physio() function.
1454*0Sstevel@tonic-gate  * Splits a uio operation with multiple iovecs into single-iovec uio
1455*0Sstevel@tonic-gate  * operations, so that the watchpoint handling is done separately per iovec.
1456*0Sstevel@tonic-gate  */
1457*0Sstevel@tonic-gate static int
1458*0Sstevel@tonic-gate watch_physio(int (*strat)(struct buf *), struct buf *bp, dev_t dev,
1459*0Sstevel@tonic-gate     int rw, void (*mincnt)(struct buf *), struct uio *uio)
1460*0Sstevel@tonic-gate {
1461*0Sstevel@tonic-gate 	struct uio auio;
1462*0Sstevel@tonic-gate 	struct iovec *iov;
1463*0Sstevel@tonic-gate 	caddr_t  base;
1464*0Sstevel@tonic-gate 	size_t len;
1465*0Sstevel@tonic-gate 	int seg_rw;
1466*0Sstevel@tonic-gate 	int error = 0;
1467*0Sstevel@tonic-gate 
1468*0Sstevel@tonic-gate 	if (uio->uio_segflg == UIO_SYSSPACE)
1469*0Sstevel@tonic-gate 		return (default_physio(strat, bp, dev, rw, mincnt, uio));
1470*0Sstevel@tonic-gate 
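	/*
	 * A device read (B_READ) stores into the user buffer, so the user
	 * pages are accessed for write; a device write reads from them.
	 */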
1471*0Sstevel@tonic-gate 	seg_rw = (rw == B_READ) ? S_WRITE : S_READ;
1472*0Sstevel@tonic-gate 
1473*0Sstevel@tonic-gate 	while (uio->uio_iovcnt > 0) {
1474*0Sstevel@tonic-gate 		if (uio->uio_resid == 0) {
1475*0Sstevel@tonic-gate 			/*
1476*0Sstevel@tonic-gate 			 * Make sure to return the uio structure with the
1477*0Sstevel@tonic-gate 			 * same values as default_physio() does.
1478*0Sstevel@tonic-gate 			 */
1479*0Sstevel@tonic-gate 			uio->uio_iov++;
1480*0Sstevel@tonic-gate 			uio->uio_iovcnt--;
1481*0Sstevel@tonic-gate 			continue;
1482*0Sstevel@tonic-gate 		}
1483*0Sstevel@tonic-gate 
1484*0Sstevel@tonic-gate 		iov = uio->uio_iov;
1485*0Sstevel@tonic-gate 		len = MIN(iov->iov_len, uio->uio_resid);
1486*0Sstevel@tonic-gate 
1487*0Sstevel@tonic-gate 		auio.uio_iovcnt = 1;
1488*0Sstevel@tonic-gate 		auio.uio_iov = iov;
1489*0Sstevel@tonic-gate 		auio.uio_resid = len;
1490*0Sstevel@tonic-gate 		auio.uio_loffset = uio->uio_loffset;
1491*0Sstevel@tonic-gate 		auio.uio_llimit = uio->uio_llimit;
1492*0Sstevel@tonic-gate 		auio.uio_fmode = uio->uio_fmode;
1493*0Sstevel@tonic-gate 		auio.uio_extflg = uio->uio_extflg;
1494*0Sstevel@tonic-gate 		auio.uio_segflg = uio->uio_segflg;
1495*0Sstevel@tonic-gate 
1496*0Sstevel@tonic-gate 		base = iov->iov_base;
1497*0Sstevel@tonic-gate 
1498*0Sstevel@tonic-gate 		if (!pr_is_watched(base, len, seg_rw)) {
1499*0Sstevel@tonic-gate 			/*
1500*0Sstevel@tonic-gate 			 * The given memory references don't cover a
1501*0Sstevel@tonic-gate 			 * watched page.
1502*0Sstevel@tonic-gate 			 */
1503*0Sstevel@tonic-gate 			error = default_physio(strat, bp, dev, rw, mincnt,
1504*0Sstevel@tonic-gate 			    &auio);
1505*0Sstevel@tonic-gate 
1506*0Sstevel@tonic-gate 			/* Update uio with values from auio. */
1507*0Sstevel@tonic-gate 			len -= auio.uio_resid;
1508*0Sstevel@tonic-gate 			uio->uio_resid -= len;
1509*0Sstevel@tonic-gate 			uio->uio_loffset += len;
1510*0Sstevel@tonic-gate 
1511*0Sstevel@tonic-gate 			/*
1512*0Sstevel@tonic-gate 			 * Return if an error occurred or not all data
1513*0Sstevel@tonic-gate 			 * was copied.
1514*0Sstevel@tonic-gate 			 */
1515*0Sstevel@tonic-gate 			if (auio.uio_resid || error)
1516*0Sstevel@tonic-gate 				break;
1517*0Sstevel@tonic-gate 			uio->uio_iov++;
1518*0Sstevel@tonic-gate 			uio->uio_iovcnt--;
1519*0Sstevel@tonic-gate 		} else {
1520*0Sstevel@tonic-gate 			int mapped, watchcode, ta;
1521*0Sstevel@tonic-gate 			caddr_t vaddr = base;
1522*0Sstevel@tonic-gate 			klwp_t *lwp = ttolwp(curthread);
1523*0Sstevel@tonic-gate 
1524*0Sstevel@tonic-gate 			watchcode = pr_is_watchpoint(&vaddr, &ta, len,
1525*0Sstevel@tonic-gate 			    NULL, seg_rw);
1526*0Sstevel@tonic-gate 
1527*0Sstevel@tonic-gate 			if (watchcode == 0 || ta != 0) {
1528*0Sstevel@tonic-gate 				/*
1529*0Sstevel@tonic-gate 				 * Do the io if the given memory references
1530*0Sstevel@tonic-gate 				 * don't cover a watched area (watchcode=0)
1531*0Sstevel@tonic-gate 				 * or if WA_TRAPAFTER was specified.
1532*0Sstevel@tonic-gate 				 */
1533*0Sstevel@tonic-gate 				mapped = pr_mappage(base, len, seg_rw, 1);
1534*0Sstevel@tonic-gate 				error = default_physio(strat, bp, dev, rw,
1535*0Sstevel@tonic-gate 				    mincnt, &auio);
1536*0Sstevel@tonic-gate 				if (mapped)
1537*0Sstevel@tonic-gate 					pr_unmappage(base, len, seg_rw, 1);
1538*0Sstevel@tonic-gate 
1539*0Sstevel@tonic-gate 				len -= auio.uio_resid;
1540*0Sstevel@tonic-gate 				uio->uio_resid -= len;
1541*0Sstevel@tonic-gate 				uio->uio_loffset += len;
1542*0Sstevel@tonic-gate 			}
1543*0Sstevel@tonic-gate 
1544*0Sstevel@tonic-gate 			/*
1545*0Sstevel@tonic-gate 			 * If we hit a watched address, do the watchpoint logic.
1546*0Sstevel@tonic-gate 			 */
1547*0Sstevel@tonic-gate 			if (watchcode &&
1548*0Sstevel@tonic-gate 			    (!sys_watchpoint(vaddr, watchcode, ta) ||
1549*0Sstevel@tonic-gate 			    lwp->lwp_sysabort)) {
1550*0Sstevel@tonic-gate 				lwp->lwp_sysabort = 0;
1551*0Sstevel@tonic-gate 				return (EFAULT);
1552*0Sstevel@tonic-gate 			}
1553*0Sstevel@tonic-gate 
1554*0Sstevel@tonic-gate 			/*
1555*0Sstevel@tonic-gate 			 * Check for errors from default_physio().
1556*0Sstevel@tonic-gate 			 */
1557*0Sstevel@tonic-gate 			if (watchcode == 0 || ta != 0) {
1558*0Sstevel@tonic-gate 				if (auio.uio_resid || error)
1559*0Sstevel@tonic-gate 					break;
1560*0Sstevel@tonic-gate 				uio->uio_iov++;
1561*0Sstevel@tonic-gate 				uio->uio_iovcnt--;
1562*0Sstevel@tonic-gate 			}
1563*0Sstevel@tonic-gate 		}
1564*0Sstevel@tonic-gate 	}
1565*0Sstevel@tonic-gate 
1566*0Sstevel@tonic-gate 	return (error);
1567*0Sstevel@tonic-gate }
1568*0Sstevel@tonic-gate 
1569*0Sstevel@tonic-gate int
1570*0Sstevel@tonic-gate wa_compare(const void *a, const void *b)
1571*0Sstevel@tonic-gate {
1572*0Sstevel@tonic-gate 	const watched_area_t *pa = a;
1573*0Sstevel@tonic-gate 	const watched_area_t *pb = b;
1574*0Sstevel@tonic-gate 
1575*0Sstevel@tonic-gate 	if (pa->wa_vaddr < pb->wa_vaddr)
1576*0Sstevel@tonic-gate 		return (-1);
1577*0Sstevel@tonic-gate 	else if (pa->wa_vaddr > pb->wa_vaddr)
1578*0Sstevel@tonic-gate 		return (1);
1579*0Sstevel@tonic-gate 	else
1580*0Sstevel@tonic-gate 		return (0);
1581*0Sstevel@tonic-gate }
1582*0Sstevel@tonic-gate 
1583*0Sstevel@tonic-gate int
1584*0Sstevel@tonic-gate wp_compare(const void *a, const void *b)
1585*0Sstevel@tonic-gate {
1586*0Sstevel@tonic-gate 	const watched_page_t *pa = a;
1587*0Sstevel@tonic-gate 	const watched_page_t *pb = b;
1588*0Sstevel@tonic-gate 
1589*0Sstevel@tonic-gate 	if (pa->wp_vaddr < pb->wp_vaddr)
1590*0Sstevel@tonic-gate 		return (-1);
1591*0Sstevel@tonic-gate 	else if (pa->wp_vaddr > pb->wp_vaddr)
1592*0Sstevel@tonic-gate 		return (1);
1593*0Sstevel@tonic-gate 	else
1594*0Sstevel@tonic-gate 		return (0);
1595*0Sstevel@tonic-gate }
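
/*
 * Illustrative sketch, not part of this file: comparators like the two above
 * are what avl_create() expects when the per-process watched-area and
 * watched-page AVL trees are set up elsewhere, roughly:
 *
 *	avl_create(&p->p_warea, wa_compare,
 *	    sizeof (watched_area_t), offsetof(watched_area_t, wa_link));
 *
 * The AVL-node member name (wa_link) is assumed here for illustration only.
 */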
1596*0Sstevel@tonic-gate 
1597*0Sstevel@tonic-gate /*
1598*0Sstevel@tonic-gate  * Given an address range, finds the first watched area which overlaps some or
1599*0Sstevel@tonic-gate  * all of the range.
1600*0Sstevel@tonic-gate  */
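/*
 * Returns NULL if no watched area overlaps the range.  If 'where' is
 * non-NULL, it is set to the AVL index obtained from avl_find(), i.e. the
 * insertion point for pwa.
 */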
1601*0Sstevel@tonic-gate watched_area_t *
1602*0Sstevel@tonic-gate pr_find_watched_area(proc_t *p, watched_area_t *pwa, avl_index_t *where)
1603*0Sstevel@tonic-gate {
1604*0Sstevel@tonic-gate 	caddr_t vaddr = pwa->wa_vaddr;
1605*0Sstevel@tonic-gate 	caddr_t eaddr = pwa->wa_eaddr;
1606*0Sstevel@tonic-gate 	watched_area_t *wap;
1607*0Sstevel@tonic-gate 	avl_index_t real_where;
1608*0Sstevel@tonic-gate 
1609*0Sstevel@tonic-gate 	/* First, check if there is an exact match.  */
1610*0Sstevel@tonic-gate 	wap = avl_find(&p->p_warea, pwa, &real_where);
1611*0Sstevel@tonic-gate 
1612*0Sstevel@tonic-gate 
1613*0Sstevel@tonic-gate 	/* Check to see if we overlap with the previous area.  */
1614*0Sstevel@tonic-gate 	if (wap == NULL) {
1615*0Sstevel@tonic-gate 		wap = avl_nearest(&p->p_warea, real_where, AVL_BEFORE);
1616*0Sstevel@tonic-gate 		if (wap != NULL &&
1617*0Sstevel@tonic-gate 		    (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1618*0Sstevel@tonic-gate 			wap = NULL;
1619*0Sstevel@tonic-gate 	}
1620*0Sstevel@tonic-gate 
1621*0Sstevel@tonic-gate 	/* Try the next area.  */
1622*0Sstevel@tonic-gate 	if (wap == NULL) {
1623*0Sstevel@tonic-gate 		wap = avl_nearest(&p->p_warea, real_where, AVL_AFTER);
1624*0Sstevel@tonic-gate 		if (wap != NULL &&
1625*0Sstevel@tonic-gate 		    (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1626*0Sstevel@tonic-gate 			wap = NULL;
1627*0Sstevel@tonic-gate 	}
1628*0Sstevel@tonic-gate 
1629*0Sstevel@tonic-gate 	if (where)
1630*0Sstevel@tonic-gate 		*where = real_where;
1631*0Sstevel@tonic-gate 
1632*0Sstevel@tonic-gate 	return (wap);
1633*0Sstevel@tonic-gate }
1634*0Sstevel@tonic-gate 
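/*
 * Turn watchpoint handling on or off for a thread: set or clear TP_WATCHPT
 * and install or remove the watchpoint copy ops vector, so that subsequent
 * copyin/copyout-style accesses by this thread go through the watch_*
 * routines above.
 */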
1635*0Sstevel@tonic-gate void
1636*0Sstevel@tonic-gate watch_enable(kthread_id_t t)
1637*0Sstevel@tonic-gate {
1638*0Sstevel@tonic-gate 	t->t_proc_flag |= TP_WATCHPT;
1639*0Sstevel@tonic-gate 	install_copyops(t, &watch_copyops);
1640*0Sstevel@tonic-gate }
1641*0Sstevel@tonic-gate 
1642*0Sstevel@tonic-gate void
1643*0Sstevel@tonic-gate watch_disable(kthread_id_t t)
1644*0Sstevel@tonic-gate {
1645*0Sstevel@tonic-gate 	t->t_proc_flag &= ~TP_WATCHPT;
1646*0Sstevel@tonic-gate 	remove_copyops(t);
1647*0Sstevel@tonic-gate }
1648*0Sstevel@tonic-gate 
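/*
 * The *_nowatch() routines below perform a user access with watchpoints
 * temporarily disabled for the affected range, so that kernel-internal
 * accesses do not trigger the process's watchpoints.
 */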
1649*0Sstevel@tonic-gate int
1650*0Sstevel@tonic-gate copyin_nowatch(const void *uaddr, void *kaddr, size_t len)
1651*0Sstevel@tonic-gate {
1652*0Sstevel@tonic-gate 	int watched, ret;
1653*0Sstevel@tonic-gate 
1654*0Sstevel@tonic-gate 	watched = watch_disable_addr(uaddr, len, S_READ);
1655*0Sstevel@tonic-gate 	ret = copyin(uaddr, kaddr, len);
1656*0Sstevel@tonic-gate 	if (watched)
1657*0Sstevel@tonic-gate 		watch_enable_addr(uaddr, len, S_READ);
1658*0Sstevel@tonic-gate 
1659*0Sstevel@tonic-gate 	return (ret);
1660*0Sstevel@tonic-gate }
1661*0Sstevel@tonic-gate 
1662*0Sstevel@tonic-gate int
1663*0Sstevel@tonic-gate copyout_nowatch(const void *kaddr, void *uaddr, size_t len)
1664*0Sstevel@tonic-gate {
1665*0Sstevel@tonic-gate 	int watched, ret;
1666*0Sstevel@tonic-gate 
1667*0Sstevel@tonic-gate 	watched = watch_disable_addr(uaddr, len, S_WRITE);
1668*0Sstevel@tonic-gate 	ret = copyout(kaddr, uaddr, len);
1669*0Sstevel@tonic-gate 	if (watched)
1670*0Sstevel@tonic-gate 		watch_enable_addr(uaddr, len, S_WRITE);
1671*0Sstevel@tonic-gate 
1672*0Sstevel@tonic-gate 	return (ret);
1673*0Sstevel@tonic-gate }
1674*0Sstevel@tonic-gate 
1675*0Sstevel@tonic-gate #ifdef _LP64
1676*0Sstevel@tonic-gate int
1677*0Sstevel@tonic-gate fuword64_nowatch(const void *addr, uint64_t *value)
1678*0Sstevel@tonic-gate {
1679*0Sstevel@tonic-gate 	int watched, ret;
1680*0Sstevel@tonic-gate 
1681*0Sstevel@tonic-gate 	watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1682*0Sstevel@tonic-gate 	ret = fuword64(addr, value);
1683*0Sstevel@tonic-gate 	if (watched)
1684*0Sstevel@tonic-gate 		watch_enable_addr(addr, sizeof (*value), S_READ);
1685*0Sstevel@tonic-gate 
1686*0Sstevel@tonic-gate 	return (ret);
1687*0Sstevel@tonic-gate }
1688*0Sstevel@tonic-gate #endif
1689*0Sstevel@tonic-gate 
1690*0Sstevel@tonic-gate int
1691*0Sstevel@tonic-gate fuword32_nowatch(const void *addr, uint32_t *value)
1692*0Sstevel@tonic-gate {
1693*0Sstevel@tonic-gate 	int watched, ret;
1694*0Sstevel@tonic-gate 
1695*0Sstevel@tonic-gate 	watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1696*0Sstevel@tonic-gate 	ret = fuword32(addr, value);
1697*0Sstevel@tonic-gate 	if (watched)
1698*0Sstevel@tonic-gate 		watch_enable_addr(addr, sizeof (*value), S_READ);
1699*0Sstevel@tonic-gate 
1700*0Sstevel@tonic-gate 	return (ret);
1701*0Sstevel@tonic-gate }
1702*0Sstevel@tonic-gate 
1703*0Sstevel@tonic-gate #ifdef _LP64
1704*0Sstevel@tonic-gate int
1705*0Sstevel@tonic-gate suword64_nowatch(void *addr, uint64_t value)
1706*0Sstevel@tonic-gate {
1707*0Sstevel@tonic-gate 	int watched, ret;
1708*0Sstevel@tonic-gate 
1709*0Sstevel@tonic-gate 	watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1710*0Sstevel@tonic-gate 	ret = suword64(addr, value);
1711*0Sstevel@tonic-gate 	if (watched)
1712*0Sstevel@tonic-gate 		watch_enable_addr(addr, sizeof (value), S_WRITE);
1713*0Sstevel@tonic-gate 
1714*0Sstevel@tonic-gate 	return (ret);
1715*0Sstevel@tonic-gate }
1716*0Sstevel@tonic-gate #endif
1717*0Sstevel@tonic-gate 
1718*0Sstevel@tonic-gate int
1719*0Sstevel@tonic-gate suword32_nowatch(void *addr, uint32_t value)
1720*0Sstevel@tonic-gate {
1721*0Sstevel@tonic-gate 	int watched, ret;
1722*0Sstevel@tonic-gate 
1723*0Sstevel@tonic-gate 	watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1724*0Sstevel@tonic-gate 	ret = suword32(addr, value);
1725*0Sstevel@tonic-gate 	if (watched)
1726*0Sstevel@tonic-gate 		watch_enable_addr(addr, sizeof (value), S_WRITE);
1727*0Sstevel@tonic-gate 
1728*0Sstevel@tonic-gate 	return (ret);
1729*0Sstevel@tonic-gate }
1730*0Sstevel@tonic-gate 
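/*
 * watch_disable_addr() returns nonzero when watchpoint protections on the
 * pages covering the range were temporarily lifted (via pr_mappage());
 * callers restore them with watch_enable_addr() once the access is done.
 */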
1731*0Sstevel@tonic-gate int
1732*0Sstevel@tonic-gate watch_disable_addr(const void *addr, size_t len, enum seg_rw rw)
1733*0Sstevel@tonic-gate {
1734*0Sstevel@tonic-gate 	if (pr_watch_active(curproc))
1735*0Sstevel@tonic-gate 		return (pr_mappage((caddr_t)addr, len, rw, 1));
1736*0Sstevel@tonic-gate 	return (0);
1737*0Sstevel@tonic-gate }
1738*0Sstevel@tonic-gate 
1739*0Sstevel@tonic-gate void
1740*0Sstevel@tonic-gate watch_enable_addr(const void *addr, size_t len, enum seg_rw rw)
1741*0Sstevel@tonic-gate {
1742*0Sstevel@tonic-gate 	if (pr_watch_active(curproc))
1743*0Sstevel@tonic-gate 		pr_unmappage((caddr_t)addr, len, rw, 1);
1744*0Sstevel@tonic-gate }
1745