/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_MD_MIRROR_H
#define	_SYS_MD_MIRROR_H

#include <sys/callb.h>
#include <sys/lvm/mdvar.h>
#include <sys/lvm/md_mirror_shared.h>
#include <sys/lvm/md_rename.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * The following bits are used in the status word in the common section
 * of the unit structure.
 */
#define	SMS_IS(sm, state) (((sm)->sm_state & (state)) != 0)
#define	SMS_BY_INDEX_IS(un, index, state) \
		(((un)->un_sm[(index)].sm_state & (state)) != 0)

#define	SMS_BY_INDEX_IS_TARGET(un, index) \
		((un)->un_sm[(index)].sm_flags & MD_SM_RESYNC_TARGET)

#define	SUBMIRROR_IS_READABLE(un, isubmirror)				\
	((((un)->un_sm[(isubmirror)].sm_state & SMS_IGNORE) == 0) &&	\
	    ((un)->un_sm[(isubmirror)].sm_state &			\
	    (SMS_RUNNING | SMS_COMP_ERRED | SMS_COMP_RESYNC)))

#define	SUBMIRROR_IS_WRITEABLE(un, isubmirror)			\
	((un)->un_sm[(isubmirror)].sm_state &			\
	    (SMS_RUNNING | SMS_COMP_ERRED | SMS_COMP_RESYNC |	\
	    SMS_ATTACHED_RESYNC | SMS_OFFLINE_RESYNC))

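/*
 * Illustrative sketch only (an assumption about usage, not part of this
 * interface): a read path might scan submirrors with the predicates
 * above before choosing one to issue I/O to.  The locals "un" and "smi"
 * are hypothetical.
 *
 *	for (smi = 0; smi < un->un_nsm; smi++) {
 *		if (SUBMIRROR_IS_READABLE(un, smi))
 *			break;
 *	}
 */
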
/*
 * Default resync block size for MN resync messages
 */
#define	MD_DEF_RESYNC_BLK_SZ		8192

/*
 * Macro to test whether the current block range lies within the current
 * resync region.
 */
#define	IN_RESYNC_REGION(un, ps) \
	((un->un_rs_prev_overlap != NULL) && (ps->ps_firstblk >= \
	    un->un_rs_prev_overlap->ps_firstblk) && \
	    (ps->ps_lastblk <= un->un_rs_prev_overlap->ps_lastblk))
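
/*
 * Illustrative sketch only (a hedged assumption about usage): a write
 * path could consult IN_RESYNC_REGION() to decide whether a request,
 * described by a hypothetical parent save area "ps", falls entirely
 * inside the region the resync thread last claimed:
 *
 *	if (IN_RESYNC_REGION(un, ps)) {
 *		(handle the request with resync-aware logic)
 *	}
 */
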
/*
 * Default resync update interval (in minutes).
 */
#define	MD_DEF_MIRROR_RESYNC_INTVL	5

/*
 * Defines for flags argument in function set_sm_comp_state()
 */
#define	MD_STATE_NO_XMIT	0x0000 /* Local action (sent from master) */
#define	MD_STATE_XMIT		0x0001 /* Non-local action, send to master */
#define	MD_STATE_WMUPDATE	0x0002 /* Action because of watermark update */
#define	MD_STATE_OCHELD		0x0004 /* open/close lock held */

/*
 * Defines for flags argument in function check_comp_4_hotspares()
 */
#define	MD_HOTSPARE_NO_XMIT	0x0000 /* Local action (sent from master) */
#define	MD_HOTSPARE_XMIT	0x0001 /* Non-local action, send to master */
#define	MD_HOTSPARE_WMUPDATE	0x0002 /* Action because of watermark update */
#define	MD_HOTSPARE_LINKHELD	0x0004 /* md_link_rw lock held */

/*
 * Defines for argument in function send_mn_resync_done_message()
 */
#define	RESYNC_ERR		0x1
#define	CLEAR_OPT_NOT_DONE	0x2

/*
 * Defines for argument in function resync_read_blk_range()
 */
#define	MD_FIRST_RESYNC_NEXT	0x1
#define	MD_SEND_MESS_XMIT	0x2
#define	MD_RESYNC_FLAG_ERR	0x4

/*
 * Defines for argument in function wait_for_overlaps()
 */
#define	MD_OVERLAP_ALLOW_REPEAT	0x1	/* Allow if ps already in tree */
#define	MD_OVERLAP_NO_REPEAT	0	/* ps must not already be in tree */

/*
 * Define for max retries of mirror_owner
 */
#define	MD_OWNER_RETRIES	10

/*
 * mm_submirror32_od and mm_unit32_od are used only for the old 32-bit format.
 */
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack(4)
#endif
typedef struct  mm_submirror32_od {	/* submirrors */
	mdkey_t		sm_key;
	dev32_t		sm_dev;
	sm_state_t	sm_state;
	sm_flags_t	sm_flags;
	caddr32_t	xx_sm_shared_by_blk;	/* really (void *) */
	caddr32_t	xx_sm_shared_by_indx;	/* really (void *) */
	caddr32_t	xx_sm_get_component_count;
	caddr32_t	xx_sm_get_bcss;	/* block count skip size */
	md_m_shared32_od_t sm_shared;	/* used for mirroring plain devices */
	int		sm_hsp_id;	/* used for mirroring plain devices */
	struct timeval32 sm_timestamp;	/* time of last state change */
} mm_submirror32_od_t;

typedef struct	mm_submirror {		/* submirrors */
	mdkey_t		sm_key;
	md_dev64_t	sm_dev;		/* 64 bit */
	sm_state_t	sm_state;
	sm_flags_t	sm_flags;
	md_m_shared_t	sm_shared;	/* used for mirroring plain devices */
	int		sm_hsp_id;	/* used for mirroring plain devices */
	md_timeval32_t	sm_timestamp;	/* time of last state change, 32 bit */
} mm_submirror_t;

typedef struct mm_unit32_od {
	mdc_unit32_od_t	c;			/* common stuff */

	int		un_last_read;		/* last submirror index read */
	uint_t		un_changecnt;
	ushort_t	un_nsm;			/* number of submirrors */
	mm_submirror32_od_t un_sm[NMIRROR];
	int		un_overlap_tree_flag;
	int		xx_un_overlap_tree_mx[2];	/* replaces mutex */
	ushort_t	xx_un_overlap_tree_cv;
	caddr32_t	xx_un_overlap_root;
	mm_rd_opt_t	un_read_option;		/* mirror read option */
	mm_wr_opt_t	un_write_option;	/* mirror write option */
	mm_pass_num_t	un_pass_num;		/* resync pass number */
	/*
	 * The following fields are used to keep the dirty bitmaps.
	 */
	int		xx_un_resync_mx[2];	/* replaces mutex */
	ushort_t	xx_un_resync_cv;
	uint_t		un_resync_flg;
	uint_t		un_waiting_to_mark;
	uint_t		un_waiting_to_commit;
	caddr32_t	xx_un_outstanding_writes;	/* outstanding write */
	caddr32_t	xx_un_goingclean_bm;
	caddr32_t	xx_un_goingdirty_bm;
	caddr32_t	xx_un_dirty_bm;
	caddr32_t	xx_un_resync_bm;
	uint_t		un_rrd_blksize;	/* The blocksize of the dirty bits */
	uint_t		un_rrd_num;	/* The number of resync regions */
	mddb_recid_t	un_rr_dirty_recid;	/* resync region bm record id */
	/*
	 * The following fields are private to the resync process.
	 */
	int		un_rs_copysize;
	int		un_rs_dests;	/* destinations */
	daddr32_t	un_rs_resync_done;	/* used for percent done */
	daddr32_t	un_rs_resync_2_do;	/* used for percent done */
	int		un_rs_dropped_lock;
	caddr32_t	un_rs_type;		/* type of resync in progress */
	/*
	 * Incore elements in this old structure are no longer referenced by
	 * the current 64-bit kernel.  They are commented out for maintenance
	 * purposes.
	 *
	 * 	mm_submirror_ic_t	un_smic[NMIRROR];
	 * 	kmutex_t		un_ovrlap_chn_mx;
	 * 	kcondvar_t		un_ovrlap_chn_cv;
	 * 	struct md_mps		*un_ovrlap_chn;
	 * 	kmutex_t		un_resync_mx;
	 * 	kcondvar_t		un_resync_cv;
	 * 	short			*un_outstanding_writes;
	 * 	uchar_t			*un_goingclean_bm;
	 * 	uchar_t			*un_goingdirty_bm;
	 * 	uchar_t			*un_dirty_bm;
	 * 	uchar_t			*un_resync_bm;
	 * 	char			*un_rs_buffer;
	 */
} mm_unit32_od_t;
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack()
#endif

/* Types of resync in progress (used for un_rs_type) */
#define	MD_RS_NONE		0		/* No resync */
#define	MD_RS_OPTIMIZED		0x0001		/* Optimized resync */
#define	MD_RS_COMPONENT		0x0002		/* Component resync */
#define	MD_RS_SUBMIRROR		0x0003		/* Submirror resync */
#define	MD_RS_ABR		0x0004		/* Application based resync */

/*
 * un_rs_type is split into the following bitfields:
 *
 * 0-3	Resync type (as above)
 * 4-7	Submirror index [0..3]
 * 8-20	Component index
 */
#define	RS_TYPE_MASK	0xF
#define	RS_SMI_MASK	0xF0
#define	RS_CI_MASK	0x1FFF00

#define	RS_TYPE(x)	((x) & RS_TYPE_MASK)
#define	RS_SMI(x)	(((x) & RS_SMI_MASK) >> 4)
#define	RS_CI(x)	(((x) & RS_CI_MASK) >> 8)

#define	SET_RS_TYPE(x, v)	{					\
				    (x) &= ~RS_TYPE_MASK;		\
				    (x) |= ((v) & RS_TYPE_MASK);	\
				}
#define	SET_RS_TYPE_NONE(x)	{					\
				    (x) &= ~RS_TYPE_MASK;		\
				}
#define	SET_RS_SMI(x, v)	{					\
				    (x) &= ~RS_SMI_MASK;		\
				    (x) |= (((v) << 4) & RS_SMI_MASK);	\
				}
#define	SET_RS_CI(x, v)		{					\
				    (x) &= ~RS_CI_MASK;			\
				    (x) |= (((v) << 8) & RS_CI_MASK);	\
				}

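/*
 * Illustrative sketch only: building and decoding un_rs_type for a
 * hypothetical component resync of submirror 1, component 2.
 *
 *	uint_t rstype = 0;
 *
 *	SET_RS_TYPE(rstype, MD_RS_COMPONENT);
 *	SET_RS_SMI(rstype, 1);
 *	SET_RS_CI(rstype, 2);
 *	ASSERT(RS_TYPE(rstype) == MD_RS_COMPONENT);
 *	ASSERT(RS_SMI(rstype) == 1);
 *	ASSERT(RS_CI(rstype) == 2);
 */
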
typedef struct	mm_submirror_ic {
	intptr_t	(*sm_shared_by_blk)(md_dev64_t, void *,
				diskaddr_t, u_longlong_t *);
	intptr_t	(*sm_shared_by_indx)(md_dev64_t, void *, int);
	int		(*sm_get_component_count)(md_dev64_t, void *);
	int		(*sm_get_bcss)(md_dev64_t, void *, int, diskaddr_t *,
				size_t *, u_longlong_t *, u_longlong_t *);
} mm_submirror_ic_t;

typedef struct md_mps {
	DAEMON_QUEUE
	buf_t		*ps_bp;
	struct mm_unit	*ps_un;
	mdi_unit_t	*ps_ui;
	uint_t		 ps_childbflags;
	caddr_t		 ps_addr;
	diskaddr_t	 ps_firstblk;
	diskaddr_t	 ps_lastblk;
	uint_t		 ps_flags;
	uint_t		 ps_allfrom_sm;		/* entire read came from here */
	uint_t		 ps_writable_sm;
	uint_t		 ps_current_sm;
	uint_t		 ps_active_cnt;
	int		 ps_frags;
	uint_t		 ps_changecnt;
	struct md_mps	*ps_unused1;
	struct md_mps	*ps_unused2;
	void		 (*ps_call)();
	kmutex_t	 ps_mx;
	avl_node_t	ps_overlap_node;
} md_mps_t;

#define	MD_MPS_ON_OVERLAP	0x0001
#define	MD_MPS_ERROR		0x0002
#define	MD_MPS_WRITE_AFTER_READ	0x0004
#define	MD_MPS_WOW		0x0008
#define	MD_MPS_DONTFREE		0x0010
#define	MD_MPS_DONE		0x0020
#define	MD_MPS_MAPPED		0x0040		/* re: MD_STR_MAPPED	*/
#define	MD_MPS_NOBLOCK		0x0080		/* re: MD_NOBLOCK	*/
#define	MD_MPS_ABR		0x0100		/* re: MD_STR_ABR	*/
#define	MD_MPS_DMR		0x0200		/* re: MD_STR_DMR	*/
#define	MD_MPS_WMUPDATE		0x0400		/* re: MD_STR_WMUPDATE	*/
#define	MD_MPS_DIRTY_RD		0x0800		/* re: MD_STR_DIRTY_RD	*/
#define	MD_MPS_RESYNC_READ	0x1000
#define	MD_MPS_FLAG_ERROR	0x2000		/* re: MD_STR_FLAG_ERR	*/
#define	MD_MPS_BLOCKABLE_IO	0x4000		/* re: MD_STR_BLOCK_OK  */

#define	MPS_FREE(kc, ps)			\
{						\
	if ((ps)->ps_flags & MD_MPS_DONTFREE)	\
		(ps)->ps_flags |= MD_MPS_DONE;	\
	else					\
		kmem_cache_free((kc), (ps));	\
}

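/*
 * Illustrative sketch only: completion code would normally release a
 * parent save area through MPS_FREE() so that structures flagged
 * MD_MPS_DONTFREE are marked MD_MPS_DONE instead of being returned to
 * the kmem cache.  The cache name "mirror_parent_cache" below is a
 * hypothetical placeholder.
 *
 *	MPS_FREE(mirror_parent_cache, ps);
 */
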
typedef struct md_mcs {
	DAEMON_QUEUE
	md_mps_t	*cs_ps;
	minor_t		 cs_mdunit;
	/* Add new structure members HERE!! */
	buf_t		 cs_buf;
	/*  DO NOT add structure members here; cs_buf is dynamically sized */
} md_mcs_t;

typedef struct  mm_mirror_ic {
	kmutex_t	un_overlap_tree_mx;
	kcondvar_t	un_overlap_tree_cv;
	avl_tree_t	un_overlap_root;
	kmutex_t	un_resync_mx;
	kcondvar_t	un_resync_cv;
	short		*un_outstanding_writes; /* outstanding write array */
	uchar_t		*un_goingclean_bm;
	uchar_t		*un_goingdirty_bm;
	uchar_t		*un_dirty_bm;
	uchar_t		*un_resync_bm;
	char		*un_rs_buffer;
	int		un_suspend_wr_flag;
	kmutex_t	un_suspend_wr_mx;
	kcondvar_t	un_suspend_wr_cv;
	md_mn_nodeid_t	un_mirror_owner;	/* Node which owns mirror */
	diskaddr_t	un_resync_startbl;	/* Start block for resync */
	kmutex_t	un_owner_mx;		/* Mutex for un_owner_state */
	uint_t		un_owner_state;		/* See below */
	uint_t		un_mirror_owner_status;	/* status for ioctl request */
	kmutex_t	un_dmr_mx;		/* mutex for DMR requests */
	kcondvar_t	un_dmr_cv;		/* condvar for DMR requests */
	int		un_dmr_last_read;	/* last DMR submirror read */
	callb_cpr_t	un_rs_cprinfo;		/* CPR info for resync thread */
	kmutex_t	un_rs_cpr_mx;		/* Mutex for CPR info */
	uint_t		un_resync_completed;	/* type of last resync */
	int		un_abr_count;		/* count of sp's with abr set */
} mm_mirror_ic_t;

#define	MM_MN_OWNER_SENT	0x0001		/* RPC in progress */
#define	MM_MN_BECOME_OWNER	0x0002		/* Ownership change in prog. */
#define	MM_MN_PREVENT_CHANGE	0x0004		/* Disallow ownership change */

typedef struct mm_unit {
	mdc_unit_t	c;			/* common stuff */

	int		un_last_read;		/* last submirror index read */
	uint_t		un_changecnt;
	ushort_t	un_nsm;			/* number of submirrors */
	mm_submirror_t	un_sm[NMIRROR];
	int		un_overlap_tree_flag;
	mm_rd_opt_t	un_read_option;		/* mirror read option */
	mm_wr_opt_t	un_write_option;	/* mirror write option */
	mm_pass_num_t	un_pass_num;		/* resync pass number */
	/*
	 * The following fields are used to keep the dirty bitmaps.
	 */
	uint_t		un_resync_flg;
	uint_t		un_waiting_to_mark;
	uint_t		un_waiting_to_commit;
	uint_t		un_rrd_blksize;	  /* The blocksize of the dirty bits */
	uint_t		un_rrd_num;	  /* The number of resync regions */
	mddb_recid_t	un_rr_dirty_recid; /* resync region bm db record id */
	/*
	 * The following fields are private to the resync process.
	 */
	int		un_rs_copysize;
	int		un_rs_dests;		/* destinations */
	diskaddr_t	un_rs_resync_done;	/* used for percent done */
	diskaddr_t	un_rs_resync_2_do;	/* used for percent done */
	int		un_rs_dropped_lock;
	uint_t		un_rs_type;		/* type of resync */
	/*
	 * Incore only elements
	 */
	mm_submirror_ic_t un_smic[NMIRROR];	/* NMIRROR elements array */
	mm_mirror_ic_t	un_mmic;
	kmutex_t	un_rrp_inflight_mx;
	/*
	 * resync thread control
	 */
	kthread_t	*un_rs_thread;		/* Resync thread ID */
	kmutex_t	un_rs_thread_mx;	/* Thread cv mutex */
	kcondvar_t	un_rs_thread_cv;	/* Cond. Var. for thread */
	uint_t		un_rs_thread_flags;	/* Thread control flags */
	md_mps_t	*un_rs_prev_overlap;	/* existing overlap request */
	timeout_id_t	un_rs_resync_to_id;	/* resync progress timeout */
	kmutex_t	un_rs_progress_mx;	/* Resync progress mutex */
	kcondvar_t	un_rs_progress_cv;	/* Cond. Var. for progress */
	uint_t		un_rs_progress_flags;	/* Thread control flags */
	void		*un_rs_msg;		/* Intra-node resync message */
} mm_unit_t;

#define	un_overlap_tree_mx	un_mmic.un_overlap_tree_mx
#define	un_overlap_tree_cv	un_mmic.un_overlap_tree_cv
#define	un_overlap_root		un_mmic.un_overlap_root
#define	un_resync_mx		un_mmic.un_resync_mx
#define	un_resync_cv		un_mmic.un_resync_cv
#define	un_outstanding_writes	un_mmic.un_outstanding_writes
#define	un_goingclean_bm	un_mmic.un_goingclean_bm
#define	un_goingdirty_bm	un_mmic.un_goingdirty_bm
#define	un_dirty_bm		un_mmic.un_dirty_bm
#define	un_resync_bm		un_mmic.un_resync_bm
#define	un_rs_buffer		un_mmic.un_rs_buffer
#define	un_suspend_wr_mx	un_mmic.un_suspend_wr_mx
#define	un_suspend_wr_cv	un_mmic.un_suspend_wr_cv
#define	un_suspend_wr_flag	un_mmic.un_suspend_wr_flag
#define	un_mirror_owner		un_mmic.un_mirror_owner
#define	un_resync_startbl	un_mmic.un_resync_startbl
#define	un_owner_mx		un_mmic.un_owner_mx
#define	un_owner_state		un_mmic.un_owner_state
#define	un_mirror_reqs		un_mmic.un_mirror_reqs
#define	un_mirror_reqs_done	un_mmic.un_mirror_reqs_done
#define	un_mirror_owner_status	un_mmic.un_mirror_owner_status
#define	un_dmr_mx		un_mmic.un_dmr_mx
#define	un_dmr_cv		un_mmic.un_dmr_cv
#define	un_dmr_last_read	un_mmic.un_dmr_last_read
#define	un_rs_cprinfo		un_mmic.un_rs_cprinfo
#define	un_rs_cpr_mx		un_mmic.un_rs_cpr_mx
#define	un_resync_completed	un_mmic.un_resync_completed
#define	un_abr_count		un_mmic.un_abr_count


#define	MM_RF_GATECLOSED	0x0001
#define	MM_RF_COMMIT_NEEDED	0x0002
#define	MM_RF_COMMITING		0x0004
#define	MM_RF_STALL_CLEAN	(MM_RF_COMMITING | \
				    MM_RF_COMMIT_NEEDED | \
				    MM_RF_GATECLOSED)


#define	MD_MN_MIRROR_UNOWNED	0
#define	MD_MN_MIRROR_OWNER(un)	 (un->un_mirror_owner == md_mn_mynode_id)
#define	MD_MN_NO_MIRROR_OWNER(un)	\
	(un->un_mirror_owner == MD_MN_MIRROR_UNOWNED)

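/*
 * Illustrative sketch only (an assumption about multi-node usage): code
 * paths that need to write to or resync a multi-owner mirror typically
 * check ownership first, along the lines of:
 *
 *	if (!MD_MN_MIRROR_OWNER(un)) {
 *		(request ownership, retrying up to MD_OWNER_RETRIES times)
 *	}
 */
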
typedef struct err_comp {
	struct err_comp	*ec_next;
	int		ec_smi;
	int		ec_ci;
} err_comp_t;

extern	int	md_min_rr_size;
extern	int	md_def_num_rr;

/* Controls for optimized resync records */
#define	MD_MIN_RR_SIZE		(md_min_rr_size)
#define	MD_DEF_NUM_RR		(md_def_num_rr)
#define	MD_MAX_NUM_RR		(4192*NBBY - sizeof (struct optim_resync))

/* default resync buffer size */
#define	MD_DEF_RESYNC_BUF_SIZE	(1024)

/* Structure for optimized resync records */
#define	OR_MAGIC	0xFECA	/* Only missing the L */
typedef struct optim_resync {
	uint_t	or_revision;
	uint_t	or_magic;
	uint_t	or_blksize;
	uint_t	or_num;
	uchar_t	or_rr[1];
} optim_resync_t;

/* Type 2 for mirror records */
#define	MIRROR_REC	1
#define	RESYNC_REC	2

#ifdef _KERNEL

#define	NO_SUBMIRRORS	(0)
#define	ALL_SUBMIRRORS	(0xFFF)
#define	SMI2BIT(smi)	(1 << (smi))

/* For use with mirror_other_sources() */
#define	WHOLE_SM	(-1)

#define	BLK_TO_RR(i, b, un)  {\
	(i) = ((b) / ((un))->un_rrd_blksize); \
	if ((i) > ((un))->un_rrd_num) \
		{ panic("md: BLK_TO_RR"); } \
}

#define	RR_TO_BLK(b, i, un) \
	(b) = ((i) * ((un))->un_rrd_blksize)

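/*
 * Illustrative sketch only: mapping a block address to its resync
 * region index and back again.  "rr", "blk" and "ps" are hypothetical
 * locals; RR_TO_BLK() yields the first block covered by region "rr".
 *
 *	int		rr;
 *	diskaddr_t	blk;
 *
 *	BLK_TO_RR(rr, ps->ps_firstblk, un);
 *	RR_TO_BLK(blk, rr, un);
 */
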
#define	IS_GOING_DIRTY(i, un)	(isset((un)->un_goingdirty_bm, (i)))
#define	CLR_GOING_DIRTY(i, un)	(clrbit((un)->un_goingdirty_bm, (i)))
#define	SET_GOING_DIRTY(i, un)	(setbit((un)->un_goingdirty_bm, (i)))

#define	IS_GOING_CLEAN(i, un)	(isset((un)->un_goingclean_bm, (i)))
#define	CLR_GOING_CLEAN(i, un)	(clrbit((un)->un_goingclean_bm, (i)))
#define	SET_GOING_CLEAN(i, un)	(setbit((un)->un_goingclean_bm, (i)))

#define	IS_REGION_DIRTY(i, un)	(isset((un)->un_dirty_bm, (i)))
#define	CLR_REGION_DIRTY(i, un)	(clrbit((un)->un_dirty_bm, (i)))
#define	SET_REGION_DIRTY(i, un)	(setbit((un)->un_dirty_bm, (i)))

#define	IS_KEEPDIRTY(i, un)	(isset((un)->un_resync_bm, (i)))
#define	CLR_KEEPDIRTY(i, un)	(clrbit((un)->un_resync_bm, (i)))

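/*
 * Illustrative sketch only: marking the resync regions touched by a
 * hypothetical write request "ps" as dirty.  The real driver also
 * manages the goingdirty/goingclean transitions and the on-disk bitmap
 * commit, which are omitted here.
 *
 *	int	start_rr, end_rr, i;
 *
 *	BLK_TO_RR(start_rr, ps->ps_firstblk, un);
 *	BLK_TO_RR(end_rr, ps->ps_lastblk, un);
 *	for (i = start_rr; i <= end_rr; i++) {
 *		if (!IS_REGION_DIRTY(i, un))
 *			SET_REGION_DIRTY(i, un);
 *	}
 */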

/*
 * Write-On-Write handling.
 *   flags for md_mirror_wow_flg
 *   structure for queuing copy-writes
 *   macros for locating the header and buffer relative to each other
 */
#define	WOW_DISABLE	0x0001	/* turn off WOW detection */
#define	WOW_PHYS_ENABLE	0x0020	/* turn on WOW for PHYS */
#define	WOW_LOGIT	0x0002	/* log non-disabled WOW detections */
#define	WOW_NOCOPY	0x0004	/* repeat normal write on WOW detection */

typedef	struct wowhdr {
	DAEMON_QUEUE
	md_mps_t	*wow_ps;
	int		wow_offset;
} wowhdr_t;

#define	WOWBUF_HDR(wowbuf)	((void *)(wowbuf-sizeof (wowhdr_t)))
#define	WOWHDR_BUF(wowhdr)	((char *)wowhdr+sizeof (wowhdr_t))

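/*
 * Illustrative sketch only: a write-on-write copy buffer is laid out
 * with its wowhdr_t immediately ahead of the data area, so the two
 * macros above convert between them.  "copysize" and the allocation
 * shown here are hypothetical.
 *
 *	wowhdr_t *hdr = kmem_alloc(sizeof (wowhdr_t) + copysize, KM_SLEEP);
 *	char *wowbuf = WOWHDR_BUF(hdr);
 *	ASSERT(WOWBUF_HDR(wowbuf) == (void *)hdr);
 */
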
/*
 * Structure used to save information about DMR reads: the count of all
 * DMR reads and the timestamp of the last one executed.  A global of
 * this type is declared so that a debugger can verify that the DMR
 * ioctl has been executed, and how many times.
 */
typedef struct dmr_stats {
	uint_t		dmr_count;
	struct timeval	dmr_timestamp;
} dmr_stats_t;

/* Externals from mirror.c */
extern mddb_recid_t	mirror_get_sm_unit(md_dev64_t);
extern void		mirror_release_sm_unit(md_dev64_t);

extern void		mirror_set_sm_state(mm_submirror_t *,
				mm_submirror_ic_t *, sm_state_t, int);

extern void		mirror_commit(mm_unit_t *, int, mddb_recid_t *);
extern int		poke_hotspares(void);
extern void		build_submirror(mm_unit_t *, int, int);
extern int		mirror_build_incore(mm_unit_t *, int);
extern void		reset_mirror(mm_unit_t *, minor_t, int);
extern int		mirror_internal_open(minor_t, int, int, int, IOLOCK *);
extern int		mirror_internal_close(minor_t, int, int, IOLOCK *);
extern void		set_sm_comp_state(mm_unit_t *, int, int, int,
			    mddb_recid_t *, uint_t, IOLOCK *);
extern int		mirror_other_sources(mm_unit_t *, int, int, int);
extern int		mirror_resync_message(md_mn_rs_params_t *, IOLOCK *);
extern void		md_mirror_strategy(buf_t *, int, void *);
extern int		mirror_directed_read(dev_t, vol_directed_rd_t *, int);
extern void		mirror_check_failfast(minor_t mnum);
extern int		check_comp_4_hotspares(mm_unit_t *, int, int, uint_t,
			    mddb_recid_t, IOLOCK *);
extern void		mirror_overlap_tree_remove(md_mps_t *ps);
extern void		mirror_child_init(md_mcs_t *cs);

/* Externals from mirror_ioctl.c */
extern void		reset_comp_states(mm_submirror_t *,
			    mm_submirror_ic_t *);
extern int		mirror_grow_unit(mm_unit_t *un, md_error_t *ep);
extern int		md_mirror_ioctl(dev_t dev, int cmd, void *data,
			    int mode, IOLOCK *lockp);
extern mm_unit_t	*mirror_getun(minor_t, md_error_t *, int, IOLOCK *);
extern void		mirror_get_status(mm_unit_t *un, IOLOCK *lockp);
extern int		mirror_choose_owner(mm_unit_t *un, md_mn_req_owner_t *);

/* rename named service functions */
md_ren_list_svc_t	mirror_rename_listkids;
md_ren_svc_t		mirror_rename_check;
md_ren_roleswap_svc_t	mirror_renexch_update_kids;
md_ren_roleswap_svc_t	mirror_exchange_parent_update_to;
md_ren_roleswap_svc_t	mirror_exchange_self_update_from_down;

/* Externals from mirror_resync.c */
extern int		unit_setup_resync(mm_unit_t *, int);
extern int		mirror_resync_unit(minor_t mnum, md_resync_ioctl_t *ri,
			    md_error_t *ep, IOLOCK *);
extern int		mirror_ioctl_resync(md_resync_ioctl_t *p, IOLOCK *);
extern int		mirror_mark_resync_region(mm_unit_t *, diskaddr_t,
				diskaddr_t);
extern void		resync_start_timeout(set_t setno);
extern int		mirror_resize_resync_regions(mm_unit_t *, diskaddr_t);
extern int		mirror_add_resync_regions(mm_unit_t *, diskaddr_t);
extern int		mirror_probedevs(md_probedev_t *, IOLOCK *);
extern void		mirror_copy_rr(int, uchar_t *, uchar_t *);
extern void		mirror_process_unit_resync(mm_unit_t *);
#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_MD_MIRROR_H */