/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Definitions for the LVM (Solaris Volume Manager) mirror metadevice
 * driver: submirror/unit structures (both the 32-bit on-disk and the
 * incore forms), resync-region bookkeeping, parent/child I/O structures,
 * and the external interfaces of mirror.c, mirror_ioctl.c and
 * mirror_resync.c.
 */

#ifndef	_SYS_MD_MIRROR_H
#define	_SYS_MD_MIRROR_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/callb.h>
#include <sys/lvm/mdvar.h>
#include <sys/lvm/md_mirror_shared.h>
#include <sys/lvm/md_rename.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * following bits are used in status word in the common section
 * of unit structure
 */
/* Test whether any of the given state bits are set on a submirror. */
#define	SMS_IS(sm, state)	(((sm)->sm_state & (state)) != 0)
/* Same test, addressing the submirror by its index in the unit. */
#define	SMS_BY_INDEX_IS(un, index, state)	\
		(((un)->un_sm[(index)].sm_state & (state)) != 0)

/* Nonzero when the indexed submirror is the target of a resync. */
#define	SMS_BY_INDEX_IS_TARGET(un, index)	\
		((un)->un_sm[(index)].sm_flags & MD_SM_RESYNC_TARGET)

/*
 * A submirror may be read from when it is not being ignored and is in
 * one of the running/errored/resyncing states.
 */
#define	SUBMIRROR_IS_READABLE(un, isubmirror)			\
	((((un)->un_sm[(isubmirror)].sm_state & SMS_IGNORE) == 0) && \
	    ((un)->un_sm[(isubmirror)].sm_state &		\
	    (SMS_RUNNING | SMS_COMP_ERRED | SMS_COMP_RESYNC)))

/*
 * A submirror is written to in a wider set of states, including the
 * attached/offline resync states (writes must keep resync targets
 * up to date).
 */
#define	SUBMIRROR_IS_WRITEABLE(un, isubmirror)		\
	((un)->un_sm[(isubmirror)].sm_state &		\
	    (SMS_RUNNING | SMS_COMP_ERRED | SMS_COMP_RESYNC | \
	    SMS_ATTACHED_RESYNC | SMS_OFFLINE_RESYNC))

/*
 * Default resync block size for MN resync messages
 */
#define	MD_DEF_RESYNC_BLK_SZ	8192

/*
 * macro to test if the current block is within the current resync region
 *
 * NOTE(review): the 'un' and 'ps' arguments are not parenthesized in the
 * expansion; callers must pass simple lvalues, not expressions — verify
 * call sites if this macro is reused.
 */
#define	IN_RESYNC_REGION(un, ps)	\
	((un->un_rs_prev_overlap != NULL) && (ps->ps_firstblk >= \
	    un->un_rs_prev_overlap->ps_firstblk) && \
	    (ps->ps_lastblk <= un->un_rs_prev_overlap->ps_lastblk))
/*
 * Default resync update interval (in minutes).
 */
#define	MD_DEF_MIRROR_RESYNC_INTVL	5

/*
 * Defines for flags argument in function set_sm_comp_state()
 */
#define	MD_STATE_NO_XMIT	0x0000	/* Local action, (sent from master) */
#define	MD_STATE_XMIT		0x0001	/* Non-local action, send to master */
#define	MD_STATE_WMUPDATE	0x0002	/* Action because of watermark update */
#define	MD_STATE_OCHELD		0x0004	/* open/close lock held */

/*
 * Defines for flags argument in function check_comp_4_hotspares()
 */
#define	MD_HOTSPARE_NO_XMIT	0x0000	/* Local action, (sent from master) */
#define	MD_HOTSPARE_XMIT	0x0001	/* Non-local action, send to master */
#define	MD_HOTSPARE_WMUPDATE	0x0002	/* Action because of watermark update */
#define	MD_HOTSPARE_LINKHELD	0x0004	/* md_link_rw lock held */

/*
 * Defines for argument in function send_mn_resync_done_message()
 */
#define	RESYNC_ERR		0x1
#define	CLEAR_OPT_NOT_DONE	0x2

/*
 * Defines for argument in function resync_read_blk_range()
 */
#define	MD_FIRST_RESYNC_NEXT	0x1
#define	MD_SEND_MESS_XMIT	0x2
#define	MD_RESYNC_FLAG_ERR	0x4

/*
 * Define for argument in function wait_for_overlaps()
 */
#define	MD_OVERLAP_ALLOW_REPEAT	0x1	/* Allow if ps already in tree */
#define	MD_OVERLAP_NO_REPEAT	0	/* ps must not already be in tree */

/*
 * Define for max retries of mirror_owner
 */
#define	MD_OWNER_RETRIES	10

/*
 * mm_submirror32_od and mm_unit32_od are used
 * only for 32 bit old format
 */
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack(4)
#endif
/*
 * On-disk (old 32-bit format) submirror record.  The xx_-prefixed
 * caddr32_t fields are placeholders for what used to be incore function
 * pointers; they are not dereferenced by the 64-bit kernel.
 */
typedef struct mm_submirror32_od {	/* submirrors */
	mdkey_t		sm_key;
	dev32_t		sm_dev;
	sm_state_t	sm_state;
	sm_flags_t	sm_flags;
	caddr32_t	xx_sm_shared_by_blk;	/* really (void *) */
	caddr32_t	xx_sm_shared_by_indx;	/* really (void *) */
	caddr32_t	xx_sm_get_component_count;
	caddr32_t	xx_sm_get_bcss;		/* block count skip size */
	md_m_shared32_od_t sm_shared;	/* used for mirroring plain devices */
	int		sm_hsp_id;	/* used for mirroring plain devices */
	struct timeval32 sm_timestamp;	/* time of last state change */
} mm_submirror32_od_t;

/* Current (64-bit capable) submirror record. */
typedef struct mm_submirror {		/* submirrors */
	mdkey_t		sm_key;
	md_dev64_t	sm_dev;		/* 64 bit */
	sm_state_t	sm_state;
	sm_flags_t	sm_flags;
	md_m_shared_t	sm_shared;	/* used for mirroring plain devices */
	int		sm_hsp_id;	/* used for mirroring plain devices */
	md_timeval32_t	sm_timestamp;	/* time of last state change, 32 bit */
} mm_submirror_t;

/*
 * On-disk (old 32-bit format) mirror unit record.  Field order and the
 * pack(4) pragma above fix the on-disk layout — do not reorder or resize
 * members.  The xx_-prefixed members replace what used to be incore
 * mutexes/condvars/pointers and are not used by the 64-bit kernel.
 */
typedef struct mm_unit32_od {
	mdc_unit32_od_t	c;		/* common stuff */

	int		un_last_read;	/* last submirror index read */
	uint_t		un_changecnt;
	ushort_t	un_nsm;		/* number of submirrors */
	mm_submirror32_od_t un_sm[NMIRROR];
	int		un_overlap_tree_flag;
	int		xx_un_overlap_tree_mx[2];	/* replaces mutex */
	ushort_t	xx_un_overlap_tree_cv;
	caddr32_t	xx_un_overlap_root;
	mm_rd_opt_t	un_read_option;		/* mirror read option */
	mm_wr_opt_t	un_write_option;	/* mirror write option */
	mm_pass_num_t	un_pass_num;		/* resync pass number */
	/*
	 * following used to keep dirty bitmaps
	 */
	int		xx_un_resync_mx[2];	/* replaces mutex */
	ushort_t	xx_un_resync_cv;
	uint_t		un_resync_flg;
	uint_t		un_waiting_to_mark;
	uint_t		un_waiting_to_commit;
	caddr32_t	xx_un_outstanding_writes; /* outstanding write */
	caddr32_t	xx_un_goingclean_bm;
	caddr32_t	xx_un_goingdirty_bm;
	caddr32_t	xx_un_dirty_bm;
	caddr32_t	xx_un_resync_bm;
	uint_t		un_rrd_blksize;	/* The blocksize of the dirty bits */
	uint_t		un_rrd_num;	/* The number of resync regions */
	mddb_recid_t	un_rr_dirty_recid;	/* resync region bm record id */
	/*
	 * following stuff is private to resync process
	 */
	int		un_rs_copysize;
	int		un_rs_dests;	/* destinations */
	daddr32_t	un_rs_resync_done;	/* used for percent done */
	daddr32_t	un_rs_resync_2_do;	/* used for percent done */
	int		un_rs_dropped_lock;
	caddr32_t	un_rs_type;	/* type of resync in progress */
	/*
	 * Incore elements in this old structure are no longer referenced by
	 * current 64 bit kernel. Comment them out for maintenance purpose.
	 *
	 * mm_submirror_ic_t un_smic[NMIRROR];
	 * kmutex_t		un_ovrlap_chn_mx;
	 * kcondvar_t		un_ovrlap_chn_cv;
	 * struct md_mps	*un_ovrlap_chn;
	 * kmutex_t		un_resync_mx;
	 * kcondvar_t		un_resync_cv;
	 * short		*un_outstanding_writes;
	 * uchar_t		*un_goingclean_bm;
	 * uchar_t		*un_goingdirty_bm;
	 * uchar_t		*un_dirty_bm;
	 * uchar_t		*un_resync_bm;
	 * char			*un_rs_buffer;
	 */
} mm_unit32_od_t;
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack()
#endif

/* Types of resync in progress (used for un_rs_type) */
#define	MD_RS_NONE		0	/* No resync */
#define	MD_RS_OPTIMIZED		0x0001	/* Optimized resync */
#define	MD_RS_COMPONENT		0x0002	/* Component resync */
#define	MD_RS_SUBMIRROR		0x0003	/* Submirror resync */
#define	MD_RS_ABR		0x0004	/* Application based resync */

/*
 * un_rs_type is split into the following bitfields:
 *
 *	0-3	Resync type (as above)
 *	4-7	Submirror index [0..3]
 *	8-31	Component index
 *
 * NOTE(review): RS_CI_MASK (0x1FFF00) only covers bits 8-20, not 8-31
 * as the comment above states — confirm the intended component-index
 * width before relying on the upper bits.
 */
#define	RS_TYPE_MASK	0xF
#define	RS_SMI_MASK	0xF0
#define	RS_CI_MASK	0x1FFF00

/* Extract the resync type / submirror index / component index. */
#define	RS_TYPE(x)	((x) & RS_TYPE_MASK)
#define	RS_SMI(x)	(((x) & RS_SMI_MASK) >> 4)
#define	RS_CI(x)	(((x) & RS_CI_MASK) >> 8)

/* Set the corresponding bitfield of (x), leaving the other fields alone. */
#define	SET_RS_TYPE(x, v)	{ \
	(x) &= ~RS_TYPE_MASK; \
	(x) |= ((v) & RS_TYPE_MASK); \
}
#define	SET_RS_TYPE_NONE(x)	{ \
	(x) &= ~RS_TYPE_MASK; \
}
#define	SET_RS_SMI(x, v)	{ \
	(x) &= ~RS_SMI_MASK; \
	(x) |= (((v) << 4) & RS_SMI_MASK); \
}
#define	SET_RS_CI(x, v)	{ \
	(x) &= ~RS_CI_MASK; \
	(x) |= (((v) << 8) & RS_CI_MASK); \
}

/*
 * Incore per-submirror function vector (filled in at build_incore time;
 * replaces the xx_sm_* placeholders in the old on-disk structure).
 */
typedef struct mm_submirror_ic {
	intptr_t	(*sm_shared_by_blk)(md_dev64_t, void *,
			    diskaddr_t, u_longlong_t *);
	intptr_t	(*sm_shared_by_indx)(md_dev64_t, void *, int);
	int		(*sm_get_component_count)(md_dev64_t, void *);
	int		(*sm_get_bcss)(md_dev64_t, void *, int, diskaddr_t *,
			    size_t *, u_longlong_t *, u_longlong_t *);
} mm_submirror_ic_t;

/*
 * Parent save structure: tracks one logical mirror I/O (which may fan
 * out to several child I/Os, one per submirror).  ps_firstblk/ps_lastblk
 * define the block range used for overlap detection via ps_overlap_node.
 */
typedef struct md_mps {
	DAEMON_QUEUE
	buf_t		*ps_bp;
	struct mm_unit	*ps_un;
	mdi_unit_t	*ps_ui;
	uint_t		ps_childbflags;
	caddr_t		ps_addr;
	diskaddr_t	ps_firstblk;
	diskaddr_t	ps_lastblk;
	uint_t		ps_flags;
	uint_t		ps_allfrom_sm;	/* entire read came from here */
	uint_t		ps_writable_sm;
	uint_t		ps_current_sm;
	uint_t		ps_active_cnt;
	int		ps_frags;
	uint_t		ps_changecnt;
	struct md_mps	*ps_unused1;
	struct md_mps	*ps_unused2;
	void		(*ps_call)();
	kmutex_t	ps_mx;
	avl_node_t	ps_overlap_node;
} md_mps_t;

/* Values for md_mps_t.ps_flags. */
#define	MD_MPS_ON_OVERLAP	0x0001
#define	MD_MPS_ERROR		0x0002
#define	MD_MPS_WRITE_AFTER_READ	0x0004
#define	MD_MPS_WOW		0x0008
#define	MD_MPS_DONTFREE		0x0010
#define	MD_MPS_DONE		0x0020
#define	MD_MPS_MAPPED		0x0040	/* re: MD_STR_MAPPED */
#define	MD_MPS_NOBLOCK		0x0080	/* re: MD_NOBLOCK */
#define	MD_MPS_ABR		0x0100	/* re: MD_STR_ABR */
#define	MD_MPS_DMR		0x0200	/* re: MD_STR_DMR */
#define	MD_MPS_WMUPDATE		0x0400	/* re: MD_STR_WMUPDATE */
#define	MD_MPS_DIRTY_RD		0x0800	/* re: MD_STR_DIRTY_RD */
#define	MD_MPS_RESYNC_READ	0x1000
#define	MD_MPS_FLAG_ERROR	0x2000	/* re: MD_STR_FLAG_ERR */

/*
 * Release a parent structure back to its kmem cache — unless it is
 * marked DONTFREE, in which case just flag it DONE and let the waiting
 * code dispose of it.
 */
#define	MPS_FREE(kc, ps)			\
{						\
	if ((ps)->ps_flags & MD_MPS_DONTFREE)	\
		(ps)->ps_flags |= MD_MPS_DONE;	\
	else					\
		kmem_cache_free((kc), (ps));	\
}

/* Child save structure: one per submirror I/O of a parent request. */
typedef struct md_mcs {
	DAEMON_QUEUE
	md_mps_t	*cs_ps;
	minor_t		cs_mdunit;
	/* Add new structure members HERE!! */
	buf_t		cs_buf;
	/* DO NOT add structure members here; cs_buf is dynamically sized */
} md_mcs_t;

/*
 * Incore-only portion of the mirror unit: locks, condvars, bitmaps and
 * multi-node (MN) ownership state.  Accessed from mm_unit_t through the
 * un_mmic member and the field-redirection #defines below.
 */
typedef struct mm_mirror_ic {
	kmutex_t	un_overlap_tree_mx;
	kcondvar_t	un_overlap_tree_cv;
	avl_tree_t	un_overlap_root;
	kmutex_t	un_resync_mx;
	kcondvar_t	un_resync_cv;
	short		*un_outstanding_writes; /* outstanding write array */
	uchar_t		*un_goingclean_bm;
	uchar_t		*un_goingdirty_bm;
	uchar_t		*un_dirty_bm;
	uchar_t		*un_resync_bm;
	char		*un_rs_buffer;
	int		un_suspend_wr_flag;
	kmutex_t	un_suspend_wr_mx;
	kcondvar_t	un_suspend_wr_cv;
	md_mn_nodeid_t	un_mirror_owner;	/* Node which owns mirror */
	diskaddr_t	un_resync_startbl;	/* Start block for resync */
	kmutex_t	un_owner_mx;		/* Mutex for un_owner_state */
	uint_t		un_owner_state;		/* See below */
	uint_t		un_mirror_owner_status;	/* status for ioctl request */
	kmutex_t	un_dmr_mx;	/* mutex for DMR requests */
	kcondvar_t	un_dmr_cv;	/* condvar for DMR requests */
	int		un_dmr_last_read;	/* last DMR submirror read */
	callb_cpr_t	un_rs_cprinfo;	/* CPR info for resync thread */
	kmutex_t	un_rs_cpr_mx;	/* Mutex for CPR info */
	uint_t		un_resync_completed;	/* type of last resync */
	int		un_abr_count;	/* count of sp's with abr set */
} mm_mirror_ic_t;

/* Values for mm_mirror_ic_t.un_owner_state. */
#define	MM_MN_OWNER_SENT	0x0001	/* RPC in progress */
#define	MM_MN_BECOME_OWNER	0x0002	/* Ownership change in prog. */
#define	MM_MN_PREVENT_CHANGE	0x0004	/* Disallow ownership change */

/* Incore mirror unit structure. */
typedef struct mm_unit {
	mdc_unit_t	c;		/* common stuff */

	int		un_last_read;	/* last submirror index read */
	uint_t		un_changecnt;
	ushort_t	un_nsm;		/* number of submirrors */
	mm_submirror_t	un_sm[NMIRROR];
	int		un_overlap_tree_flag;
	mm_rd_opt_t	un_read_option;		/* mirror read option */
	mm_wr_opt_t	un_write_option;	/* mirror write option */
	mm_pass_num_t	un_pass_num;		/* resync pass number */
	/*
	 * following used to keep dirty bitmaps
	 */
	uint_t		un_resync_flg;
	uint_t		un_waiting_to_mark;
	uint_t		un_waiting_to_commit;
	uint_t		un_rrd_blksize;	/* The blocksize of the dirty bits */
	uint_t		un_rrd_num;	/* The number of resync regions */
	mddb_recid_t	un_rr_dirty_recid; /* resync region bm db record id */
	/*
	 * following stuff is private to resync process
	 */
	int		un_rs_copysize;
	int		un_rs_dests;	/* destinations */
	diskaddr_t	un_rs_resync_done;	/* used for percent done */
	diskaddr_t	un_rs_resync_2_do;	/* used for percent done */
	int		un_rs_dropped_lock;
	uint_t		un_rs_type;	/* type of resync */
	/*
	 * Incore only elements
	 */
	mm_submirror_ic_t un_smic[NMIRROR];	/* NMIRROR elements array */
	mm_mirror_ic_t	un_mmic;
	kmutex_t	un_rrp_inflight_mx;
	/*
	 * resync thread control
	 */
	kthread_t	*un_rs_thread;	/* Resync thread ID */
	kmutex_t	un_rs_thread_mx;	/* Thread cv mutex */
	kcondvar_t	un_rs_thread_cv;	/* Cond. Var. for thread */
	uint_t		un_rs_thread_flags;	/* Thread control flags */
	md_mps_t	*un_rs_prev_overlap;	/* existing overlap request */
	timeout_id_t	un_rs_resync_to_id;	/* resync progress timeout */
	kmutex_t	un_rs_progress_mx;	/* Resync progress mutex */
	kcondvar_t	un_rs_progress_cv;	/* Cond. Var. for progress */
	uint_t		un_rs_progress_flags;	/* Thread control flags */
	void		*un_rs_msg;	/* Intra-node resync message */
} mm_unit_t;

/*
 * Redirect the historical un_* member names to their new home inside
 * the incore mm_mirror_ic_t (un_mmic), so existing code compiles
 * unchanged.
 */
#define	un_overlap_tree_mx	un_mmic.un_overlap_tree_mx
#define	un_overlap_tree_cv	un_mmic.un_overlap_tree_cv
#define	un_overlap_root		un_mmic.un_overlap_root
#define	un_resync_mx		un_mmic.un_resync_mx
#define	un_resync_cv		un_mmic.un_resync_cv
#define	un_outstanding_writes	un_mmic.un_outstanding_writes
#define	un_goingclean_bm	un_mmic.un_goingclean_bm
#define	un_goingdirty_bm	un_mmic.un_goingdirty_bm
#define	un_dirty_bm		un_mmic.un_dirty_bm
#define	un_resync_bm		un_mmic.un_resync_bm
#define	un_rs_buffer		un_mmic.un_rs_buffer
#define	un_suspend_wr_mx	un_mmic.un_suspend_wr_mx
#define	un_suspend_wr_cv	un_mmic.un_suspend_wr_cv
#define	un_suspend_wr_flag	un_mmic.un_suspend_wr_flag
#define	un_mirror_owner		un_mmic.un_mirror_owner
#define	un_resync_startbl	un_mmic.un_resync_startbl
#define	un_owner_mx		un_mmic.un_owner_mx
#define	un_owner_state		un_mmic.un_owner_state
/*
 * NOTE(review): un_mirror_reqs/un_mirror_reqs_done redirect to members
 * that do not exist in mm_mirror_ic_t above — presumably leftovers from
 * a removed feature; confirm no code still references them.
 */
#define	un_mirror_reqs		un_mmic.un_mirror_reqs
#define	un_mirror_reqs_done	un_mmic.un_mirror_reqs_done
#define	un_mirror_owner_status	un_mmic.un_mirror_owner_status
#define	un_dmr_mx		un_mmic.un_dmr_mx
#define	un_dmr_cv		un_mmic.un_dmr_cv
#define	un_dmr_last_read	un_mmic.un_dmr_last_read
#define	un_rs_cprinfo		un_mmic.un_rs_cprinfo
#define	un_rs_cpr_mx		un_mmic.un_rs_cpr_mx
#define	un_resync_completed	un_mmic.un_resync_completed
#define	un_abr_count		un_mmic.un_abr_count


/* Values for un_resync_flg (resync-region gate). */
#define	MM_RF_GATECLOSED	0x0001
#define	MM_RF_COMMIT_NEEDED	0x0002
#define	MM_RF_COMMITING		0x0004
#define	MM_RF_STALL_CLEAN	(MM_RF_COMMITING |	\
				    MM_RF_COMMIT_NEEDED |	\
				    MM_RF_GATECLOSED)


/* Multi-node mirror ownership tests. */
#define	MD_MN_MIRROR_UNOWNED	0
#define	MD_MN_MIRROR_OWNER(un)	(un->un_mirror_owner == md_mn_mynode_id)
#define	MD_MN_NO_MIRROR_OWNER(un)	\
	(un->un_mirror_owner == MD_MN_MIRROR_UNOWNED)

/* Linked list of errored components (submirror index / component index). */
typedef struct err_comp {
	struct err_comp	*ec_next;
	int		ec_smi;
	int		ec_ci;
} err_comp_t;

extern int	md_min_rr_size;
extern int	md_def_num_rr;

/* Optimized resync records controllers */
#define	MD_MIN_RR_SIZE		(md_min_rr_size)
#define	MD_DEF_NUM_RR		(md_def_num_rr)
#define	MD_MAX_NUM_RR		(4192*NBBY - sizeof (struct optim_resync))

/* default resync buffer size */
#define	MD_DEF_RESYNC_BUF_SIZE	(1024)

/* Structure for optimized resync records */
#define	OR_MAGIC	0xFECA	/* Only missing the L */
typedef struct optim_resync {
	uint_t	or_revision;
	uint_t	or_magic;
	uint_t	or_blksize;
	uint_t	or_num;
	uchar_t	or_rr[1];	/* variable-length bitmap; [1] is legacy style */
} optim_resync_t;

/* Type 2 for mirror records */
#define	MIRROR_REC	1
#define	RESYNC_REC	2

#ifdef	_KERNEL

/* Submirror selection masks (one bit per submirror index). */
#define	NO_SUBMIRRORS	(0)
#define	ALL_SUBMIRRORS	(0xFFF)
#define	SMI2BIT(smi)	(1 << (smi))

/* For use with mirror_other_sources() */
#define	WHOLE_SM	(-1)

/*
 * Map a block number to its resync-region index.
 *
 * NOTE(review): the sanity check uses '>' although valid region indices
 * are 0..un_rrd_num-1; confirm whether '>=' was intended (as written, an
 * index equal to un_rrd_num escapes the panic).
 */
#define	BLK_TO_RR(i, b, un)	{\
	(i) = ((b) / ((un))->un_rrd_blksize); \
	if ((i) > ((un))->un_rrd_num) \
		{ panic("md: BLK_TO_RR"); } \
}

/* Map a resync-region index back to its first block number. */
#define	RR_TO_BLK(b, i, un)	\
	(b) = ((i) * ((un))->un_rrd_blksize)

/* Per-region bitmap operations (isset/setbit/clrbit bit-array macros). */
#define	IS_GOING_DIRTY(i, un)	(isset((un)->un_goingdirty_bm, (i)))
#define	CLR_GOING_DIRTY(i, un)	(clrbit((un)->un_goingdirty_bm, (i)))
#define	SET_GOING_DIRTY(i, un)	(setbit((un)->un_goingdirty_bm, (i)))

#define	IS_GOING_CLEAN(i, un)	(isset((un)->un_goingclean_bm, (i)))
#define	CLR_GOING_CLEAN(i, un)	(clrbit((un)->un_goingclean_bm, (i)))
#define	SET_GOING_CLEAN(i, un)	(setbit((un)->un_goingclean_bm, (i)))

#define	IS_REGION_DIRTY(i, un)	(isset((un)->un_dirty_bm, (i)))
#define	CLR_REGION_DIRTY(i, un)	(clrbit((un)->un_dirty_bm, (i)))
#define	SET_REGION_DIRTY(i, un)	(setbit((un)->un_dirty_bm, (i)))

#define	IS_KEEPDIRTY(i, un)	(isset((un)->un_resync_bm, (i)))
#define	CLR_KEEPDIRTY(i, un)	(clrbit((un)->un_resync_bm, (i)))


/*
 * Write-On-Write handling.
 * flags for md_mirror_wow_flg
 * structure for queueing copy-writes
 * macros for relative locating of header and buffer
 */
#define	WOW_DISABLE	0x0001	/* turn off WOW detection */
#define	WOW_PHYS_ENABLE	0x0020	/* turn on WOW for PHYS */
#define	WOW_LOGIT	0x0002	/* log non-disabled WOW detections */
#define	WOW_NOCOPY	0x0004	/* repeat normal write on WOW detection */

typedef struct wowhdr {
	DAEMON_QUEUE
	md_mps_t	*wow_ps;
	int		wow_offset;
} wowhdr_t;

/*
 * A wowhdr_t is allocated immediately in front of its data buffer;
 * these macros convert between the two addresses.
 */
#define	WOWBUF_HDR(wowbuf)	((void *)(wowbuf-sizeof (wowhdr_t)))
#define	WOWHDR_BUF(wowhdr)	((char *)wowhdr+sizeof (wowhdr_t))

/*
 * Structure used to save information about DMR reads. Used to save
 * the count of all DMR reads and the timestamp of the last one executed.
 * We declare a global with this structure and it can be read by a debugger to
 * verify that the DMR ioctl has been executed and the number of times that it
 * has been executed.
 */
typedef struct dmr_stats {
	uint_t		dmr_count;
	struct timeval	dmr_timestamp;
} dmr_stats_t;

/* Externals from mirror.c */
extern mddb_recid_t	mirror_get_sm_unit(md_dev64_t);
extern void		mirror_release_sm_unit(md_dev64_t);

extern void		mirror_set_sm_state(mm_submirror_t *,
			    mm_submirror_ic_t *, sm_state_t, int);

extern void		mirror_commit(mm_unit_t *, int, mddb_recid_t *);
extern int		poke_hotspares(void);
extern void		build_submirror(mm_unit_t *, int, int);
extern int		mirror_build_incore(mm_unit_t *, int);
extern void		reset_mirror(mm_unit_t *, minor_t, int);
extern int		mirror_internal_open(minor_t, int, int, int, IOLOCK *);
extern int		mirror_internal_close(minor_t, int, int, IOLOCK *);
extern void		set_sm_comp_state(mm_unit_t *, int, int, int,
			    mddb_recid_t *, uint_t, IOLOCK *);
extern int		mirror_other_sources(mm_unit_t *, int, int, int);
extern int		mirror_resync_message(md_mn_rs_params_t *, IOLOCK *);
extern void		md_mirror_strategy(buf_t *, int, void *);
extern int		mirror_directed_read(dev_t, vol_directed_rd_t *, int);
extern void		mirror_check_failfast(minor_t mnum);
extern int		check_comp_4_hotspares(mm_unit_t *, int, int, uint_t,
			    mddb_recid_t, IOLOCK *);
extern void		mirror_overlap_tree_remove(md_mps_t *ps);
extern void		mirror_child_init(md_mcs_t *cs);

/* Externals from mirror_ioctl.c */
extern void		reset_comp_states(mm_submirror_t *,
			    mm_submirror_ic_t *);
extern int		mirror_grow_unit(mm_unit_t *un, md_error_t *ep);
extern int		md_mirror_ioctl(dev_t dev, int cmd, void *data,
			    int mode, IOLOCK *lockp);
extern mm_unit_t	*mirror_getun(minor_t, md_error_t *, int, IOLOCK *);
extern void		mirror_get_status(mm_unit_t *un, IOLOCK *lockp);
extern int		mirror_choose_owner(mm_unit_t *un, md_mn_req_owner_t *);

/* rename named service functions */
md_ren_list_svc_t	mirror_rename_listkids;
md_ren_svc_t		mirror_rename_check;
md_ren_roleswap_svc_t	mirror_renexch_update_kids;
md_ren_roleswap_svc_t	mirror_exchange_parent_update_to;
md_ren_roleswap_svc_t	mirror_exchange_self_update_from_down;

/* Externals from mirror_resync.c */
extern int		unit_setup_resync(mm_unit_t *, int);
extern int		mirror_resync_unit(minor_t mnum, md_resync_ioctl_t *ri,
			    md_error_t *ep, IOLOCK *);
extern int		mirror_ioctl_resync(md_resync_ioctl_t *p, IOLOCK *);
extern int		mirror_mark_resync_region(mm_unit_t *, diskaddr_t,
			    diskaddr_t);
extern void		resync_start_timeout(set_t setno);
extern int		mirror_resize_resync_regions(mm_unit_t *, diskaddr_t);
extern int		mirror_add_resync_regions(mm_unit_t *, diskaddr_t);
extern int		mirror_probedevs(md_probedev_t *, IOLOCK *);
extern void		mirror_copy_rr(int, uchar_t *, uchar_t *);
extern void		mirror_process_unit_resync(mm_unit_t *);
#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_MD_MIRROR_H */