xref: /dflybsd-src/sys/vfs/hammer2/hammer2_admin.c (revision 725edadf86d63f56a584adf23265845c8590d734)
/*
 * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"

/*
 * Signal that the thread has work.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
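		/*
		 * If the thread is blocked in its interlocked tsleep,
		 * clear WAITING in the same cmpset that posts the new
		 * flags so exactly one wakeup is issued.
		 */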
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags,
				  (oflags | flags) & ~HAMMER2_THREAD_WAITING)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags,
					      oflags | flags)) {
				break;
			}
		}
	}
}

/*
 * Return status to waiting client(s)
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_return(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_CLIENTWAIT;

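		/*
		 * Both branches post the same nflags; when a client is
		 * waiting we additionally wake it up after the cmpset
		 * succeeds.
		 */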
		if (oflags & HAMMER2_THREAD_CLIENTWAIT) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Wait until the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_CLIENTWAIT;
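		/*
		 * Interlocked sleep: if the cmpset fails the flags changed
		 * under us and we retry rather than sleep on stale state.
		 * The 60-second tsleep is only a safety poll.
		 */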
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_CLIENTWAIT;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	}
}

/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}

/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
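/*
 * Illustrative frontend lifecycle (a sketch only, not lifted from a
 * real VOP; the backend function name is hypothetical and error
 * handling is elided):
 *
 *	hammer2_xop_t *xop;
 *	int error;
 *
 *	xop = hammer2_xop_alloc(ip, 0);
 *	hammer2_xop_start(&xop->head, some_backend_func);
 *	while ((error = hammer2_xop_collect(&xop->head, 0)) == 0) {
 *		(consume xop->head.cluster)
 *	}
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 *
 * An error of ENOENT from the collector indicates normal termination.
 */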
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	xop->head.check_counter = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}

size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}

void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations must be segregated from non-strategy operations
	 * to avoid a deadlock.  For example, if a vfsync and a bread/bwrite
	 * were queued to the same worker thread, the locked buffer in the
	 * strategy operation can deadlock the vfsync's buffer list scan.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes.
	 */
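	/*
	 * Strategy ops hash (ip1, lbase) into the upper half of the XOP
	 * groups while everything else hashes ip1 into the lower half,
	 * so the two classes never share a worker thread.
	 */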
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
	xop->func = func;

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_int(&xop->run_mask, 1U << i);
			atomic_set_int(&xop->chk_mask, 1U << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
{
	hammer2_chain_t *chain;
	uint32_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * XXX optimize wakeup.
	 */
	KKASSERT(xop->run_mask & mask);
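	/*
	 * NOTE: atomic_fetchadd_int() returns the pre-subtraction value,
	 *	 so we are the last participant only when the old run_mask,
	 *	 ignoring FIFOW, equals our own mask.
	 */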
	nmask = atomic_fetchadd_int(&xop->run_mask, -mask);
	if ((nmask & ~HAMMER2_XOPMASK_FIFOW) != mask) {
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}
		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos, use check_counter to optimize the loop.
	 * Since we are the only entity left on this xop we don't have
	 * to worry about fifo flow control, and one lfence() will do the
	 * job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint32_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = EINTR;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];

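	/*
	 * The ri/wi indices are free-running; the fifo is full when
	 * wi - ri reaches HAMMER2_XOPFIFO.  Stall until the frontend
	 * consumes entries (waking us via FIFOW) or detaches.
	 */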
	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = EINTR;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_int(&xop->run_mask, mask,
				      mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;
	if (atomic_fetchadd_int(&xop->check_counter, HAMMER2_XOP_CHKINC) &
	    HAMMER2_XOP_CHKWAIT) {
		atomic_clear_int(&xop->check_counter, HAMMER2_XOP_CHKWAIT);
		wakeup(&xop->check_counter);
	}
	error = 0;

	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}

/*
 * (Frontend) Collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Returns ENOENT on normal termination.
 * Otherwise returns an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;
	uint32_t check_counter;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
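	/*
	 * Snapshot check_counter before scanning.  If a feeder bumps it
	 * during the scan the cmpset below fails and we rescan instead
	 * of sleeping, so wakeups cannot be lost.
	 */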
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	check_counter = xop->check_counter;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				/* XXX */
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0		 - key valid, cluster can be returned.
	 *
	 * ENOENT	 - normal end of scan, return ENOENT.
	 *
	 * ESRCH	 - sufficient elements collected, quorum agreement
	 *		   that lokey is not a valid element and should be
	 *		   skipped.
	 *
	 * EDEADLK	 - sufficient elements collected, no quorum agreement
	 *		   (and no agreement possible).  In this situation a
	 *		   repair is needed, for now we loop.
	 *
	 * EINPROGRESS	 - insufficient elements collected to resolve, wait
	 *		   for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    xop->run_mask != HAMMER2_XOPMASK_VOP) {
		error = EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == EINPROGRESS) {
		if ((flags & HAMMER2_XOP_COLLECT_NOWAIT) == 0)
			tsleep_interlock(&xop->check_counter, 0);
		if (atomic_cmpset_int(&xop->check_counter,
				      check_counter,
				      check_counter | HAMMER2_XOP_CHKWAIT)) {
			if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
				goto done;
			tsleep(&xop->check_counter, PINTERLOCKED,
			       "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = ENOENT;
	}
	if (error == EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			(intmax_t)lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)
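
/*
 * The dependency hash is a small per-scan bitmap (XOP_HASH_SIZE x 32
 * bits).  Each (thr, ip) pair maps to a single bit; a collision can
 * only produce a false dependency, causing an xop to be skipped for
 * one pass, never a missed dependency.
 */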

static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}

static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS threads execute the per-node piece of each operation, allowing
 * the backend function to focus on a single node in the cluster after
 * the operation has been validated against the cluster as a whole.
 * This is primarily what prevents dead or stalled nodes from stalling
 * the front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint32_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_func_t last_func = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1U << thr->clindex;
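	/*
	 * mask is this node's bit in each xop's run_mask; we pass it to
	 * hammer2_xop_retire() whenever we finish feeding an xop.
	 */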

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			nflags = (flags & ~(HAMMER2_THREAD_FREEZE |
					    HAMMER2_THREAD_CLIENTWAIT)) |
				 HAMMER2_THREAD_FROZEN;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			if (flags & HAMMER2_THREAD_CLIENTWAIT)
				wakeup(&thr->flags);
			flags = nflags;
			/* fall through */
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			nflags = flags & ~(HAMMER2_THREAD_UNFREEZE |
					   HAMMER2_THREAD_FROZEN |
					   HAMMER2_THREAD_CLIENTWAIT);
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			if (flags & HAMMER2_THREAD_CLIENTWAIT)
				wakeup(&thr->flags);
			flags = nflags;
			/* fall through */
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			nflags = flags | HAMMER2_THREAD_WAITING;
			tsleep_interlock(&thr->flags, 0);
			if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
				tsleep(&thr->flags, PINTERLOCKED, "frozen", 0);
				atomic_clear_int(&thr->flags,
						 HAMMER2_THREAD_WAITING);
			}
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			nflags = flags & ~HAMMER2_THREAD_REMASTER;
			if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
				/* reset state here */
			}
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_func = xop->func;
				xop->func(thr, (hammer2_xop_t *)xop);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
			atomic_clear_int(&thr->flags, HAMMER2_THREAD_WAITING);
		}
	}

#if 0
	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}
#endif
	thr->td = NULL;
	hammer2_thr_return(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}
1089