// -*- C++ -*-
//===-- parallel_backend_tbb.h --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef __PSTL_parallel_backend_tbb_H
#define __PSTL_parallel_backend_tbb_H

#include <algorithm>
#include <type_traits>

#include "parallel_backend_utils.h"

// Bring in minimal required subset of Intel TBB
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_scan.h>
#include <tbb/parallel_invoke.h>
#include <tbb/task_arena.h>
#include <tbb/tbb_allocator.h>

#if TBB_INTERFACE_VERSION < 10000
#error Intel(R) Threading Building Blocks 2018 is required; older versions are not supported.
#endif

namespace __pstl
{
namespace __par_backend
{

//! Raw memory buffer with automatic freeing and no exceptions.
/** Some of our algorithms need to start with a raw memory buffer,
not an initialized array, because initialization/destruction
would make the whole span take at least O(N) time. */
// tbb::tbb_allocator can improve performance in some cases.
template <typename _Tp>
class __buffer
{
    tbb::tbb_allocator<_Tp> _M_allocator;
    _Tp* _M_ptr;
    const std::size_t _M_buf_size;
    __buffer(const __buffer&) = delete;
    void
    operator=(const __buffer&) = delete;

  public:
    //! Try to obtain buffer of given size to store objects of _Tp type
    __buffer(std::size_t n) : _M_allocator(), _M_ptr(_M_allocator.allocate(n)), _M_buf_size(n) {}
    //! True if buffer was successfully obtained, false otherwise.
    operator bool() const { return _M_ptr != NULL; }
    //! Return pointer to buffer, or NULL if buffer could not be obtained.
    _Tp*
    get() const
    {
        return _M_ptr;
    }
    //! Destroy buffer
    ~__buffer() { _M_allocator.deallocate(_M_ptr, _M_buf_size); }
};
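
// Illustrative use of __buffer (a sketch, not part of the library): obtain temporary
// storage for __n objects and fall back to a serial path when allocation fails.
//
//     __buffer<_ValueType> __buf(__n);
//     if (!__buf)
//         return __serial_fallback();       // hypothetical serial path
//     _ValueType* __tmp = __buf.get();      // memory is freed when __buf is destroyed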

// Wrapper over tbb::task: cancels execution of the current task group.
inline void
__cancel_execution()
{
    tbb::task::self().group()->cancel_group_execution();
}
72 
73 //------------------------------------------------------------------------
74 // parallel_for
75 //------------------------------------------------------------------------
76 
77 template <class _Index, class _RealBody>
78 class __parallel_for_body
79 {
80   public:
81     __parallel_for_body(const _RealBody& __body) : _M_body(__body) {}
82     __parallel_for_body(const __parallel_for_body& __body) : _M_body(__body._M_body) {}
83     void
84     operator()(const tbb::blocked_range<_Index>& __range) const
85     {
86         _M_body(__range.begin(), __range.end());
87     }
88 
89   private:
90     _RealBody _M_body;
91 };
92 
93 //! Evaluation of brick f[i,j) for each subrange [i,j) of [first,last)
94 // wrapper over tbb::parallel_for
95 template <class _ExecutionPolicy, class _Index, class _Fp>
96 void
97 __parallel_for(_ExecutionPolicy&&, _Index __first, _Index __last, _Fp __f)
98 {
99     tbb::this_task_arena::isolate([=]() {
100         tbb::parallel_for(tbb::blocked_range<_Index>(__first, __last), __parallel_for_body<_Index, _Fp>(__f));
101     });
102 }
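
// Illustrative call (a sketch, not part of the library): the brick receives the bounds
// of each subrange chosen by TBB and processes [__i, __j) serially; __in and __out are
// hypothetical random access sequences.
//
//     __par_backend::__parallel_for(std::execution::par, _Index(0), __n,
//                                   [&](_Index __i, _Index __j) {
//                                       for (; __i != __j; ++__i)
//                                           __out[__i] = __in[__i];
//                                   });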

//! Evaluation of brick f[i,j) for each subrange [i,j) of [first,last)
// wrapper over tbb::parallel_reduce
template <class _ExecutionPolicy, class _Value, class _Index, typename _RealBody, typename _Reduction>
_Value
__parallel_reduce(_ExecutionPolicy&&, _Index __first, _Index __last, const _Value& __identity,
                  const _RealBody& __real_body, const _Reduction& __reduction)
{
    return tbb::this_task_arena::isolate([__first, __last, &__identity, &__real_body, &__reduction]() -> _Value {
        return tbb::parallel_reduce(
            tbb::blocked_range<_Index>(__first, __last), __identity,
            [__real_body](const tbb::blocked_range<_Index>& __r, const _Value& __value) -> _Value {
                return __real_body(__r.begin(), __r.end(), __value);
            },
            __reduction);
    });
}

//------------------------------------------------------------------------
// parallel_transform_reduce
//
// Notation:
//      r(i,j,init) returns reduction of init with reduction over [i,j)
//      u(i) returns f(i,i+1,identity) for a hypothetical left identity element of r
//      c(x,y) combines values x and y that were the result of r or u
//------------------------------------------------------------------------
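
// For example (illustrative only), a transform-reduce that sums __f(__in[i]) over
// [first,last) maps onto this notation roughly as
//      u(i)        = __f(__in[i])
//      c(x,y)      = x + y
//      r(i,j,init) = init + __f(__in[i]) + ... + __f(__in[j-1])
// where __f and __in stand for a hypothetical transformation and input sequence.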

template <class _Index, class _Up, class _Tp, class _Cp, class _Rp>
struct __par_trans_red_body
{
    alignas(_Tp) char _M_sum_storage[sizeof(_Tp)]; // Holds generalized non-commutative sum when has_sum==true
    _Rp _M_brick_reduce;                           // Most likely to have non-empty layout
    _Up _M_u;
    _Cp _M_combine;
    bool _M_has_sum; // Put last to minimize size of class
    _Tp&
    sum()
    {
        __PSTL_ASSERT_MSG(_M_has_sum, "sum expected");
        return *(_Tp*)_M_sum_storage;
    }
    __par_trans_red_body(_Up __u, _Tp __init, _Cp __c, _Rp __r)
        : _M_brick_reduce(__r), _M_u(__u), _M_combine(__c), _M_has_sum(true)
    {
        new (_M_sum_storage) _Tp(__init);
    }

    __par_trans_red_body(__par_trans_red_body& __left, tbb::split)
        : _M_brick_reduce(__left._M_brick_reduce), _M_u(__left._M_u), _M_combine(__left._M_combine), _M_has_sum(false)
    {
    }

    ~__par_trans_red_body()
    {
        // 17.6.5.12 tells us to not worry about catching exceptions from destructors.
        if (_M_has_sum)
            sum().~_Tp();
    }

    void
    join(__par_trans_red_body& __rhs)
    {
        sum() = _M_combine(sum(), __rhs.sum());
    }

    void
    operator()(const tbb::blocked_range<_Index>& __range)
    {
        _Index __i = __range.begin();
        _Index __j = __range.end();
        if (!_M_has_sum)
        {
            __PSTL_ASSERT_MSG(__range.size() > 1, "there should be at least 2 elements");
            new (&_M_sum_storage)
                _Tp(_M_combine(_M_u(__i), _M_u(__i + 1))); // The condition i+1 < j is provided by the grain size of 3
            _M_has_sum = true;
            std::advance(__i, 2);
            if (__i == __j)
                return;
        }
        sum() = _M_brick_reduce(__i, __j, sum());
    }
};

template <class _ExecutionPolicy, class _Index, class _Up, class _Tp, class _Cp, class _Rp>
_Tp
__parallel_transform_reduce(_ExecutionPolicy&&, _Index __first, _Index __last, _Up __u, _Tp __init, _Cp __combine,
                            _Rp __brick_reduce)
{
    __par_backend::__par_trans_red_body<_Index, _Up, _Tp, _Cp, _Rp> __body(__u, __init, __combine, __brick_reduce);
    // The grain size of 3 is used in order to provide a minimum of 2 elements for each body
    tbb::this_task_arena::isolate(
        [__first, __last, &__body]() { tbb::parallel_reduce(tbb::blocked_range<_Index>(__first, __last, 3), __body); });
    return __body.sum();
}

//------------------------------------------------------------------------
// parallel_scan
//------------------------------------------------------------------------

template <class _Index, class _Up, class _Tp, class _Cp, class _Rp, class _Sp>
class __trans_scan_body
{
    alignas(_Tp) char _M_sum_storage[sizeof(_Tp)]; // Holds generalized non-commutative sum when has_sum==true
    _Rp _M_brick_reduce;                           // Most likely to have non-empty layout
    _Up _M_u;
    _Cp _M_combine;
    _Sp _M_scan;
    bool _M_has_sum; // Put last to minimize size of class
  public:
    __trans_scan_body(_Up __u, _Tp __init, _Cp __combine, _Rp __reduce, _Sp __scan)
        : _M_brick_reduce(__reduce), _M_u(__u), _M_combine(__combine), _M_scan(__scan), _M_has_sum(true)
    {
        new (_M_sum_storage) _Tp(__init);
    }

    __trans_scan_body(__trans_scan_body& __b, tbb::split)
        : _M_brick_reduce(__b._M_brick_reduce), _M_u(__b._M_u), _M_combine(__b._M_combine), _M_scan(__b._M_scan),
          _M_has_sum(false)
    {
    }

    ~__trans_scan_body()
    {
        // 17.6.5.12 tells us to not worry about catching exceptions from destructors.
        if (_M_has_sum)
            sum().~_Tp();
    }

    _Tp&
    sum() const
    {
        __PSTL_ASSERT_MSG(_M_has_sum, "sum expected");
        return *const_cast<_Tp*>(reinterpret_cast<_Tp const*>(_M_sum_storage));
    }

    void
    operator()(const tbb::blocked_range<_Index>& __range, tbb::pre_scan_tag)
    {
        _Index __i = __range.begin();
        _Index __j = __range.end();
        if (!_M_has_sum)
        {
            new (&_M_sum_storage) _Tp(_M_u(__i));
            _M_has_sum = true;
            ++__i;
            if (__i == __j)
                return;
        }
        sum() = _M_brick_reduce(__i, __j, sum());
    }

    void
    operator()(const tbb::blocked_range<_Index>& __range, tbb::final_scan_tag)
    {
        sum() = _M_scan(__range.begin(), __range.end(), sum());
    }

    void
    reverse_join(__trans_scan_body& __a)
    {
        if (_M_has_sum)
        {
            sum() = _M_combine(__a.sum(), sum());
        }
        else
        {
            new (&_M_sum_storage) _Tp(__a.sum());
            _M_has_sum = true;
        }
    }

    void
    assign(__trans_scan_body& __b)
    {
        sum() = __b.sum();
    }
};

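// Return the largest power of two strictly less than __m (assumes __m > 1); used below
// to pick the split point of the upsweep/downsweep recursion.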
template <typename _Index>
_Index
__split(_Index __m)
{
    _Index __k = 1;
    while (2 * __k < __m)
        __k *= 2;
    return __k;
}

//------------------------------------------------------------------------
// __parallel_strict_scan
//------------------------------------------------------------------------

template <typename _Index, typename _Tp, typename _Rp, typename _Cp>
void
__upsweep(_Index __i, _Index __m, _Index __tilesize, _Tp* __r, _Index __lastsize, _Rp __reduce, _Cp __combine)
{
    if (__m == 1)
        __r[0] = __reduce(__i * __tilesize, __lastsize);
    else
    {
        _Index __k = __split(__m);
        tbb::parallel_invoke(
            [=] { __par_backend::__upsweep(__i, __k, __tilesize, __r, __tilesize, __reduce, __combine); },
            [=] { __par_backend::__upsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize, __reduce, __combine); });
        if (__m == 2 * __k)
            __r[__m - 1] = __combine(__r[__k - 1], __r[__m - 1]);
    }
}

template <typename _Index, typename _Tp, typename _Cp, typename _Sp>
void
__downsweep(_Index __i, _Index __m, _Index __tilesize, _Tp* __r, _Index __lastsize, _Tp __initial, _Cp __combine,
            _Sp __scan)
{
    if (__m == 1)
        __scan(__i * __tilesize, __lastsize, __initial);
    else
    {
        const _Index __k = __split(__m);
        tbb::parallel_invoke([=] { __par_backend::__downsweep(__i, __k, __tilesize, __r, __tilesize, __initial, __combine, __scan); },
                             // Assumes that __combine never throws.
                             //TODO: Consider adding a requirement for user functors to be constant.
                             [=, &__combine] {
                                 __par_backend::__downsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize,
                                             __combine(__initial, __r[__k - 1]), __combine, __scan);
                             });
    }
}

// Adapted from Intel(R) Cilk(TM) version from cilkpub.
// Let i:len denote a counted interval of length len starting at i.  s denotes a generalized-sum value.
// Expected actions of the functors are:
//     reduce(i,len) -> s  -- return reduction value of i:len.
//     combine(s1,s2) -> s -- return merged sum
//     apex(s) -- do any processing necessary between reduce and scan.
//     scan(i,len,initial) -- perform scan over i:len starting with initial.
// The initial range 0:n is partitioned into consecutive subranges.
// reduce and scan are each called exactly once per subrange.
// Thus callers can rely upon side effects in reduce.
// combine must not throw an exception.
// apex is called exactly once, after all calls to reduce and before all calls to scan.
// For example, it's useful for allocating a __buffer used by scan but whose size is the sum of all reduction values.
// T must have a trivial constructor and destructor.
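// Illustrative instantiation (a sketch only): an exclusive prefix sum of a hypothetical
// input __in written to __out could use
//     reduce(i,len)       : return __in[i] + ... + __in[i+len-1]
//     combine(s1,s2)      : return s1 + s2
//     apex(s)             : no-op (s is the total sum)
//     scan(i,len,initial) : write __out[i+k] = initial + __in[i] + ... + __in[i+k-1]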
template <class _ExecutionPolicy, typename _Index, typename _Tp, typename _Rp, typename _Cp, typename _Sp, typename _Ap>
void
__parallel_strict_scan(_ExecutionPolicy&&, _Index __n, _Tp __initial, _Rp __reduce, _Cp __combine, _Sp __scan,
                       _Ap __apex)
{
    tbb::this_task_arena::isolate([=, &__combine]() {
        if (__n > 1)
        {
            _Index __p = tbb::this_task_arena::max_concurrency();
            const _Index __slack = 4;
            _Index __tilesize = (__n - 1) / (__slack * __p) + 1;
            _Index __m = (__n - 1) / __tilesize;
            __buffer<_Tp> __buf(__m + 1);
            _Tp* __r = __buf.get();
            __par_backend::__upsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __reduce, __combine);

            // When __apex is a no-op and __combine has no side effects, a good optimizer
            // should be able to eliminate all code between here and __apex.
            // Alternatively, provide a default value for __apex that can be
            // recognized by metaprogramming that conditionally executes the following.
            size_t __k = __m + 1;
            _Tp __t = __r[__k - 1];
            while ((__k &= __k - 1))
                __t = __combine(__r[__k - 1], __t);
            __apex(__combine(__initial, __t));
            __par_backend::__downsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __initial, __combine,
                        __scan);
            return;
        }
        // Fewer than 2 elements in sequence, or out of memory.  Handle as a single block.
        _Tp __sum = __initial;
        if (__n)
            __sum = __combine(__sum, __reduce(_Index(0), __n));
        __apex(__sum);
        if (__n)
            __scan(_Index(0), __n, __initial);
    });
}

template <class _ExecutionPolicy, class _Index, class _Up, class _Tp, class _Cp, class _Rp, class _Sp>
_Tp
__parallel_transform_scan(_ExecutionPolicy&&, _Index __n, _Up __u, _Tp __init, _Cp __combine, _Rp __brick_reduce,
                          _Sp __scan)
{
    __trans_scan_body<_Index, _Up, _Tp, _Cp, _Rp, _Sp> __body(__u, __init, __combine, __brick_reduce, __scan);
    auto __range = tbb::blocked_range<_Index>(0, __n);
    tbb::this_task_arena::isolate([__range, &__body]() { tbb::parallel_scan(__range, __body); });
    return __body.sum();
}

//------------------------------------------------------------------------
// parallel_stable_sort
//------------------------------------------------------------------------

//------------------------------------------------------------------------
// stable_sort utilities
//
// These are used by parallel implementations but do not depend on them.
//------------------------------------------------------------------------

template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _RandomAccessIterator3,
          typename _Compare, typename _Cleanup, typename _LeafMerge>
class __merge_task : public tbb::task
{
    /*override*/ tbb::task*
    execute();
    _RandomAccessIterator1 _M_xs, _M_xe;
    _RandomAccessIterator2 _M_ys, _M_ye;
    _RandomAccessIterator3 _M_zs;
    _Compare _M_comp;
    _Cleanup _M_cleanup;
    _LeafMerge _M_leaf_merge;

  public:
    __merge_task(_RandomAccessIterator1 __xs, _RandomAccessIterator1 __xe, _RandomAccessIterator2 __ys,
                 _RandomAccessIterator2 __ye, _RandomAccessIterator3 __zs, _Compare __comp, _Cleanup __cleanup,
                 _LeafMerge __leaf_merge)
        : _M_xs(__xs), _M_xe(__xe), _M_ys(__ys), _M_ye(__ye), _M_zs(__zs), _M_comp(__comp), _M_cleanup(__cleanup),
          _M_leaf_merge(__leaf_merge)
    {
    }
};

#define __PSTL_MERGE_CUT_OFF 2000

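// Merge [_M_xs, _M_xe) and [_M_ys, _M_ye) into the range starting at _M_zs by divide and
// conquer: below the cut-off the merge is done serially by _M_leaf_merge; otherwise the
// longer input is split at its midpoint, the matching position in the other input is
// found by binary search, a new task is spawned for the upper halves, and this task
// recycles itself as a continuation that merges the lower halves.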
template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _RandomAccessIterator3,
          typename __M_Compare, typename _Cleanup, typename _LeafMerge>
tbb::task*
__merge_task<_RandomAccessIterator1, _RandomAccessIterator2, _RandomAccessIterator3, __M_Compare, _Cleanup,
             _LeafMerge>::execute()
{
    typedef typename std::iterator_traits<_RandomAccessIterator1>::difference_type _DifferenceType1;
    typedef typename std::iterator_traits<_RandomAccessIterator2>::difference_type _DifferenceType2;
    typedef typename std::common_type<_DifferenceType1, _DifferenceType2>::type _SizeType;
    const _SizeType __n = (_M_xe - _M_xs) + (_M_ye - _M_ys);
    const _SizeType __merge_cut_off = __PSTL_MERGE_CUT_OFF;
    if (__n <= __merge_cut_off)
    {
        _M_leaf_merge(_M_xs, _M_xe, _M_ys, _M_ye, _M_zs, _M_comp);

        // we clean the buffer only once, on the last step of the sort
        _M_cleanup(_M_xs, _M_xe);
        _M_cleanup(_M_ys, _M_ye);
        return nullptr;
    }
    else
    {
        _RandomAccessIterator1 __xm;
        _RandomAccessIterator2 __ym;
        if (_M_xe - _M_xs < _M_ye - _M_ys)
        {
            __ym = _M_ys + (_M_ye - _M_ys) / 2;
            __xm = std::upper_bound(_M_xs, _M_xe, *__ym, _M_comp);
        }
        else
        {
            __xm = _M_xs + (_M_xe - _M_xs) / 2;
            __ym = std::lower_bound(_M_ys, _M_ye, *__xm, _M_comp);
        }
        const _RandomAccessIterator3 __zm = _M_zs + ((__xm - _M_xs) + (__ym - _M_ys));
        tbb::task* __right = new (tbb::task::allocate_additional_child_of(*parent()))
            __merge_task(__xm, _M_xe, __ym, _M_ye, __zm, _M_comp, _M_cleanup, _M_leaf_merge);
        tbb::task::spawn(*__right);
        tbb::task::recycle_as_continuation();
        _M_xe = __xm;
        _M_ye = __ym;
    }
    return this;
}

template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _Compare, typename _LeafSort>
class __stable_sort_task : public tbb::task
{
  public:
    typedef typename std::iterator_traits<_RandomAccessIterator1>::difference_type _DifferenceType1;
    typedef typename std::iterator_traits<_RandomAccessIterator2>::difference_type _DifferenceType2;
    typedef typename std::common_type<_DifferenceType1, _DifferenceType2>::type _SizeType;

  private:
    /*override*/ tbb::task*
    execute();
    _RandomAccessIterator1 _M_xs, _M_xe;
    _RandomAccessIterator2 _M_zs;
    _Compare _M_comp;
    _LeafSort _M_leaf_sort;
    int32_t _M_inplace;
    _SizeType _M_nsort;

  public:
    __stable_sort_task(_RandomAccessIterator1 __xs, _RandomAccessIterator1 __xe, _RandomAccessIterator2 __zs,
                       int32_t __inplace, _Compare __comp, _LeafSort __leaf_sort, _SizeType __n)
        : _M_xs(__xs), _M_xe(__xe), _M_zs(__zs), _M_comp(__comp), _M_leaf_sort(__leaf_sort), _M_inplace(__inplace),
          _M_nsort(__n)
    {
    }
};

//! Binary operator that does nothing
struct __binary_no_op
{
    template <typename _T>
    void operator()(_T, _T)
    {
    }
};
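
// __binary_no_op serves as the _Cleanup argument of __merge_task when the source ranges
// need no destruction; the destructive variant of the merge uses __serial_destroy instead.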

#define __PSTL_STABLE_SORT_CUT_OFF 500

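// Recursive step of the stable sort.  Below the cut-off the range is sorted serially by
// _M_leaf_sort and, unless _M_inplace == 2, the matching buffer segment is prepared with
// __init_buf so the parent merge can run out of place.  Otherwise the range is split in
// half, a __merge_task is allocated as the continuation, a child task is spawned for the
// right half, and this task recycles itself as the child for the left half, flipping
// _M_inplace so successive merge levels alternate between the original sequence and the buffer.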
template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, typename _Compare, typename _LeafSort>
tbb::task*
__stable_sort_task<_RandomAccessIterator1, _RandomAccessIterator2, _Compare, _LeafSort>::execute()
{
    const _SizeType __n = _M_xe - _M_xs;
    const _SizeType __nmerge = _M_nsort > 0 ? _M_nsort : __n;
    const _SizeType __sort_cut_off = __PSTL_STABLE_SORT_CUT_OFF;
    if (__n <= __sort_cut_off)
    {
        _M_leaf_sort(_M_xs, _M_xe, _M_comp);
        if (_M_inplace != 2)
            __par_backend::__init_buf(_M_xs, _M_xe, _M_zs, _M_inplace == 0);
        return NULL;
    }
    else
    {
        const _RandomAccessIterator1 __xm = _M_xs + __n / 2;
        const _RandomAccessIterator2 __zm = _M_zs + (__xm - _M_xs);
        const _RandomAccessIterator2 __ze = _M_zs + __n;
        task* __m;
        auto __move_values = [](_RandomAccessIterator2 __x, _RandomAccessIterator1 __z) { *__z = std::move(*__x); };
        auto __move_sequences = [](_RandomAccessIterator2 __first1, _RandomAccessIterator2 __last1,
                                   _RandomAccessIterator1 __first2) { return std::move(__first1, __last1, __first2); };
        if (_M_inplace == 2)
            __m = new (tbb::task::allocate_continuation())
                __merge_task<_RandomAccessIterator2, _RandomAccessIterator2, _RandomAccessIterator1, _Compare,
                             __serial_destroy,
                             __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>>(
                    _M_zs, __zm, __zm, __ze, _M_xs, _M_comp, __serial_destroy(),
                    __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>(__nmerge, __move_values,
                                                                                                            __move_sequences));
        else if (_M_inplace)
            __m = new (tbb::task::allocate_continuation())
                __merge_task<_RandomAccessIterator2, _RandomAccessIterator2, _RandomAccessIterator1, _Compare,
                             __par_backend::__binary_no_op, __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>>(
                    _M_zs, __zm, __zm, __ze, _M_xs, _M_comp, __par_backend::__binary_no_op(),
                    __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>(__nmerge, __move_values,
                                                                                                            __move_sequences));
        else
        {
            auto __move_values = [](_RandomAccessIterator1 __x, _RandomAccessIterator2 __z) { *__z = std::move(*__x); };
            auto __move_sequences = [](_RandomAccessIterator1 __first1, _RandomAccessIterator1 __last1,
                                       _RandomAccessIterator2 __first2) {
                return std::move(__first1, __last1, __first2);
            };
            __m = new (tbb::task::allocate_continuation())
                __merge_task<_RandomAccessIterator1, _RandomAccessIterator1, _RandomAccessIterator2, _Compare,
                             __par_backend::__binary_no_op, __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>>(
                    _M_xs, __xm, __xm, _M_xe, _M_zs, _M_comp, __par_backend::__binary_no_op(),
                    __par_backend::__serial_move_merge<decltype(__move_values), decltype(__move_sequences)>(__nmerge, __move_values,
                                                                                                            __move_sequences));
        }
        __m->set_ref_count(2);
        task* __right = new (__m->allocate_child())
            __stable_sort_task(__xm, _M_xe, __zm, !_M_inplace, _M_comp, _M_leaf_sort, __nmerge);
        tbb::task::spawn(*__right);
        tbb::task::recycle_as_child_of(*__m);
        _M_xe = __xm;
        _M_inplace = !_M_inplace;
    }
    return this;
}

template <class _ExecutionPolicy, typename _RandomAccessIterator, typename _Compare, typename _LeafSort>
void
__parallel_stable_sort(_ExecutionPolicy&&, _RandomAccessIterator __xs, _RandomAccessIterator __xe, _Compare __comp,
                       _LeafSort __leaf_sort, std::size_t __nsort = 0)
{
    tbb::this_task_arena::isolate([=, &__nsort]() {
        // sorting based on a task tree and parallel merge
        typedef typename std::iterator_traits<_RandomAccessIterator>::value_type _ValueType;
        typedef typename std::iterator_traits<_RandomAccessIterator>::difference_type _DifferenceType;
        const _DifferenceType __n = __xe - __xs;
        if (__nsort == 0)
            __nsort = __n;

        const _DifferenceType __sort_cut_off = __PSTL_STABLE_SORT_CUT_OFF;
        if (__n > __sort_cut_off)
        {
            __PSTL_ASSERT(__nsort > 0 && __nsort <= __n);
            __buffer<_ValueType> __buf(__n);
            using tbb::task;
            task::spawn_root_and_wait(*new (task::allocate_root())
                                          __stable_sort_task<_RandomAccessIterator, _ValueType*, _Compare, _LeafSort>(
                                              __xs, __xe, (_ValueType*)__buf.get(), 2, __comp, __leaf_sort, __nsort));
            return;
        }
        // serial sort
        __leaf_sort(__xs, __xe, __comp);
    });
}

//------------------------------------------------------------------------
// parallel_merge
//------------------------------------------------------------------------

template <class _ExecutionPolicy, typename _RandomAccessIterator1, typename _RandomAccessIterator2,
          typename _RandomAccessIterator3, typename _Compare, typename _LeafMerge>
void
__parallel_merge(_ExecutionPolicy&&, _RandomAccessIterator1 __xs, _RandomAccessIterator1 __xe,
                 _RandomAccessIterator2 __ys, _RandomAccessIterator2 __ye, _RandomAccessIterator3 __zs, _Compare __comp,
                 _LeafMerge __leaf_merge)
{
    typedef typename std::iterator_traits<_RandomAccessIterator1>::difference_type _DifferenceType1;
    typedef typename std::iterator_traits<_RandomAccessIterator2>::difference_type _DifferenceType2;
    typedef typename std::common_type<_DifferenceType1, _DifferenceType2>::type _SizeType;
    const _SizeType __n = (__xe - __xs) + (__ye - __ys);
    const _SizeType __merge_cut_off = __PSTL_MERGE_CUT_OFF;
    if (__n <= __merge_cut_off)
    {
        // Fall back on serial merge
        __leaf_merge(__xs, __xe, __ys, __ye, __zs, __comp);
    }
    else
    {
        tbb::this_task_arena::isolate([=]() {
            typedef __merge_task<_RandomAccessIterator1, _RandomAccessIterator2, _RandomAccessIterator3, _Compare,
                                 __par_backend::__binary_no_op, _LeafMerge>
                _TaskType;
            tbb::task::spawn_root_and_wait(*new (tbb::task::allocate_root()) _TaskType(
                __xs, __xe, __ys, __ye, __zs, __comp, __par_backend::__binary_no_op(), __leaf_merge));
        });
    }
}

//------------------------------------------------------------------------
// parallel_invoke
//------------------------------------------------------------------------
template <class _ExecutionPolicy, typename _F1, typename _F2>
void
__parallel_invoke(_ExecutionPolicy&&, _F1&& __f1, _F2&& __f2)
{
    //TODO: a version of tbb::this_task_arena::isolate with variadic arguments pack should be added in the future
    tbb::this_task_arena::isolate([&]() { tbb::parallel_invoke(std::forward<_F1>(__f1), std::forward<_F2>(__f2)); });
}

} // namespace __par_backend
} // namespace __pstl

#endif /* __PSTL_parallel_backend_tbb_H */