@@ -11,8 +11,6 @@
 #include <core/CLoopProgress.h>
 #include <core/ImportExport.h>
 
-#include <boost/any.hpp>
-
 #include <algorithm>
 #include <functional>
 #include <future>
@@ -72,7 +70,7 @@ auto bindRetrievableState(FUNCTION&& function, STATE&& state) {
 class CExecutor {
 public:
     virtual ~CExecutor() = default;
-    virtual void schedule(std::packaged_task<boost::any()>&& f) = 0;
+    virtual void schedule(std::function<void()>&& f) = 0;
     virtual bool busy() const = 0;
     virtual void busy(bool value) = 0;
 };
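The executor interface now consumes plain std::function<void()> tasks instead of move-only std::packaged_task objects, so anything scheduled must be copy constructible. A minimal sketch of an implementation satisfying this interface, which simply runs each task inline on the calling thread (CInlineExecutor is a hypothetical name for illustration, not something defined in this header), could look like:

// Hypothetical executor satisfying the revised CExecutor interface declared
// above; assumes the CExecutor declaration is visible, e.g. by including this
// header. It runs each scheduled task immediately on the calling thread; a
// real executor would enqueue the std::function onto a thread pool instead.
#include <functional>

class CInlineExecutor : public CExecutor {
public:
    void schedule(std::function<void()>&& f) override {
        f(); // run the task inline
    }
    bool busy() const override { return false; }
    void busy(bool) override {}
};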
@@ -105,35 +103,21 @@ CORE_EXPORT
 std::size_t defaultAsyncThreadPoolSize();
 
 namespace concurrency_detail {
-template<typename F>
-boost::any resultToAny(F& f, const std::false_type&) {
-    return boost::any{f()};
+template<typename F, typename P>
+void invokeAndWriteResultToPromise(F& f, P& promise, const std::false_type&) {
+    try {
+        promise->set_value(f());
+    } catch (...) { promise->set_exception(std::current_exception()); }
 }
-template<typename F>
-boost::any resultToAny(F& f, const std::true_type&) {
-    f();
-    return boost::any{};
+template<typename F, typename P>
+void invokeAndWriteResultToPromise(F& f, P& promise, const std::true_type&) {
+    try {
+        f();
+        promise->set_value();
+    } catch (...) { promise->set_exception(std::current_exception()); }
 }
-
-template<typename R>
-class CTypedFutureAnyWrapper {
-public:
-    CTypedFutureAnyWrapper() = default;
-    CTypedFutureAnyWrapper(std::future<boost::any>&& future)
-        : m_Future{std::forward<std::future<boost::any>>(future)} {}
-
-    bool valid() const { return m_Future.valid(); }
-    void wait() const { m_Future.wait(); }
-    R get() { return boost::any_cast<R>(m_Future.get()); }
-
-private:
-    std::future<boost::any> m_Future;
-};
 }
 
-template<typename R>
-using future = concurrency_detail::CTypedFutureAnyWrapper<R>;
-
 //! An version of std::async which uses a specified executor.
 //!
 //! \note f must be copy constructible.
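The replacement helpers dispatch at compile time on std::is_same<R, void>: promise->set_value(f()) would not compile for a void-returning callable, so the true_type overload invokes f() and then calls set_value() with no argument, and both overloads route any exception into the promise via set_exception() rather than letting it escape the scheduled task. A standalone sketch of the same tag-dispatch pattern (the name fulfil is made up for illustration; it is not the helper defined here):

// Standalone sketch of tag dispatch on whether a callable returns void,
// forwarding results and exceptions into a promise.
#include <exception>
#include <future>
#include <iostream>
#include <memory>
#include <type_traits>

template<typename F, typename P>
void fulfil(F& f, P& promise, const std::false_type&) {
    // Non-void result: forward the returned value into the promise.
    try {
        promise->set_value(f());
    } catch (...) {
        promise->set_exception(std::current_exception());
    }
}

template<typename F, typename P>
void fulfil(F& f, P& promise, const std::true_type&) {
    // Void result: just mark the promise as satisfied after running f.
    try {
        f();
        promise->set_value();
    } catch (...) {
        promise->set_exception(std::current_exception());
    }
}

int main() {
    auto promise = std::make_shared<std::promise<int>>();
    auto future = promise->get_future();

    auto task = [] { return 42; };
    // std::is_same<int, void> is std::false_type, so the non-void overload runs.
    fulfil(task, promise, std::is_same<int, void>{});

    std::cout << future.get() << '\n'; // prints 42
    return 0;
}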
@@ -146,18 +130,22 @@ using future = concurrency_detail::CTypedFutureAnyWrapper<R>;
 //! them. Prefer using high level primitives, such as parallel_for_each, which are
 //! safer.
 template<typename FUNCTION, typename... ARGS>
-future<std::result_of_t<std::decay_t<FUNCTION>(std::decay_t<ARGS>...)>>
+std::future<std::result_of_t<std::decay_t<FUNCTION>(std::decay_t<ARGS>...)>>
 async(CExecutor& executor, FUNCTION&& f, ARGS&&... args) {
     using R = std::result_of_t<std::decay_t<FUNCTION>(std::decay_t<ARGS>...)>;
 
     // Note g stores copies of the arguments in the pack, which are moved into place
     // if possible, so this is safe to invoke later in the context of a packaged task.
     auto g = std::bind<R>(std::forward<FUNCTION>(f), std::forward<ARGS>(args)...);
 
-    std::packaged_task<boost::any()> task([g_ = std::move(g)]() mutable {
-        return concurrency_detail::resultToAny(g_, std::is_same<R, void>{});
-    });
-    auto result = task.get_future();
+    auto promise = std::make_shared<std::promise<R>>();
+    auto result = promise->get_future();
+
+    std::function<void()> task(
+        [ g_ = std::move(g), promise_ = std::move(promise) ]() mutable {
+            concurrency_detail::invokeAndWriteResultToPromise(
+                g_, promise_, std::is_same<R, void>{});
+        });
 
     // Schedule the task to compute the result.
     executor.schedule(std::move(task));
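std::promise is move-only, but std::function requires a copy-constructible target; holding the promise through a std::shared_ptr keeps the task lambda copyable while the caller retains the matching std::future. A self-contained sketch of this shared-promise pattern, using a plain vector of std::function<void()> drained on a worker thread as a stand-in for an executor's queue (an illustration only, not the library's executor):

// Minimal sketch of the shared-promise pattern used by async() above: the
// promise is owned via std::shared_ptr so the scheduled std::function stays
// copy constructible, while the caller keeps the matching std::future.
#include <exception>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <thread>
#include <vector>

int main() {
    std::vector<std::function<void()>> queue; // stand-in for an executor's task queue

    auto promise = std::make_shared<std::promise<int>>();
    std::future<int> result = promise->get_future();

    // The lambda holds the promise by shared_ptr, so copying the
    // std::function does not require copying the (move-only) promise.
    queue.emplace_back([promise] {
        try {
            promise->set_value(6 * 7);
        } catch (...) {
            promise->set_exception(std::current_exception());
        }
    });

    std::thread worker([&queue] {
        for (auto& task : queue) {
            task();
        }
    });
    worker.join();

    std::cout << result.get() << '\n'; // prints 42
    return 0;
}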
@@ -167,41 +155,41 @@ async(CExecutor& executor, FUNCTION&& f, ARGS&&... args) {
 
 //! Wait for all \p futures to be available.
 template<typename T>
-void wait_for_all(const std::vector<future<T>>& futures) {
+void wait_for_all(const std::vector<std::future<T>>& futures) {
     std::for_each(futures.begin(), futures.end(),
-                  [](const future<T>& future) { future.wait(); });
+                  [](const std::future<T>& future) { future.wait(); });
 }
 
 //! Wait for a valid future to be available otherwise return immediately.
 template<typename T>
-void wait_for_valid(const future<T>& future) {
+void wait_for_valid(const std::future<T>& future) {
     if (future.valid()) {
         future.wait();
     }
 }
 
 //! Wait for all valid \p futures to be available.
 template<typename T>
-void wait_for_all_valid(const std::vector<future<T>>& futures) {
+void wait_for_all_valid(const std::vector<std::future<T>>& futures) {
     std::for_each(futures.begin(), futures.end(), wait_for_valid);
 }
 
 //! \brief Waits for a future to complete when the object is destroyed.
 template<typename T>
 class CWaitIfValidWhenExitingScope {
 public:
-    CWaitIfValidWhenExitingScope(future<T>& future) : m_Future{future} {}
+    CWaitIfValidWhenExitingScope(std::future<T>& future) : m_Future{future} {}
     ~CWaitIfValidWhenExitingScope() { wait_for_valid(m_Future); }
     CWaitIfValidWhenExitingScope(const CWaitIfValidWhenExitingScope&) = delete;
     CWaitIfValidWhenExitingScope& operator=(const CWaitIfValidWhenExitingScope&) = delete;
 
 private:
-    future<T>& m_Future;
+    std::future<T>& m_Future;
 };
 
 //! Get the conjunction of all \p futures.
 CORE_EXPORT
-bool get_conjunction_of_all(std::vector<future<bool>>& futures);
+bool get_conjunction_of_all(std::vector<std::future<bool>>& futures);
 
 namespace concurrency_detail {
 class CORE_EXPORT CDefaultAsyncExecutorBusyForScope {
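With the boost::any-backed wrapper gone, these helpers operate directly on std::future<T> and inherit its semantics, such as get() being a one-shot, consuming call. A standalone sketch of the same waiting idioms against plain std::future (CScopedWait mirrors CWaitIfValidWhenExitingScope but is a simplified, hypothetical re-implementation, and the conjunction loop only approximates what get_conjunction_of_all is declared to do):

// Standalone sketch: wait on a collection of std::future objects, combine
// boolean results, and use an RAII guard that waits for a still-valid future
// when it goes out of scope.
#include <algorithm>
#include <future>
#include <iostream>
#include <vector>

template<typename T>
class CScopedWait {
public:
    explicit CScopedWait(std::future<T>& future) : m_Future{future} {}
    ~CScopedWait() {
        if (m_Future.valid()) {
            m_Future.wait();
        }
    }
    CScopedWait(const CScopedWait&) = delete;
    CScopedWait& operator=(const CScopedWait&) = delete;

private:
    std::future<T>& m_Future;
};

int main() {
    std::vector<std::future<bool>> futures;
    for (int i = 0; i < 3; ++i) {
        futures.push_back(std::async(std::launch::async, [i] { return i % 2 == 0; }));
    }

    // Equivalent of wait_for_all: block until every future is ready.
    std::for_each(futures.begin(), futures.end(),
                  [](const std::future<bool>& f) { f.wait(); });

    // Rough equivalent of get_conjunction_of_all: AND the results together.
    bool all = true;
    for (auto& f : futures) {
        all = all && f.get();
    }
    std::cout << std::boolalpha << all << '\n'; // prints false (i == 1 is odd)

    std::future<void> pending = std::async(std::launch::async, [] {});
    CScopedWait<void> guard{pending}; // waits in its destructor if still valid
    return 0;
}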
@@ -287,7 +275,7 @@ parallel_for_each(std::size_t partitions,
     // ensure the best possible locality of reference for reads which occur
     // at a similar time in the different threads.
 
-    std::vector<future<bool>> tasks;
+    std::vector<std::future<bool>> tasks;
 
     for (std::size_t offset = 0; offset < partitions; ++offset, ++start) {
         // Note there is one copy of g for each thread so capture by reference
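The locality comment and the offset/start loop suggest a strided traversal in which the task for a given offset visits every partitions-th element, so that at any instant the worker threads read neighbouring elements. A standalone sketch of that access pattern (an interpretation for illustration only, not code taken from parallel_for_each; it uses std::async rather than the executor machinery above):

// Sketch of a strided partition scheme: task "offset" visits elements
// offset, offset + partitions, offset + 2 * partitions, ... of the vector.
#include <cstddef>
#include <future>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
    std::vector<int> values(100);
    std::iota(values.begin(), values.end(), 0);

    std::size_t partitions = 4;
    std::vector<std::future<long>> tasks;

    for (std::size_t offset = 0; offset < partitions; ++offset) {
        tasks.push_back(std::async(std::launch::async, [&values, offset, partitions] {
            long sum = 0;
            // Strided read: neighbouring tasks touch adjacent elements
            // at roughly the same time.
            for (std::size_t i = offset; i < values.size(); i += partitions) {
                sum += values[i];
            }
            return sum;
        }));
    }

    long total = 0;
    for (auto& task : tasks) {
        total += task.get();
    }
    std::cout << total << '\n'; // prints 4950, the sum 0 + 1 + ... + 99
    return 0;
}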
@@ -371,7 +359,7 @@ parallel_for_each(std::size_t partitions,
 
     // See above for the rationale for this access pattern.
 
-    std::vector<future<bool>> tasks;
+    std::vector<std::future<bool>> tasks;
 
     for (std::size_t offset = 0; offset < partitions; ++offset, ++start) {
         // Note there is one copy of g for each thread so capture by reference