Commit d9cca33

provide a sync api
This is an attempt to provide a sync API, which makes much more sense for expect. The idea is to run the event loop in a separate thread so that we can call `Future::wait()` in the client handle without preventing the event loop from making progress. Note that this currently relies on rust-lang/rust#47760. Fwiw, running our own event loop is not considered good practice, but it is the approach reqwest has taken too, so I guess it's not that bad.
1 parent 83f79d0 commit d9cca33
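
For illustration, here is a minimal, self-contained sketch of the pattern this commit adopts. It is not the crate's actual code; it only assumes futures 0.1 and tokio-core, with hypothetical request/response types: a `Core` drives a request stream on a background thread, and each request carries a `oneshot::Sender` so the calling thread can block on the reply with `wait()`.

// Sketch only: assumes futures 0.1 and tokio-core, not this crate's real types.
extern crate futures;
extern crate tokio_core;

use std::thread;

use futures::{Future, Stream};
use futures::sync::{mpsc, oneshot};
use tokio_core::reactor::Core;

fn main() {
    // Each request carries a oneshot sender so the worker can answer the caller.
    let (req_tx, req_rx) = mpsc::unbounded::<(u32, oneshot::Sender<u32>)>();

    // The event loop runs on its own thread, so a blocked caller cannot stall it.
    let worker = thread::Builder::new()
        .name("event-loop".into())
        .spawn(move || {
            let mut core = Core::new().unwrap();
            let server = req_rx.for_each(|(n, resp_tx)| {
                // Ignore the error case where the caller has already gone away.
                let _ = resp_tx.send(n * 2);
                Ok(())
            });
            core.run(server).unwrap();
        })
        .unwrap();

    // Synchronous facade: send a request, then block on the response with `wait()`.
    let (resp_tx, resp_rx) = oneshot::channel();
    req_tx.unbounded_send((21, resp_tx)).unwrap();
    assert_eq!(resp_rx.wait().unwrap(), 42);

    // Dropping the last sender ends the stream, so the worker exits and can be joined.
    drop(req_tx);
    worker.join().unwrap();
}

Dropping the request sender ends the stream and lets the worker exit, which is the same job the `drop_tx`/`JoinHandle` pair does for `Handle` in the diff below.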

File tree: 1 file changed, +67 −24 lines


src/lib.rs

Lines changed: 67 additions & 24 deletions
@@ -14,6 +14,7 @@ use std::fmt;
 use std::io::{self, Read, Write};
 use std::collections::VecDeque;
 use std::process::Command;
+use std::thread;
 use std::time::Duration;
 
 use futures::{Async, Canceled, Future, Poll, Stream};
@@ -79,6 +80,8 @@ pub struct Session {
     /// FIFO storage for the matching requests. Requests are processed one after another, not
     /// concurrently.
     match_requests: VecDeque<ActiveMatchRequest>,
+
+    drop_rx: oneshot::Receiver<()>,
 }
 
 #[derive(Debug)]
@@ -140,40 +143,48 @@ impl fmt::Display for MatchError {
 
 type MatchOutcome = Result<(usize, Vec<u8>), MatchError>;
 
-#[derive(Clone)]
 pub struct Handle {
     match_requests_tx: mpsc::UnboundedSender<MatchRequest>,
     input_requests_tx: mpsc::UnboundedSender<InputRequest>,
+    thread: Option<thread::JoinHandle<()>>,
+    drop_tx: Option<oneshot::Sender<()>>,
 }
 
 impl Handle {
-    pub fn send(&self, bytes: Vec<u8>) -> Box<Future<Item = (), Error = Canceled>> {
+    pub fn send(&self, bytes: Vec<u8>) -> () {
         let handle = self.clone();
         let (response_tx, response_rx) = oneshot::channel::<()>();
         handle
             .input_requests_tx
             .unbounded_send(InputRequest(bytes, response_tx))
             .unwrap();
-        Box::new(response_rx)
+        response_rx.wait().unwrap()
     }
 
     pub fn expect(
         &mut self,
         matches: Vec<Match>,
         timeout: Option<Duration>,
-    ) -> Box<Future<Item = MatchOutcome, Error = ()>> {
+    ) -> Result<(usize, std::vec::Vec<u8>), MatchError> {
         let (response_tx, response_rx) = oneshot::channel::<MatchOutcome>();
         let request = MatchRequest {
            matches,
            response_tx,
            timeout,
        };
-        let handle = self.clone();
-        handle.match_requests_tx.unbounded_send(request).unwrap();
-        Box::new(response_rx.map_err(|_| ()))
+        self.match_requests_tx.unbounded_send(request).unwrap();
+        response_rx.wait().unwrap()
+    }
+}
+
+impl Drop for Handle {
+    fn drop(&mut self) {
+        self.drop_tx.take().unwrap().send(()).unwrap();
+        self.thread.take().unwrap().join();
     }
 }
 
+
 // TODO:
 //
 // - Make stdin evented PollEvented?
@@ -184,25 +195,46 @@ impl Handle {
 // precedence over the MIN and TIME settings.
 
 impl Session {
-    pub fn spawn(cmd: Command, handle: &TokioHandle) -> Result<Handle, ()> {
+    pub fn spawn(cmd: Command) -> Result<Handle, ()> {
         debug!("spawning new command {:?}", cmd);
         let (input_tx, input_rx) = mpsc::unbounded::<InputRequest>();
         let (match_tx, match_rx) = mpsc::unbounded::<MatchRequest>();
-        let mut pty = Pty::new::<::std::fs::File>(None, handle).unwrap();
-        let mut _child = pty.spawn(cmd).unwrap();
-        let session = Session {
-            pty: pty,
-            handle: handle.clone(),
-            buffer: Vec::new(),
-            input_requests_rx: input_rx,
-            match_requests_rx: match_rx,
-            input_requests: VecDeque::new(),
-            match_requests: VecDeque::new(),
-        };
-        handle.spawn(session);
+        let (drop_tx, drop_rx) = oneshot::channel::<()>();
+
+        // spawn the core future in a separate thread.
+        //
+        // it's bad practice to spawn our own core but otherwise, it's complicated to provide a
+        // synchronous client, since calling `wait()` blocks the current thread, preventing the
+        // event loop from making progress... But running the event loop in a separate thread, we
+        // can call `wait()` in the client.
+        let thread = thread::Builder::new()
+            .name("expect-internal-core".into())
+            .spawn(move || {
+                use tokio_core::reactor::Core;
+                let mut core = Core::new().unwrap();
+
+                let mut pty = Pty::new::<::std::fs::File>(None, &core.handle()).unwrap();
+                // FIXME: I guess we should do something with the child?
+                let _child = pty.spawn(cmd).unwrap();
+
+                let session = Session {
+                    pty: pty,
+                    handle: core.handle(),
+                    buffer: Vec::new(),
+                    input_requests_rx: input_rx,
+                    match_requests_rx: match_rx,
+                    input_requests: VecDeque::new(),
+                    match_requests: VecDeque::new(),
+                    drop_rx: drop_rx,
+                };
+                core.run(session);
+            })
+            .unwrap();
         Ok(Handle {
             match_requests_tx: match_tx.clone(),
             input_requests_tx: input_tx.clone(),
+            thread: Some(thread),
+            drop_tx: Some(drop_tx),
         })
     }
 
@@ -216,7 +248,7 @@ impl Session {
             if size == req.0.len() {
                 return Ok(Async::Ready(()));
             }
-            // FIXME: do we need to check if we wrote 0 bytes to avoid infinite looping?
+            // FIXME: do we need to check if we wrote 0 bytes to avoid infinite loops?
             continue;
         }
         Err(e) => {
@@ -459,10 +491,21 @@ impl Future for Session {
     type Error = ();
 
     fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
-        self.get_input_requests().unwrap();
-        self.get_match_requests().unwrap();
+        if let Err(_e) = self.get_input_requests() {
+            return Err(());
+        }
+        if let Err(_e) = self.get_match_requests() {
+            return Err(());
+        }
         self.process_input();
-        self.process_matches().unwrap();
+        if let Err(_e) = self.process_matches() {
+            return Err(());
+        }
+        match self.drop_rx.poll() {
+            Ok(Async::Ready(())) => return Ok(Async::Ready(())),
+            Ok(Async::NotReady) => {},
+            Err(Canceled) => return Err(()),
+        }
         Ok(Async::NotReady)
     }
 }
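
For reference, a sketch of how the resulting blocking API might be used, based only on the signatures in this diff. The `Match::Utf8` constructor and the error mapping are assumptions, and the crate's items are presumed to be in scope.

// Sketch only: assumes this crate's `Session`, `Handle`, `Match`, and `MatchError`
// are in scope; the `Match::Utf8` constructor is hypothetical.
use std::process::Command;
use std::time::Duration;

fn run_cat() -> Result<(), ()> {
    // The event loop now lives on the "expect-internal-core" thread.
    let mut handle = Session::spawn(Command::new("cat"))?;

    // `send` blocks until the bytes have been handed to the pty.
    handle.send(b"hello\n".to_vec());

    // `expect` blocks until one of the matches fires or the timeout expires.
    let (index, buf) = handle
        .expect(vec![Match::Utf8("hello".into())], Some(Duration::from_secs(5)))
        .map_err(|_| ())?;
    println!("match {} fired after consuming {:?}", index, buf);

    // Dropping `handle` signals the session future via `drop_tx` and joins the
    // event-loop thread.
    Ok(())
}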
