//! Simple composite-service TCP echo server.
//!
//! Connect to the server with the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter, the typed line will be echoed back. The server logs
//! the length of each line it echoes and the total size of data sent when the connection is closed.

use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};
use std::{env, io};

use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[actix_rt::main]
async fn main() -> io::Result<()> {
    env::set_var("RUST_LOG", "actix=trace,basic=trace");
    env_logger::init();

    let count = Arc::new(AtomicUsize::new(0));

    let addr = ("127.0.0.1", 8080);
    info!("starting server on {}:{}", &addr.0, &addr.1);

    // Bind socket address and start worker(s). By default, the server uses the number of
    // available logical CPU cores as the worker count, so the closure passed to `bind` must
    // return a service *factory* that can build one service instance per worker.
    Server::build()
        .bind("echo", addr, move || {
            let count = Arc::clone(&count);
            let num2 = Arc::clone(&count);

            pipeline_factory(move |mut stream: TcpStream| {
                let count = Arc::clone(&count);

                async move {
                    let num = count.fetch_add(1, Ordering::SeqCst);
                    let num = num + 1;

                    let mut size = 0;
                    let mut buf = BytesMut::new();

                    loop {
                        match stream.read_buf(&mut buf).await {
                            // end of stream; bail from loop
                            Ok(0) => break,

                            // more bytes to process
                            Ok(bytes_read) => {
                                info!("[{}] read {} bytes", num, bytes_read);
                                stream.write_all(&buf[size..]).await.unwrap();
                                size += bytes_read;
                            }

                            // stream error; bail from loop with error
                            Err(err) => {
                                error!("Stream Error: {:?}", err);
                                return Err(());
                            }
                        }
                    }

                    // send data down service pipeline
                    Ok((buf.freeze(), size))
                }
            })
            .map_err(|err| error!("Service Error: {:?}", err))
            .and_then(move |(_, size)| {
                let num = num2.load(Ordering::SeqCst);
                info!("[{}] total bytes read: {}", num, size);
                ok(size)
            })
        })?
        .workers(1)
        .run()
        .await
}
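
For scripted testing without `nc`, a minimal client sketch along the lines below can exercise the echo behavior. It is not part of the example above; it assumes the server is already listening on 127.0.0.1:8080 and reuses the same actix-rt/tokio types the server imports.

```rust
use std::io;

use actix_rt::net::TcpStream;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

#[actix_rt::main]
async fn main() -> io::Result<()> {
    // Connect to the echo server started by the example above.
    let mut stream = TcpStream::connect("127.0.0.1:8080").await?;

    // Send one line; the server echoes the bytes back unchanged.
    stream.write_all(b"hello\n").await?;

    // Read the echoed bytes back into a fixed-size buffer.
    let mut buf = [0u8; 64];
    let n = stream.read(&mut buf).await?;
    println!("echoed: {}", String::from_utf8_lossy(&buf[..n]));

    Ok(())
}
```

Any TCP client would do the same job; this sketch just avoids tools outside the dependencies the example already uses.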