Skip to content

Add support for Lambda streaming response #628

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 6 commits into from
Apr 9, 2023
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 11 additions & 10 deletions lambda-http/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,19 +23,20 @@ apigw_websockets = []
alb = []

[dependencies]
base64 = "0.21"
bytes = "1.4"
futures = "0.3"
http = "0.2"
http-body = "0.4"
hyper = "0.14"
lambda_runtime = { path = "../lambda-runtime", version = "0.7" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
mime = "0.3"
encoding_rs = "0.8"
url = "2.2"
percent-encoding = "2.2"

[dependencies.aws_lambda_events]
version = "^0.7.2"
Expand Down
3 changes: 3 additions & 0 deletions lambda-http/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,9 @@ use std::{
task::{Context as TaskContext, Poll},
};

mod streaming;
pub use streaming::run_with_streaming_response;

/// Type alias for `http::Request`s with a fixed [`Body`](enum.Body.html) type
pub type Request = http::Request<Body>;

Expand Down
34 changes: 34 additions & 0 deletions lambda-http/src/streaming.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
use crate::request::LambdaRequest;
use crate::tower::ServiceBuilder;
use crate::{Request, RequestExt};
pub use aws_lambda_events::encodings::Body as LambdaEventBody;
use bytes::Bytes;
pub use http::{self, Response};
use http_body::Body;
use lambda_runtime::LambdaEvent;
pub use lambda_runtime::{self, service_fn, tower, Context, Error, Service};
use std::fmt::{Debug, Display};

/// Starts the Lambda Rust runtime and streams the response back. See [Configure Lambda
/// Streaming Response](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html).
///
/// This takes care of transforming the LambdaEvent into a [`Request`] and
/// accepts [`http::Response<http_body::Body>`] as response.
pub async fn run_with_streaming_response<'a, S, B, E>(handler: S) -> Result<(), Error>
where
    S: Service<Request, Response = Response<B>, Error = E>,
    S::Future: Send + 'a,
    E: Debug + Display,
    B: Body + Unpin + Send + 'static,
    B::Data: Into<Bytes> + Send,
    B::Error: Into<Error> + Send + Debug,
{
    // Adapt each raw Lambda invocation into an `http::Request` that carries the
    // invocation context, then hand the adapted service to the streaming runtime.
    let adapted = ServiceBuilder::new()
        .map_request(|lambda_event: LambdaEvent<LambdaRequest>| {
            let request: Request = lambda_event.payload.into();
            request.with_lambda_context(lambda_event.context)
        })
        .service(handler);

    lambda_runtime::run_with_streaming_response(adapted).await
}
4 changes: 3 additions & 1 deletion lambda-runtime-api-client/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,9 @@ where

/// Create a new client with a given base URI and HTTP connector.
pub fn with(base: Uri, connector: C) -> Self {
    // Use a larger (1 MiB) read buffer than hyper's default so streamed
    // response chunks are not split into many small reads.
    let client = hyper::Client::builder()
        .http1_max_buf_size(1024 * 1024)
        .build(connector);
    Self { base, client }
}

Expand Down
3 changes: 3 additions & 0 deletions lambda-runtime/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,9 @@ mod simulated;
/// Types available to a Lambda function.
mod types;

mod streaming;
pub use streaming::run_with_streaming_response;

use requests::{EventCompletionRequest, EventErrorRequest, IntoRequest, NextEventRequest};
pub use types::{Context, LambdaEvent};

Expand Down
258 changes: 258 additions & 0 deletions lambda-runtime/src/streaming.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,258 @@
use crate::{
build_event_error_request, incoming, type_name_of_val, Config, Context, Error, EventErrorRequest, IntoRequest,
LambdaEvent, Runtime,
};
use bytes::Bytes;
use futures::FutureExt;
use http::header::{CONTENT_TYPE, SET_COOKIE};
use http::{Method, Request, Response, Uri};
use hyper::body::HttpBody;
use hyper::{client::connect::Connection, Body};
use lambda_runtime_api_client::{build_request, Client};
use serde::Deserialize;
use serde_json::json;
use std::collections::HashMap;
use std::str::FromStr;
use std::{
env,
fmt::{self, Debug, Display},
future::Future,
panic,
};
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_stream::{Stream, StreamExt};
use tower::{Service, ServiceExt};
use tracing::{error, trace, Instrument};

/// Starts the Lambda Rust runtime and stream response back [Configure Lambda
/// Streaming Response](https://docs.aws.amazon.com/lambda/latest/dg/configuration-response-streaming.html).
///
/// # Example
/// ```no_run
/// use hyper::{body::Body, Response};
/// use lambda_runtime::{service_fn, Error, LambdaEvent};
/// use std::{thread, time::Duration};
/// use serde_json::Value;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Error> {
///     lambda_runtime::run_with_streaming_response(service_fn(func)).await?;
///     Ok(())
/// }
/// async fn func(_event: LambdaEvent<Value>) -> Result<Response<Body>, Error> {
///     let messages = vec!["Hello ", "world ", "from ", "Lambda!"];
///
///     let (mut tx, rx) = Body::channel();
///
///     tokio::spawn(async move {
///         for message in messages.iter() {
///             tx.send_data((*message).into()).await.unwrap();
///             thread::sleep(Duration::from_millis(500));
///         }
///     });
///
///     let resp = Response::builder()
///         .header("content-type", "text/plain")
///         .header("CustomHeader", "outerspace")
///         .body(rx)?;
///
///     Ok(resp)
/// }
/// ```
pub async fn run_with_streaming_response<A, B, F>(handler: F) -> Result<(), Error>
where
    F: Service<LambdaEvent<A>>,
    F::Future: Future<Output = Result<http::Response<B>, F::Error>>,
    F::Error: Debug + Display,
    A: for<'de> Deserialize<'de>,
    B: HttpBody + Unpin + Send + 'static,
    B::Data: Into<Bytes> + Send,
    B::Error: Into<Error> + Send + Debug,
{
    trace!("Loading config from env");
    let config = Config::from_env()?;
    // Build the Runtime API client once; failing here is unrecoverable,
    // so abort with a descriptive panic message.
    let runtime = Runtime {
        client: Client::builder().build().expect("Unable to create a runtime client"),
    };
    // Stream of invocation events polled from the Runtime API.
    let events = incoming(&runtime.client);
    runtime.run_with_streaming_response(events, handler, &config).await
}

impl<C> Runtime<C>
where
    C: Service<http::Uri> + Clone + Send + Sync + Unpin + 'static,
    C::Future: Unpin + Send,
    C::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    C::Response: AsyncRead + AsyncWrite + Connection + Unpin + Send + 'static,
{
    /// Core event loop for streaming responses: pull the next invocation from
    /// `incoming`, deserialize its payload, invoke `handler`, and report the
    /// outcome (streamed response, handler error, or caught panic) back to the
    /// Runtime API. Runs until the event stream ends or a transport/config
    /// error occurs.
    pub async fn run_with_streaming_response<F, A, B>(
        &self,
        incoming: impl Stream<Item = Result<Response<Body>, Error>> + Send,
        mut handler: F,
        config: &Config,
    ) -> Result<(), Error>
    where
        F: Service<LambdaEvent<A>>,
        F::Future: Future<Output = Result<Response<B>, F::Error>>,
        F::Error: fmt::Debug + fmt::Display,
        A: for<'de> Deserialize<'de>,
        B: HttpBody + Unpin + Send + 'static,
        B::Data: Into<Bytes> + Send,
        B::Error: Into<Error> + Send + Debug,
    {
        let client = &self.client;
        tokio::pin!(incoming);
        while let Some(next_event_response) = incoming.next().await {
            trace!("New event arrived (run loop)");
            let event = next_event_response?;
            let (parts, body) = event.into_parts();

            // Invocation metadata (request id, X-Ray trace id, ...) travels in
            // the Runtime API response headers.
            let ctx: Context = Context::try_from(parts.headers)?;
            let ctx: Context = ctx.with_config(config);
            // NOTE(review): this clones `request_id` and immediately borrows
            // the temporary; borrowing `ctx.request_id` directly would avoid
            // the clone — confirm nothing below needs the owned copy.
            let request_id = &ctx.request_id.clone();

            // Propagate (or clear) the X-Ray trace id via the env var so
            // downstream calls can join the same trace, and tag the span.
            let request_span = match &ctx.xray_trace_id {
                Some(trace_id) => {
                    env::set_var("_X_AMZN_TRACE_ID", trace_id);
                    tracing::info_span!("Lambda runtime invoke", requestId = request_id, xrayTraceId = trace_id)
                }
                None => {
                    env::remove_var("_X_AMZN_TRACE_ID");
                    tracing::info_span!("Lambda runtime invoke", requestId = request_id)
                }
            };

            // Group the handling in one future and instrument it with the span
            async {
                let body = hyper::body::to_bytes(body).await?;
                trace!("incoming request payload - {}", std::str::from_utf8(&body)?);

                // A payload that fails to deserialize is reported as an
                // invocation error instead of aborting the runtime loop.
                let body = match serde_json::from_slice(&body) {
                    Ok(body) => body,
                    Err(err) => {
                        let req = build_event_error_request(request_id, err)?;
                        client.call(req).await.expect("Unable to send response to Runtime APIs");
                        return Ok(());
                    }
                };

                let req = match handler.ready().await {
                    Ok(handler) => {
                        // Catches panics outside of a `Future`
                        let task =
                            panic::catch_unwind(panic::AssertUnwindSafe(|| handler.call(LambdaEvent::new(body, ctx))));

                        let task = match task {
                            // Catches panics inside of the `Future`
                            Ok(task) => panic::AssertUnwindSafe(task).catch_unwind().await,
                            Err(err) => Err(err),
                        };

                        match task {
                            Ok(response) => match response {
                                Ok(response) => {
                                    trace!("Ok response from handler (run loop)");
                                    // Stream the handler's response body back
                                    // instead of buffering a single payload.
                                    EventCompletionStreamingRequest {
                                        request_id,
                                        body: response,
                                    }
                                    .into_req()
                                }
                                Err(err) => build_event_error_request(request_id, err),
                            },
                            Err(err) => {
                                // The handler panicked: build an error report,
                                // including the panic message when its payload
                                // is a `&str`.
                                error!("{:?}", err);
                                let error_type = type_name_of_val(&err);
                                let msg = if let Some(msg) = err.downcast_ref::<&str>() {
                                    format!("Lambda panicked: {msg}")
                                } else {
                                    "Lambda panicked".to_string()
                                };
                                EventErrorRequest::new(request_id, error_type, &msg).into_req()
                            }
                        }
                    }
                    Err(err) => build_event_error_request(request_id, err),
                }?;

                client.call(req).await.expect("Unable to send response to Runtime APIs");
                Ok::<(), Error>(())
            }
            .instrument(request_span)
            .await?;
        }
        Ok(())
    }
}

/// Request that reports a successful invocation back to the Lambda Runtime API
/// by streaming the handler's response body to the `/invocation/{id}/response`
/// endpoint.
pub(crate) struct EventCompletionStreamingRequest<'a, B> {
    // Invocation id this response belongs to.
    pub(crate) request_id: &'a str,
    // The handler's full `http::Response`; its body is streamed out as-is.
    pub(crate) body: Response<B>,
}

impl<'a, B> EventCompletionStreamingRequest<'a, B>
where
    B: HttpBody + Unpin + Send + 'static,
    B::Data: Into<Bytes> + Send,
    B::Error: Into<Error> + Send + Debug,
{
    /// Convert the handler's response into the chunked `POST
    /// .../invocation/{request_id}/response` request expected by the Runtime
    /// API's streaming mode: a JSON metadata prelude, a NUL-byte separator,
    /// then the raw body stream.
    fn into_req(self) -> Result<Request<Body>, Error> {
        let uri = format!("/2018-06-01/runtime/invocation/{}/response", self.request_id);
        let uri = Uri::from_str(&uri)?;

        let (parts, mut body) = self.body.into_parts();

        let mut builder = build_request().method(Method::POST).uri(uri);
        let headers = builder.headers_mut().unwrap();

        // These headers put the Runtime API into streaming mode: the body is
        // sent chunked, and the content type tells Lambda to parse the JSON
        // metadata prelude before forwarding the raw stream to the caller.
        headers.insert("Transfer-Encoding", "chunked".parse()?);
        headers.insert("Lambda-Runtime-Function-Response-Mode", "streaming".parse()?);
        headers.insert(
            "Content-Type",
            "application/vnd.awslambda.http-integration-response".parse()?,
        );

        let (mut tx, rx) = Body::channel();

        // Feed the outgoing body from a background task so the request can be
        // dispatched while the handler is still producing chunks.
        tokio::spawn(async move {
            let mut header_map = parts.headers;
            // default Content-Type
            header_map
                .entry(CONTENT_TYPE)
                .or_insert("application/octet-stream".parse().unwrap());

            // Cookies are listed separately from the other headers in the
            // metadata prelude, so split them out first.
            let cookies = header_map.get_all(SET_COOKIE);
            let cookies = cookies
                .iter()
                .map(|c| String::from_utf8_lossy(c.as_bytes()).to_string())
                .collect::<Vec<String>>();

            // NOTE(review): collecting into a HashMap keeps only one value per
            // header name; multi-valued headers other than Set-Cookie lose all
            // but one value — confirm this matches the intended contract.
            let headers = header_map
                .iter()
                .filter(|(k, _)| *k != SET_COOKIE)
                .map(|(k, v)| (k.as_str(), String::from_utf8_lossy(v.as_bytes()).to_string()))
                .collect::<HashMap<&str, String>>();

            let metadata_prelude = json!({
                "statusCode": parts.status.as_u16(),
                "headers": headers,
                "cookies": cookies,
            })
            .to_string();

            trace!("metadata_prelude: {}", metadata_prelude);

            // Eight NUL bytes separate the JSON prelude from the raw response
            // stream in the http-integration-response framing.
            tx.send_data(metadata_prelude.into()).await.unwrap();
            tx.send_data("\u{0}".repeat(8).into()).await.unwrap();

            // NOTE(review): `unwrap()` panics this spawned task if the body
            // yields an error or the receiver is dropped; consider aborting the
            // channel (`tx.abort()`) instead of panicking.
            while let Some(chunk) = body.data().await {
                let chunk = chunk.unwrap();
                tx.send_data(chunk.into()).await.unwrap();
            }
        });

        let req = builder.body(rx)?;
        Ok(req)
    }
}