|
| 1 | +use aws_config::meta::region::RegionProviderChain; |
| 2 | +use aws_lambda_events::{event::s3::S3Event, s3::S3EventRecord}; |
| 3 | +use aws_sdk_s3::Client as S3Client; |
| 4 | +use lambda_runtime::{run, service_fn, Error, LambdaEvent}; |
| 5 | +use s3client::{GetFile, GetThumbnail, PutFile}; |
| 6 | + |
| 7 | +mod s3client; |
| 8 | + |
| 9 | +/** |
| 10 | +This lambda handler |
| 11 | + * listen to file creation events |
| 12 | + * downloads the created file |
| 13 | + * creates a thumbnail from it |
| 14 | + * uploads the thumbnail to bucket "[original bucket name]-thumbs". |
| 15 | +
|
| 16 | +Make sure that |
| 17 | + * the created png file has no strange characters in the name |
| 18 | + * there is another bucket with "-thumbs" suffix in the name |
| 19 | + * this lambda only gets event from png file creation |
| 20 | + * this lambda has permission to put file into the "-thumbs" bucket |
| 21 | +*/ |
| 22 | +pub(crate) async fn function_handler<T: PutFile + GetFile + GetThumbnail>( |
| 23 | + event: LambdaEvent<S3Event>, |
| 24 | + client: &T, |
| 25 | +) -> Result<String, String> { |
| 26 | + let result = Ok("".to_string()); |
| 27 | + let records = event.payload.records; |
| 28 | + for record in records.iter() { |
| 29 | + let (bucket, key) = get_file_props(record); |
| 30 | + if bucket.is_empty() || key.is_empty() { |
| 31 | + // The event is not a create event or bucket/object key is missing |
| 32 | + println!("record skipped"); |
| 33 | + continue; |
| 34 | + } |
| 35 | + |
| 36 | + let reader = client.get_file(&bucket, &key).await; |
| 37 | + |
| 38 | + if reader.is_none() { |
| 39 | + continue; |
| 40 | + } |
| 41 | + |
| 42 | + let thumbnail = client.get_thumbnail(reader.unwrap()); |
| 43 | + |
| 44 | + let mut thumbs_bucket = bucket.to_owned(); |
| 45 | + thumbs_bucket.push_str("-thumbs"); |
| 46 | + |
| 47 | + // It uplaods the thumbnail into a bucket name suffixed with "-thumbs" |
| 48 | + // So it needs file creation permission into that bucket |
| 49 | + |
| 50 | + return client.put_file(&thumbs_bucket, &key, thumbnail).await; |
| 51 | + } |
| 52 | + |
| 53 | + return result; |
| 54 | +} |
| 55 | + |
| 56 | +fn get_file_props(record: &S3EventRecord) -> (String, String) { |
| 57 | + let empty_response = ("".to_string(), "".to_string()); |
| 58 | + |
| 59 | + if record.event_name.is_none() { |
| 60 | + return empty_response; |
| 61 | + } |
| 62 | + if !record.event_name.as_ref().unwrap().starts_with("ObjectCreated") { |
| 63 | + return empty_response; |
| 64 | + } |
| 65 | + |
| 66 | + if record.s3.bucket.name.is_none() || record.s3.object.key.is_none() { |
| 67 | + return empty_response; |
| 68 | + } |
| 69 | + |
| 70 | + let bucket_name = record.s3.bucket.name.to_owned().unwrap(); |
| 71 | + let object_key = record.s3.object.key.to_owned().unwrap(); |
| 72 | + |
| 73 | + if bucket_name.is_empty() || object_key.is_empty() { |
| 74 | + println!("Bucket name or object_key is empty"); |
| 75 | + return empty_response; |
| 76 | + } |
| 77 | + |
| 78 | + println!("Bucket: {}, Object key: {}", bucket_name, object_key); |
| 79 | + |
| 80 | + return (bucket_name, object_key); |
| 81 | +} |
| 82 | + |
| 83 | +async fn get_client() -> S3Client { |
| 84 | + let region_provider = RegionProviderChain::default_provider().or_else("us-east-2"); |
| 85 | + let config = aws_config::from_env().region(region_provider).load().await; |
| 86 | + let client = S3Client::new(&config); |
| 87 | + |
| 88 | + println!("client region {}", client.conf().region().unwrap().to_string()); |
| 89 | + |
| 90 | + return client; |
| 91 | +} |
| 92 | + |
| 93 | +#[tokio::main] |
| 94 | +async fn main() -> Result<(), Error> { |
| 95 | + // required to enable CloudWatch error logging by the runtime |
| 96 | + tracing_subscriber::fmt() |
| 97 | + .with_max_level(tracing::Level::INFO) |
| 98 | + // disable printing the name of the module in every log line. |
| 99 | + .with_target(false) |
| 100 | + // this needs to be set to false, otherwise ANSI color codes will |
| 101 | + // show up in a confusing manner in CloudWatch logs. |
| 102 | + .with_ansi(false) |
| 103 | + // disabling time is handy because CloudWatch will add the ingestion time. |
| 104 | + .without_time() |
| 105 | + .init(); |
| 106 | + |
| 107 | + let client = get_client().await; |
| 108 | + let client_ref = &client; |
| 109 | + |
| 110 | + let func = service_fn(move |event| async move { function_handler(event, client_ref).await }); |
| 111 | + |
| 112 | + run(func).await?; |
| 113 | + |
| 114 | + Ok(()) |
| 115 | +} |
| 116 | + |
#[cfg(test)]
mod tests {
    use std::collections::HashMap;
    use std::io::Cursor;

    use super::*;
    use async_trait::async_trait;
    use aws_lambda_events::chrono::DateTime;
    use aws_lambda_events::s3::S3Bucket;
    use aws_lambda_events::s3::S3Entity;
    use aws_lambda_events::s3::S3Object;
    use aws_lambda_events::s3::S3RequestParameters;
    use aws_lambda_events::s3::S3UserIdentity;
    use aws_sdk_s3::types::ByteStream;
    use lambda_runtime::{Context, LambdaEvent};
    use mockall::mock;
    use mockall::predicate::eq;
    use s3client::GetFile;
    use s3client::PutFile;

    // Happy-path test: a single ObjectCreated record flows through
    // get_file -> get_thumbnail -> put_file, and the handler returns the
    // put_file response ("Done").
    #[tokio::test]
    async fn response_is_good() {
        let mut context = Context::default();
        context.request_id = "test-request-id".to_string();

        let bucket = "test-bucket";
        let key = "test-key";

        // Generate MockFakeS3Client implementing all three traits the
        // handler is generic over, so no real AWS calls are made.
        mock! {
            FakeS3Client {}

            #[async_trait]
            impl GetFile for FakeS3Client {
                pub async fn get_file(&self, bucket: &str, key: &str) -> Option<Cursor<Vec<u8>>>;
            }
            #[async_trait]
            impl PutFile for FakeS3Client {
                pub async fn put_file(&self, bucket: &str, key: &str, bytes: ByteStream) -> Result<String, String>;
            }

            impl GetThumbnail for FakeS3Client {
                fn get_thumbnail(&self, reader: Cursor<Vec<u8>>) -> ByteStream;
            }
        }

        let mut mock = MockFakeS3Client::new();

        // Download must be requested for the exact bucket/key from the event;
        // respond with fake image bytes.
        mock.expect_get_file()
            .withf(|b: &str, k: &str| b.eq(bucket) && k.eq(key))
            .returning(|_1, _2| Some(Cursor::new(b"IMAGE".to_vec())));

        // The thumbnailer must receive exactly the downloaded bytes.
        mock.expect_get_thumbnail()
            .with(eq(Cursor::new(b"IMAGE".to_vec())))
            .returning(|_| ByteStream::from_static(b"THUMBNAIL"));

        // The upload must target the "-thumbs" bucket with the original key.
        mock.expect_put_file()
            .withf(|bu: &str, ke: &str, _by| bu.eq("test-bucket-thumbs") && ke.eq(key))
            .returning(|_1, _2, _3| Ok("Done".to_string()));

        let payload = get_s3_event("ObjectCreated", bucket, key);
        let event = LambdaEvent { payload, context };

        let result = function_handler(event, &mock).await.unwrap();

        assert_eq!("Done", result);
    }

    // Builds an S3Event containing a single record with the given
    // event name, bucket and key.
    fn get_s3_event(event_name: &str, bucket_name: &str, object_key: &str) -> S3Event {
        return S3Event {
            records: (vec![get_s3_event_record(event_name, bucket_name, object_key)]),
        };
    }

    // Builds one S3EventRecord fixture; fields irrelevant to the handler
    // are filled with empty/default values.
    fn get_s3_event_record(event_name: &str, bucket_name: &str, object_key: &str) -> S3EventRecord {
        let s3_entity = S3Entity {
            schema_version: (Some(String::default())),
            configuration_id: (Some(String::default())),
            bucket: (S3Bucket {
                name: (Some(bucket_name.to_string())),
                owner_identity: (S3UserIdentity {
                    principal_id: (Some(String::default())),
                }),
                arn: (Some(String::default())),
            }),
            object: (S3Object {
                key: (Some(object_key.to_string())),
                size: (Some(1)),
                url_decoded_key: (Some(String::default())),
                version_id: (Some(String::default())),
                e_tag: (Some(String::default())),
                sequencer: (Some(String::default())),
            }),
        };

        return S3EventRecord {
            event_version: (Some(String::default())),
            event_source: (Some(String::default())),
            aws_region: (Some(String::default())),
            event_time: (DateTime::default()),
            event_name: (Some(event_name.to_string())),
            principal_id: (S3UserIdentity {
                principal_id: (Some("X".to_string())),
            }),
            request_parameters: (S3RequestParameters {
                source_ip_address: (Some(String::default())),
            }),
            response_elements: (HashMap::new()),
            s3: (s3_entity),
        };
    }
}
0 commit comments