Skip to content

Commit dbc5d3d

Browse files
committed
add s3 thumbnail creation example (awslabs#613)
1 parent 0f6e2a2 commit dbc5d3d

File tree

3 files changed

+196
-0
lines changed

3 files changed

+196
-0
lines changed
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
[package]
name = "basic-s3-thumbnail"
version = "0.1.0"
edition = "2021"

# Starting in Rust 1.62 you can use `cargo add` to add dependencies
# to your project.
#
# If you're using an older Rust version,
# download cargo-edit(https://github.com/killercup/cargo-edit#installation)
# to install the `add` subcommand.
#
# Running `cargo add DEPENDENCY_NAME` will
# add the latest version of a dependency to the list,
# and it will keep the alphabetic ordering for you.

[dependencies]
aws_lambda_events = "0.7.2"
# Resolved from this repository's workspace rather than crates.io.
lambda_runtime = { path = "../../lambda-runtime" }
serde = "1.0.136"
tokio = { version = "1", features = ["macros"] }
tracing = { version = "0.1" }
tracing-subscriber = { version = "0.3", default-features = false, features = ["ansi", "fmt"] }
# AWS SDK: credentials/region loading and the S3 client used by the handler.
aws-config = "0.54.1"
aws-sdk-s3 = "0.24.0"
# Thumbnail generation from in-memory image bytes.
thumbnailer = "0.4.0"
mime = "0.3.16"

examples/basic-s3-thumbnail/README.md

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
# AWS Lambda Function that uses S3
2+
This example processes S3 events. When it receives an ObjectCreated event,
it downloads the newly created file, generates a thumbnail from it
(the file is assumed to be an image), and uploads the thumbnail to an S3
bucket named `[original-bucket-name]-thumbs`.
7+
8+
## Build & Deploy
9+
10+
1. Install [cargo-lambda](https://github.com/cargo-lambda/cargo-lambda#installation)
11+
2. Build the function with `cargo lambda build --release`
12+
3. Deploy the function to AWS Lambda with `cargo lambda deploy --iam-role YOUR_ROLE`
13+
14+
## Build for ARM 64
15+
16+
Build the function with `cargo lambda build --release --arm64`
Lines changed: 153 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,153 @@
1+
use std::io::Cursor;
2+
3+
use aws_config::meta::region::RegionProviderChain;
4+
use aws_lambda_events::{event::s3::S3Event, s3::S3EventRecord};
5+
use aws_sdk_s3::{types::ByteStream, Client};
6+
use lambda_runtime::{run, service_fn, Error, LambdaEvent};
7+
use thumbnailer::{create_thumbnails, ThumbnailSize};
8+
9+
/// Bucket name and object key extracted from a single S3 event record.
struct FileProps {
    // Name of the bucket in which the object was created.
    bucket_name: String,
    // Key of the created object within that bucket.
    object_key: String,
}
13+
14+
/// This lambda handler listen to file creation events and it creates a thumbnail
15+
/// and uploads it to s3 into a bucket "[original bucket name]-thumbs".
16+
///
17+
/// Make sure that this lambda only gets event from png file creation
18+
/// Make sure that there is another bucket with "-thumbs" prefix in the name
19+
/// Make sure that this lambda has permission to put file into the "-thumbs" bucket
20+
/// Make sure that the created png file has no strange characters in the name
21+
pub(crate) async fn function_handler(event: LambdaEvent<S3Event>) -> Result<(), Error> {
22+
let client = get_client().await;
23+
24+
let records = event.payload.records;
25+
for record in records.iter() {
26+
let optional_file_props = get_file_props(record);
27+
if optional_file_props.is_none() {
28+
// The event is not a create event or bucket/object key is missing
29+
println!("record skipped");
30+
continue;
31+
}
32+
33+
// The event is a CreateObject and it contains the bucket name and
34+
// object_key
35+
// If the object_key has strange characters, the upload may not work
36+
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html
37+
// Try it with something simple like this: abc_123.png
38+
let file_props = optional_file_props.unwrap();
39+
let name = file_props.bucket_name.as_str();
40+
let key = file_props.object_key.as_str();
41+
42+
let reader = get_file(&client, name, key).await;
43+
44+
if reader.is_none() {
45+
continue;
46+
}
47+
48+
let thumbnail = get_thumbnail(reader.unwrap());
49+
50+
let mut thumbs_bucket_name = name.to_owned();
51+
thumbs_bucket_name.push_str("-thumbs");
52+
53+
// It uplaods the thumbnail into a bucket name suffixed with "-thumbs"
54+
// So it needs file creation permission into that bucket
55+
let _ = put_file(&client, &thumbs_bucket_name, key, thumbnail).await;
56+
}
57+
58+
Ok(())
59+
}
60+
61+
async fn get_file(client: &Client, bucket: &str, key: &str) -> Option<Cursor<Vec<u8>>> {
62+
println!("get file bucket {}, key {}", bucket, key);
63+
64+
let output = client.get_object().bucket(bucket).key(key).send().await;
65+
66+
let mut reader = None;
67+
68+
if output.as_ref().ok().is_some() {
69+
let bytes = output.ok().unwrap().body.collect().await.unwrap().to_vec();
70+
println!("Object is downloaded, size is {}", bytes.len());
71+
reader = Some(Cursor::new(bytes));
72+
} else if output.as_ref().err().is_some() {
73+
let err = output.err().unwrap();
74+
let service_err = err.into_service_error();
75+
let meta = service_err.meta();
76+
println!("Error from aws when downloding: {}", meta.to_string());
77+
} else {
78+
println!("Unknown error when downloading");
79+
}
80+
81+
return reader;
82+
}
83+
84+
async fn put_file(client: &Client, bucket: &str, key: &str, bytes: ByteStream) {
85+
println!("put file bucket {}, key {}", bucket, key);
86+
let _ = client.put_object().bucket(bucket).key(key).body(bytes).send().await;
87+
88+
return;
89+
}
90+
91+
fn get_thumbnail(reader: Cursor<Vec<u8>>) -> ByteStream {
92+
let mut thumbnails = create_thumbnails(reader, mime::IMAGE_PNG, [ThumbnailSize::Small]).unwrap();
93+
94+
let thumbnail = thumbnails.pop().unwrap();
95+
let mut buf = Cursor::new(Vec::new());
96+
thumbnail.write_png(&mut buf).unwrap();
97+
98+
return ByteStream::from(buf.into_inner());
99+
}
100+
101+
fn get_file_props(record: &S3EventRecord) -> Option<FileProps> {
102+
if record.event_name.is_none() {
103+
return None;
104+
}
105+
if !record.event_name.as_ref().unwrap().starts_with("ObjectCreated") {
106+
return None;
107+
}
108+
109+
if record.s3.bucket.name.is_none() || record.s3.object.key.is_none() {
110+
return None;
111+
}
112+
let bucket_name = record.s3.bucket.name.to_owned().unwrap();
113+
let object_key = record.s3.object.key.to_owned().unwrap();
114+
115+
if bucket_name.is_empty() || object_key.is_empty() {
116+
println!("Bucket name ro object_key is empty");
117+
return None;
118+
}
119+
120+
println!("Bucket: {}, Object key: {}", bucket_name, object_key);
121+
122+
return Some(FileProps {
123+
bucket_name: (bucket_name),
124+
object_key: (object_key),
125+
});
126+
}
127+
128+
async fn get_client() -> Client {
129+
let region_provider = RegionProviderChain::default_provider().or_else("us-east-2");
130+
let config = aws_config::from_env().region(region_provider).load().await;
131+
let client = Client::new(&config);
132+
133+
println!("client region {}", client.conf().region().unwrap().to_string());
134+
135+
return client;
136+
}
137+
138+
#[tokio::main]
139+
async fn main() -> Result<(), Error> {
140+
// required to enable CloudWatch error logging by the runtime
141+
tracing_subscriber::fmt()
142+
.with_max_level(tracing::Level::INFO)
143+
// disable printing the name of the module in every log line.
144+
.with_target(false)
145+
// this needs to be set to false, otherwise ANSI color codes will
146+
// show up in a confusing manner in CloudWatch logs.
147+
.with_ansi(false)
148+
// disabling time is handy because CloudWatch will add the ingestion time.
149+
.without_time()
150+
.init();
151+
152+
run(service_fn(function_handler)).await
153+
}

0 commit comments

Comments
 (0)