Rust Quickstart Guide
Learn how to integrate Rabata.io object storage with your Rust application for storing and serving files, handling uploads, and more.
Prerequisites
- Rust installed (1.56.0 or later recommended)
- Cargo package manager
- A Rabata.io account with access keys
- A bucket created in your Rabata.io account
Installation
Add the AWS SDK for Rust to your project’s Cargo.toml file:
[dependencies]
aws-config = "0.55.1"
aws-sdk-s3 = "0.27.0"
tokio = { version = "1", features = ["full"] }
anyhow = "1.0"
bytes = "1.4.0"
thiserror = "1.0"   # used in the Error Handling section
serde_json = "1.0"  # used in the Web Integration section
Configuration
Store your Rabata.io credentials in environment variables, or load them through a small configuration module:
Using Environment Variables
Set the following environment variables:
RABATA_ACCESS_KEY_ID=your_access_key_id
RABATA_SECRET_ACCESS_KEY=your_secret_access_key
RABATA_REGION=eu-west-1
RABATA_ENDPOINT=https://s3.eu-west-1.rabata.io
RABATA_BUCKET_NAME=your-bucket-name
Using a Configuration File
Create a config.rs file to load your configuration:
// src/config.rs
use std::env;

/// Connection settings for Rabata.io, loaded from `RABATA_*` environment
/// variables.
pub struct RabataConfig {
    pub access_key_id: String,
    pub secret_access_key: String,
    pub region: String,
    pub endpoint: String,
    pub bucket_name: String,
}

impl RabataConfig {
    /// Read the configuration from the process environment.
    ///
    /// `RABATA_REGION` and `RABATA_ENDPOINT` fall back to eu-west-1
    /// defaults; the other three variables are required, and this panics
    /// with a descriptive message if any of them is unset.
    pub fn from_env() -> Self {
        let access_key_id = env::var("RABATA_ACCESS_KEY_ID")
            .expect("RABATA_ACCESS_KEY_ID must be set");
        let secret_access_key = env::var("RABATA_SECRET_ACCESS_KEY")
            .expect("RABATA_SECRET_ACCESS_KEY must be set");
        let region = env::var("RABATA_REGION")
            .unwrap_or_else(|_| "eu-west-1".to_string());
        let endpoint = env::var("RABATA_ENDPOINT")
            .unwrap_or_else(|_| "https://s3.eu-west-1.rabata.io".to_string());
        let bucket_name = env::var("RABATA_BUCKET_NAME")
            .expect("RABATA_BUCKET_NAME must be set");

        Self {
            access_key_id,
            secret_access_key,
            region,
            endpoint,
            bucket_name,
        }
    }
}
Security Note: Never commit your credentials to version control. Use environment variables or a secure configuration management system.
Basic Usage
Creating an S3 Client
Create a utility function to initialize the S3 client with Rabata.io configuration:
// src/s3_client.rs
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_s3::{config::Region, Client, Endpoint};
use http::Uri;
use std::env;
use crate::config::RabataConfig;
pub async fn create_s3_client(config: &RabataConfig) -> Client {
let region_provider = RegionProviderChain::first_try(Region::new(config.region.clone()));
let endpoint_uri = config.endpoint.parse::<Uri>().expect("Invalid endpoint URI");
let s3_config = aws_sdk_s3::config::Builder::new()
.region(region_provider.region().await)
.endpoint_resolver(Endpoint::immutable(endpoint_uri))
.credentials_provider(
aws_config::credentials::Credentials::new(
&config.access_key_id,
&config.secret_access_key,
None, None, "rabata-credentials"
)
)
.force_path_style(true) // Required for Rabata.io
.build();
Client::from_conf(s3_config)
}
Uploading Files
Create a function to upload files to your Rabata.io bucket:
// src/operations.rs
use anyhow::Result;
use aws_sdk_s3::{Client, types::ByteStream};
use bytes::Bytes;
use std::path::Path;
use tokio::fs::File;
use tokio::io::AsyncReadExt;

use crate::config::RabataConfig;

/// Upload a local file to the configured Rabata.io bucket under `key`,
/// returning the object's public URL.
///
/// # Errors
/// Fails if the file cannot be opened/read or the PUT request is rejected.
pub async fn upload_file(
    client: &Client,
    config: &RabataConfig,
    local_path: &Path,
    key: &str,
) -> Result<String> {
    // Stream the file from disk instead of buffering the entire contents
    // in memory first; `from_path` also lets the SDK set Content-Length.
    let body = ByteStream::from_path(local_path).await?;

    // Upload the object to Rabata.io.
    client
        .put_object()
        .bucket(&config.bucket_name)
        .key(key)
        .body(body)
        .send()
        .await?;

    // Public URL of the uploaded object.
    // NOTE(review): this host ("s3.rcs.rabata.io") differs from the
    // configured API endpoint ("s3.eu-west-1.rabata.io") — confirm it
    // matches your account's public download domain.
    let url = format!("https://{}.s3.rcs.rabata.io/{}", config.bucket_name, key);
    Ok(url)
}
Downloading Files
Create a function to download files from your Rabata.io bucket:
// src/operations.rs
use std::io::Write;
pub async fn download_file(
client: &Client,
config: &RabataConfig,
key: &str,
local_path: &Path,
) -> Result<()> {
// Get the object from Rabata.io
let resp = client
.get_object()
.bucket(&config.bucket_name)
.key(key)
.send()
.await?;
// Read the data
let data = resp.body.collect().await?;
let bytes = data.into_bytes();
// Write the data to a local file
let mut file = std::fs::File::create(local_path)?;
file.write_all(&bytes)?;
Ok(())
}
Listing Files
Create a function to list files in your Rabata.io bucket:
// src/operations.rs

/// List object keys in the bucket, optionally filtered by a key prefix.
pub async fn list_files(
    client: &Client,
    config: &RabataConfig,
    prefix: Option<&str>,
) -> Result<Vec<String>> {
    // Start from the base request and attach the prefix only when given.
    let mut request = client.list_objects_v2().bucket(&config.bucket_name);
    if let Some(p) = prefix {
        request = request.prefix(p);
    }

    let response = request.send().await?;

    // Collect the key of every returned object, skipping entries that
    // (per the API types) carry no key.
    let keys = response
        .contents
        .unwrap_or_default()
        .into_iter()
        .filter_map(|object| object.key)
        .collect();

    Ok(keys)
}
Deleting Files
Create a function to delete files from your Rabata.io bucket:
// src/operations.rs

/// Remove the object stored under `key` from the configured bucket.
pub async fn delete_file(
    client: &Client,
    config: &RabataConfig,
    key: &str,
) -> Result<()> {
    // Build the request first, then dispatch it.
    let request = client
        .delete_object()
        .bucket(&config.bucket_name)
        .key(key);

    request.send().await?;
    Ok(())
}
Complete Example
Here’s a complete example that demonstrates how to use the functions above:
// src/main.rs
mod config;
mod s3_client;
mod operations;

use anyhow::Result;
use std::path::Path;

use config::RabataConfig;
use s3_client::create_s3_client;
use operations::{upload_file, download_file, list_files, delete_file};

/// End-to-end walkthrough: upload a file, list the bucket, download the
/// file back, then delete it.
#[tokio::main]
async fn main() -> Result<()> {
    // Configuration comes from the RABATA_* environment variables.
    let config = RabataConfig::from_env();
    let client = create_s3_client(&config).await;

    // 1. Upload ./example.txt under the "uploads/" prefix.
    let source = Path::new("./example.txt");
    let key = "uploads/example.txt";
    let url = upload_file(&client, &config, source, key).await?;
    println!("File uploaded successfully: {}", url);

    // 2. Show everything stored under "uploads/".
    let files = list_files(&client, &config, Some("uploads/")).await?;
    println!("Files in bucket:");
    for name in files {
        println!("- {}", name);
    }

    // 3. Download the object we just uploaded.
    let target = Path::new("./downloaded_example.txt");
    download_file(&client, &config, key, target).await?;
    println!("File downloaded to: {:?}", target);

    // 4. Clean up.
    delete_file(&client, &config, key).await?;
    println!("File deleted successfully");

    Ok(())
}
Advanced Usage
Generating Presigned URLs
Create a function to generate presigned URLs for temporary access to files:
// src/operations.rs
use aws_sdk_s3::presigning::PresigningConfig;
use std::time::Duration;

/// Create a time-limited GET URL for `key`, letting clients fetch the
/// object directly without exposing credentials.
///
/// # Errors
/// Fails if `expires_in` is not a valid presigning duration or if the
/// request cannot be signed.
pub async fn generate_presigned_url(
    client: &Client,
    config: &RabataConfig,
    key: &str,
    expires_in: Duration,
) -> Result<String> {
    // Validate and wrap the expiry before building the request.
    let presigning = PresigningConfig::expires_in(expires_in)?;

    let request = client
        .get_object()
        .bucket(&config.bucket_name)
        .key(key)
        .presigned(presigning)
        .await?;

    Ok(request.uri().to_string())
}
Example usage:
// Generate a URL that expires in 1 hour (3600 seconds).
let presigned_url = generate_presigned_url(
    &client,
    &config,
    "uploads/private_file.pdf",
    Duration::from_secs(3600)
).await?;
println!("Presigned URL (expires in 1 hour): {}", presigned_url);
Multipart Uploads
For large files, you can use multipart uploads:
// src/operations.rs
use std::io::SeekFrom;
use tokio::io::AsyncSeekExt;

/// Upload a large file in `chunk_size`-byte parts using S3 multipart
/// upload, returning the object's public URL.
///
/// Parts are read and uploaded sequentially; every part except the last
/// is exactly `chunk_size` bytes. NOTE(review): S3-compatible services
/// typically require a 5 MiB minimum for all parts but the final one —
/// confirm Rabata.io's limit before choosing `chunk_size`.
pub async fn multipart_upload(
    client: &Client,
    config: &RabataConfig,
    local_path: &Path,
    key: &str,
    chunk_size: usize,
) -> Result<String> {
    // Open the file and determine how many bytes we must upload.
    let mut file = File::open(local_path).await?;
    let file_size = file.metadata().await?.len();

    // Step 1: start the multipart upload and obtain its upload ID.
    let create_multipart_resp = client
        .create_multipart_upload()
        .bucket(&config.bucket_name)
        .key(key)
        .send()
        .await?;
    let upload_id = create_multipart_resp.upload_id
        .ok_or_else(|| anyhow::anyhow!("Failed to get upload ID"))?;

    // Step 2: upload the file one chunk at a time.
    let mut part_number = 1;
    let mut completed_parts = Vec::new();
    let mut position = 0;

    while position < file_size {
        // The final chunk may be shorter than chunk_size.
        let bytes_to_read = std::cmp::min(chunk_size as u64, file_size - position);
        let mut buffer = vec![0; bytes_to_read as usize];

        // Seek to the chunk's offset and fill the buffer exactly.
        file.seek(SeekFrom::Start(position)).await?;
        file.read_exact(&mut buffer).await?;

        let upload_part_resp = client
            .upload_part()
            .bucket(&config.bucket_name)
            .key(key)
            .upload_id(&upload_id)
            .part_number(part_number)
            .body(ByteStream::from(Bytes::from(buffer)))
            .send()
            .await?;

        // The ETag is mandatory when completing the upload. Silently
        // skipping a part with a missing ETag (as the previous version
        // did) would make the completed object lose that part — treat
        // its absence as a hard error instead.
        let e_tag = upload_part_resp.e_tag
            .ok_or_else(|| anyhow::anyhow!("Missing ETag for part {}", part_number))?;
        completed_parts.push(
            aws_sdk_s3::types::CompletedPart::builder()
                .part_number(part_number)
                .e_tag(e_tag)
                .build()
        );

        position += bytes_to_read;
        part_number += 1;
    }

    // Step 3: ask the service to assemble the parts into the final object.
    client
        .complete_multipart_upload()
        .bucket(&config.bucket_name)
        .key(key)
        .upload_id(upload_id)
        .multipart_upload(
            aws_sdk_s3::types::CompletedMultipartUpload::builder()
                .set_parts(Some(completed_parts))
                .build()
        )
        .send()
        .await?;

    // Public URL of the assembled object.
    // NOTE(review): host differs from the configured API endpoint —
    // confirm the public download domain.
    let url = format!("https://{}.s3.rcs.rabata.io/{}", config.bucket_name, key);
    Ok(url)
}
Error Handling
Implement proper error handling for S3 operations:
// src/error.rs
// NOTE(review): `thiserror` is not in the Cargo.toml dependency list at
// the top of this guide — add `thiserror = "1.0"` before using this module.
use thiserror::Error;

/// Unified error type for Rabata.io storage operations.
#[derive(Error, Debug)]
pub enum RabataError {
    // Wraps any error surfaced by the AWS SDK client.
    #[error("AWS SDK error: {0}")]
    AwsSdk(#[from] aws_sdk_s3::Error),
    // Local filesystem failures (reading uploads, writing downloads).
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),
    // Invalid or missing configuration values.
    #[error("Configuration error: {0}")]
    Config(String),
    // The requested key does not exist in the bucket.
    #[error("Object not found: {0}")]
    NotFound(String),
    // Catch-all for anything not covered above.
    #[error("Unknown error: {0}")]
    Unknown(String),
}

// Update your functions to use this error type
// (illustrative stub — the body is elided, so this will not compile as-is).
pub async fn upload_file(
    client: &Client,
    config: &RabataConfig,
    local_path: &Path,
    key: &str,
) -> Result<String, RabataError> {
    // Implementation with proper error handling
    // ...
}
Web Integration
Here’s an example of integrating Rabata.io with a web application using Actix Web:
// Add these dependencies to Cargo.toml
// actix-web = "4"
// actix-multipart = "0.5"
// futures = "0.3"
// serde_json = "1"   (used for the JSON responses below)

// src/web.rs
use actix_web::{web, App, HttpResponse, HttpServer, Responder};
use actix_multipart::Multipart;
use futures::{StreamExt, TryStreamExt};
use std::io::Write;
use std::sync::Arc;
use tokio::sync::Mutex;

use crate::config::RabataConfig;
use crate::s3_client::create_s3_client;
use crate::operations::{upload_file, list_files, generate_presigned_url};

/// Shared application state: one S3 client and the Rabata.io
/// configuration, reused by every handler.
struct AppState {
    s3_client: aws_sdk_s3::Client,
    config: RabataConfig,
}

/// Build a 500 JSON error response so handlers never panic on failures.
fn error_response(message: &str) -> HttpResponse {
    HttpResponse::InternalServerError().json(serde_json::json!({
        "success": false,
        "error": message
    }))
}

/// POST /upload — accept a multipart form, stage the file in ./tmp, then
/// push it to Rabata.io under the "uploads/" prefix.
async fn upload(
    mut payload: Multipart,
    state: web::Data<Arc<Mutex<AppState>>>,
) -> impl Responder {
    let state_guard = state.lock().await;

    // Process multipart form data.
    while let Ok(Some(mut field)) = payload.try_next().await {
        // Copy the filename out to an owned String so the immutable borrow
        // of `field` (via content_disposition()) ends before we call
        // `field.next()` below — holding both does not compile.
        let filename = match field.content_disposition().get_filename() {
            Some(name) => name.to_string(),
            None => continue, // skip fields that are not file uploads
        };

        // Keep only the final path component: a client-supplied name like
        // "../../etc/passwd" must not escape the ./tmp staging directory.
        let safe_name = match std::path::Path::new(&filename)
            .file_name()
            .and_then(|n| n.to_str())
        {
            Some(name) => name.to_string(),
            None => return error_response("Invalid filename"),
        };

        // Create the staging file; report failure instead of panicking.
        let temp_file = format!("./tmp/{}", safe_name);
        let mut file = match std::fs::File::create(&temp_file) {
            Ok(f) => f,
            Err(e) => return error_response(&e.to_string()),
        };

        // Stream the field body to disk chunk by chunk.
        while let Some(chunk) = field.next().await {
            let data = match chunk {
                Ok(d) => d,
                Err(e) => return error_response(&e.to_string()),
            };
            if let Err(e) = file.write_all(&data) {
                return error_response(&e.to_string());
            }
        }

        // Upload the staged file to Rabata.io.
        let key = format!("uploads/{}", safe_name);
        let result = upload_file(
            &state_guard.s3_client,
            &state_guard.config,
            std::path::Path::new(&temp_file),
            &key,
        )
        .await;

        // Best-effort cleanup of the staging file on success AND failure
        // (the previous version leaked it when the upload errored).
        let _ = std::fs::remove_file(&temp_file);

        return match result {
            Ok(url) => HttpResponse::Ok().json(serde_json::json!({
                "success": true,
                "url": url
            })),
            Err(e) => error_response(&e.to_string()),
        };
    }

    HttpResponse::BadRequest().json(serde_json::json!({
        "success": false,
        "error": "No file provided"
    }))
}

/// GET /files — list the keys stored under "uploads/".
async fn list(state: web::Data<Arc<Mutex<AppState>>>) -> impl Responder {
    let state_guard = state.lock().await;
    match list_files(&state_guard.s3_client, &state_guard.config, Some("uploads/")).await {
        Ok(files) => HttpResponse::Ok().json(files),
        Err(e) => HttpResponse::InternalServerError().json(serde_json::json!({
            "error": e.to_string()
        })),
    }
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // One shared configuration and S3 client for the whole server.
    let config = RabataConfig::from_env();
    let s3_client = create_s3_client(&config).await;
    let app_state = Arc::new(Mutex::new(AppState { s3_client, config }));

    // Staging directory for uploads; propagate the error via `?` instead
    // of unwrapping.
    std::fs::create_dir_all("./tmp")?;

    // Start the HTTP server.
    HttpServer::new(move || {
        App::new()
            .app_data(web::Data::new(app_state.clone()))
            .route("/upload", web::post().to(upload))
            .route("/files", web::get().to(list))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
CORS Configuration
To enable cross-origin requests to your Rabata.io bucket, you need to configure CORS. You can do this using the AWS CLI or programmatically with Rust:
// src/operations.rs

/// Apply a CORS policy to the bucket allowing the given origins to issue
/// GET/PUT/POST/DELETE/HEAD requests from the browser.
///
/// # Errors
/// Fails if the PutBucketCors request is rejected.
pub async fn configure_cors(
    client: &Client,
    config: &RabataConfig,
    allowed_origins: Vec<&str>,
) -> Result<()> {
    // The SDK's CorsRule builder appends ONE value per call, and methods/
    // headers are plain strings — there is no `AllowedMethods` enum, and
    // `allowed_methods` does not accept multiple arguments, so the
    // previous multi-argument form did not compile.
    let cors_rule = aws_sdk_s3::types::CorsRule::builder()
        .allowed_headers("*")
        .allowed_methods("GET")
        .allowed_methods("PUT")
        .allowed_methods("POST")
        .allowed_methods("DELETE")
        .allowed_methods("HEAD")
        .set_allowed_origins(Some(allowed_origins.iter().map(|&s| s.to_string()).collect()))
        .expose_headers("ETag")
        .max_age_seconds(3600)
        .build();

    let cors_config = aws_sdk_s3::types::CorsConfiguration::builder()
        .cors_rules(cors_rule)
        .build();

    client
        .put_bucket_cors()
        .bucket(&config.bucket_name)
        .cors_configuration(cors_config)
        .send()
        .await?;

    Ok(())
}
Example usage:
// Configure CORS for your bucket (allow your production site plus local
// development on port 8080).
configure_cors(
    &client,
    &config,
    vec!["https://your-website.com", "http://localhost:8080"]
).await?;
Production Considerations
Performance Optimization
- Use multipart uploads for large files to improve reliability and performance
- Implement connection pooling for multiple S3 operations
- Consider using a CDN in front of Rabata.io for frequently accessed assets
- Implement caching to reduce API calls to Rabata.io
Security Best Practices
- Never hardcode credentials in your application code
- Use environment variables or a secure configuration management system
- Implement proper file type validation before uploading
- Set appropriate CORS policies to prevent unauthorized access
- Use presigned URLs with short expiration times for private content
- Implement rate limiting for file upload endpoints
Error Handling Strategies
- Implement proper error handling and logging for all S3 operations
- Use retries with exponential backoff for transient errors
- Implement circuit breakers to prevent cascading failures
- Provide meaningful error messages to users