Go Quickstart
Learn how to use Go with Rabata.io for managing your object storage using the AWS SDK for Go.
Installation
To use Rabata.io with Go, you’ll need to install the AWS SDK for Go.
Install AWS SDK
$ go get github.com/aws/aws-sdk-go-v2/aws
$ go get github.com/aws/aws-sdk-go-v2/config
$ go get github.com/aws/aws-sdk-go-v2/service/s3
For a new project, you can set up a basic structure:
$ mkdir my-rabata-project
$ cd my-rabata-project
$ go mod init example.com/my-rabata-project
$ go get github.com/aws/aws-sdk-go-v2/aws
$ go get github.com/aws/aws-sdk-go-v2/config
$ go get github.com/aws/aws-sdk-go-v2/service/s3
Configuration
There are several ways to configure the AWS SDK for Go to work with Rabata.io.
Method 1: Using AWS Credentials File
If you’ve already configured the AWS CLI as shown in the AWS CLI Quickstart, the SDK will automatically use those credentials.
Method 2: Explicit Configuration in Code
You can explicitly configure the S3 client in your code:
package main
import (
"context"
"fmt"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
func main() {
// Create a custom resolver that uses the Rabata.io endpoint
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: "https://s3.eu-west-1.rabata.io",
SigningRegion: "eu-west-1",
HostnameImmutable: true,
}, nil
})
// Create a configuration with custom credentials and endpoint resolver
cfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion("eu-west-1"),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
"YOUR_ACCESS_KEY",
"YOUR_SECRET_KEY",
"",
)),
config.WithEndpointResolverWithOptions(customResolver),
)
if err != nil {
log.Fatalf("unable to load SDK config, %v", err)
}
// Create an S3 client
client := s3.NewFromConfig(cfg)
// Now you can use the client to interact with Rabata.io
// ...
}
Method 3: Using Environment Variables
You can set environment variables to configure the SDK:
# Set these environment variables before running your Go program
export AWS_ACCESS_KEY_ID=YOUR_ACCESS_KEY
export AWS_SECRET_ACCESS_KEY=YOUR_SECRET_KEY
export AWS_REGION=eu-west-1
Then in your code:
package main
import (
"context"
"fmt"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
func main() {
// Create a custom resolver that uses the Rabata.io endpoint
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: "https://s3.eu-west-1.rabata.io",
SigningRegion: "eu-west-1",
HostnameImmutable: true,
}, nil
})
// Load configuration from environment variables
cfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithEndpointResolverWithOptions(customResolver),
)
if err != nil {
log.Fatalf("unable to load SDK config, %v", err)
}
// Create an S3 client
client := s3.NewFromConfig(cfg)
// Now you can use the client to interact with Rabata.io
// ...
}
Security Note: Never hardcode your credentials in your source code, especially if it’s stored in a version control system. Use environment variables, AWS credentials file, or a secure secrets management system.
Basic Operations
Here are some common operations you can perform with the AWS SDK for Go and Rabata.io.
Bucket Operations
List All Buckets
package main
import (
"context"
"fmt"
"log"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
)
func main() {
// Create a custom resolver that uses the Rabata.io endpoint
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: "https://s3.eu-west-1.rabata.io",
SigningRegion: "eu-west-1",
HostnameImmutable: true,
}, nil
})
// Load configuration
cfg, err := config.LoadDefaultConfig(context.TODO(),
config.WithRegion("eu-west-1"),
config.WithEndpointResolverWithOptions(customResolver),
)
if err != nil {
log.Fatalf("unable to load SDK config, %v", err)
}
// Create an S3 client
client := s3.NewFromConfig(cfg)
// List buckets
result, err := client.ListBuckets(context.TODO(), &s3.ListBucketsInput{})
if err != nil {
log.Fatalf("unable to list buckets, %v", err)
}
fmt.Println("Buckets:")
for _, bucket := range result.Buckets {
fmt.Printf(" %s\n", *bucket.Name)
}
}
Create a Bucket
// Create a bucket
// NOTE(review): this fragment assumes `client` and `err` are already declared
// by the configuration code shown earlier on this page.
bucketName := "my-bucket-name"
_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
	Bucket: aws.String(bucketName),
})
if err != nil {
	log.Fatalf("unable to create bucket, %v", err)
}
fmt.Printf("Bucket created: %s\n", bucketName)
Delete a Bucket
// Delete a bucket
// NOTE(review): this fragment assumes `client` and `err` are already declared
// by the configuration code shown earlier on this page. The bucket must be
// empty before it can be deleted (see the note below).
bucketName := "my-bucket-name"
_, err = client.DeleteBucket(context.TODO(), &s3.DeleteBucketInput{
	Bucket: aws.String(bucketName),
})
if err != nil {
	log.Fatalf("unable to delete bucket, %v", err)
}
fmt.Printf("Bucket deleted: %s\n", bucketName)
Note: The bucket must be empty before it can be deleted.
Object Operations
List Objects in a Bucket
// List objects in a bucket
// NOTE(review): this fragment assumes `client` is already configured as
// shown earlier on this page.
bucketName := "my-bucket-name"
result, err := client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{
	Bucket: aws.String(bucketName),
})
if err != nil {
	log.Fatalf("unable to list objects, %v", err)
}
fmt.Printf("Objects in bucket %s:\n", bucketName)
for _, object := range result.Contents {
	// Object.Size is a *int64 in current aws-sdk-go-v2 releases; dereference
	// it safely with aws.ToInt64 instead of printing the pointer with %d.
	fmt.Printf(" %s (%d bytes)\n", *object.Key, aws.ToInt64(object.Size))
}
Upload a File
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// ... configuration code ...
	// NOTE(review): `cfg` comes from the elided configuration code above; this
	// snippet does not compile on its own, and the `config` import is only
	// used by that elided code.

	// Create an S3 client
	client := s3.NewFromConfig(cfg)

	// Upload a file: send the contents of localFile to bucketName under key.
	bucketName := "my-bucket-name"
	key := "remote-file.txt"
	localFile := "local-file.txt"
	file, err := os.Open(localFile)
	if err != nil {
		log.Fatalf("unable to open file %s, %v", localFile, err)
	}
	// Ensure the file handle is released when main returns.
	defer file.Close()
	// PutObject streams the open file as the object body.
	_, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(key),
		Body:   file,
	})
	if err != nil {
		log.Fatalf("unable to upload file, %v", err)
	}
	fmt.Printf("File uploaded: %s to %s/%s\n", localFile, bucketName, key)
}
Download a File
// Download a file
// NOTE(review): this fragment additionally requires the "io" and "os" imports
// and assumes `client` is already configured as shown earlier on this page.
bucketName := "my-bucket-name"
key := "remote-file.txt"
localFile := "local-file.txt"
result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
	Bucket: aws.String(bucketName),
	Key:    aws.String(key),
})
if err != nil {
	log.Fatalf("unable to download file, %v", err)
}
// The response body must always be closed to release the connection.
defer result.Body.Close()
file, err := os.Create(localFile)
if err != nil {
	log.Fatalf("unable to create file %s, %v", localFile, err)
}
defer file.Close()
// Stream the object body straight to disk without buffering it in memory.
_, err = io.Copy(file, result.Body)
if err != nil {
	log.Fatalf("unable to write file, %v", err)
}
fmt.Printf("File downloaded: %s/%s to %s\n", bucketName, key, localFile)
Delete a File
// Delete a file
// NOTE(review): this fragment assumes `client` and `err` are already declared
// by the configuration code shown earlier on this page.
bucketName := "my-bucket-name"
key := "file-to-delete.txt"
_, err = client.DeleteObject(context.TODO(), &s3.DeleteObjectInput{
	Bucket: aws.String(bucketName),
	Key:    aws.String(key),
})
if err != nil {
	log.Fatalf("unable to delete file, %v", err)
}
fmt.Printf("File deleted: %s/%s\n", bucketName, key)
Advanced Operations
Here are some more advanced operations you can perform with the AWS SDK for Go and Rabata.io.
Working with Object Metadata
// Upload a file with metadata
// NOTE(review): this fragment additionally requires the "bytes" import and
// assumes `client` and `err` are already declared as shown earlier.
bucketName := "my-bucket-name"
key := "file-with-metadata.txt"
content := []byte("Hello, World!")
_, err = client.PutObject(context.TODO(), &s3.PutObjectInput{
	Bucket: aws.String(bucketName),
	Key:    aws.String(key),
	Body:   bytes.NewReader(content),
	// NOTE(review): entries in Metadata are presumably stored as user-defined
	// x-amz-meta-* headers; to set the actual Content-Type header, the
	// ContentType input field is likely intended instead — confirm against
	// the SDK documentation.
	Metadata: map[string]string{
		"custom-key": "custom-value",
		"content-type": "text/plain",
	},
})
if err != nil {
	log.Fatalf("unable to upload file with metadata, %v", err)
}
fmt.Printf("File uploaded with metadata: %s/%s\n", bucketName, key)
Using Presigned URLs
Generate a presigned URL to allow temporary access to an object:
package main
import (
"context"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/presign"
)
func main() {
// ... configuration code ...
// Create an S3 client
client := s3.NewFromConfig(cfg)
// Create a presigner
presigner := presign.NewPresigner(client)
// Generate a presigned URL
bucketName := "my-bucket-name"
key := "private-file.txt"
request, err := presigner.PresignGetObject(context.TODO(), &s3.GetObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
}, func(opts *presign.GetObjectOptions) {
opts.Expires = time.Hour // URL expires in 1 hour
})
if err != nil {
log.Fatalf("unable to generate presigned URL, %v", err)
}
fmt.Printf("Presigned URL: %s\n", request.URL)
}
Multipart Uploads
For large files, you can use multipart uploads:
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
func main() {
// ... configuration code ...
// Create an S3 client
client := s3.NewFromConfig(cfg)
// Start a multipart upload
bucketName := "my-bucket-name"
key := "large-file.txt"
createResp, err := client.CreateMultipartUpload(context.TODO(), &s3.CreateMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
if err != nil {
log.Fatalf("unable to create multipart upload, %v", err)
}
uploadID := createResp.UploadId
// Upload parts
// In a real application, you would split your file into parts and upload each part
// Here's a simplified example:
file, err := os.Open("large-file.txt")
if err != nil {
log.Fatalf("unable to open file, %v", err)
}
defer file.Close()
// Get file info
fileInfo, err := file.Stat()
if err != nil {
log.Fatalf("unable to get file info, %v", err)
}
// Calculate part size (5MB minimum)
partSize := int64(5 * 1024 * 1024) // 5MB
numParts := fileInfo.Size() / partSize
if fileInfo.Size()%partSize != 0 {
numParts++
}
// Upload each part
var completedParts []types.CompletedPart
for i := int64(1); i <= numParts; i++ {
// Read part data
partBuffer := make([]byte, partSize)
n, err := file.Read(partBuffer)
if err != nil && err != io.EOF {
log.Fatalf("unable to read file part, %v", err)
}
if n == 0 {
break
}
// Upload part
uploadResp, err := client.UploadPart(context.TODO(), &s3.UploadPartInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
UploadId: uploadID,
PartNumber: aws.Int32(int32(i)),
Body: bytes.NewReader(partBuffer[:n]),
})
if err != nil {
// Abort the upload on error
_, abortErr := client.AbortMultipartUpload(context.TODO(), &s3.AbortMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
UploadId: uploadID,
})
if abortErr != nil {
log.Printf("unable to abort multipart upload, %v", abortErr)
}
log.Fatalf("unable to upload part, %v", err)
}
// Add the completed part to our list
completedParts = append(completedParts, types.CompletedPart{
ETag: uploadResp.ETag,
PartNumber: aws.Int32(int32(i)),
})
}
// Complete the multipart upload
_, err = client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
UploadId: uploadID,
MultipartUpload: &types.CompletedMultipartUpload{
Parts: completedParts,
},
})
if err != nil {
log.Fatalf("unable to complete multipart upload, %v", err)
}
fmt.Printf("Large file uploaded: %s/%s\n", bucketName, key)
}
Error Handling
It’s important to handle errors properly when working with S3:
package main
import (
"context"
"fmt"
"io"
"log"
"os"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/smithy-go"
)
func main() {
// ... configuration code ...
// Create an S3 client
client := s3.NewFromConfig(cfg)
// Try to get an object
bucketName := "my-bucket-name"
key := "my-object.txt"
localFile := "local-file.txt"
result, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
Bucket: aws.String(bucketName),
Key: aws.String(key),
})
if err != nil {
// Check for specific error types
var apiErr smithy.APIError
if errors.As(err, &apiErr) {
switch apiErr.ErrorCode() {
case "NoSuchKey":
fmt.Printf("The object %s does not exist.\n", key)
case "NoSuchBucket":
fmt.Printf("The bucket %s does not exist.\n", bucketName)
default:
fmt.Printf("API error: %s\n", apiErr.ErrorMessage())
}
} else {
fmt.Printf("An error occurred: %v\n", err)
}
return
}
defer result.Body.Close()
// Create a local file
file, err := os.Create(localFile)
if err != nil {
log.Fatalf("unable to create file %s, %v", localFile, err)
}
defer file.Close()
// Copy the object content to the local file
_, err = io.Copy(file, result.Body)
if err != nil {
log.Fatalf("unable to write file, %v", err)
}
fmt.Printf("File downloaded: %s/%s to %s\n", bucketName, key, localFile)
}