# S3 (aiobotocore)

Async S3 helpers using aiobotocore.

## Installation
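
Install the package with the `s3` extra (the extra name is an assumption; check the package metadata if it differs):

```bash
pip install "tracktolib[s3]"
```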

## Dependencies
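
- [aiobotocore](https://github.com/aio-libs/aiobotocore)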

## Quick Start

```python
import asyncio
from pathlib import Path

from aiobotocore.session import get_session

from tracktolib.s3.s3 import upload_file, download_file, list_files


async def main():
    session = get_session()
    async with session.create_client(
        's3',
        endpoint_url='http://localhost:9000',  # e.g. a local MinIO instance
        aws_access_key_id='access_key',
        aws_secret_access_key='secret_key'
    ) as client:
        # Upload a file
        await upload_file(client, 'my-bucket', Path('local.txt'), 'remote/path.txt')

        # Download a file
        content = await download_file(client, 'my-bucket', 'remote/path.txt')

        # List files
        files = await list_files(client, 'my-bucket', 'remote/')


asyncio.run(main())
```

## Functions

### upload_file

Upload a file to S3.

```python
from pathlib import Path

from tracktolib.s3.s3 import upload_file

response = await upload_file(
    client,
    bucket='my-bucket',
    file=Path('document.pdf'),
    path='uploads/document.pdf',
    acl='private'  # or 'public-read', etc.
)
```
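
The same call works in a loop for bulk uploads. A minimal sketch, reusing the positional signature from the Quick Start (the `assets/` directory and key prefix are hypothetical):

```python
from pathlib import Path

from tracktolib.s3.s3 import upload_file

local_dir = Path('assets')
# Mirror the local directory layout under the assets/ prefix in the bucket
for file in local_dir.rglob('*'):
    if file.is_file():
        await upload_file(client, 'my-bucket', file,
                          f'assets/{file.relative_to(local_dir).as_posix()}')
```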

### download_file

Download a file from S3 with optional progress callbacks.

```python
from tracktolib.s3.s3 import download_file

# Simple download
content = await download_file(client, 'my-bucket', 'path/to/file.txt')
if content:
    data = content.read()


# Download with progress tracking
def on_start(total_size: int):
    print(f"Starting download: {total_size} bytes")


def on_update(chunk_size: int):
    print(f"Downloaded {chunk_size} bytes")


content = await download_file(
    client,
    'my-bucket',
    'large-file.zip',
    chunk_size=1024 * 1024,  # 1MB chunks
    on_start=on_start,
    on_update=on_update
)
```
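
To persist a download, write the returned buffer to disk. A minimal sketch, assuming (as in the example above) that `download_file` returns a readable object, or `None` when the key is missing:

```python
from pathlib import Path

from tracktolib.s3.s3 import download_file

content = await download_file(client, 'my-bucket', 'remote/report.pdf')
if content is not None:
    # Dump the in-memory buffer to a local file
    Path('report.pdf').write_bytes(content.read())
```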

### delete_file

Delete a single file from S3.

```python
from tracktolib.s3.s3 import delete_file

response = await delete_file(client, 'my-bucket', 'path/to/file.txt')
```

### delete_files

Delete multiple files from S3.

```python
from tracktolib.s3.s3 import delete_files

paths = ['file1.txt', 'file2.txt', 'folder/file3.txt']
response = await delete_files(
    client,
    'my-bucket',
    paths,
    quiet=True  # S3 quiet mode: only errors are reported in the response
)
```
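
Combined with `list_files`, this gives a simple way to clear a prefix. A minimal sketch, assuming each listed item carries the `Key` field described under Types below (the `tmp/` prefix is hypothetical):

```python
from tracktolib.s3.s3 import delete_files, list_files

# Delete every object under the tmp/ prefix
items = await list_files(client, 'my-bucket', 'tmp/')
if items:
    await delete_files(client, 'my-bucket', [item['Key'] for item in items])
```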

### list_files

List files in a bucket with optional filtering.

```python
from tracktolib.s3.s3 import list_files

# List all files in a path
files = await list_files(client, 'my-bucket', 'uploads/')

# With pagination
files = await list_files(
    client,
    'my-bucket',
    'uploads/',
    max_items=100,
    page_size=50
)

# With JMESPath filter (files larger than 100 bytes)
large_files = await list_files(
    client,
    'my-bucket',
    'uploads/',
    search_query="Contents[?Size > `100`][]"
)
```

## Types

### S3Item

A `TypedDict` representing a single S3 object.

```python
from datetime import datetime

from tracktolib.s3.s3 import S3Item

# S3Item structure:
{
    'Key': 'uploads/document.pdf',
    'LastModified': datetime(2024, 1, 15, 10, 30, 0),
    'ETag': '"d41d8cd98f00b204e9800998ecf8427e"',
    'Size': 1024,
    'StorageClass': 'STANDARD',
    'Owner': {
        'DisplayName': 'owner',
        'ID': 'owner-id'
    }
}
```
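
A minimal usage sketch, assuming `list_files` returns items of this shape:

```python
from tracktolib.s3.s3 import S3Item, list_files

files: list[S3Item] = await list_files(client, 'my-bucket', 'uploads/')
for item in files:
    # TypedDict keys are plain dict keys at runtime, but type-checked statically
    print(item['Key'], item['Size'], item['LastModified'])
```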

## JMESPath Examples

The `search_query` parameter uses [JMESPath](https://jmespath.org/) syntax:
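
```python
# Illustrative queries over the ListObjectsV2 response (suffix and threshold are arbitrary):

# Only the object keys
"Contents[].Key"

# Objects larger than 1 MiB
"Contents[?Size > `1048576`][]"

# Objects whose key ends with .csv
"Contents[?ends_with(Key, '.csv')][]"
```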