diff --git a/README.md b/README.md
index 2739e6d1..efd3a3af 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@ The MinIO Python Client SDK provides high level APIs to access any MinIO Object
This Quickstart Guide covers how to install the MinIO client SDK, connect to the object storage service, and create a sample file uploader.
The example below uses:
-- [Python version 3.7+](https://www.python.org/downloads/)
+- [Python version 3.9+](https://www.python.org/downloads/)
- The [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html)
- The MinIO `play` test server
@@ -17,7 +17,7 @@ For a complete list of APIs and examples, see the [Python Client API Reference](
## Install the MinIO Python SDK
-The Python SDK requires Python version 3.7+.
+The Python SDK requires Python version 3.9+.
You can install the SDK with `pip` or from the [`minio/minio-py` GitHub repository](https://github.com/minio/minio-py):
### Using `pip`
@@ -49,7 +49,8 @@ For example:
```py
from minio import Minio
-client = Minio("play.min.io",
+client = Minio(
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -74,7 +75,8 @@ from minio.error import S3Error
def main():
# Create a client with the MinIO server playground, its access key
# and secret key.
- client = Minio("play.min.io",
+ client = Minio(
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -87,16 +89,18 @@ def main():
destination_file = "my-test-file.txt"
# Make the bucket if it doesn't exist.
- found = client.bucket_exists(bucket_name)
+ found = client.bucket_exists(bucket_name=bucket_name)
if not found:
- client.make_bucket(bucket_name)
+ client.make_bucket(bucket_name=bucket_name)
print("Created bucket", bucket_name)
else:
print("Bucket", bucket_name, "already exists")
# Upload the file, renaming it in the process
client.fput_object(
- bucket_name, destination_file, source_file,
+ bucket_name=bucket_name,
+ object_name=destination_file,
+ file_path=source_file,
)
print(
source_file, "successfully uploaded as object",
diff --git a/docs/API.md b/docs/API.md
index 22e7e936..4023e7d4 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -2,22 +2,22 @@
## 1. Constructor
-### Minio(endpoint, access_key=None, secret_key=None, session_token=None, secure=True, region=None, http_client=None, credentials=None)
+### Minio(*, endpoint: str, access_key: Optional[str] = None, secret_key: Optional[str] = None, session_token: Optional[str] = None, secure: bool = True, region: Optional[str] = None, http_client: Optional[urllib3.PoolManager] = None, credentials: Optional[Provider] = None, cert_check: bool = True)
Initializes a new client object.
__Parameters__
-| Param | Type | Description |
-|:----------------|:----------------------------------|:---------------------------------------------------------------------------------|
-| `endpoint` | _str_ | Hostname of a S3 service. |
-| `access_key` | _str_ | (Optional) Access key (aka user ID) of your account in S3 service. |
-| `secret_key` | _str_ | (Optional) Secret Key (aka password) of your account in S3 service. |
-| `session_token` | _str_ | (Optional) Session token of your account in S3 service. |
-| `secure` | _bool_ | (Optional) Flag to indicate to use secure (TLS) connection to S3 service or not. |
-| `region` | _str_ | (Optional) Region name of buckets in S3 service. |
-| `http_client` | _urllib3.poolmanager.PoolManager_ | (Optional) Customized HTTP client. |
-| `credentials` | _minio.credentials.Provider_ | (Optional) Credentials provider of your account in S3 service. |
-| `cert_check` | _bool_ | (Optional) Flag to check on server certificate for HTTPS connection. |
+| Param | Type | Description |
+|:----------------|:----------------------------------------------|:---------------------------------------------------------------------------------|
+| `endpoint`      | _str_                                          | Hostname of an S3 service.                                                        |
+| `access_key` | _Optional[str] = None_ | (Optional) Access key (aka user ID) of your account in S3 service. |
+| `secret_key` | _Optional[str] = None_ | (Optional) Secret Key (aka password) of your account in S3 service. |
+| `session_token` | _Optional[str] = None_ | (Optional) Session token of your account in S3 service. |
+| `secure` | _bool = True_ | (Optional) Flag to indicate to use secure (TLS) connection to S3 service or not. |
+| `region` | _Optional[str] = None_ | (Optional) Region name of buckets in S3 service. |
+| `http_client` | _Optional[urllib3.PoolManager] = None_ | (Optional) Customized HTTP client. |
+| `credentials` | _Optional[minio.credentials.Provider] = None_ | (Optional) Credentials provider of your account in S3 service. |
+| `cert_check` | _bool = True_ | (Optional) Flag to check on server certificate for HTTPS connection. |
**NOTE on concurrent usage:** `Minio` object is thread safe when using the Python `threading` library. Specifically, it is **NOT** safe to share it between multiple processes, for example when using `multiprocessing.Pool`. The solution is simply to create a new `Minio` object in each process, and not share it between processes.
@@ -28,14 +28,18 @@ __Example__
from minio import Minio
# Create client with anonymous access.
-client = Minio("play.min.io")
+client = Minio(endpoint="play.min.io")
# Create client with access and secret key.
-client = Minio("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY")
+client = Minio(
+ endpoint="s3.amazonaws.com",
+ access_key="ACCESS-KEY",
+ secret_key="SECRET-KEY",
+)
# Create client with access key and secret key with specific region.
client = Minio(
- "play.minio.io:9000",
+ endpoint="play.minio.io:9000",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
region="my-region",
@@ -44,7 +48,7 @@ client = Minio(
# Create client with custom HTTP client using proxy server.
import urllib3
client = Minio(
- "SERVER:PORT",
+ endpoint="SERVER:PORT",
access_key="ACCESS_KEY",
secret_key="SECRET_KEY",
secure=True,
@@ -87,7 +91,7 @@ client = Minio(
| [`set_bucket_notification`](#set_bucket_notification) | [`presigned_post_policy`](#presigned_post_policy) |
| [`listen_bucket_notification`](#listen_bucket_notification) | [`get_presigned_url`](#get_presigned_url) |
| [`delete_bucket_encryption`](#delete_bucket_encryption) | [`upload_snowball_objects`](#upload_snowball_objects) |
-| [`get_bucket_encryption`](#get_bucket_encryption) | |
+| [`get_bucket_encryption`](#get_bucket_encryption) | [`prompt_object`](#prompt_object) |
| [`set_bucket_encryption`](#set_bucket_encryption) | |
| [`delete_object_lock_config`](#delete_object_lock_config) | |
| [`get_object_lock_config`](#get_object_lock_config) | |
@@ -97,42 +101,52 @@ client = Minio(
-### make_bucket(bucket_name, location='us-east-1', object_lock=False)
+### make_bucket(self, *, bucket_name: str, location: Optional[str] = None, object_lock: bool = False, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Create a bucket with region and object lock.
__Parameters__
-| Param | Type | Description |
-|---------------|--------|---------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `location` | _str_ | Region in which the bucket will be created. |
-| `object_lock` | _bool_ | Flag to set object-lock feature. |
+| Param | Type | Description |
+|----------------------|-------------------------------------------------|--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `location`           | _Optional[str] = None_                          | Region in which the bucket will be created. |
+| `object_lock` | _bool = False_ | Flag to set object-lock feature. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
# Create bucket.
-client.make_bucket("my-bucket")
+client.make_bucket(bucket_name="my-bucket")
# Create bucket on specific region.
-client.make_bucket("my-bucket", "us-west-1")
+client.make_bucket(bucket_name="my-bucket", location="us-west-1")
# Create bucket with object-lock feature on specific region.
-client.make_bucket("my-bucket", "eu-west-2", object_lock=True)
+client.make_bucket(bucket_name="my-bucket", location="eu-west-2", object_lock=True)
```
-### list_buckets()
+### list_buckets(self, *, bucket_region: Optional[str] = None, max_buckets: int = 10000, prefix: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Iterator[Bucket]
List information of all accessible buckets.
+__Parameters__
+
+| Param                | Type                                            | Description                                      |
+|----------------------|-------------------------------------------------|--------------------------------------------------|
+| `bucket_region`      | _Optional[str] = None_                          | Fetch buckets from the region.                   |
+| `max_buckets`        | _int = 10000_                                   | Fetch maximum number of buckets.                 |
+| `prefix`             | _Optional[str] = None_                          | Fetch buckets whose names start with the prefix. |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage.                |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_  | Extra query parameters for advanced usage.       |
+
-__Parameters__
+__Return Value__
-| Return           |
-|:-----------------|
-| List of _Bucket_ |
+| Return                                  |
+|:----------------------------------------|
+| An iterator of _minio.datatypes.Bucket_ |
__Example__
@@ -144,20 +158,23 @@ for bucket in buckets:
-### bucket_exists(bucket_name)
+### bucket_exists(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> bool
Check if a bucket exists.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------|:--------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-if client.bucket_exists("my-bucket"):
+if client.bucket_exists(bucket_name="my-bucket"):
print("my-bucket exists")
else:
print("my-bucket does not exist")
@@ -165,71 +182,76 @@ else:
-### remove_bucket(bucket_name)
+### remove_bucket(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Remove an empty bucket.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------|:--------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.remove_bucket("my-bucket")
+client.remove_bucket(bucket_name="my-bucket")
```
-### list_objects(bucket_name, prefix=None, recursive=False, start_after=None, include_user_meta=False, include_version=False, use_api_v1=False, use_url_encoding_type=True, extra_headers=None, extra_query_params=None)
+### list_objects(self, *, bucket_name: str, prefix: Optional[str] = None, recursive: bool = False, start_after: Optional[str] = None, include_user_meta: bool = False, include_version: bool = False, use_api_v1: bool = False, use_url_encoding_type: bool = True, fetch_owner: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Iterator[Object]
Lists object information of a bucket.
__Parameters__
-| Param | Type | Description |
-|:------------------------|:-------|:-------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `prefix` | _str_ | Object name starts with prefix. |
-| `recursive` | _bool_ | List recursively than directory structure emulation. |
-| `start_after` | _str_ | List objects after this key name. |
-| `include_user_meta` | _bool_ | MinIO specific flag to control to include user metadata. |
-| `include_version` | _bool_ | Flag to control whether include object versions. |
-| `use_api_v1` | _bool_ | Flag to control to use ListObjectV1 S3 API or not. |
-| `use_url_encoding_type` | _bool_ | Flag to control whether URL encoding type to be used or not. |
-| `extra_headers` | _dict_ | Extra HTTP headers for advanced usage. |
-| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. |
+| Param | Type | Description |
+|:------------------------|:------------------------------------------------|:-------------------------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `prefix` | _Optional[str] = None_ | Object name starts with prefix. |
+| `recursive`             | _bool = False_                                  | List recursively rather than directory structure emulation.  |
+| `start_after` | _Optional[str] = None_ | List objects after this key name. |
+| `include_user_meta` | _bool = False_ | MinIO specific flag to control to include user metadata. |
+| `include_version` | _bool = False_ | Flag to control whether include object versions. |
+| `use_api_v1` | _bool = False_ | Flag to control to use ListObjectV1 S3 API or not. |
+| `use_url_encoding_type` | _bool = True_ | Flag to control whether URL encoding type to be used or not. |
+| `fetch_owner` | _bool = False_ | Flag to control to fetch owner information. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:------------------------|
-| An iterator of _Object_ |
+| Return |
+|:----------------------------------------|
+| An iterator of _minio.datatypes.Object_ |
__Example__
```py
# List objects information.
-objects = client.list_objects("my-bucket")
+objects = client.list_objects(bucket_name="my-bucket")
for obj in objects:
print(obj)
# List objects information whose names starts with "my/prefix/".
-objects = client.list_objects("my-bucket", prefix="my/prefix/")
+objects = client.list_objects(bucket_name="my-bucket", prefix="my/prefix/")
for obj in objects:
print(obj)
# List objects information recursively.
-objects = client.list_objects("my-bucket", recursive=True)
+objects = client.list_objects(bucket_name="my-bucket", recursive=True)
for obj in objects:
print(obj)
# List objects information recursively whose names starts with
# "my/prefix/".
objects = client.list_objects(
- "my-bucket", prefix="my/prefix/", recursive=True,
+ bucket_name="my-bucket", prefix="my/prefix/", recursive=True,
)
for obj in objects:
print(obj)
@@ -237,7 +259,7 @@ for obj in objects:
# List objects information recursively after object name
# "my/prefix/world/1".
objects = client.list_objects(
- "my-bucket", recursive=True, start_after="my/prefix/world/1",
+ bucket_name="my-bucket", recursive=True, start_after="my/prefix/world/1",
)
for obj in objects:
print(obj)
@@ -245,15 +267,18 @@ for obj in objects:
-### get_bucket_policy(bucket_name)
+### get_bucket_policy(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str
Get bucket policy configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
@@ -264,21 +289,24 @@ __Return Value__
__Example__
```py
-policy = client.get_bucket_policy("my-bucket")
+policy = client.get_bucket_policy(bucket_name="my-bucket")
```
-### set_bucket_policy(bucket_name, policy)
+### set_bucket_policy(self, *, bucket_name: str, policy: str | bytes, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set bucket policy configuration to a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``Policy`` | _str_ | Bucket policy configuration as JSON string. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `policy` | _str \| bytes_ | Bucket policy configuration as JSON string. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
@@ -301,7 +329,7 @@ policy = {
},
],
}
-client.set_bucket_policy("my-bucket", json.dumps(policy))
+client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy))
# Example anonymous read-write bucket policy.
policy = {
@@ -331,63 +359,72 @@ policy = {
},
],
}
-client.set_bucket_policy("my-bucket", json.dumps(policy))
+client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy))
```
-### delete_bucket_policy(bucket_name)
+### delete_bucket_policy(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete bucket policy configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_bucket_policy("my-bucket")
+client.delete_bucket_policy(bucket_name="my-bucket")
```
-### get_bucket_notification(bucket_name)
+### get_bucket_notification(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> NotificationConfig
Get notification configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Param |
-|:-----------------------------|
-| _NotificationConfig_ object. |
+| Param |
+|:------------------------------------------------------|
+| _minio.notificationconfig.NotificationConfig_ object. |
__Example__
```py
-config = client.get_bucket_notification("my-bucket")
+config = client.get_bucket_notification(bucket_name="my-bucket")
```
-### set_bucket_notification(bucket_name, config)
+### set_bucket_notification(self, *, bucket_name: str, config: NotificationConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set notification configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:---------------------|:----------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``config`` | _NotificationConfig_ | Notification configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `config` | _minio.notificationconfig.NotificationConfig_ | Notification configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
@@ -395,58 +432,64 @@ __Example__
config = NotificationConfig(
queue_config_list=[
QueueConfig(
- "QUEUE-ARN-OF-THIS-BUCKET",
- ["s3:ObjectCreated:*"],
+ queue="QUEUE-ARN-OF-THIS-BUCKET",
+ events=["s3:ObjectCreated:*"],
config_id="1",
prefix_filter_rule=PrefixFilterRule("abc"),
),
],
)
-client.set_bucket_notification("my-bucket", config)
+client.set_bucket_notification(bucket_name="my-bucket", config=config)
```
-### delete_bucket_notification(bucket_name)
+### delete_bucket_notification(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete notification configuration of a bucket. On success, S3 service stops notification of events previously set of the bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_bucket_notification("my-bucket")
+client.delete_bucket_notification(bucket_name="my-bucket")
```
-### listen_bucket_notification(bucket_name, prefix='', suffix='', events=('s3:ObjectCreated:\*', 's3:ObjectRemoved:\*', 's3:ObjectAccessed:\*'))
+### listen_bucket_notification(self, *, bucket_name: str, prefix: str = "", suffix: str = "", events: tuple[str, ...] = ('s3:ObjectCreated:*', 's3:ObjectRemoved:*', 's3:ObjectAccessed:*'), region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> EventIterable
Listen events of object prefix and suffix of a bucket. Caller should iterate returned iterator to read new events.
__Parameters__
-| Param | Type | Description |
-|:--------------|:-------|:--------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `prefix` | _str_ | Listen events of object starts with prefix. |
-| `suffix` | _str_ | Listen events of object ends with suffix. |
-| `events` | _list_ | Events to listen. |
+| Param | Type | Description |
+|:---------------------|:----------------------------------------------------------------------------------------|:--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `prefix` | _str = ""_ | Listen events of object starts with prefix. |
+| `suffix` | _str = ""_ | Listen events of object ends with suffix. |
+| `events` | _tuple[str, ...] = ('s3:ObjectCreated:*', 's3:ObjectRemoved:*', 's3:ObjectAccessed:*')_ | Events to listen. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Param |
-|:------------------------------------|
-| Iterator of event records as _dict_ |
+| Param |
+|:----------------------------------------------------------|
+| Iterator _minio.datatypes.EventIterable_ of event records |
```py
with client.listen_bucket_notification(
- "my-bucket",
+ bucket_name="my-bucket",
prefix="my-prefix/",
events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
) as events:
@@ -456,169 +499,198 @@ with client.listen_bucket_notification(
-### get_bucket_encryption(bucket_name)
-
+### get_bucket_encryption(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[SSEConfig]
Get encryption configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Param |
-|:--------------------|
-| _SSEConfig_ object. |
+| Param |
+|:----------------------------------------------|
+| _Optional[minio.sseconfig.SSEConfig]_ object. |
__Example__
```py
-config = client.get_bucket_encryption("my-bucket")
+config = client.get_bucket_encryption(bucket_name="my-bucket")
```
-### set_bucket_encryption(bucket_name, config)
+### set_bucket_encryption(self, *, bucket_name: str, config: SSEConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set encryption configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------------|:--------------------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``config`` | _SSEConfig_ | Server-side encryption configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `config` | _minio.sseconfig.SSEConfig_ | Server-side encryption configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
client.set_bucket_encryption(
- "my-bucket", SSEConfig(Rule.new_sse_s3_rule()),
+ bucket_name="my-bucket", config=SSEConfig(Rule.new_sse_s3_rule()),
)
```
-### delete_bucket_encryption(bucket_name)
+### delete_bucket_encryption(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete encryption configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_bucket_encryption("my-bucket")
+client.delete_bucket_encryption(bucket_name="my-bucket")
```
-### get_bucket_versioning(bucket_name)
+### get_bucket_versioning(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> VersioningConfig
Get versioning configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
+
+__Return Value__
+
+| Param |
+|:--------------------------------------------------|
+| _minio.versioningconfig.VersioningConfig_ object. |
__Example__
```py
-config = client.get_bucket_versioning("my-bucket")
+config = client.get_bucket_versioning(bucket_name="my-bucket")
print(config.status)
```
-### set_bucket_versioning(bucket_name, config)
+### set_bucket_versioning(self, *, bucket_name: str, config: VersioningConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set versioning configuration to a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:-------------------|:--------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``config`` | _VersioningConfig_ | Versioning configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `config` | _minio.versioningconfig.VersioningConfig_ | Versioning configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.set_bucket_versioning("my-bucket", VersioningConfig(ENABLED))
+client.set_bucket_versioning(bucket_name="my-bucket", config=VersioningConfig(ENABLED))
```
-### delete_bucket_replication(bucket_name)
+### delete_bucket_replication(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete replication configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_bucket_replication("my-bucket")
+client.delete_bucket_replication(bucket_name="my-bucket")
```
-### get_bucket_replication(bucket_name)
+### get_bucket_replication(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[ReplicationConfig]
Get replication configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
-| Return |
-|:----------------------------------------|
-| _ReplicationConfig_ object. |
+| Return |
+|:--------------------------------------------------------------|
+| _Optional[minio.replicationconfig.ReplicationConfig]_ object. |
__Example__
```py
-config = client.get_bucket_replication("my-bucket")
+config = client.get_bucket_replication(bucket_name="my-bucket")
```
-### set_bucket_replication(bucket_name, config)
+### set_bucket_replication(self, *, bucket_name: str, config: ReplicationConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set replication configuration to a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:--------------------|:---------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``config`` | _ReplicationConfig_ | Replication configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `config` | _minio.replicationconfig.ReplicationConfig_ | Replication configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
config = ReplicationConfig(
- "REPLACE-WITH-ACTUAL-ROLE",
- [
+ role="REPLACE-WITH-ACTUAL-ROLE",
+ rules=[
Rule(
- Destination(
+ destination=Destination(
"REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN",
),
- ENABLED,
+ status=ENABLED,
delete_marker_replication=DeleteMarkerReplication(
DISABLED,
),
@@ -633,62 +705,71 @@ config = ReplicationConfig(
),
],
)
-client.set_bucket_replication("my-bucket", config)
+client.set_bucket_replication(bucket_name="my-bucket", config=config)
```
-### delete_bucket_lifecycle(bucket_name)
+### delete_bucket_lifecycle(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete lifecycle configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_bucket_lifecycle("my-bucket")
+client.delete_bucket_lifecycle(bucket_name="my-bucket")
```
-### get_bucket_lifecycle(bucket_name)
+### get_bucket_lifecycle(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[LifecycleConfig]
Get lifecycle configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
-| Return |
-|:--------------------------|
-| _LifecycleConfig_ object. |
+| Return |
+|:----------------------------------------------------------|
+| _Optional[minio.lifecycleconfig.LifecycleConfig]_ object. |
__Example__
```py
-config = client.get_bucket_lifecycle("my-bucket")
+config = client.get_bucket_lifecycle(bucket_name="my-bucket")
```
-### set_bucket_lifecycle(bucket_name, config)
+### set_bucket_lifecycle(self, *, bucket_name: str, config: LifecycleConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set lifecycle configuration to a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------------------|:-------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``config`` | _LifecycleConfig_ | Lifecycle configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `config` | _minio.lifecycleconfig.LifecycleConfig_ | Lifecycle configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
@@ -696,74 +777,83 @@ __Example__
config = LifecycleConfig(
[
Rule(
- ENABLED,
+ status=ENABLED,
rule_filter=Filter(prefix="documents/"),
rule_id="rule1",
transition=Transition(days=30, storage_class="GLACIER"),
),
Rule(
- ENABLED,
+ status=ENABLED,
rule_filter=Filter(prefix="logs/"),
rule_id="rule2",
expiration=Expiration(days=365),
),
],
)
-client.set_bucket_lifecycle("my-bucket", config)
+client.set_bucket_lifecycle(bucket_name="my-bucket", config=config)
```
-### delete_bucket_tags(bucket_name)
+### delete_bucket_tags(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete tags configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_bucket_tags("my-bucket")
+client.delete_bucket_tags(bucket_name="my-bucket")
```
-### get_bucket_tags(bucket_name)
+### get_bucket_tags(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[Tags]
Get tags configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
-| Return |
-|:---------------|
-| _Tags_ object. |
+| Return |
+|:--------------------------------------------|
+| _Optional[minio.commonconfig.Tags]_ object. |
__Example__
```py
-tags = client.get_bucket_tags("my-bucket")
+tags = client.get_bucket_tags(bucket_name="my-bucket")
```
-### set_bucket_tags(bucket_name, tags)
+### set_bucket_tags(self, *, bucket_name: str, tags: Tags, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set tags configuration to a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:-------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``tags`` | _Tags_ | Tags configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `tags` | _minio.commonconfig.Tags_ | Tags configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
@@ -771,153 +861,182 @@ __Example__
tags = Tags.new_bucket_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
-client.set_bucket_tags("my-bucket", tags)
+client.set_bucket_tags(bucket_name="my-bucket", tags=tags)
```
-### delete_object_lock_config(bucket_name)
+### delete_object_lock_config(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete object-lock configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_object_lock_config("my-bucket")
+client.delete_object_lock_config(bucket_name="my-bucket")
```
-### get_object_lock_config(bucket_name)
+### get_object_lock_config(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectLockConfig
Get object-lock configuration of a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
-| Return |
-|:---------------------------|
-| _ObjectLockConfig_ object. |
+| Return |
+|:--------------------------------------------------|
+| _minio.objectlockconfig.ObjectLockConfig_ object. |
__Example__
```py
-config = client.get_object_lock_config("my-bucket")
+config = client.get_object_lock_config(bucket_name="my-bucket")
```
-### set_object_lock_config(bucket_name, config)
+### set_object_lock_config(self, *, bucket_name: str, config: ObjectLockConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set object-lock configuration to a bucket.
__Parameters__
-| Param | Type | Description |
-|:----------------|:-------------------|:---------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``config`` | _ObjectLockConfig_ | Object-Lock configuration. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `config` | _minio.objectlockconfig.ObjectLockConfig_ | Object-Lock configuration. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
config = ObjectLockConfig(GOVERNANCE, 15, DAYS)
-client.set_object_lock_config("my-bucket", config)
+client.set_object_lock_config(bucket_name="my-bucket", config=config)
```
## 3. Object operations
-### append_object(bucket_name, object_name, data, length, content_type="application/octet-stream", metadata=None, sse=None, progress=None, part_size=0, num_parallel_uploads=3, tags=None, retention=None, legal_hold=False)
+### append_object(self, *, bucket_name: str, object_name: str, filename: Optional[str | os.PathLike] = None, stream: Optional[BinaryIO] = None, data: Optional[bytes] = None, length: Optional[int] = None, chunk_size: Optional[int] = None, progress: Optional[ProgressType] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult
-Appends from a stream to existing object in a bucket.
+Appends data to an existing object in a bucket. Only one of `filename`, `stream` or `data` must be provided, and `length` must be provided if `data` is supplied.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------------|:---------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `data` | _object_ | An object having callable read() returning bytes object. |
-| `length` | _int_ | Data size; -1 for unknown size and set valid part_size. |
-| `part_size` | _int_ | Chunk size. |
-| `progress` | _threading_ | A progress object. |
-| `extra_headers` | _dict_ | Extra headers. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-----------------------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `filename` | _Optional[str \| os.PathLike] = None_ | Name of file to append. |
+| `stream`             | _Optional[typing.BinaryIO] = None_              | An object having callable `read()` returning bytes object. |
+| `data` | _Optional[bytes] = None_ | Data in byte array. |
+| `length` | _Optional[int] = None_ | Data length of `data` or `stream`. |
+| `chunk_size` | _Optional[int] = None_ | Chunk size. |
+| `progress` | _Optional[minio.helpers.ProgressType] = None_ | A progress object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return |
+|:------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |
__Example__
```py
# Append data.
result = client.append_object(
- "my-bucket", "my-object", io.BytesIO(b"world"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+    data=b"world",
+ length=5,
)
print(f"appended {result.object_name} object; etag: {result.etag}")
# Append data in chunks.
-data = urlopen(
+with urlopen(
"https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.13.12.tar.xz",
-)
-result = client.append_object(
- "my-bucket", "my-object", data, 148611164, 5*1024*1024,
-)
-print(f"appended {result.object_name} object; etag: {result.etag}")
+) as stream:
+ result = client.append_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ stream=stream,
+ length=148611164,
+ chunk_size=5*1024*1024,
+ )
+ print(f"appended {result.object_name} object; etag: {result.etag}")
# Append unknown sized data.
-data = urlopen(
+with urlopen(
"https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.14.3.tar.xz",
-)
-result = client.append_object(
- "my-bucket", "my-object", data, 149426584, 5*1024*1024,
-)
-print(f"appended {result.object_name} object; etag: {result.etag}")
+) as stream:
+ result = client.append_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ stream=stream,
+ chunk_size=5*1024*1024,
+ )
+ print(f"appended {result.object_name} object; etag: {result.etag}")
```
-### get_object(bucket_name, object_name, offset=0, length=0, request_headers=None, ssec=None, version_id=None, extra_query_params=None)
+### get_object(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, ssec: Optional[SseCustomerKey] = None, offset: int = 0, length: Optional[int] = None, match_etag: Optional[str] = None, not_match_etag: Optional[str] = None, modified_since: Optional[datetime] = None, unmodified_since: Optional[datetime] = None, fetch_checksum: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> BaseHTTPResponse
Gets data from offset to length of an object. Returned response should be closed after use to release network resources. To reuse the connection, it's required to call `response.release_conn()` explicitly.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:-----------------|:-----------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `offset` | _int_ | Start byte position of object data. |
-| `length` | _int_ | Number of bytes of object data from offset. |
-| `request_headers` | _dict_ | Any additional headers to be added with GET request. |
-| `ssec` | _SseCustomerKey_ | Server-side encryption customer key. |
-| `version_id` | _str_ | Version-ID of the object. |
-| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version-ID of the object. |
+| `ssec` | _Optional[minio.sse.SseCustomerKey] = None_ | Server-side encryption customer key. |
+| `offset` | _int = 0_ | Start byte position of object data. |
+| `length` | _Optional[int] = None_ | Number of bytes of object data from offset. |
+| `match_etag` | _Optional[str] = None_ | Match ETag of the object. |
+| `not_match_etag`     | _Optional[str] = None_                          | Non-match ETag of the object.               |
+| `modified_since` | _Optional[datetime.datetime] = None_ | Modified-since of the object. |
+| `unmodified_since` | _Optional[datetime.datetime] = None_ | Unmodified-since of the object. |
+| `fetch_checksum` | _bool = False_ | Fetch object checksum. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------------------|
-| _urllib3.response.HTTPResponse_ object. |
+| Return |
+|:-------------------------------------------------------------------------------|
+| _urllib3.response.BaseHTTPResponse_ or _urllib3.response.HTTPResponse_ object. |
__Example__
```py
# Get data of an object.
try:
- response = client.get_object("my-bucket", "my-object")
+ response = client.get_object(bucket_name="my-bucket", object_name="my-object")
# Read data from response.
finally:
response.close()
@@ -926,7 +1045,8 @@ finally:
# Get data of an object of version-ID.
try:
response = client.get_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Read data from response.
@@ -937,7 +1057,10 @@ finally:
# Get data of an object from offset and length.
try:
response = client.get_object(
- "my-bucket", "my-object", offset=512, length=1024,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ offset=512,
+ length=1024,
)
# Read data from response.
finally:
@@ -947,7 +1070,8 @@ finally:
# Get data of an SSE-C encrypted object.
try:
response = client.get_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
# Read data from response.
@@ -958,34 +1082,37 @@ finally:
-### select_object_content(bucket_name, object_name, request)
+### select_object_content(self, *, bucket_name: str, object_name: str, request: SelectRequest, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> SelectObjectReader
Select content of an object by SQL expression.
__Parameters__
-| Param | Type | Description |
-|:--------------|:----------------|:---------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `request` | _SelectRequest_ | Select request. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `request` | _minio.select.SelectRequest_ | Select request. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:-------------------------------------------------------------------------------------|
-| A reader contains requested records and progress information as _SelectObjectReader_ |
+| Return |
+|:------------------------------------------|
+| _minio.select.SelectObjectReader_ object. |
__Example__
```py
with client.select_object_content(
- "my-bucket",
- "my-object.csv",
- SelectRequest(
- "select * from S3Object",
- CSVInputSerialization(),
- CSVOutputSerialization(),
+ bucket_name="my-bucket",
+ object_name="my-object.csv",
+ request=SelectRequest(
+ expression="select * from S3Object",
+ input_serialization=CSVInputSerialization(),
+ output_serialization=CSVOutputSerialization(),
request_progress=True,
),
) as result:
@@ -996,74 +1123,89 @@ with client.select_object_content(
-### fget_object(bucket_name, object_name, file_path, request_headers=None, ssec=None, version_id=None, extra_query_params=None, tmp_file_path=None)
+### fget_object(self, *, bucket_name: str, object_name: str, file_path: str, match_etag: Optional[str] = None, not_match_etag: Optional[str] = None, modified_since: Optional[datetime] = None, unmodified_since: Optional[datetime] = None, fetch_checksum: bool = False, ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, tmp_file_path: Optional[str] = None, progress: Optional[ProgressType] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
+
Downloads data of an object to file.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:-----------------|:-----------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `file_path` | _str_ | Name of file to download. |
-| `request_headers` | _dict_ | Any additional headers to be added with GET request. |
-| `ssec` | _SseCustomerKey_ | Server-side encryption customer key. |
-| `version_id` | _str_ | Version-ID of the object. |
-| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. |
-| `tmp_file_path` | _str_ | Path to a temporary file. |
-
-__Return Value__
-
-| Return |
-|:-------------------------------|
-| Object information as _Object_ |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `file_path` | _str_ | Name of file to download. |
+| `version_id` | _Optional[str] = None_ | Version-ID of the object. |
+| `ssec`               | _Optional[minio.sse.SseCustomerKey] = None_     | Server-side encryption customer key.        |
+| `tmp_file_path`      | _Optional[str] = None_                          | Path to a temporary file.                   |
+| `progress` | _Optional[minio.helpers.ProgressType] = None_ | A progress object. |
+| `match_etag` | _Optional[str] = None_ | Match ETag of the object. |
+| `not_match_etag`     | _Optional[str] = None_                          | Non-match ETag of the object.               |
+| `modified_since` | _Optional[datetime.datetime] = None_ | Modified-since of the object. |
+| `unmodified_since` | _Optional[datetime.datetime] = None_ | Unmodified-since of the object. |
+| `fetch_checksum` | _bool = False_ | Fetch object checksum. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
# Download data of an object.
-client.fget_object("my-bucket", "my-object", "my-filename")
+client.fget_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
+)
# Download data of an object of version-ID.
client.fget_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Download data of an SSE-C encrypted object.
client.fget_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
```
-### copy_object(bucket_name, object_name, source, sse=None, metadata=None, tags=None, retention=None, legal_hold=False, metadata_directive=None, tagging_directive=None)
+### copy_object(self, *, bucket_name: str, object_name: str, source: CopySource, sse: Optional[Sse] = None, user_metadata: Optional[HTTPHeaderDict] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, metadata_directive: Optional[str] = None, tagging_directive: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult
Create an object by server-side copying data from another object. In this API maximum supported source object size is 5GiB.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:-------------|:----------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `source` | _CopySource_ | Source object information. |
-| `sse` | _Sse_ | Server-side encryption of destination object. |
-| `metadata` | _dict_ | Any user-defined metadata to be copied along with destination object. |
-| `tags` | _Tags_ | Tags for destination object. |
-| `retention` | _Retention_ | Retention configuration. |
-| `legal_hold` | _bool_ | Flag to set legal hold for destination object. |
-| `metadata_directive` | _str_ | Directive used to handle user metadata for destination object. |
-| `tagging_directive` | _str_ | Directive used to handle tags for destination object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:----------------------------------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `source` | _minio.commonconfig.CopySource_ | Source object information. |
+| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption of destination object. |
+| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Any user-defined metadata to be copied along with destination object. |
+| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for destination object. |
+| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. |
+| `legal_hold` | _bool = False_ | Flag to set legal hold for destination object. |
+| `metadata_directive` | _Optional[str] = None_ | Directive used to handle user metadata for destination object. |
+| `tagging_directive` | _Optional[str] = None_ | Directive used to handle tags for destination object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return |
+|:------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |
__Example__
@@ -1073,31 +1215,37 @@ from minio.commonconfig import REPLACE, CopySource
# copy an object from a bucket to another.
result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource("my-sourcebucket", "my-sourceobject"),
+ bucket_name="my-bucket",
+ object_name="my-object",
+    source=CopySource(
+ bucket_name="my-sourcebucket",
+ object_name="my-sourceobject",
+ ),
)
print(result.object_name, result.version_id)
# copy an object with condition.
result = client.copy_object(
- "my-bucket",
- "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
    source=CopySource(
- "my-sourcebucket",
- "my-sourceobject",
+ bucket_name="my-sourcebucket",
+ object_name="my-sourceobject",
modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc),
),
)
print(result.object_name, result.version_id)
# copy an object from a bucket with replacing metadata.
-metadata = {"test_meta_key": "test_meta_value"}
+user_metadata = {"test_meta_key": "test_meta_value"}
result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource("my-sourcebucket", "my-sourceobject"),
- metadata=metadata,
+ bucket_name="my-bucket",
+ object_name="my-object",
+    source=CopySource(
+ bucket_name="my-sourcebucket",
+ object_name="my-sourceobject",
+ ),
+ user_metadata=user_metadata,
metadata_directive=REPLACE,
)
print(result.object_name, result.version_id)
@@ -1105,29 +1253,32 @@ print(result.object_name, result.version_id)
-### compose_object(bucket_name, object_name, sources, sse=None, metadata=None, tags=None, retention=None, legal_hold=False)
+### compose_object(self, *, bucket_name: str, object_name: str, sources: list[ComposeSource], sse: Optional[Sse] = None, user_metadata: Optional[HTTPHeaderDict] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult
Create an object by combining data from different source objects using server-side copy.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------------|:----------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `sources` | _list_ | List of _ComposeSource_ object. |
-| `sse` | _Sse_ | Server-side encryption of destination object. |
-| `metadata` | _dict_ | Any user-defined metadata to be copied along with destination object. |
-| `tags` | _Tags_ | Tags for destination object. |
-| `retention` | _Retention_ | Retention configuration. |
-| `legal_hold` | _bool_ | Flag to set legal hold for destination object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:----------------------------------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `sources` | _list[minio.commonconfig.ComposeSource]_ | List of _ComposeSource_ object. |
+| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption of destination object. |
+| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Any user-defined metadata to be copied along with destination object. |
+| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for destination object. |
+| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. |
+| `legal_hold` | _bool = False_ | Flag to set legal hold for destination object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return |
+|:------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |
__Example__
@@ -1136,139 +1287,178 @@ from minio.commonconfig import ComposeSource
from minio.sse import SseS3
sources = [
- ComposeSource("my-job-bucket", "my-object-part-one"),
- ComposeSource("my-job-bucket", "my-object-part-two"),
- ComposeSource("my-job-bucket", "my-object-part-three"),
+ ComposeSource(
+ bucket_name="my-job-bucket",
+ object_name="my-object-part-one",
+ ),
+ ComposeSource(
+ bucket_name="my-job-bucket",
+ object_name="my-object-part-two",
+ ),
+ ComposeSource(
+ bucket_name="my-job-bucket",
+ object_name="my-object-part-three",
+ ),
]
# Create my-bucket/my-object by combining source object
# list.
-result = client.compose_object("my-bucket", "my-object", sources)
+result = client.compose_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ sources=sources,
+)
print(result.object_name, result.version_id)
# Create my-bucket/my-object with user metadata by combining
# source object list.
result = client.compose_object(
- "my-bucket",
- "my-object",
- sources,
- metadata={"test_meta_key": "test_meta_value"},
+ bucket_name="my-bucket",
+ object_name="my-object",
+ sources=sources,
+ user_metadata={"test_meta_key": "test_meta_value"},
)
print(result.object_name, result.version_id)
# Create my-bucket/my-object with user metadata and
# server-side encryption by combining source object list.
-client.compose_object("my-bucket", "my-object", sources, sse=SseS3())
+result = client.compose_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ sources=sources,
+ sse=SseS3(),
+)
print(result.object_name, result.version_id)
```
-### put_object(bucket_name, object_name, data, length, content_type="application/octet-stream", metadata=None, sse=None, progress=None, part_size=0, num_parallel_uploads=3, tags=None, retention=None, legal_hold=False)
+### put_object(self, *, bucket_name: str, object_name: str, data: BinaryIO, length: int, content_type: str = "application/octet-stream", headers: Optional[HTTPHeaderDict] = None, user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, progress: Optional[ProgressType] = None, part_size: int = 0, checksum: Optional[Algorithm] = None, num_parallel_uploads: int = 3, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult
Uploads data from a stream to an object in a bucket.
__Parameters__
-| Param | Type | Description |
-|:---------------|:------------|:--------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `data` | _object_ | An object having callable read() returning bytes object. |
-| `length` | _int_ | Data size; -1 for unknown size and set valid part_size. |
-| `content_type` | _str_ | Content type of the object. |
-| `metadata` | _dict_ | Any additional metadata to be uploaded along with your PUT request. |
-| `sse` | _Sse_ | Server-side encryption. |
-| `progress` | _threading_ | A progress object. |
-| `part_size` | _int_ | Multipart part size. |
-| `tags` | _Tags_ | Tags for the object. |
-| `retention` | _Retention_ | Retention configuration. |
-| `legal_hold` | _bool_ | Flag to set legal hold for the object. |
+| Param | Type | Description |
+|:-----------------------|:------------------------------------------------|:----------------------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `data` | _io.BinaryIO_ | An object having callable read() returning bytes object. |
+| `length` | _int_ | Data size; -1 for unknown size and set valid `part_size`. |
+| `content_type` | _str = "application/octet-stream"_ | Content type of the object. |
+| `headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Additional headers. |
+| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | User metadata of the object. |
+| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption. |
+| `progress` | _Optional[minio.helpers.ProgressType] = None_ | A progress object. |
+| `part_size` | _int = 0_ | Multipart part size. |
+| `checksum` | _Optional[minio.checksum.Algorithm] = None_ | Algorithm for checksum computation. |
+| `num_parallel_uploads` | _int = 3_ | Number of parallel uploads. |
+| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for the object. |
+| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. |
+| `legal_hold` | _bool = False_ | Flag to set legal hold for the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return |
+|:------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |
__Example__
```py
# Upload data.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload unknown sized data.
-data = urlopen(
+with urlopen(
"https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.81.tar.xz",
-)
-result = client.put_object(
- "my-bucket", "my-object", data, length=-1, part_size=10*1024*1024,
-)
-print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
-)
+) as data:
+ result = client.put_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=data,
+ length=-1,
+ part_size=10*1024*1024,
+ )
+ print(
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
+ )
# Upload data with content-type.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
content_type="application/csv",
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with metadata.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
    user_metadata={"My-Project": "one"},
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with customer key type of server-side encryption.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with KMS type of server-side encryption.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with S3 type of server-side encryption.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
sse=SseS3(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with tags, retention and legal-hold.
@@ -1278,121 +1468,151 @@ date = datetime.utcnow().replace(
tags = Tags(for_object=True)
tags["User"] = "jsmith"
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
tags=tags,
retention=Retention(GOVERNANCE, date),
legal_hold=True,
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with progress bar.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
progress=Progress(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
```
-### fput_object(bucket_name, object_name, file_path, content_type="application/octet-stream", metadata=None, sse=None, progress=None, part_size=0, num_parallel_uploads=3, tags=None, retention=None, legal_hold=False)
+### fput_object(self, *, bucket_name: str, object_name: str, file_path: str, content_type: str = "application/octet-stream", headers: Optional[HTTPHeaderDict] = None, user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, progress: Optional[ProgressType] = None, part_size: int = 0, checksum: Optional[Algorithm] = None, num_parallel_uploads: int = 3, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult
Uploads data from a file to an object in a bucket.
-| Param | Type | Description |
-|:---------------|:------------|:--------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `file_path` | _str_ | Name of file to upload. |
-| `content_type` | _str_ | Content type of the object. |
-| `metadata` | _dict_ | Any additional metadata to be uploaded along with your PUT request. |
-| `sse` | _Sse_ | Server-side encryption. |
-| `progress` | _threading_ | A progress object. |
-| `part_size` | _int_ | Multipart part size. |
-| `tags` | _Tags_ | Tags for the object. |
-| `retention` | _Retention_ | Retention configuration. |
-| `legal_hold` | _bool_ | Flag to set legal hold for the object. |
+__Parameters__
+
+| Param | Type | Description |
+|:-----------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `file_path` | _str_ | Name of file to upload. |
+| `content_type` | _str = "application/octet-stream"_ | Content type of the object. |
+| `headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Additional headers. |
+| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | User metadata of the object. |
+| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption. |
+| `progress` | _Optional[minio.helpers.ProgressType] = None_ | A progress object. |
+| `part_size` | _int = 0_ | Multipart part size. |
+| `checksum` | _Optional[minio.checksum.Algorithm] = None_ | Algorithm for checksum computation. |
+| `num_parallel_uploads` | _int = 3_ | Number of parallel uploads. |
+| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for the object. |
+| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. |
+| `legal_hold` | _bool = False_ | Flag to set legal hold for the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return |
+|:------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |
__Example__
```py
# Upload data.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
+)
+
+# Upload data with part size.
+result = client.fput_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
+ part_size=10*1024*1024,
+)
+print(
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with content-type.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
content_type="application/csv",
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with metadata.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
    user_metadata={"My-Project": "one"},
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with customer key type of server-side encryption.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with KMS type of server-side encryption.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with S3 type of server-side encryption.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
sse=SseS3(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with tags, retention and legal-hold.
@@ -1402,143 +1622,159 @@ date = datetime.utcnow().replace(
tags = Tags(for_object=True)
tags["User"] = "jsmith"
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
tags=tags,
retention=Retention(GOVERNANCE, date),
legal_hold=True,
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with progress bar.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
progress=Progress(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
```
-### stat_object(bucket_name, object_name, ssec=None, version_id=None, extra_headers=None, extra_query_params=None)
+### stat_object(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, ssec: Optional[SseCustomerKey] = None, offset: int = 0, length: Optional[int] = None, match_etag: Optional[str] = None, not_match_etag: Optional[str] = None, modified_since: Optional[datetime] = None, unmodified_since: Optional[datetime] = None, fetch_checksum: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Object
Get object information and metadata of an object.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:-----------------|:-------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `ssec` | _SseCustomerKey_ | Server-side encryption customer key. |
-| `version_id` | _str_ | Version ID of the object. |
-| `extra_headers` | _dict_ | Extra HTTP headers for advanced usage. |
-| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:--------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `ssec` | _Optional[minio.sse.SseCustomerKey] = None_ | Server-side encryption customer key. |
+| `offset` | _int = 0_ | Start byte position of object data. |
+| `length` | _Optional[int] = None_ | Number of bytes of object data from offset. |
+| `match_etag` | _Optional[str] = None_ | Match ETag of the object. |
+| `not_match_etag`     | _Optional[str] = None_                          | Non-match ETag of the object.               |
+| `modified_since` | _Optional[datetime.datetime] = None_ | Modified-since of the object. |
+| `unmodified_since` | _Optional[datetime.datetime] = None_ | Unmodified-since of the object. |
+| `fetch_checksum` | _bool = False_ | Fetch object checksum. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:-------------------------------|
-| Object information as _Object_ |
+| Return |
+|:---------------------------------|
+| _minio.datatypes.Object_ object. |
__Example__
```py
# Get object information.
-result = client.stat_object("my-bucket", "my-object")
-print(
- "last-modified: {0}, size: {1}".format(
- result.last_modified, result.size,
- ),
+result = client.stat_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
)
+print(f"last-modified: {result.last_modified}, size: {result.size}")
# Get object information of version-ID.
result = client.stat_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
-print(
- "last-modified: {0}, size: {1}".format(
- result.last_modified, result.size,
- ),
-)
+print(f"last-modified: {result.last_modified}, size: {result.size}")
# Get SSE-C encrypted object information.
result = client.stat_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
-print(
- "last-modified: {0}, size: {1}".format(
- result.last_modified, result.size,
- ),
-)
+print(f"last-modified: {result.last_modified}, size: {result.size}")
```
-### remove_object(bucket_name, object_name, version_id=None)
+### remove_object(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Remove an object.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------|:---------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `version_id` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
# Remove object.
-client.remove_object("my-bucket", "my-object")
+client.remove_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+)
# Remove version of an object.
client.remove_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
```
-### remove_objects(bucket_name, delete_object_list, bypass_governance_mode=False)
+### remove_objects(self, *, bucket_name: str, delete_object_list: Iterable[DeleteObject], bypass_governance_mode: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Iterator[DeleteError]
Remove multiple objects.
__Parameters__
-| Param | Type | Description |
-|:-------------------------|:-----------|:--------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `delete_object_list` | _iterable_ | An iterable containing :class:`DeleteObject ` object. |
-| `bypass_governance_mode` | _bool_ | Bypass Governance retention mode. |
+| Param | Type | Description |
+|:-------------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `delete_object_list` | _Iterable[minio.deleteobjects.DeleteObject]_ | DeleteObject iterable. |
+| `bypass_governance_mode` | _bool = False_ | Bypass Governance retention mode. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:-----------------------------------------------------------------|
-| An iterator containing :class:`DeleteError ` object |
+| Return |
+|:----------------------------------------------------|
+| _Iterator[minio.deleteobjects.DeleteError]_ object. |
__Example__
```py
# Remove list of objects.
errors = client.remove_objects(
- "my-bucket",
- [
- DeleteObject("my-object1"),
- DeleteObject("my-object2"),
- DeleteObject("my-object3", "13f88b18-8dcd-4c83-88f2-8631fdb6250c"),
+ bucket_name="my-bucket",
+ delete_object_list=[
+ DeleteObject(name="my-object1"),
+ DeleteObject(name="my-object2"),
+ DeleteObject(
+ name="my-object3",
+ version_id="13f88b18-8dcd-4c83-88f2-8631fdb6250c",
+ ),
],
)
for error in errors:
@@ -1547,71 +1783,87 @@ for error in errors:
# Remove a prefix recursively.
delete_object_list = map(
    lambda x: DeleteObject(name=x.object_name),
- client.list_objects("my-bucket", "my/prefix/", recursive=True),
+ client.list_objects(
+ bucket_name="my-bucket",
+ prefix="my/prefix/",
+ recursive=True,
+ ),
+)
+errors = client.remove_objects(
+ bucket_name="my-bucket",
+ delete_object_list=delete_object_list,
)
-errors = client.remove_objects("my-bucket", delete_object_list)
for error in errors:
print("error occurred when deleting object", error)
```
-### delete_object_tags(bucket_name, object_name, version_id=None)
+### delete_object_tags(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Delete tags configuration of an object.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:---------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``object_name`` | _str_ | Object name in the bucket. |
-| ``version_id`` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.delete_object_tags("my-bucket", "my-object")
+client.delete_object_tags(bucket_name="my-bucket", object_name="my-object")
```
-### get_object_tags(bucket_name, object_name, version_id=None)
+### get_object_tags(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[Tags]
Get tags configuration of an object.
__Parameters__
-| Param | Type | Description |
-|:----------------|:------|:---------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``object_name`` | _str_ | Object name in the bucket. |
-| ``version_id`` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
-| Return |
-|:---------------|
-| _Tags_ object. |
+| Return |
+|:--------------------------------------------|
+| _Optional[minio.commonconfig.Tags]_ object. |
__Example__
```py
-tags = client.get_object_tags("my-bucket", "my-object")
+tags = client.get_object_tags(bucket_name="my-bucket", object_name="my-object")
```
-### set_object_tags(bucket_name, object_name, tags, version_id=None)
+### set_object_tags(self, *, bucket_name: str, object_name: str, tags: Tags, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set tags configuration to an object.
__Parameters__
-| Param | Type | Description |
-|:----------------|:-------|:---------------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
-| ``object_name`` | _str_ | Object name in the bucket. |
-| ``tags`` | _Tags_ | Tags configuration. |
-| ``version_id`` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `tags` | _minio.commonconfig.Tags_ | Tags configuration. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
@@ -1619,67 +1871,79 @@ __Example__
tags = Tags.new_object_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
-client.set_object_tags("my-bucket", "my-object", tags)
+client.set_object_tags(bucket_name="my-bucket", object_name="my-object", tags=tags)
```
-### enable_object_legal_hold(bucket_name, object_name, version_id=None)
+### enable_object_legal_hold(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Enable legal hold on an object.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------|:---------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `version_id` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.enable_object_legal_hold("my-bucket", "my-object")
+client.enable_object_legal_hold(bucket_name="my-bucket", object_name="my-object")
```
-### disable_object_legal_hold(bucket_name, object_name, version_id=None)
+### disable_object_legal_hold(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Disable legal hold on an object.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------|:---------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `version_id` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-client.disable_object_legal_hold("my-bucket", "my-object")
+client.disable_object_legal_hold(bucket_name="my-bucket", object_name="my-object")
```
-### is_object_legal_hold_enabled(bucket_name, object_name, version_id=None)
+### is_object_legal_hold_enabled(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> bool
Returns true if legal hold is enabled on an object.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------|:---------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `version_id` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
-if client.is_object_legal_hold_enabled("my-bucket", "my-object"):
+if client.is_object_legal_hold_enabled(
+ bucket_name="my-bucket",
+ object_name="my-object",
+):
print("legal hold is enabled on my-object")
else:
print("legal hold is not enabled on my-object")
@@ -1687,70 +1951,127 @@ else:
-### get_object_retention(bucket_name, object_name, version_id=None)
+### get_object_retention(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[Retention]
Get retention information of an object.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:-----------------|:-------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `version_id` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:-------------------|
-| _Retention_ object |
+| Return |
+|:----------------------------------------------|
+| _Optional[minio.retention.Retention]_ object. |
__Example__
```py
-config = client.get_object_retention("my-bucket", "my-object")
+config = client.get_object_retention(
+ bucket_name="my-bucket",
+ object_name="my-object",
+)
```
-### set_object_retention(bucket_name, object_name, config, version_id=None)
+### set_object_retention(self, *, bucket_name: str, object_name: str, config: Retention, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)
Set retention information to an object.
__Parameters__
-| Param | Type | Description |
-|:--------------|:------------|:---------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `config` | _Retention_ | Retention configuration. |
-| `version_id` | _str_ | Version ID of the object. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `config` | _minio.retention.Retention_ | Retention configuration. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Example__
```py
config = Retention(GOVERNANCE, datetime.utcnow() + timedelta(days=10))
-client.set_object_retention("my-bucket", "my-object", config)
+client.set_object_retention(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ config=config,
+)
+```
+
+
+
+### prompt_object(self, *, bucket_name: str, object_name: str, prompt: str, lambda_arn: Optional[str] = None, ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None, **kwargs: Optional[Any]) -> BaseHTTPResponse
+
+Prompt an object using natural language.
+
+__Parameters__
+
+| Param | Type | Description |
+|----------------------|-------------------------------------------------|-------------------------------------------------------------------------|
+| `bucket_name` | `str` | Name of the bucket. |
+| `object_name` | `str` | Object name in the bucket. |
+| `prompt` | `str` | Natural language prompt to interact with the object using the AI model. |
+| `lambda_arn` | `Optional[str] = None` | AWS Lambda ARN to use for processing the prompt. |
+| `ssec` | `Optional[minio.sse.SseCustomerKey] = None` | Server-side encryption customer key. |
+| `version_id` | `Optional[str] = None` | Version ID of the object. |
+| `region` | `Optional[str] = None` | Region of the bucket to skip auto probing. |
+| `extra_headers` | `Optional[minio.helpers.HTTPHeaderDict] = None` | Extra headers for advanced usage. |
+| `extra_query_params` | `Optional[minio.helpers.HTTPQueryDict] = None` | Extra query parameters for advanced usage. |
+| `**kwargs` | `Optional[Any]` | Additional parameters for advanced usage. |
+
+__Return Value__
+
+| Return |
+|:-------------------------------------------------------------------------------|
+| _urllib3.response.BaseHTTPResponse_ or _urllib3.response.HTTPResponse_ object. |
+
+__Example__
+
+```py
+response = None
+try:
+ response = client.prompt_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ prompt="Describe the object for me",
+ )
+ # Read data from response
+finally:
+ if response:
+ response.close()
+ response.release_conn()
```
-### presigned_get_object(bucket_name, object_name, expires=timedelta(days=7), response_headers=None, request_date=None, version_id=None, extra_query_params=None)
+### presigned_get_object(self, *, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), request_date: Optional[datetime] = None, version_id: Optional[str] = None, region: Optional[str] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str
Get presigned URL of an object to download its data with expiry time and custom request parameters.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:---------------------|:---------------------------------------------------------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `expires` | _datetime.timedelta_ | Expiry in seconds; defaults to 7 days. |
-| `response_headers` | _dict_ | Optional response_headers argument to specify response fields like date, size, type of file, data about server, etc. |
-| `request_date` | _datetime.datetime_ | Optional request_date argument to specify a different request date. Default is current date. |
-| `version_id` | _str_ | Version ID of the object. |
-| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. |
+| Param | Type | Description |
+|:---------------------|:--------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `expires` | _datetime.timedelta = datetime.timedelta(days=7)_ | Expiry in seconds. |
+| `request_date` | _Optional[datetime.datetime] = None_ | Request time instead of current time. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
@@ -1763,30 +2084,37 @@ __Example__
```py
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with default expiry (i.e. 7 days).
-url = client.presigned_get_object("my-bucket", "my-object")
+url = client.presigned_get_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+)
print(url)
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.presigned_get_object(
- "my-bucket", "my-object", expires=timedelta(hours=2),
+ bucket_name="my-bucket",
+ object_name="my-object",
+ expires=timedelta(hours=2),
)
print(url)
```
-### presigned_put_object(bucket_name, object_name, expires=timedelta(days=7))
+### presigned_put_object(self, *, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), region: Optional[str] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str
Get presigned URL of an object to upload data with expiry time and custom request parameters.
__Parameters__
-| Param | Type | Description |
-|:--------------|:---------------------|:---------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `expires` | _datetime.timedelta_ | Expiry in seconds; defaults to 7 days. |
+| Param | Type | Description |
+|:---------------------|:--------------------------------------------------|:-------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `expires` | _datetime.timedelta = datetime.timedelta(days=7)_ | Expiry in seconds. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
@@ -1799,34 +2127,39 @@ __Example__
```py
# Get presigned URL string to upload data to 'my-object' in
# 'my-bucket' with default expiry (i.e. 7 days).
-url = client.presigned_put_object("my-bucket", "my-object")
+url = client.presigned_put_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+)
print(url)
# Get presigned URL string to upload data to 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.presigned_put_object(
- "my-bucket", "my-object", expires=timedelta(hours=2),
+ bucket_name="my-bucket",
+ object_name="my-object",
+ expires=timedelta(hours=2),
)
print(url)
```
-### presigned_post_policy(policy)
+### presigned_post_policy(policy: PostPolicy) -> dict[str, str]
Get form-data of PostPolicy of an object to upload its data using POST method.
__Parameters__
-| Param | Type | Description |
-|:---------|:-------------|:-------------|
-| `policy` | _PostPolicy_ | Post policy. |
+| Param | Type | Description |
+|:---------|:-----------------------------|:-------------|
+| `policy` | _minio.datatypes.PostPolicy_ | Post policy. |
__Return Value__
-| Return |
-|:----------------------------|
-| Form-data containing _dict_ |
+| Return |
+|:----------------------------------------------|
+| _dict[str, str]_ object containing form-data. |
__Example__
@@ -1843,22 +2176,22 @@ form_data = client.presigned_post_policy(policy)
-### get_presigned_url(method, bucket_name, object_name, expires=timedelta(days=7), response_headers=None, request_date=None, version_id=None, extra_query_params=None)
+### get_presigned_url(self, *, method: str, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), request_date: Optional[datetime] = None, version_id: Optional[str] = None, region: Optional[str] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str
Get presigned URL of an object for HTTP method, expiry time and custom request parameters.
__Parameters__
-| Param | Type | Description |
-|:---------------------|:---------------------|:---------------------------------------------------------------------------------------------------------------------|
-| `method` | _str_ | HTTP method. |
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_name` | _str_ | Object name in the bucket. |
-| `expires` | _datetime.timedelta_ | Expiry in seconds; defaults to 7 days. |
-| `response_headers` | _dict_ | Optional response_headers argument to specify response fields like date, size, type of file, data about server, etc. |
-| `request_date` | _datetime.datetime_ | Optional request_date argument to specify a different request date. Default is current date. |
-| `version_id` | _str_ | Version ID of the object. |
-| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. |
+| Param | Type | Description |
+|:---------------------|:--------------------------------------------------|:-------------------------------------------|
+| `method` | _str_ | HTTP method. |
+| `bucket_name` | _str_ | Name of the bucket. |
+| `object_name` | _str_ | Object name in the bucket. |
+| `expires` | _datetime.timedelta = datetime.timedelta(days=7)_ | Expiry in seconds. |
+| `request_date` | _Optional[datetime.datetime] = None_ | Request time instead of current time. |
+| `version_id` | _Optional[str] = None_ | Version ID of the object. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
@@ -1872,9 +2205,9 @@ __Example__
# Get presigned URL string to delete 'my-object' in
# 'my-bucket' with one day expiry.
url = client.get_presigned_url(
- "DELETE",
- "my-bucket",
- "my-object",
+ method="DELETE",
+ bucket_name="my-bucket",
+ object_name="my-object",
expires=timedelta(days=1),
)
print(url)
@@ -1883,20 +2216,20 @@ print(url)
# 'my-bucket' with response-content-type as application/json
# and one day expiry.
url = client.get_presigned_url(
- "PUT",
- "my-bucket",
- "my-object",
+ method="PUT",
+ bucket_name="my-bucket",
+ object_name="my-object",
expires=timedelta(days=1),
- response_headers={"response-content-type": "application/json"},
+ extra_query_params=HTTPQueryDict({"response-content-type": "application/json"}),
)
print(url)
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.get_presigned_url(
- "GET",
- "my-bucket",
- "my-object",
+ method="GET",
+ bucket_name="my-bucket",
+ object_name="my-object",
expires=timedelta(hours=2),
)
print(url)
@@ -1904,43 +2237,54 @@ print(url)
-### upload_snowball_objects(bucket_name, object_list, metadata=None, sse=None, tags=None, retention=None, legal_hold=False, staging_filename=None, compression=False)
+### upload_snowball_objects(self, *, bucket_name: str, objects: Iterable[SnowballObject], headers: Optional[HTTPHeaderDict] = None, user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, staging_filename: Optional[str] = None, compression: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult
Uploads multiple objects in a single put call. It is done by creating intermediate TAR file optionally compressed which is uploaded to S3 service.
__Parameters__
-| Param | Type | Description |
-|:-------------------|:------------|:------------------------------------------------------------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
-| `object_list` | _iterable_ | An iterable containing :class:`SnowballObject ` object. |
-| `metadata` | _dict_ | Any additional metadata to be uploaded along with your PUT request. |
-| `sse` | _Sse_ | Server-side encryption. |
-| `tags` | _Tags_ | Tags for the object. |
-| `retention` | _Retention_ | Retention configuration. |
-| `legal_hold` | _bool_ | Flag to set legal hold for the object. |
-| `staging_filename` | _str_ | A staging filename to create intermediate tarball. |
-| `compression` | _bool_ | Flag to compress tarball. |
+| Param | Type | Description |
+|:---------------------|:------------------------------------------------|:---------------------------------------------------|
+| `bucket_name` | _str_ | Name of the bucket. |
+| `objects`            | _Iterable[minio.commonconfig.SnowballObject]_   | An iterable containing snowball objects.            |
+| `headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Additional headers. |
+| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | User metadata. |
+| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption. |
+| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for the object. |
+| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. |
+| `legal_hold` | _bool = False_ | Flag to set legal hold for the object. |
+| `staging_filename` | _Optional[str] = None_ | A staging filename to create intermediate tarball. |
+| `compression` | _bool = False_ | Flag to compress tarball. |
+| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. |
+| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. |
__Return Value__
-| Return |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return |
+|:------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |
__Example__
```py
# Upload snowball object.
client.upload_snowball_objects(
- "my-bucket",
- [
- SnowballObject("my-object1", filename="/etc/hostname"),
+ bucket_name="my-bucket",
+ objects=[
+ SnowballObject(
+ object_name="my-object1",
+ filename="/etc/hostname",
+ ),
SnowballObject(
- "my-object2", data=io.BytesIO("hello"), length=5,
+ object_name="my-object2",
+ data=io.BytesIO(b"hello"),
+ length=5,
),
SnowballObject(
- "my-object3", data=io.BytesIO("world"), length=5,
+ object_name="my-object3",
+ data=io.BytesIO(b"world"),
+ length=5,
mod_time=datetime.now(),
),
],
diff --git a/examples/append_object.py b/examples/append_object.py
index b1d340e5..90b4569d 100644
--- a/examples/append_object.py
+++ b/examples/append_object.py
@@ -17,41 +17,53 @@
import io
from urllib.request import urlopen
-from examples.progress import Progress
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Upload data.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello, "), 7,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello, "),
+ length=7,
)
print(f"created {result.object_name} object; etag: {result.etag}")
# Append data.
result = client.append_object(
- "my-bucket", "my-object", io.BytesIO(b"world"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"world"),
+ length=5,
)
print(f"appended {result.object_name} object; etag: {result.etag}")
# Append data in chunks.
-data = urlopen(
+with urlopen(
"https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.13.12.tar.xz",
-)
-result = client.append_object(
- "my-bucket", "my-object", data, 148611164, 5*1024*1024,
-)
-print(f"appended {result.object_name} object; etag: {result.etag}")
+) as stream:
+ result = client.append_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ stream=stream,
+ length=148611164,
+ chunk_size=5*1024*1024,
+ )
+ print(f"appended {result.object_name} object; etag: {result.etag}")
# Append unknown sized data.
-data = urlopen(
+with urlopen(
"https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.14.3.tar.xz",
-)
-result = client.append_object(
- "my-bucket", "my-object", data, 149426584, 5*1024*1024,
-)
-print(f"appended {result.object_name} object; etag: {result.etag}")
+) as stream:
+ result = client.append_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ stream=stream,
+ chunk_size=5*1024*1024,
+ )
+ print(f"appended {result.object_name} object; etag: {result.etag}")
diff --git a/examples/bucket_exists.py b/examples/bucket_exists.py
index 13a7df88..eb3bd269 100644
--- a/examples/bucket_exists.py
+++ b/examples/bucket_exists.py
@@ -17,12 +17,12 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-if client.bucket_exists("my-bucket"):
+if client.bucket_exists(bucket_name="my-bucket"):
print("my-bucket exists")
else:
print("my-bucket does not exist")
diff --git a/examples/compose_object.py b/examples/compose_object.py
index 265f2e6c..9d092a44 100644
--- a/examples/compose_object.py
+++ b/examples/compose_object.py
@@ -19,33 +19,48 @@
from minio.sse import SseS3
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
sources = [
- ComposeSource("my-job-bucket", "my-object-part-one"),
- ComposeSource("my-job-bucket", "my-object-part-two"),
- ComposeSource("my-job-bucket", "my-object-part-three"),
+ ComposeSource(
+ bucket_name="my-job-bucket", object_name="my-object-part-one",
+ ),
+ ComposeSource(
+ bucket_name="my-job-bucket", object_name="my-object-part-two",
+ ),
+ ComposeSource(
+ bucket_name="my-job-bucket", object_name="my-object-part-three",
+ ),
]
# Create my-bucket/my-object by combining source object
# list.
-result = client.compose_object("my-bucket", "my-object", sources)
+result = client.compose_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ sources=sources,
+)
print(result.object_name, result.version_id)
# Create my-bucket/my-object with user metadata by combining
# source object list.
result = client.compose_object(
- "my-bucket",
- "my-object",
- sources,
- metadata={"test_meta_key": "test_meta_value"},
+ bucket_name="my-bucket",
+ object_name="my-object",
+ sources=sources,
+ user_metadata={"test_meta_key": "test_meta_value"},
)
print(result.object_name, result.version_id)
# Create my-bucket/my-object with user metadata and
# server-side encryption by combining source object list.
-client.compose_object("my-bucket", "my-object", sources, sse=SseS3())
+client.compose_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ sources=sources,
+ sse=SseS3(),
+)
print(result.object_name, result.version_id)
diff --git a/examples/copy_object.py b/examples/copy_object.py
index 06f6d136..9314743b 100644
--- a/examples/copy_object.py
+++ b/examples/copy_object.py
@@ -20,38 +20,41 @@
from minio.commonconfig import REPLACE, CopySource
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# copy an object from a bucket to another.
result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource("my-sourcebucket", "my-sourceobject"),
+ bucket_name="my-bucket",
+ object_name="my-object",
+ source=CopySource(
+ bucket_name="my-sourcebucket", object_name="my-sourceobject",
+ ),
)
print(result.object_name, result.version_id)
# copy an object with condition.
result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource(
- "my-sourcebucket",
- "my-sourceobject",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ source=CopySource(
+ bucket_name="my-sourcebucket",
+ object_name="my-sourceobject",
modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc),
),
)
print(result.object_name, result.version_id)
# copy an object from a bucket with replacing metadata.
-metadata = {"test_meta_key": "test_meta_value"}
result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource("my-sourcebucket", "my-sourceobject"),
- metadata=metadata,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ source=CopySource(
+ bucket_name="my-sourcebucket", object_name="my-sourceobject",
+ ),
+ user_metadata={"test_meta_key": "test_meta_value"},
metadata_directive=REPLACE,
)
print(result.object_name, result.version_id)
diff --git a/examples/delete_bucket_encryption.py b/examples/delete_bucket_encryption.py
index 324ea7c9..c616d4de 100644
--- a/examples/delete_bucket_encryption.py
+++ b/examples/delete_bucket_encryption.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_bucket_encryption("my-bucket")
+client.delete_bucket_encryption(bucket_name="my-bucket")
diff --git a/examples/delete_bucket_lifecycle.py b/examples/delete_bucket_lifecycle.py
index 82e3d28a..972ec411 100644
--- a/examples/delete_bucket_lifecycle.py
+++ b/examples/delete_bucket_lifecycle.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_bucket_lifecycle("my-bucket")
+client.delete_bucket_lifecycle(bucket_name="my-bucket")
diff --git a/examples/delete_bucket_notification.py b/examples/delete_bucket_notification.py
index b1e3f5ed..a8d2ebcc 100644
--- a/examples/delete_bucket_notification.py
+++ b/examples/delete_bucket_notification.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_bucket_notification("my-bucket")
+client.delete_bucket_notification(bucket_name="my-bucket")
diff --git a/examples/delete_bucket_policy.py b/examples/delete_bucket_policy.py
index 0c985383..c0a126ce 100644
--- a/examples/delete_bucket_policy.py
+++ b/examples/delete_bucket_policy.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_bucket_policy("my-bucket")
+client.delete_bucket_policy(bucket_name="my-bucket")
diff --git a/examples/delete_bucket_replication.py b/examples/delete_bucket_replication.py
index a7ebaeb2..2a3573fe 100644
--- a/examples/delete_bucket_replication.py
+++ b/examples/delete_bucket_replication.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_bucket_replication("my-bucket")
+client.delete_bucket_replication(bucket_name="my-bucket")
diff --git a/examples/delete_bucket_tags.py b/examples/delete_bucket_tags.py
index ab4dbbcd..26414764 100644
--- a/examples/delete_bucket_tags.py
+++ b/examples/delete_bucket_tags.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_bucket_tags("my-bucket")
+client.delete_bucket_tags(bucket_name="my-bucket")
diff --git a/examples/delete_object_lock_config.py b/examples/delete_object_lock_config.py
index 7bb333b8..6274c7c6 100644
--- a/examples/delete_object_lock_config.py
+++ b/examples/delete_object_lock_config.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_object_lock_config("my-bucket")
+client.delete_object_lock_config(bucket_name="my-bucket")
diff --git a/examples/delete_object_tags.py b/examples/delete_object_tags.py
index 66a22edf..6f92aefb 100644
--- a/examples/delete_object_tags.py
+++ b/examples/delete_object_tags.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.delete_object_tags("my-bucket", "my-object")
+client.delete_object_tags(bucket_name="my-bucket", object_name="my-object")
diff --git a/examples/disable_object_legal_hold.py b/examples/disable_object_legal_hold.py
index ea07c452..92060fcd 100644
--- a/examples/disable_object_legal_hold.py
+++ b/examples/disable_object_legal_hold.py
@@ -17,9 +17,11 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.disable_object_legal_hold("my-bucket", "my-object")
+client.disable_object_legal_hold(
+ bucket_name="my-bucket", object_name="my-object",
+)
diff --git a/examples/enable_object_legal_hold.py b/examples/enable_object_legal_hold.py
index d78706bd..ef29cafb 100644
--- a/examples/enable_object_legal_hold.py
+++ b/examples/enable_object_legal_hold.py
@@ -17,9 +17,11 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.enable_object_legal_hold("my-bucket", "my-object")
+client.enable_object_legal_hold(
+ bucket_name="my-bucket", object_name="my-object",
+)
diff --git a/examples/fget_object.py b/examples/fget_object.py
index eb157fd5..3245bac3 100644
--- a/examples/fget_object.py
+++ b/examples/fget_object.py
@@ -18,22 +18,30 @@
from minio.sse import SseCustomerKey
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Download data of an object.
-client.fget_object("my-bucket", "my-object", "my-filename")
+client.fget_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
+)
# Download data of an object of version-ID.
client.fget_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Download data of an SSE-C encrypted object.
client.fget_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
diff --git a/examples/fput_object.py b/examples/fput_object.py
index 8d79fd47..cfe6d36f 100644
--- a/examples/fput_object.py
+++ b/examples/fput_object.py
@@ -23,74 +23,80 @@
from minio.sse import SseCustomerKey, SseKMS, SseS3
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Upload data.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with content-type.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
content_type="application/csv",
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with metadata.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
- metadata={"My-Project": "one"},
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
+ user_metadata={"My-Project": "one"},
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with customer key type of server-side encryption.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with KMS type of server-side encryption.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with S3 type of server-side encryption.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
sse=SseS3(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with tags, retention and legal-hold.
@@ -100,24 +106,26 @@
tags = Tags(for_object=True)
tags["User"] = "jsmith"
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
tags=tags,
retention=Retention(GOVERNANCE, date),
legal_hold=True,
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with progress bar.
result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
+ bucket_name="my-bucket",
+ object_name="my-object",
+ file_path="my-filename",
progress=Progress(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
diff --git a/examples/get_bucket_encryption.py b/examples/get_bucket_encryption.py
index 8dd8efde..997863b4 100644
--- a/examples/get_bucket_encryption.py
+++ b/examples/get_bucket_encryption.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_bucket_encryption("my-bucket")
+config = client.get_bucket_encryption(bucket_name="my-bucket")
diff --git a/examples/get_bucket_lifecycle.py b/examples/get_bucket_lifecycle.py
index 1cb89d9f..cc9e3381 100644
--- a/examples/get_bucket_lifecycle.py
+++ b/examples/get_bucket_lifecycle.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_bucket_lifecycle("my-bucket")
+config = client.get_bucket_lifecycle(bucket_name="my-bucket")
diff --git a/examples/get_bucket_notification.py b/examples/get_bucket_notification.py
index ddf2a8af..226a8159 100644
--- a/examples/get_bucket_notification.py
+++ b/examples/get_bucket_notification.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_bucket_notification("my-bucket")
+config = client.get_bucket_notification(bucket_name="my-bucket")
diff --git a/examples/get_bucket_policy.py b/examples/get_bucket_policy.py
index 67b95e17..378f9d41 100644
--- a/examples/get_bucket_policy.py
+++ b/examples/get_bucket_policy.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-policy = client.get_bucket_policy("my-bucket")
+policy = client.get_bucket_policy(bucket_name="my-bucket")
diff --git a/examples/get_bucket_replication.py b/examples/get_bucket_replication.py
index 0c29597d..ec2dc373 100644
--- a/examples/get_bucket_replication.py
+++ b/examples/get_bucket_replication.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_bucket_replication("my-bucket")
+config = client.get_bucket_replication(bucket_name="my-bucket")
diff --git a/examples/get_bucket_tags.py b/examples/get_bucket_tags.py
index 95c7b1f9..89aebeb2 100644
--- a/examples/get_bucket_tags.py
+++ b/examples/get_bucket_tags.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-tags = client.get_bucket_tags("my-bucket")
+tags = client.get_bucket_tags(bucket_name="my-bucket")
diff --git a/examples/get_bucket_versioning.py b/examples/get_bucket_versioning.py
index 4ef41e2a..20e1e2a7 100644
--- a/examples/get_bucket_versioning.py
+++ b/examples/get_bucket_versioning.py
@@ -17,10 +17,10 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_bucket_versioning("my-bucket")
+config = client.get_bucket_versioning(bucket_name="my-bucket")
print(config.status)
diff --git a/examples/get_object.py b/examples/get_object.py
index aa534984..0a247a7b 100644
--- a/examples/get_object.py
+++ b/examples/get_object.py
@@ -18,7 +18,7 @@
from minio.sse import SseCustomerKey
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -26,7 +26,10 @@
# Get data of an object.
response = None
try:
- response = client.get_object("my-bucket", "my-object")
+ response = client.get_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ )
# Read data from response.
finally:
if response:
@@ -37,7 +40,8 @@
response = None
try:
response = client.get_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
# Read data from response.
@@ -50,7 +54,10 @@
response = None
try:
response = client.get_object(
- "my-bucket", "my-object", offset=512, length=1024,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ offset=512,
+ length=1024,
)
# Read data from response.
finally:
@@ -62,7 +69,8 @@
response = None
try:
response = client.get_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
# Read data from response.
diff --git a/examples/get_object_lock_config.py b/examples/get_object_lock_config.py
index 2255edc4..8a4493c4 100644
--- a/examples/get_object_lock_config.py
+++ b/examples/get_object_lock_config.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_object_lock_config("my-bucket")
+config = client.get_object_lock_config(bucket_name="my-bucket")
diff --git a/examples/get_object_retention.py b/examples/get_object_retention.py
index d060bffd..5a6d786c 100644
--- a/examples/get_object_retention.py
+++ b/examples/get_object_retention.py
@@ -17,9 +17,11 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = client.get_object_retention("my-bucket", "my-object")
+config = client.get_object_retention(
+ bucket_name="my-bucket", object_name="my-object",
+)
diff --git a/examples/get_object_tags.py b/examples/get_object_tags.py
index 3631acc8..47b030c9 100644
--- a/examples/get_object_tags.py
+++ b/examples/get_object_tags.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-tags = client.get_object_tags("my-bucket", "my-object")
+tags = client.get_object_tags(bucket_name="my-bucket", object_name="my-object")
diff --git a/examples/get_presigned_url.py b/examples/get_presigned_url.py
index 5532317b..545c300e 100644
--- a/examples/get_presigned_url.py
+++ b/examples/get_presigned_url.py
@@ -17,9 +17,10 @@
from datetime import timedelta
from minio import Minio
+from minio.helpers import HTTPQueryDict
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -27,9 +28,9 @@
# Get presigned URL string to delete 'my-object' in
# 'my-bucket' with one day expiry.
url = client.get_presigned_url(
- "DELETE",
- "my-bucket",
- "my-object",
+ method="DELETE",
+ bucket_name="my-bucket",
+ object_name="my-object",
expires=timedelta(days=1),
)
print(url)
@@ -38,20 +39,22 @@
# 'my-bucket' with response-content-type as application/json
# and one day expiry.
url = client.get_presigned_url(
- "PUT",
- "my-bucket",
- "my-object",
+ method="PUT",
+ bucket_name="my-bucket",
+ object_name="my-object",
expires=timedelta(days=1),
- response_headers={"response-content-type": "application/json"},
+ extra_query_params=HTTPQueryDict(
+ {"response-content-type": "application/json"},
+ ),
)
print(url)
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.get_presigned_url(
- "GET",
- "my-bucket",
- "my-object",
+ method="GET",
+ bucket_name="my-bucket",
+ object_name="my-object",
expires=timedelta(hours=2),
)
print(url)
diff --git a/examples/is_object_legal_hold_enabled.py b/examples/is_object_legal_hold_enabled.py
index 82b66539..c22b3aa5 100644
--- a/examples/is_object_legal_hold_enabled.py
+++ b/examples/is_object_legal_hold_enabled.py
@@ -17,12 +17,14 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-if client.is_object_legal_hold_enabled("my-bucket", "my-object"):
+if client.is_object_legal_hold_enabled(
+ bucket_name="my-bucket", object_name="my-object",
+):
print("legal hold is enabled on my-object")
else:
print("legal hold is not enabled on my-object")
diff --git a/examples/list_buckets.py b/examples/list_buckets.py
index 373b3555..88bfa99c 100644
--- a/examples/list_buckets.py
+++ b/examples/list_buckets.py
@@ -17,7 +17,7 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
diff --git a/examples/list_objects.py b/examples/list_objects.py
index fcabe4b5..498597cc 100644
--- a/examples/list_objects.py
+++ b/examples/list_objects.py
@@ -17,30 +17,30 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# List objects information.
-objects = client.list_objects("my-bucket")
+objects = client.list_objects(bucket_name="my-bucket")
for obj in objects:
print(obj)
# List objects information whose names starts with "my/prefix/".
-objects = client.list_objects("my-bucket", prefix="my/prefix/")
+objects = client.list_objects(bucket_name="my-bucket", prefix="my/prefix/")
for obj in objects:
print(obj)
# List objects information recursively.
-objects = client.list_objects("my-bucket", recursive=True)
+objects = client.list_objects(bucket_name="my-bucket", recursive=True)
for obj in objects:
print(obj)
# List objects information recursively whose names starts with
# "my/prefix/".
objects = client.list_objects(
- "my-bucket", prefix="my/prefix/", recursive=True,
+ bucket_name="my-bucket", prefix="my/prefix/", recursive=True,
)
for obj in objects:
print(obj)
@@ -48,7 +48,7 @@
# List objects information recursively after object name
# "my/prefix/world/1".
objects = client.list_objects(
- "my-bucket", recursive=True, start_after="my/prefix/world/1",
+ bucket_name="my-bucket", recursive=True, start_after="my/prefix/world/1",
)
for obj in objects:
print(obj)
diff --git a/examples/listen_bucket_notification.py b/examples/listen_bucket_notification.py
index 28cb8a9a..f48b2e2d 100644
--- a/examples/listen_bucket_notification.py
+++ b/examples/listen_bucket_notification.py
@@ -17,13 +17,13 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
with client.listen_bucket_notification(
- "my-bucket",
+ bucket_name="my-bucket",
prefix="my-prefix/",
events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
) as events:
diff --git a/examples/make_bucket.py b/examples/make_bucket.py
index 3e7720e5..63c42677 100644
--- a/examples/make_bucket.py
+++ b/examples/make_bucket.py
@@ -17,16 +17,18 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Create bucket.
-client.make_bucket("my-bucket")
+client.make_bucket(bucket_name="my-bucket")
# Create bucket on specific region.
-client.make_bucket("my-bucket", "us-west-1")
+client.make_bucket(bucket_name="my-bucket", location="us-west-1")
# Create bucket with object-lock feature on specific region.
-client.make_bucket("my-bucket", "eu-west-2", object_lock=True)
+client.make_bucket(
+ bucket_name="my-bucket", location="eu-west-2", object_lock=True,
+)
diff --git a/examples/minio_with_assume_role_provider.py b/examples/minio_with_assume_role_provider.py
index ac352e22..9c539f5f 100644
--- a/examples/minio_with_assume_role_provider.py
+++ b/examples/minio_with_assume_role_provider.py
@@ -43,9 +43,9 @@
region = "REGION"
provider = AssumeRoleProvider(
- sts_endpoint,
- access_key,
- secret_key,
+ sts_endpoint=sts_endpoint,
+ access_key=access_key,
+ secret_key=secret_key,
policy=policy,
region=region,
role_arn=role_arn,
@@ -53,8 +53,8 @@
external_id=external_id,
)
-client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider)
+client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_aws_config_provider.py b/examples/minio_with_aws_config_provider.py
index ac134699..2315e098 100644
--- a/examples/minio_with_aws_config_provider.py
+++ b/examples/minio_with_aws_config_provider.py
@@ -18,8 +18,8 @@
from minio import Minio
from minio.credentials import AWSConfigProvider
-client = Minio("s3.amazonaws.com", credentials=AWSConfigProvider())
+client = Minio(endpoint="s3.amazonaws.com", credentials=AWSConfigProvider())
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_certificate_identity_provider.py b/examples/minio_with_certificate_identity_provider.py
index b00f2ce3..a9229f74 100644
--- a/examples/minio_with_certificate_identity_provider.py
+++ b/examples/minio_with_certificate_identity_provider.py
@@ -28,11 +28,13 @@
key_file = "/path/to/client.key"
provider = CertificateIdentityProvider(
- sts_endpoint, cert_file=cert_file, key_file=key_file,
+ sts_endpoint=sts_endpoint,
+ cert_file=cert_file,
+ key_file=key_file,
)
-client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider)
+client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_chained_provider.py b/examples/minio_with_chained_provider.py
index d68b04b6..3886693f 100644
--- a/examples/minio_with_chained_provider.py
+++ b/examples/minio_with_chained_provider.py
@@ -23,7 +23,7 @@
EnvAWSProvider, IamAwsProvider)
client = Minio(
- "s3.amazonaws.com",
+ endpoint="s3.amazonaws.com",
credentials=ChainedProvider(
[
IamAwsProvider(),
@@ -34,5 +34,5 @@
)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_client_grants_provider.py b/examples/minio_with_client_grants_provider.py
index 958126e6..5045e666 100644
--- a/examples/minio_with_client_grants_provider.py
+++ b/examples/minio_with_client_grants_provider.py
@@ -52,11 +52,12 @@ def get_jwt(client_id, client_secret, idp_endpoint):
sts_endpoint = "http://STS-HOST:STS-PORT/"
provider = ClientGrantsProvider(
- lambda: get_jwt(client_id, client_secret, idp_endpoint), sts_endpoint,
+ jwt_provider_func=lambda: get_jwt(client_id, client_secret, idp_endpoint),
+ sts_endpoint=sts_endpoint,
)
-client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider)
+client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_env_aws_provider.py b/examples/minio_with_env_aws_provider.py
index 69898149..32dfe32b 100644
--- a/examples/minio_with_env_aws_provider.py
+++ b/examples/minio_with_env_aws_provider.py
@@ -18,8 +18,8 @@
from minio import Minio
from minio.credentials import EnvAWSProvider
-client = Minio("s3.amazonaws.com", credentials=EnvAWSProvider())
+client = Minio(endpoint="s3.amazonaws.com", credentials=EnvAWSProvider())
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_env_minio_provider.py b/examples/minio_with_env_minio_provider.py
index f0f985b5..a31d67d4 100644
--- a/examples/minio_with_env_minio_provider.py
+++ b/examples/minio_with_env_minio_provider.py
@@ -18,8 +18,11 @@
from minio import Minio
from minio.credentials import EnvMinioProvider
-client = Minio("MINIO-HOST:MINIO-PORT", credentials=EnvMinioProvider())
+client = Minio(
+ endpoint="MINIO-HOST:MINIO-PORT",
+ credentials=EnvMinioProvider(),
+)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_iam_aws_provider.py b/examples/minio_with_iam_aws_provider.py
index 5c9e5713..6b745364 100644
--- a/examples/minio_with_iam_aws_provider.py
+++ b/examples/minio_with_iam_aws_provider.py
@@ -18,8 +18,8 @@
from minio import Minio
from minio.credentials import IamAwsProvider
-client = Minio("s3.amazonaws.com", credentials=IamAwsProvider())
+client = Minio(endpoint="s3.amazonaws.com", credentials=IamAwsProvider())
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_ldap_identity_provider.py b/examples/minio_with_ldap_identity_provider.py
index 55b8468a..1ccdb82e 100644
--- a/examples/minio_with_ldap_identity_provider.py
+++ b/examples/minio_with_ldap_identity_provider.py
@@ -27,10 +27,14 @@
# LDAP password.
ldap_password = "LDAP-PASSWORD"
-provider = LdapIdentityProvider(sts_endpoint, ldap_username, ldap_password)
+provider = LdapIdentityProvider(
+ sts_endpoint=sts_endpoint,
+ ldap_username=ldap_username,
+ ldap_password=ldap_password,
+)
-client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider)
+client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_minio_client_config_provider.py b/examples/minio_with_minio_client_config_provider.py
index 283c7e17..9ae5d9f1 100644
--- a/examples/minio_with_minio_client_config_provider.py
+++ b/examples/minio_with_minio_client_config_provider.py
@@ -19,9 +19,9 @@
from minio.credentials import MinioClientConfigProvider
client = Minio(
- "MINIO-HOST:MINIO-PORT", credentials=MinioClientConfigProvider(),
+ endpoint="MINIO-HOST:MINIO-PORT", credentials=MinioClientConfigProvider(),
)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/minio_with_web_identity_provider.py b/examples/minio_with_web_identity_provider.py
index 0135f8ff..57d624b1 100644
--- a/examples/minio_with_web_identity_provider.py
+++ b/examples/minio_with_web_identity_provider.py
@@ -63,14 +63,16 @@ def get_jwt(client_id, client_secret, idp_client_id, idp_endpoint):
role_session_name = "ROLE-SESSION-NAME"
provider = WebIdentityProvider(
- lambda: get_jwt(client_id, client_secret, idp_client_id, idp_endpoint),
- sts_endpoint,
+ jwt_provider_func=lambda: get_jwt(
+ client_id, client_secret, idp_client_id, idp_endpoint,
+ ),
+ sts_endpoint=sts_endpoint,
role_arn=role_arn,
role_session_name=role_session_name,
)
-client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider)
+client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider)
# Get information of an object.
-stat = client.stat_object("my-bucket", "my-object")
+stat = client.stat_object(bucket_name="my-bucket", object_name="my-object")
print(stat)
diff --git a/examples/presigned_get_object.py b/examples/presigned_get_object.py
index fbf8a90e..7ce66e7c 100644
--- a/examples/presigned_get_object.py
+++ b/examples/presigned_get_object.py
@@ -19,19 +19,24 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with default expiry (i.e. 7 days).
-url = client.presigned_get_object("my-bucket", "my-object")
+url = client.presigned_get_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+)
print(url)
# Get presigned URL string to download 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.presigned_get_object(
- "my-bucket", "my-object", expires=timedelta(hours=2),
+ bucket_name="my-bucket",
+ object_name="my-object",
+ expires=timedelta(hours=2),
)
print(url)
diff --git a/examples/presigned_post_policy.py b/examples/presigned_post_policy.py
index b7c7c0b7..8decea86 100644
--- a/examples/presigned_post_policy.py
+++ b/examples/presigned_post_policy.py
@@ -20,24 +20,20 @@
from minio.datatypes import PostPolicy
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-policy = PostPolicy(
- "my-bucket", datetime.utcnow() + timedelta(days=10),
-)
+policy = PostPolicy("my-bucket", datetime.utcnow() + timedelta(days=10))
policy.add_starts_with_condition("key", "my/object/prefix/")
policy.add_content_length_range_condition(1*1024*1024, 10*1024*1024)
form_data = client.presigned_post_policy(policy)
+args = " ".join([f"-F {k}={v}" for k, v in form_data.items()])
curl_cmd = (
- "curl -X POST "
- "https://play.min.io/my-bucket "
- "{0} -F file=@ -F key="
-).format(
- " ".join(["-F {0}={1}".format(k, v) for k, v in form_data.items()]),
+ "curl -X POST https://play.min.io/my-bucket "
+ f"{args} -F file=@ -F key="
)
print(curl_cmd)
diff --git a/examples/presigned_put_object.py b/examples/presigned_put_object.py
index bcfdfb28..0ddd2717 100644
--- a/examples/presigned_put_object.py
+++ b/examples/presigned_put_object.py
@@ -19,19 +19,24 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Get presigned URL string to upload data to 'my-object' in
# 'my-bucket' with default expiry (i.e. 7 days).
-url = client.presigned_put_object("my-bucket", "my-object")
+url = client.presigned_put_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+)
print(url)
# Get presigned URL string to upload data to 'my-object' in
# 'my-bucket' with two hours expiry.
url = client.presigned_put_object(
- "my-bucket", "my-object", expires=timedelta(hours=2),
+ bucket_name="my-bucket",
+ object_name="my-object",
+ expires=timedelta(hours=2),
)
print(url)
diff --git a/examples/progress.py b/examples/progress.py
index ab35a48e..f4551e44 100644
--- a/examples/progress.py
+++ b/examples/progress.py
@@ -69,6 +69,7 @@ def __init__(self, interval=1, stdout=sys.stdout):
self.display_queue = Queue()
self.initial_time = time.time()
self.stdout = stdout
+ self.prefix = None
self.start()
def set_meta(self, total_length, object_name):
@@ -89,9 +90,9 @@ def run(self):
# display every interval secs
task = self.display_queue.get(timeout=self.interval)
except Empty:
- elapsed_time = time.time() - self.initial_time
- if elapsed_time > displayed_time:
- displayed_time = elapsed_time
+ displayed_time = max(
+ displayed_time, time.time() - self.initial_time,
+ )
self.print_status(current_size=self.current_size,
total_length=self.total_length,
displayed_time=displayed_time,
@@ -117,8 +118,8 @@ def update(self, size):
bytes.
"""
if not isinstance(size, int):
- raise ValueError('{} type can not be displayed. '
- 'Please change it to Int.'.format(type(size)))
+ raise ValueError(f"{type(size)} type can not be displayed. "
+ "Please change it to Int.")
self.current_size += size
self.display_queue.put((self.current_size, self.total_length))
@@ -147,8 +148,7 @@ def seconds_to_time(seconds):
hours, m = divmod(minutes, 60)
if hours:
return _HOURS_OF_ELAPSED % (hours, m, seconds)
- else:
- return _MINUTES_OF_ELAPSED % (m, seconds)
+ return _MINUTES_OF_ELAPSED % (m, seconds)
def format_string(current_size, total_length, elapsed_time):
diff --git a/examples/prompt_object.py b/examples/prompt_object.py
new file mode 100644
index 00000000..0ffd723d
--- /dev/null
+++ b/examples/prompt_object.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
+# (C) 2025 MinIO, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from minio import Minio
+
+client = Minio(
+ endpoint="play.min.io",
+ access_key="Q3AM3UQ867SPQQA43P2F",
+ secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+)
+
+response = None
+try:
+ response = client.prompt_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ prompt="Describe the object for me",
+ )
+ # Read data from response
+finally:
+ if response:
+ response.close()
+ response.release_conn()
diff --git a/examples/put_object.py b/examples/put_object.py
index 92395dc2..afab26f4 100644
--- a/examples/put_object.py
+++ b/examples/put_object.py
@@ -25,87 +25,102 @@
from minio.sse import SseCustomerKey, SseKMS, SseS3
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Upload data.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload unknown sized data.
-data = urlopen(
+with urlopen(
"https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.81.tar.xz",
-)
-result = client.put_object(
- "my-bucket", "my-object", data, length=-1, part_size=10*1024*1024,
-)
-print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
-)
+) as data:
+ result = client.put_object(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=data,
+ length=-1,
+ part_size=10*1024*1024,
+ )
+ print(
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
+ )
# Upload data with content-type.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
content_type="application/csv",
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with metadata.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
- metadata={"My-Project": "one"},
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
+ user_metadata={"My-Project": "one"},
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with customer key type of server-side encryption.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with KMS type of server-side encryption.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with S3 type of server-side encryption.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
sse=SseS3(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with tags, retention and legal-hold.
@@ -115,24 +130,28 @@
tags = Tags(for_object=True)
tags["User"] = "jsmith"
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
tags=tags,
retention=Retention(GOVERNANCE, date),
legal_hold=True,
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
# Upload data with progress bar.
result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+ bucket_name="my-bucket",
+ object_name="my-object",
+ data=io.BytesIO(b"hello"),
+ length=5,
progress=Progress(),
)
print(
- "created {0} object; etag: {1}, version-id: {2}".format(
- result.object_name, result.etag, result.version_id,
- ),
+ f"created {result.object_name} object; etag: {result.etag}, "
+ f"version-id: {result.version_id}",
)
diff --git a/examples/remove_bucket.py b/examples/remove_bucket.py
index a3c60795..bbe4fd74 100644
--- a/examples/remove_bucket.py
+++ b/examples/remove_bucket.py
@@ -17,9 +17,9 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.remove_bucket("my-bucket")
+client.remove_bucket(bucket_name="my-bucket")
diff --git a/examples/remove_object.py b/examples/remove_object.py
index 8828509f..aff25033 100644
--- a/examples/remove_object.py
+++ b/examples/remove_object.py
@@ -17,16 +17,17 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Remove object.
-client.remove_object("my-bucket", "my-object")
+client.remove_object(bucket_name="my-bucket", object_name="my-object")
# Remove version of an object.
client.remove_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
diff --git a/examples/remove_objects.py b/examples/remove_objects.py
index 1369132a..60d1b7f3 100644
--- a/examples/remove_objects.py
+++ b/examples/remove_objects.py
@@ -18,18 +18,21 @@
from minio.deleteobjects import DeleteObject
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Remove list of objects.
errors = client.remove_objects(
- "my-bucket",
- [
- DeleteObject("my-object1"),
- DeleteObject("my-object2"),
- DeleteObject("my-object3", "13f88b18-8dcd-4c83-88f2-8631fdb6250c"),
+ bucket_name="my-bucket",
+ delete_object_list=[
+ DeleteObject(name="my-object1"),
+ DeleteObject(name="my-object2"),
+ DeleteObject(
+ name="my-object3",
+ version_id="13f88b18-8dcd-4c83-88f2-8631fdb6250c",
+ ),
],
)
for error in errors:
@@ -38,8 +41,15 @@
# Remove a prefix recursively.
delete_object_list = map(
lambda x: DeleteObject(x.object_name),
- client.list_objects("my-bucket", "my/prefix/", recursive=True),
+ client.list_objects(
+ bucket_name="my-bucket",
+ prefix="my/prefix/",
+ recursive=True,
+ ),
+)
+errors = client.remove_objects(
+ bucket_name="my-bucket",
+ delete_object_list=delete_object_list,
)
-errors = client.remove_objects("my-bucket", delete_object_list)
for error in errors:
print("error occurred when deleting object", error)
diff --git a/examples/select_object_content.py b/examples/select_object_content.py
index 59f5a4bc..3e6fff75 100644
--- a/examples/select_object_content.py
+++ b/examples/select_object_content.py
@@ -20,18 +20,18 @@
SelectRequest)
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
with client.select_object_content(
- "my-bucket",
- "my-object.csv",
- SelectRequest(
- "select * from S3Object",
- CSVInputSerialization(),
- CSVOutputSerialization(),
+ bucket_name="my-bucket",
+ object_name="my-object.csv",
+ request=SelectRequest(
+ expression="select * from S3Object",
+ input_serialization=CSVInputSerialization(),
+ output_serialization=CSVOutputSerialization(),
request_progress=True,
),
) as result:
diff --git a/examples/set_bucket_encryption.py b/examples/set_bucket_encryption.py
index fe840c5c..33652684 100644
--- a/examples/set_bucket_encryption.py
+++ b/examples/set_bucket_encryption.py
@@ -18,11 +18,11 @@
from minio.sseconfig import Rule, SSEConfig
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
client.set_bucket_encryption(
- "my-bucket", SSEConfig(Rule.new_sse_s3_rule()),
+ bucket_name="my-bucket", config=SSEConfig(Rule.new_sse_s3_rule()),
)
diff --git a/examples/set_bucket_lifecycle.py b/examples/set_bucket_lifecycle.py
index ec053b8c..4ea26e52 100644
--- a/examples/set_bucket_lifecycle.py
+++ b/examples/set_bucket_lifecycle.py
@@ -19,7 +19,7 @@
from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -27,17 +27,17 @@
config = LifecycleConfig(
[
Rule(
- ENABLED,
+ status=ENABLED,
rule_filter=Filter(prefix="documents/"),
rule_id="rule1",
transition=Transition(days=30, storage_class="GLACIER"),
),
Rule(
- ENABLED,
+ status=ENABLED,
rule_filter=Filter(prefix="logs/"),
rule_id="rule2",
expiration=Expiration(days=365),
),
],
)
-client.set_bucket_lifecycle("my-bucket", config)
+client.set_bucket_lifecycle(bucket_name="my-bucket", config=config)
diff --git a/examples/set_bucket_notification.py b/examples/set_bucket_notification.py
index 89c43f45..7d20ab75 100644
--- a/examples/set_bucket_notification.py
+++ b/examples/set_bucket_notification.py
@@ -19,7 +19,7 @@
QueueConfig)
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -27,11 +27,11 @@
config = NotificationConfig(
queue_config_list=[
QueueConfig(
- "QUEUE-ARN-OF-THIS-BUCKET",
- ["s3:ObjectCreated:*"],
+ queue="QUEUE-ARN-OF-THIS-BUCKET",
+ events=["s3:ObjectCreated:*"],
config_id="1",
prefix_filter_rule=PrefixFilterRule("abc"),
),
],
)
-client.set_bucket_notification("my-bucket", config)
+client.set_bucket_notification(bucket_name="my-bucket", config=config)
diff --git a/examples/set_bucket_policy.py b/examples/set_bucket_policy.py
index f1c938f6..bc25b73e 100644
--- a/examples/set_bucket_policy.py
+++ b/examples/set_bucket_policy.py
@@ -19,7 +19,7 @@
from minio import Minio
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -42,7 +42,7 @@
},
],
}
-client.set_bucket_policy("my-bucket", json.dumps(policy))
+client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy))
# Example anonymous read-write bucket policy.
policy = {
@@ -72,4 +72,4 @@
},
],
}
-client.set_bucket_policy("my-bucket", json.dumps(policy))
+client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy))
diff --git a/examples/set_bucket_replication.py b/examples/set_bucket_replication.py
index de5d38c6..76b6025c 100644
--- a/examples/set_bucket_replication.py
+++ b/examples/set_bucket_replication.py
@@ -20,19 +20,19 @@
ReplicationConfig, Rule)
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
config = ReplicationConfig(
- "REPLACE-WITH-ACTUAL-ROLE",
- [
+ role="REPLACE-WITH-ACTUAL-ROLE",
+ rules=[
Rule(
- Destination(
+ destination=Destination(
"REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN",
),
- ENABLED,
+ status=ENABLED,
delete_marker_replication=DeleteMarkerReplication(
DISABLED,
),
@@ -47,4 +47,4 @@
),
],
)
-client.set_bucket_replication("my-bucket", config)
+client.set_bucket_replication(bucket_name="my-bucket", config=config)
diff --git a/examples/set_bucket_tags.py b/examples/set_bucket_tags.py
index 0d759765..af513667 100644
--- a/examples/set_bucket_tags.py
+++ b/examples/set_bucket_tags.py
@@ -18,7 +18,7 @@
from minio.commonconfig import Tags
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -26,4 +26,4 @@
tags = Tags.new_bucket_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
-client.set_bucket_tags("my-bucket", tags)
+client.set_bucket_tags(bucket_name="my-bucket", tags=tags)
diff --git a/examples/set_bucket_versioning.py b/examples/set_bucket_versioning.py
index 418616a7..5f164e82 100644
--- a/examples/set_bucket_versioning.py
+++ b/examples/set_bucket_versioning.py
@@ -19,9 +19,12 @@
from minio.versioningconfig import VersioningConfig
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-client.set_bucket_versioning("my-bucket", VersioningConfig(ENABLED))
+client.set_bucket_versioning(
+ bucket_name="my-bucket",
+ config=VersioningConfig(ENABLED),
+)
diff --git a/examples/set_object_lock_config.py b/examples/set_object_lock_config.py
index 6a77a190..317bc4a4 100644
--- a/examples/set_object_lock_config.py
+++ b/examples/set_object_lock_config.py
@@ -19,10 +19,10 @@
from minio.objectlockconfig import DAYS, ObjectLockConfig
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
-config = ObjectLockConfig(GOVERNANCE, 15, DAYS)
-client.set_object_lock_config("my-bucket", config)
+config = ObjectLockConfig(mode=GOVERNANCE, duration=15, duration_unit=DAYS)
+client.set_object_lock_config(bucket_name="my-bucket", config=config)
diff --git a/examples/set_object_retention.py b/examples/set_object_retention.py
index a0afc8b8..189fad04 100644
--- a/examples/set_object_retention.py
+++ b/examples/set_object_retention.py
@@ -21,10 +21,14 @@
from minio.retention import Retention
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
config = Retention(GOVERNANCE, datetime.utcnow() + timedelta(days=10))
-client.set_object_retention("my-bucket", "my-object", config)
+client.set_object_retention(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ config=config,
+)
diff --git a/examples/set_object_tags.py b/examples/set_object_tags.py
index edcd5382..36e9dbe2 100644
--- a/examples/set_object_tags.py
+++ b/examples/set_object_tags.py
@@ -18,7 +18,7 @@
from minio.commonconfig import Tags
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
@@ -26,4 +26,8 @@
tags = Tags.new_object_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
-client.set_object_tags("my-bucket", "my-object", tags)
+client.set_object_tags(
+ bucket_name="my-bucket",
+ object_name="my-object",
+ tags=tags,
+)
diff --git a/examples/stat_object.py b/examples/stat_object.py
index 3a38ff02..707685cc 100644
--- a/examples/stat_object.py
+++ b/examples/stat_object.py
@@ -18,37 +18,27 @@
from minio.sse import SseCustomerKey
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
# Get object information.
-result = client.stat_object("my-bucket", "my-object")
-print(
- "last-modified: {0}, size: {1}".format(
- result.last_modified, result.size,
- ),
-)
+result = client.stat_object(bucket_name="my-bucket", object_name="my-object")
+print(f"last-modified: {result.last_modified}, size: {result.size}")
# Get object information of version-ID.
result = client.stat_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
-print(
- "last-modified: {0}, size: {1}".format(
- result.last_modified, result.size,
- ),
-)
+print(f"last-modified: {result.last_modified}, size: {result.size}")
# Get SSE-C encrypted object information.
result = client.stat_object(
- "my-bucket", "my-object",
+ bucket_name="my-bucket",
+ object_name="my-object",
ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
-print(
- "last-modified: {0}, size: {1}".format(
- result.last_modified, result.size,
- ),
-)
+print(f"last-modified: {result.last_modified}, size: {result.size}")
diff --git a/examples/upload_snowball_objects.py b/examples/upload_snowball_objects.py
index c73aa0a0..6270a2d4 100644
--- a/examples/upload_snowball_objects.py
+++ b/examples/upload_snowball_objects.py
@@ -21,20 +21,27 @@
from minio.commonconfig import SnowballObject
client = Minio(
- "play.min.io",
+ endpoint="play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
client.upload_snowball_objects(
- "my-bucket",
- [
- SnowballObject("my-object1", filename="/etc/hostname"),
+ bucket_name="my-bucket",
+ objects=[
SnowballObject(
- "my-object2", data=io.BytesIO(b"hello"), length=5,
+ object_name="my-object1",
+ filename="/etc/hostname",
),
SnowballObject(
- "my-object3", data=io.BytesIO(b"world"), length=5,
+ object_name="my-object2",
+ data=io.BytesIO(b"hello"),
+ length=5,
+ ),
+ SnowballObject(
+ object_name="my-object3",
+ data=io.BytesIO(b"world"),
+ length=5,
mod_time=datetime.now(),
),
],
diff --git a/minio/api.py b/minio/api.py
index 285920ef..4c583140 100644
--- a/minio/api.py
+++ b/minio/api.py
@@ -21,7 +21,6 @@
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-statements
# pylint: disable=too-many-locals
-# pylint: disable=too-many-positional-arguments
"""
Simple Storage Service (aka S3) client to perform bucket and object operations.
@@ -29,6 +28,7 @@
from __future__ import absolute_import, annotations
+import io
import itertools
import json
import os
@@ -37,9 +37,8 @@
from datetime import datetime, timedelta
from io import BytesIO
from random import random
-from typing import (Any, BinaryIO, Iterator, Optional, TextIO, Tuple, Union,
- cast)
-from urllib.parse import urlunsplit
+from typing import Any, BinaryIO, Iterator, Optional, TextIO, Union, cast
+from urllib.parse import quote, urlencode, urlunsplit
from xml.etree import ElementTree as ET
import certifi
@@ -55,9 +54,13 @@
from urllib3.util import Timeout
from . import time
+from .checksum import (MD5, SHA256, UNSIGNED_PAYLOAD, ZERO_MD5_HASH,
+ ZERO_SHA256_HASH, Algorithm, base64_string,
+ base64_string_to_sum, hex_string, make_headers,
+ new_hashers)
from .commonconfig import (COPY, REPLACE, ComposeSource, CopySource,
SnowballObject, Tags)
-from .credentials import Credentials, StaticProvider
+from .credentials import StaticProvider
from .credentials.providers import Provider
from .datatypes import (Bucket, CompleteMultipartUploadResult, EventIterable,
ListAllMyBucketsResult, ListMultipartUploadsResult,
@@ -68,11 +71,11 @@
from .error import InvalidResponseError, S3Error, ServerError
from .helpers import (_DEFAULT_USER_AGENT, MAX_MULTIPART_COUNT,
MAX_MULTIPART_OBJECT_SIZE, MAX_PART_SIZE, MIN_PART_SIZE,
- BaseURL, DictType, ObjectWriteResult, ProgressType,
- ThreadPool, check_bucket_name, check_object_name,
- check_sse, check_ssec, genheaders, get_part_info,
+ BaseURL, HTTPQueryDict, ObjectWriteResult, ProgressType,
+ RegionMap, ThreadPool, check_bucket_name,
+ check_object_name, check_sse, check_ssec, get_part_info,
headers_to_strings, is_valid_policy_type, makedirs,
- md5sum_hash, queryencode, read_part_data, sha256_hash)
+ normalize_headers, queryencode, read_part_data)
from .legalhold import LegalHold
from .lifecycleconfig import LifecycleConfig
from .notificationconfig import NotificationConfig
@@ -84,6 +87,7 @@
from .sse import Sse, SseCustomerKey
from .sseconfig import SSEConfig
from .tagging import Tagging
+from .time import to_http_header, to_iso8601utc
from .versioningconfig import VersioningConfig
from .xml import Element, SubElement, findtext, getbytes, marshal, unmarshal
@@ -92,42 +96,8 @@ class Minio:
"""
Simple Storage Service (aka S3) client to perform bucket and object
operations.
-
- :param endpoint: Hostname of a S3 service.
- :param access_key: Access key (aka user ID) of your account in S3 service.
- :param secret_key: Secret Key (aka password) of your account in S3 service.
- :param session_token: Session token of your account in S3 service.
- :param secure: Flag to indicate to use secure (TLS) connection to S3
- service or not.
- :param region: Region name of buckets in S3 service.
- :param http_client: Customized HTTP client.
- :param credentials: Credentials provider of your account in S3 service.
- :param cert_check: Flag to indicate to verify SSL certificate or not.
- :return: :class:`Minio ` object
-
- Example::
- # Create client with anonymous access.
- client = Minio("play.min.io")
-
- # Create client with access and secret key.
- client = Minio("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY")
-
- # Create client with access key and secret key with specific region.
- client = Minio(
- "play.minio.io:9000",
- access_key="Q3AM3UQ867SPQQA43P2F",
- secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
- region="my-region",
- )
-
- **NOTE on concurrent usage:** `Minio` object is thread safe when using
- the Python `threading` library. Specifically, it is **NOT** safe to share
- it between multiple processes, for example when using
- `multiprocessing.Pool`. The solution is simply to create a new `Minio`
- object in each process, and not share it between processes.
-
"""
- _region_map: dict[str, str]
+ _region_map: RegionMap
_base_url: BaseURL
_user_agent: str
_trace_stream: Optional[TextIO]
@@ -136,6 +106,7 @@ class Minio:
def __init__(
self,
+ *,
endpoint: str,
access_key: Optional[str] = None,
secret_key: Optional[str] = None,
@@ -146,13 +117,94 @@ def __init__(
credentials: Optional[Provider] = None,
cert_check: bool = True,
):
+ """
+ Initializes a new Minio client object.
+
+ Args:
+ endpoint (str):
+ Hostname of an S3 service.
+
+ access_key (Optional[str], default=None):
+ Access key (aka user ID) of your account in the S3 service.
+
+ secret_key (Optional[str], default=None):
+ Secret key (aka password) of your account in the S3 service.
+
+ session_token (Optional[str], default=None):
+ Session token of your account in the S3 service.
+
+ secure (bool, default=True):
+ Flag to indicate whether to use a secure (TLS) connection
+ to the S3 service.
+
+ region (Optional[str], default=None):
+ Region name of buckets in the S3 service.
+
+ http_client (Optional[urllib3.PoolManager], default=None):
+ Customized HTTP client.
+
+ credentials (Optional[Provider], default=None):
+ Credentials provider of your account in the S3 service.
+
+ cert_check (bool, default=True):
+ Flag to enable/disable server certificate validation
+ for HTTPS connections.
+
+ Notes:
+ The `Minio` object is thread-safe when used with the Python
+ `threading` library. However, it is **not** safe to share it
+ between multiple processes, for example when using
+ `multiprocessing.Pool`. To avoid issues, create a new `Minio`
+ object in each process instead of sharing it.
+
+ Example:
+ >>> from minio import Minio
+ >>>
+ >>> # Create client with anonymous access
+ >>> client = Minio(endpoint="play.min.io")
+ >>>
+ >>> # Create client with access and secret key
+ >>> client = Minio(
+ ... endpoint="s3.amazonaws.com",
+ ... access_key="ACCESS-KEY",
+ ... secret_key="SECRET-KEY",
+ ... )
+ >>>
+ >>> # Create client with specific region
+ >>> client = Minio(
+ ... endpoint="play.minio.io:9000",
+ ... access_key="Q3AM3UQ867SPQQA43P2F",
+ ... secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
+ ... region="my-region",
+ ... )
+ >>>
+ >>> # Create client with custom HTTP client using proxy
+ >>> import urllib3
+ >>> client = Minio(
+ ... endpoint="SERVER:PORT",
+ ... access_key="ACCESS_KEY",
+ ... secret_key="SECRET_KEY",
+ ... secure=True,
+ ... http_client=urllib3.ProxyManager(
+ ... "https://PROXYSERVER:PROXYPORT/",
+ ... timeout=urllib3.Timeout.DEFAULT_TIMEOUT,
+ ... cert_reqs="CERT_REQUIRED",
+ ... retries=urllib3.Retry(
+ ... total=5,
+ ... backoff_factor=0.2,
+ ... status_forcelist=[500, 502, 503, 504],
+ ... ),
+ ... ),
+ ... )
+ """
# Validate http client has correct base class.
if http_client and not isinstance(http_client, urllib3.PoolManager):
- raise ValueError(
- "HTTP client should be instance of `urllib3.PoolManager`"
+ raise TypeError(
+ "HTTP client should be urllib3.PoolManager like object, "
+ f"got {type(http_client).__name__}",
)
- self._region_map = {}
+ self._region_map = RegionMap()
self._base_url = BaseURL(
("https://" if secure else "http://") + endpoint,
region,
@@ -183,6 +235,67 @@ def __del__(self):
if hasattr(self, "_http"): # Only required for unit test run
self._http.clear()
+ @staticmethod
+ def _gen_read_headers(
+ *,
+ ssec: Optional[SseCustomerKey] = None,
+ offset: int = 0,
+ length: Optional[int] = None,
+ match_etag: Optional[str] = None,
+ not_match_etag: Optional[str] = None,
+ modified_since: Optional[datetime] = None,
+ unmodified_since: Optional[datetime] = None,
+ fetch_checksum: bool = False,
+ ) -> HTTPHeaderDict:
+ """Generates conditional headers for get/head object."""
+ headers = HTTPHeaderDict()
+ if ssec:
+ headers.extend(ssec.headers())
+ if offset or length:
+ end = (offset + length - 1) if length else ""
+ headers['Range'] = f"bytes={offset}-{end}"
+ if match_etag:
+ headers["if-match"] = match_etag
+ if not_match_etag:
+ headers["if-none-match"] = not_match_etag
+ if modified_since:
+ headers["if-modified-since"] = to_http_header(modified_since)
+ if unmodified_since:
+ headers["if-unmodified-since"] = to_http_header(unmodified_since)
+ if fetch_checksum:
+ headers["x-amz-checksum-mode"] = "ENABLED"
+ return headers
+
+ @staticmethod
+ def _gen_write_headers(
+ *,
+ headers: Optional[HTTPHeaderDict] = None,
+ user_metadata: Optional[HTTPHeaderDict] = None,
+ sse: Optional[Sse] = None,
+ tags: Optional[Tags] = None,
+ retention: Optional[Retention] = None,
+ legal_hold: bool = False,
+ ) -> HTTPHeaderDict:
+ """Generate headers for given parameters."""
+ headers = headers.copy() if headers else HTTPHeaderDict()
+ if user_metadata:
+ headers.extend(user_metadata)
+ headers = normalize_headers(headers)
+ if sse:
+ headers.extend(sse.headers())
+ if tags:
+ headers["x-amz-tagging"] = urlencode(
+ list(tags.items()), quote_via=quote,
+ )
+ if retention and retention.mode:
+ headers["x-amz-object-lock-mode"] = retention.mode
+ headers["x-amz-object-lock-retain-until-date"] = cast(
+ str, to_iso8601utc(retention.retain_until_date),
+ )
+ if legal_hold:
+ headers["x-amz-object-lock-legal-hold"] = "ON"
+ return headers
+
def _handle_redirect_response(
self,
method: str,
@@ -211,71 +324,76 @@ def _handle_redirect_response(
return code, message
- def _build_headers(
- self,
- host: str,
- headers: Optional[DictType] = None,
- body: Optional[bytes] = None,
- creds: Optional[Credentials] = None,
- ) -> tuple[DictType, datetime]:
- """Build headers with given parameters."""
- headers = headers or {}
- md5sum_added = headers.get("Content-MD5")
- headers["Host"] = host
- headers["User-Agent"] = self._user_agent
- sha256 = None
- md5sum = None
-
- if body:
- headers["Content-Length"] = str(len(body))
- if creds:
- if self._base_url.is_https:
- sha256 = "UNSIGNED-PAYLOAD"
- md5sum = None if md5sum_added else md5sum_hash(body)
- else:
- sha256 = sha256_hash(body)
- else:
- md5sum = None if md5sum_added else md5sum_hash(body)
- if md5sum:
- headers["Content-MD5"] = md5sum
- if sha256:
- headers["x-amz-content-sha256"] = sha256
- if creds and creds.session_token:
- headers["X-Amz-Security-Token"] = creds.session_token
- date = time.utcnow()
- headers["x-amz-date"] = time.to_amz_date(date)
- return headers, date
-
def _url_open(
self,
+ *,
method: str,
region: str,
bucket_name: Optional[str] = None,
object_name: Optional[str] = None,
body: Optional[bytes] = None,
- headers: Optional[DictType] = None,
- query_params: Optional[DictType] = None,
+ headers: Optional[HTTPHeaderDict] = None,
+ query_params: Optional[HTTPQueryDict] = None,
preload_content: bool = True,
no_body_trace: bool = False,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> BaseHTTPResponse:
"""Execute HTTP request."""
- creds = self._provider.retrieve() if self._provider else None
url = self._base_url.build(
method=method,
region=region,
bucket_name=bucket_name,
object_name=object_name,
query_params=query_params,
+ extra_query_params=extra_query_params,
)
- headers, date = self._build_headers(url.netloc, headers, body, creds)
- if creds:
+
+ headers = headers.copy() if headers else HTTPHeaderDict()
+ if extra_headers:
+ headers.extend(extra_headers)
+
+ headers["Host"] = url.netloc
+ headers["User-Agent"] = self._user_agent
+ content_sha256 = headers.get("x-amz-content-sha256")
+ content_md5 = headers.get("Content-MD5")
+ if method in ["PUT", "POST"]:
+ headers["Content-Length"] = str(len(body or b""))
+ if not headers.get("Content-Type"):
+ headers["Content-Type"] = "application/octet-stream"
+ if body is None:
+ content_sha256 = content_sha256 or ZERO_SHA256_HASH
+ content_md5 = content_md5 or ZERO_MD5_HASH
+ else:
+ if not content_sha256:
+ if self._base_url.is_https:
+ content_sha256 = UNSIGNED_PAYLOAD
+ else:
+ sha256_checksum = headers.get("x-amz-checksum-sha256")
+ content_sha256 = hex_string(
+ base64_string_to_sum(sha256_checksum) if sha256_checksum
+ else SHA256.hash(body),
+ )
+ if not content_md5 and content_sha256 == UNSIGNED_PAYLOAD:
+ content_md5 = base64_string(MD5.hash(body))
+ if not headers.get("x-amz-content-sha256"):
+ headers["x-amz-content-sha256"] = cast(str, content_sha256)
+ if not headers.get("Content-MD5") and content_md5:
+ headers["Content-MD5"] = content_md5
+ date = time.utcnow()
+ headers["x-amz-date"] = time.to_amz_date(date)
+
+ if self._provider is not None:
+ creds = self._provider.retrieve()
+ if creds.session_token:
+ headers["X-Amz-Security-Token"] = creds.session_token
headers = sign_v4_s3(
method=method,
url=url,
region=region,
headers=headers,
credentials=creds,
- content_sha256=cast(str, headers.get("x-amz-content-sha256")),
+ content_sha256=cast(str, content_sha256),
date=date,
)
@@ -295,19 +413,11 @@ def _url_open(
self._trace_stream.write("\n")
self._trace_stream.write("\n")
- http_headers = HTTPHeaderDict()
- for key, value in (headers or {}).items():
- if isinstance(value, (list, tuple)):
- for val in value:
- http_headers.add(key, val)
- else:
- http_headers.add(key, value)
-
response = self._http.urlopen(
method,
urlunsplit(url),
body=body,
- headers=http_headers,
+ headers=headers,
preload_content=preload_content,
)
@@ -422,28 +532,35 @@ def _url_open(
if response_error.code in ["NoSuchBucket", "RetryHead"]:
if bucket_name is not None:
- self._region_map.pop(bucket_name, None)
+ self._region_map.remove(bucket_name)
raise response_error
def _execute(
self,
+ *,
method: str,
bucket_name: Optional[str] = None,
object_name: Optional[str] = None,
body: Optional[bytes] = None,
- headers: Optional[DictType] = None,
- query_params: Optional[DictType] = None,
+ headers: Optional[HTTPHeaderDict] = None,
+ query_params: Optional[HTTPQueryDict] = None,
preload_content: bool = True,
no_body_trace: bool = False,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> BaseHTTPResponse:
"""Execute HTTP request."""
- region = self._get_region(bucket_name)
+ region = self._get_region(
+ bucket_name=bucket_name,
+ region=region,
+ )
try:
return self._url_open(
- method,
- region,
+ method=method,
+ region=region,
bucket_name=bucket_name,
object_name=object_name,
body=body,
@@ -451,6 +568,8 @@ def _execute(
query_params=query_params,
preload_content=preload_content,
no_body_trace=no_body_trace,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
except S3Error as exc:
if exc.code != "RetryHead":
@@ -459,8 +578,8 @@ def _execute(
# Retry only once on RetryHead error.
try:
return self._url_open(
- method,
- region,
+ method=method,
+ region=region,
bucket_name=bucket_name,
object_name=object_name,
body=body,
@@ -468,6 +587,8 @@ def _execute(
query_params=query_params,
preload_content=preload_content,
no_body_trace=no_body_trace,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
except S3Error as exc:
if exc.code != "RetryHead":
@@ -478,13 +599,31 @@ def _execute(
)
raise exc.copy(cast(str, code), cast(str, message))
- def _get_region(self, bucket_name: Optional[str] = None) -> str:
+ def _get_region(
+ self,
+ *,
+ bucket_name: Optional[str] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> str:
"""
Return region of given bucket either from region cache or set in
constructor.
"""
- if self._base_url.region:
+ if (
+ region is not None and self._base_url.region is not None and
+ region != self._base_url.region
+ ):
+ raise ValueError(
+ f"region must be {self._base_url.region}, but passed {region}",
+ )
+
+ if region is not None:
+ return region
+
+ if self._base_url.region is not None:
return self._base_url.region
if not bucket_name or not self._provider:
@@ -496,10 +635,12 @@ def _get_region(self, bucket_name: Optional[str] = None) -> str:
# Execute GetBucketLocation REST API to get region of the bucket.
response = self._url_open(
- "GET",
- "us-east-1",
+ method="GET",
+ region="us-east-1",
bucket_name=bucket_name,
- query_params={"location": ""},
+ query_params=HTTPQueryDict({"location": ""}),
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
element = ET.fromstring(response.data.decode())
@@ -510,18 +651,22 @@ def _get_region(self, bucket_name: Optional[str] = None) -> str:
else:
region = element.text
- self._region_map[bucket_name] = region
+ self._region_map.set(bucket_name, region)
return region
def set_app_info(self, app_name: str, app_version: str):
"""
Set your application name and version to user agent header.
- :param app_name: Application name.
- :param app_version: Application version.
+ Args:
+ app_name (str):
+ Application name.
- Example::
- client.set_app_info('my_app', '1.0.2')
+ app_version (str):
+ Application version.
+
+ Example:
+ >>> client.set_app_info("my_app", "1.0.2")
"""
if not (app_name and app_version):
raise ValueError("Application name/version cannot be empty.")
@@ -531,7 +676,12 @@ def trace_on(self, stream: TextIO):
"""
Enable http trace.
- :param stream: Stream for writing HTTP call tracing.
+ Args:
+ stream (TextIO):
+ Stream for writing HTTP call tracing.
+
+ Example:
+ >>> client.trace_on(sys.stdout)
"""
if not stream:
raise ValueError('Input stream for trace output is invalid.')
@@ -539,9 +689,7 @@ def trace_on(self, stream: TextIO):
self._trace_stream = stream
def trace_off(self):
- """
- Disable HTTP trace.
- """
+ """Disable HTTP trace."""
self._trace_stream = None
def enable_accelerate_endpoint(self):
@@ -570,71 +718,122 @@ def disable_virtual_style_endpoint(self):
def select_object_content(
self,
+ *,
bucket_name: str,
object_name: str,
request: SelectRequest,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> SelectObjectReader:
"""
Select content of an object by SQL expression.
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param request: :class:`SelectRequest ` object.
- :return: A reader contains requested records and progress information.
-
- Example::
- with client.select_object_content(
- "my-bucket",
- "my-object.csv",
- SelectRequest(
- "select * from S3Object",
- CSVInputSerialization(),
- CSVOutputSerialization(),
- request_progress=True,
- ),
- ) as result:
- for data in result.stream():
- print(data.decode())
- print(result.stats())
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ request (SelectRequest):
+ Select request.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ SelectObjectReader:
+ A reader object representing the results of the select
+ operation.
+
+ Example:
+ >>> with client.select_object_content(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object.csv",
+ ... request=SelectRequest(
+ ... expression="select * from S3Object",
+ ... input_serialization=CSVInputSerialization(),
+ ... output_serialization=CSVOutputSerialization(),
+ ... request_progress=True,
+ ... ),
+ ... ) as result:
+ ... for data in result.stream():
+ ... print(data.decode())
+ ... print(result.stats())
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
if not isinstance(request, SelectRequest):
raise ValueError("request must be SelectRequest type")
body = marshal(request)
+ headers = HTTPHeaderDict(
+ {"Content-MD5": base64_string(MD5.hash(body))},
+ )
response = self._execute(
- "POST",
+ method="POST",
bucket_name=bucket_name,
object_name=object_name,
body=body,
- headers={"Content-MD5": cast(str, md5sum_hash(body))},
- query_params={"select": "", "select-type": "2"},
+ headers=headers,
+ query_params=HTTPQueryDict({"select": "", "select-type": "2"}),
preload_content=False,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return SelectObjectReader(response)
def make_bucket(
self,
+ *,
bucket_name: str,
location: Optional[str] = None,
object_lock: bool = False,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
):
"""
- Create a bucket with region and object lock.
-
- :param bucket_name: Name of the bucket.
- :param location: Region in which the bucket will be created.
- :param object_lock: Flag to set object-lock feature.
-
- Examples::
- # Create bucket.
- client.make_bucket("my-bucket")
-
- # Create bucket on specific region.
- client.make_bucket("my-bucket", "us-west-1")
-
- # Create bucket with object-lock feature on specific region.
- client.make_bucket("my-bucket", "eu-west-2", object_lock=True)
+ Create a bucket with region and optional object lock.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ location (Optional[str], default=None):
+ Region in which the bucket is to be created.
+
+ object_lock (bool, default=False):
+ Flag to enable the object-lock feature.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> # Create bucket
+ >>> client.make_bucket(bucket_name="my-bucket")
+ >>>
+ >>> # Create bucket in a specific region
+ >>> client.make_bucket(
+ ... bucket_name="my-bucket",
+ ... location="eu-west-1",
+ ... )
+ >>>
+ >>> # Create bucket with object-lock in a region
+ >>> client.make_bucket(
+ ... bucket_name="my-bucket",
+ ... location="eu-west-2",
+ ... object_lock=True,
+ ... )
"""
check_bucket_name(bucket_name, True,
s3_check=self._base_url.is_aws_host)
@@ -647,228 +846,624 @@ def make_bucket(
f"but passed {location}"
)
location = self._base_url.region or location or "us-east-1"
- headers: Optional[DictType] = (
- {"x-amz-bucket-object-lock-enabled": "true"}
- if object_lock else None
- )
-
+ headers = HTTPHeaderDict()
+ if object_lock:
+ headers["x-amz-bucket-object-lock-enabled"] = "true"
body = None
if location != "us-east-1":
element = Element("CreateBucketConfiguration")
SubElement(element, "LocationConstraint", location)
body = getbytes(element)
self._url_open(
- "PUT",
- location,
+ method="PUT",
+ region=location,
bucket_name=bucket_name,
body=body,
headers=headers,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
+ self._region_map.set(bucket_name, location)
+
+ def _list_buckets(
+ self,
+ *,
+ bucket_region: Optional[str] = None,
+ max_buckets: int = 10000,
+ prefix: Optional[str] = None,
+ continuation_token: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> ListAllMyBucketsResult:
+ """Do ListBuckets S3 API."""
+ query_params = HTTPQueryDict()
+ query_params["max-buckets"] = str(
+ max_buckets if max_buckets > 0 else 10000,
+ )
+ if bucket_region is not None:
+ query_params["bucket-region"] = bucket_region
+ if prefix:
+ query_params["prefix"] = prefix
+ if continuation_token:
+ query_params["continuation-token"] = continuation_token
+
+ response = self._execute(
+ method="GET",
+ query_params=query_params,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- self._region_map[bucket_name] = location
+ return unmarshal(ListAllMyBucketsResult, response.data.decode())
- def list_buckets(self) -> list[Bucket]:
+ def list_buckets(
+ self,
+ *,
+ bucket_region: Optional[str] = None,
+ max_buckets: int = 10000,
+ prefix: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> Iterator[Bucket]:
"""
List information of all accessible buckets.
- :return: List of :class:`Bucket ` object.
+ Args:
+ bucket_region (Optional[str], default=None):
+ Fetch buckets from the specified region.
- Example::
- buckets = client.list_buckets()
- for bucket in buckets:
- print(bucket.name, bucket.creation_date)
- """
+ max_buckets (int, default=10000):
+ Maximum number of buckets to fetch.
+
+ prefix (Optional[str], default=None):
+ Return only buckets whose names start with this prefix.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
- response = self._execute("GET")
- result = unmarshal(ListAllMyBucketsResult, response.data.decode())
- return result.buckets
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
- def bucket_exists(self, bucket_name: str) -> bool:
+ Returns:
+ Iterator[Bucket]:
+ An iterator of :class:`minio.datatypes.Bucket` objects.
+
+ Example:
+ >>> buckets = client.list_buckets()
+ >>> for bucket in buckets:
+ ... print(bucket.name, bucket.creation_date)
+ """
+ continuation_token: Optional[str] = ""
+ while continuation_token is not None:
+ result = self._list_buckets(
+ bucket_region=bucket_region,
+ max_buckets=max_buckets,
+ prefix=prefix,
+ continuation_token=continuation_token,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
+ continuation_token = result.continuation_token
+ yield from result.buckets
+
+ def bucket_exists(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> bool:
"""
Check if a bucket exists.
- :param bucket_name: Name of the bucket.
- :return: True if the bucket exists.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- if client.bucket_exists("my-bucket"):
- print("my-bucket exists")
- else:
- print("my-bucket does not exist")
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ bool:
+ True if the bucket exists, False otherwise.
+
+ Example:
+ >>> if client.bucket_exists(bucket_name="my-bucket"):
+ ... print("my-bucket exists")
+ ... else:
+ ... print("my-bucket does not exist")
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
try:
- self._execute("HEAD", bucket_name)
+ self._execute(
+ method="HEAD",
+ bucket_name=bucket_name,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
return True
except S3Error as exc:
if exc.code != "NoSuchBucket":
raise
return False
- def remove_bucket(self, bucket_name: str):
+ def remove_bucket(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
"""
Remove an empty bucket.
- :param bucket_name: Name of the bucket.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
- Example::
- client.remove_bucket("my-bucket")
+ Example:
+ >>> client.remove_bucket(bucket_name="my-bucket")
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
- self._execute("DELETE", bucket_name)
- self._region_map.pop(bucket_name, None)
+ self._execute(
+ method="DELETE",
+ bucket_name=bucket_name,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
+ self._region_map.remove(bucket_name)
- def get_bucket_policy(self, bucket_name: str) -> str:
+ def get_bucket_policy(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> str:
"""
- Get bucket policy configuration of a bucket.
+ Get the bucket policy configuration of a bucket.
- :param bucket_name: Name of the bucket.
- :return: Bucket policy configuration as JSON string.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
- Example::
- policy = client.get_bucket_policy("my-bucket")
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ str:
+ Bucket policy configuration as a JSON string.
+
+ Example:
+ >>> policy = client.get_bucket_policy(bucket_name="my-bucket")
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
response = self._execute(
- "GET", bucket_name, query_params={"policy": ""},
+ method="GET",
+ bucket_name=bucket_name,
+ query_params=HTTPQueryDict({"policy": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return response.data.decode()
- def delete_bucket_policy(self, bucket_name: str):
+ def _execute_delete_bucket(
+ self,
+ *,
+ bucket_name: str,
+ query_params: HTTPQueryDict,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
+ """Execute DELETE API for a bucket sub-resource."""
+ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
+ self._execute(
+ method="DELETE",
+ bucket_name=bucket_name,
+ query_params=query_params,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
+
+ def delete_bucket_policy(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
"""
- Delete bucket policy configuration of a bucket.
+ Delete the bucket policy configuration of a bucket.
- :param bucket_name: Name of the bucket.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- client.delete_bucket_policy("my-bucket")
- """
- check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
- self._execute("DELETE", bucket_name, query_params={"policy": ""})
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
- def set_bucket_policy(self, bucket_name: str, policy: str | bytes):
- """
- Set bucket policy configuration to a bucket.
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
- :param bucket_name: Name of the bucket.
- :param policy: Bucket policy configuration as JSON string.
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> client.delete_bucket_policy(bucket_name="my-bucket")
+ """
+ self._execute_delete_bucket(
+ bucket_name=bucket_name,
+ query_params=HTTPQueryDict({"policy": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
- Example::
- client.set_bucket_policy("my-bucket", policy)
+ def set_bucket_policy(
+ self,
+ *,
+ bucket_name: str,
+ policy: str | bytes,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
+ """
+ Set the bucket policy configuration for a bucket.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ policy (str | bytes):
+ Bucket policy configuration as a JSON string.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> # Example anonymous read-only bucket policy
+ >>> policy = {
+ ... "Version": "2012-10-17",
+ ... "Statement": [
+ ... {
+ ... "Effect": "Allow",
+ ... "Principal": {"AWS": "*"},
+ ... "Action": ["s3:GetBucketLocation", "s3:ListBucket"],
+ ... "Resource": "arn:aws:s3:::my-bucket",
+ ... },
+ ... {
+ ... "Effect": "Allow",
+ ... "Principal": {"AWS": "*"},
+ ... "Action": "s3:GetObject",
+ ... "Resource": "arn:aws:s3:::my-bucket/*",
+ ... },
+ ... ],
+ ... }
+ >>> client.set_bucket_policy(
+ ... bucket_name="my-bucket",
+ ... policy=json.dumps(policy),
+ ... )
+ >>> # Example anonymous read-write bucket policy
+ >>> policy = {
+ ... "Version": "2012-10-17",
+ ... "Statement": [
+ ... {
+ ... "Effect": "Allow",
+ ... "Principal": {"AWS": "*"},
+ ... "Action": [
+ ... "s3:GetBucketLocation",
+ ... "s3:ListBucket",
+ ... "s3:ListBucketMultipartUploads",
+ ... ],
+ ... "Resource": "arn:aws:s3:::my-bucket",
+ ... },
+ ... {
+ ... "Effect": "Allow",
+ ... "Principal": {"AWS": "*"},
+ ... "Action": [
+ ... "s3:GetObject",
+ ... "s3:PutObject",
+ ... "s3:DeleteObject",
+ ... "s3:ListMultipartUploadParts",
+ ... "s3:AbortMultipartUpload",
+ ... ],
+ ... "Resource": "arn:aws:s3:::my-bucket/images/*",
+ ... },
+ ... ],
+ ... }
+ >>> client.set_bucket_policy(
+ ... bucket_name="my-bucket",
+ ... policy=json.dumps(policy),
+ ... )
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
is_valid_policy_type(policy)
+ body = policy if isinstance(policy, bytes) else policy.encode()
+ headers = HTTPHeaderDict(
+ {"Content-MD5": base64_string(MD5.hash(body))},
+ )
self._execute(
- "PUT",
- bucket_name,
- body=policy if isinstance(policy, bytes) else policy.encode(),
- headers={"Content-MD5": cast(str, md5sum_hash(policy))},
- query_params={"policy": ""},
+ method="PUT",
+ bucket_name=bucket_name,
+ body=body,
+ headers=headers,
+ query_params=HTTPQueryDict({"policy": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- def get_bucket_notification(self, bucket_name: str) -> NotificationConfig:
+ def get_bucket_notification(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> NotificationConfig:
"""
- Get notification configuration of a bucket.
+ Get the notification configuration of a bucket.
- :param bucket_name: Name of the bucket.
- :return: :class:`NotificationConfig ` object.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- config = client.get_bucket_notification("my-bucket")
- """
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ NotificationConfig:
+ The notification configuration of the bucket.
+
+ Example:
+ >>> config = client.get_bucket_notification(bucket_name="my-bucket")
+ """
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
response = self._execute(
- "GET", bucket_name, query_params={"notification": ""},
+ method="GET",
+ bucket_name=bucket_name,
+ query_params=HTTPQueryDict({"notification": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return unmarshal(NotificationConfig, response.data.decode())
def set_bucket_notification(
self,
+ *,
bucket_name: str,
config: NotificationConfig,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
):
"""
- Set notification configuration of a bucket.
-
- :param bucket_name: Name of the bucket.
- :param config: class:`NotificationConfig ` object.
-
- Example::
- config = NotificationConfig(
- queue_config_list=[
- QueueConfig(
- "QUEUE-ARN-OF-THIS-BUCKET",
- ["s3:ObjectCreated:*"],
- config_id="1",
- prefix_filter_rule=PrefixFilterRule("abc"),
- ),
- ],
- )
- client.set_bucket_notification("my-bucket", config)
+ Set the notification configuration of a bucket.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ config (NotificationConfig):
+ Notification configuration.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> config = NotificationConfig(
+ ... queue_config_list=[
+ ... QueueConfig(
+ ... queue="QUEUE-ARN-OF-THIS-BUCKET",
+ ... events=["s3:ObjectCreated:*"],
+ ... config_id="1",
+ ... prefix_filter_rule=PrefixFilterRule("abc"),
+ ... ),
+ ... ],
+ ... )
+ >>> client.set_bucket_notification(
+ ... bucket_name="my-bucket",
+ ... config=config,
+ ... )
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
if not isinstance(config, NotificationConfig):
raise ValueError("config must be NotificationConfig type")
body = marshal(config)
+ headers = HTTPHeaderDict(
+ {"Content-MD5": base64_string(MD5.hash(body))},
+ )
self._execute(
- "PUT",
- bucket_name,
+ method="PUT",
+ bucket_name=bucket_name,
body=body,
- headers={"Content-MD5": cast(str, md5sum_hash(body))},
- query_params={"notification": ""},
+ headers=headers,
+ query_params=HTTPQueryDict({"notification": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- def delete_bucket_notification(self, bucket_name: str):
+ def delete_bucket_notification(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
"""
- Delete notification configuration of a bucket. On success, S3 service
- stops notification of events previously set of the bucket.
+ Delete the notification configuration of a bucket.
- :param bucket_name: Name of the bucket.
+ On success, the S3 service stops sending event notifications
+ that were previously configured for the bucket.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- client.delete_bucket_notification("my-bucket")
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> client.delete_bucket_notification(bucket_name="my-bucket")
"""
- self.set_bucket_notification(bucket_name, NotificationConfig())
+ self.set_bucket_notification(
+ bucket_name=bucket_name,
+ config=NotificationConfig(),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
- def set_bucket_encryption(self, bucket_name: str, config: SSEConfig):
+ def set_bucket_encryption(
+ self,
+ *,
+ bucket_name: str,
+ config: SSEConfig,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
"""
- Set encryption configuration of a bucket.
+ Set the encryption configuration of a bucket.
- :param bucket_name: Name of the bucket.
- :param config: :class:`SSEConfig ` object.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- client.set_bucket_encryption(
- "my-bucket", SSEConfig(Rule.new_sse_s3_rule()),
- )
+ config (SSEConfig):
+ Server-side encryption configuration.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> client.set_bucket_encryption(
+ ... bucket_name="my-bucket",
+ ... config=SSEConfig(Rule.new_sse_s3_rule()),
+ ... )
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
if not isinstance(config, SSEConfig):
raise ValueError("config must be SSEConfig type")
body = marshal(config)
+ headers = HTTPHeaderDict(
+ {"Content-MD5": base64_string(MD5.hash(body))},
+ )
self._execute(
- "PUT",
- bucket_name,
+ method="PUT",
+ bucket_name=bucket_name,
body=body,
- headers={"Content-MD5": cast(str, md5sum_hash(body))},
- query_params={"encryption": ""},
+ headers=headers,
+ query_params=HTTPQueryDict({"encryption": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- def get_bucket_encryption(self, bucket_name: str) -> Optional[SSEConfig]:
+ def get_bucket_encryption(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> Optional[SSEConfig]:
"""
- Get encryption configuration of a bucket.
+ Get the encryption configuration of a bucket.
- :param bucket_name: Name of the bucket.
- :return: :class:`SSEConfig ` object.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- config = client.get_bucket_encryption("my-bucket")
- """
- check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
- try:
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ Optional[SSEConfig]:
+ The server-side encryption configuration of the bucket, or
+ None if no encryption configuration is set.
+
+ Example:
+ >>> config = client.get_bucket_encryption(bucket_name="my-bucket")
+ """
+ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
+ try:
response = self._execute(
- "GET",
- bucket_name,
- query_params={"encryption": ""},
+ method="GET",
+ bucket_name=bucket_name,
+ query_params=HTTPQueryDict({"encryption": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return unmarshal(SSEConfig, response.data.decode())
except S3Error as exc:
@@ -876,21 +1471,40 @@ def get_bucket_encryption(self, bucket_name: str) -> Optional[SSEConfig]:
raise
return None
- def delete_bucket_encryption(self, bucket_name: str):
+ def delete_bucket_encryption(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ):
"""
- Delete encryption configuration of a bucket.
+ Delete the encryption configuration of a bucket.
- :param bucket_name: Name of the bucket.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
- Example::
- client.delete_bucket_encryption("my-bucket")
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> client.delete_bucket_encryption(bucket_name="my-bucket")
"""
- check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
try:
- self._execute(
- "DELETE",
- bucket_name,
- query_params={"encryption": ""},
+ self._execute_delete_bucket(
+ bucket_name=bucket_name,
+ query_params=HTTPQueryDict({"encryption": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
except S3Error as exc:
if exc.code != "ServerSideEncryptionConfigurationNotFoundError":
@@ -898,31 +1512,61 @@ def delete_bucket_encryption(self, bucket_name: str):
def listen_bucket_notification(
self,
+ *,
bucket_name: str,
prefix: str = "",
suffix: str = "",
- events: tuple[str, ...] = ('s3:ObjectCreated:*',
- 's3:ObjectRemoved:*',
- 's3:ObjectAccessed:*'),
+ events: tuple[str, ...] = (
+ 's3:ObjectCreated:*',
+ 's3:ObjectRemoved:*',
+ 's3:ObjectAccessed:*',
+ ),
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> EventIterable:
"""
- Listen events of object prefix and suffix of a bucket. Caller should
- iterate returned iterator to read new events.
+ Listen for events on objects in a bucket matching prefix and/or suffix.
- :param bucket_name: Name of the bucket.
- :param prefix: Listen events of object starts with prefix.
- :param suffix: Listen events of object ends with suffix.
- :param events: Events to listen.
- :return: Iterator of event records as :dict:.
-
- Example::
- with client.listen_bucket_notification(
- "my-bucket",
- prefix="my-prefix/",
- events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
- ) as events:
- for event in events:
- print(event)
+ The caller should iterate over the returned iterator to read new events
+ as they occur.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ prefix (str, default=""):
+ Listen for events on objects whose names start with this prefix.
+
+ suffix (str, default=""):
+ Listen for events on objects whose names end with this suffix.
+
+ events (tuple[str, ...], default=("s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*", "s3:ObjectAccessed:*")):
+ Events to listen for.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ EventIterable:
+ An :class:`minio.datatypes.EventIterable` object that
+ yields event records.
+
+ Example:
+ >>> with client.listen_bucket_notification(
+ ... bucket_name="my-bucket",
+ ... prefix="my-prefix/",
+ ... events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
+ ... ) as events:
+ ... for event in events:
+ ... print(event)
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
if self._base_url.is_aws_host:
@@ -930,185 +1574,398 @@ def listen_bucket_notification(
"ListenBucketNotification API is not supported in Amazon S3",
)
+ query_params = HTTPQueryDict({
+ "prefix": prefix or "",
+ "suffix": suffix or "",
+ "events": events,
+ })
return EventIterable(
lambda: self._execute(
- "GET",
- bucket_name,
- query_params={
- "prefix": prefix or "",
- "suffix": suffix or "",
- "events": cast(Tuple[str], events),
- },
+ method="GET",
+ bucket_name=bucket_name,
+ query_params=query_params,
preload_content=False,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
),
)
def set_bucket_versioning(
self,
+ *,
bucket_name: str,
config: VersioningConfig,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
):
"""
- Set versioning configuration to a bucket.
+ Set the versioning configuration for a bucket.
- :param bucket_name: Name of the bucket.
- :param config: :class:`VersioningConfig `.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
- Example::
- client.set_bucket_versioning(
- "my-bucket", VersioningConfig(ENABLED),
- )
+ config (VersioningConfig):
+ Versioning configuration.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> client.set_bucket_versioning(
+ ... bucket_name="my-bucket",
+ ... config=VersioningConfig(ENABLED),
+ ... )
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
if not isinstance(config, VersioningConfig):
raise ValueError("config must be VersioningConfig type")
body = marshal(config)
+ headers = HTTPHeaderDict(
+ {"Content-MD5": base64_string(MD5.hash(body))},
+ )
self._execute(
- "PUT",
- bucket_name,
+ method="PUT",
+ bucket_name=bucket_name,
body=body,
- headers={"Content-MD5": cast(str, md5sum_hash(body))},
- query_params={"versioning": ""},
+ headers=headers,
+ query_params=HTTPQueryDict({"versioning": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- def get_bucket_versioning(self, bucket_name: str) -> VersioningConfig:
+ def get_bucket_versioning(
+ self,
+ *,
+ bucket_name: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> VersioningConfig:
"""
- Get versioning configuration of a bucket.
+ Get the versioning configuration of a bucket.
- :param bucket_name: Name of the bucket.
- :return: :class:`VersioningConfig `.
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
- Example::
- config = client.get_bucket_versioning("my-bucket")
- print(config.status)
+ Returns:
+ VersioningConfig:
+ The versioning configuration of the bucket.
+
+ Example:
+ >>> config = client.get_bucket_versioning(bucket_name="my-bucket")
+ >>> print(config.status)
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
response = self._execute(
- "GET",
- bucket_name,
- query_params={"versioning": ""},
+ method="GET",
+ bucket_name=bucket_name,
+ query_params=HTTPQueryDict({"versioning": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return unmarshal(VersioningConfig, response.data.decode())
def fput_object(
self,
+ *,
bucket_name: str,
object_name: str,
file_path: str,
content_type: str = "application/octet-stream",
- metadata: Optional[DictType] = None,
+ headers: Optional[HTTPHeaderDict] = None,
+ user_metadata: Optional[HTTPHeaderDict] = None,
sse: Optional[Sse] = None,
progress: Optional[ProgressType] = None,
part_size: int = 0,
+ checksum: Optional[Algorithm] = None,
num_parallel_uploads: int = 3,
tags: Optional[Tags] = None,
retention: Optional[Retention] = None,
legal_hold: bool = False,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> ObjectWriteResult:
"""
- Uploads data from a file to an object in a bucket.
-
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param file_path: Name of file to upload.
- :param content_type: Content type of the object.
- :param metadata: Any additional metadata to be uploaded along
- with your PUT request.
- :param sse: Server-side encryption.
- :param progress: A progress object
- :param part_size: Multipart part size
- :param num_parallel_uploads: Number of parallel uploads.
- :param tags: :class:`Tags` for the object.
- :param retention: :class:`Retention` configuration object.
- :param legal_hold: Flag to set legal hold for the object.
- :return: :class:`ObjectWriteResult` object.
-
- Example::
- # Upload data.
- result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
- )
-
- # Upload data with metadata.
- result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
- metadata={"My-Project": "one"},
- )
-
- # Upload data with tags, retention and legal-hold.
- date = datetime.utcnow().replace(
- hour=0, minute=0, second=0, microsecond=0,
- ) + timedelta(days=30)
- tags = Tags(for_object=True)
- tags["User"] = "jsmith"
- result = client.fput_object(
- "my-bucket", "my-object", "my-filename",
- tags=tags,
- retention=Retention(GOVERNANCE, date),
- legal_hold=True,
- )
+ Upload data from a file to an object in a bucket.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ file_path (str):
+ Path to the file to upload.
+
+ content_type (str, default="application/octet-stream"):
+ Content type of the object.
+
+ headers (Optional[HTTPHeaderDict], default=None):
+ Additional headers.
+
+ user_metadata (Optional[HTTPHeaderDict], default=None):
+ User metadata of the object.
+
+ sse (Optional[Sse], default=None):
+ Server-side encryption configuration.
+
+ progress (Optional[ProgressType], default=None):
+ Progress object to track upload progress.
+
+ part_size (int, default=0):
+ Multipart upload part size in bytes.
+
+ checksum (Optional[Algorithm], default=None):
+ Algorithm for checksum computation.
+
+ num_parallel_uploads (int, default=3):
+ Number of parallel uploads.
+
+ tags (Optional[Tags], default=None):
+ Tags for the object.
+
+ retention (Optional[Retention], default=None):
+ Retention configuration.
+
+ legal_hold (bool, default=False):
+ Flag to set legal hold for the object.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ ObjectWriteResult:
+ The result of the object upload operation.
+
+ Example:
+ >>> # Upload data
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... )
+ >>> print(
+ ... f"created {result.object_name} object; "
+ ... f"etag: {result.etag}, version-id: {result.version_id}",
+ ... )
+
+ >>> # Upload with part size
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... part_size=10*1024*1024,
+ ... )
+
+ >>> # Upload with content type
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... content_type="application/csv",
+ ... )
+
+ >>> # Upload with metadata
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... user_metadata={"My-Project": "one"},
+ ... )
+
+ >>> # Upload with customer key encryption
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
+ ... )
+
+ >>> # Upload with KMS encryption
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... sse=SseKMS(
+ ... "KMS-KEY-ID",
+ ... {"Key1": "Value1", "Key2": "Value2"},
+ ... ),
+ ... )
+
+ >>> # Upload with S3-managed encryption
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... sse=SseS3(),
+ ... )
+
+ >>> # Upload with tags, retention and legal hold
+ >>> date = datetime.utcnow().replace(
+ ... hour=0, minute=0, second=0, microsecond=0,
+ ... ) + timedelta(days=30)
+ >>> tags = Tags(for_object=True)
+ >>> tags["User"] = "jsmith"
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... tags=tags,
+ ... retention=Retention(GOVERNANCE, date),
+ ... legal_hold=True,
+ ... )
+
+ >>> # Upload with progress bar
+ >>> result = client.fput_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... progress=Progress(),
+ ... )
"""
-
file_size = os.stat(file_path).st_size
with open(file_path, "rb") as file_data:
return self.put_object(
- bucket_name,
- object_name,
- file_data,
- file_size,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ data=file_data,
+ length=file_size,
content_type=content_type,
- metadata=cast(Union[DictType, None], metadata),
+ headers=headers,
+ user_metadata=user_metadata,
sse=sse,
+ checksum=checksum,
progress=progress,
part_size=part_size,
num_parallel_uploads=num_parallel_uploads,
tags=tags,
retention=retention,
legal_hold=legal_hold,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
def fget_object(
self,
+ *,
bucket_name: str,
object_name: str,
file_path: str,
- request_headers: Optional[DictType] = None,
+ match_etag: Optional[str] = None,
+ not_match_etag: Optional[str] = None,
+ modified_since: Optional[datetime] = None,
+ unmodified_since: Optional[datetime] = None,
+ fetch_checksum: bool = False,
ssec: Optional[SseCustomerKey] = None,
version_id: Optional[str] = None,
- extra_query_params: Optional[DictType] = None,
tmp_file_path: Optional[str] = None,
progress: Optional[ProgressType] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
):
"""
- Downloads data of an object to file.
-
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param file_path: Name of file to download.
- :param request_headers: Any additional headers to be added with GET
- request.
- :param ssec: Server-side encryption customer key.
- :param version_id: Version-ID of the object.
- :param extra_query_params: Extra query parameters for advanced usage.
- :param tmp_file_path: Path to a temporary file.
- :param progress: A progress object
- :return: Object information.
-
- Example::
- # Download data of an object.
- client.fget_object("my-bucket", "my-object", "my-filename")
-
- # Download data of an object of version-ID.
- client.fget_object(
- "my-bucket", "my-object", "my-filename",
- version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
- )
-
- # Download data of an SSE-C encrypted object.
- client.fget_object(
- "my-bucket", "my-object", "my-filename",
- ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
- )
+ Download an object to a file.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ file_path (str):
+ Path to the file where data will be downloaded.
+
+ match_etag (Optional[str], default=None):
+ Match ETag of the object.
+
+ not_match_etag (Optional[str], default=None):
+ Not-match ETag of the object (sets If-None-Match).
+
+ modified_since (Optional[datetime], default=None):
+ Condition to fetch object modified since the given date.
+
+ unmodified_since (Optional[datetime], default=None):
+ Condition to fetch object unmodified since the given date.
+
+ fetch_checksum (bool, default=False):
+ Flag to fetch object checksum.
+
+ ssec (Optional[SseCustomerKey], default=None):
+ Server-side encryption customer key.
+
+ version_id (Optional[str], default=None):
+ Version ID of the object.
+
+ tmp_file_path (Optional[str], default=None):
+ Path to a temporary file used during download.
+
+ progress (Optional[ProgressType], default=None):
+ Progress object to track download progress.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Example:
+ >>> # Download object
+ >>> client.fget_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... )
+ >>>
+ >>> # Download specific version of object
+ >>> client.fget_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
+ ... )
+ >>>
+ >>> # Download SSE-C encrypted object
+ >>> client.fget_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... file_path="my-filename",
+ ... ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
+ ... )
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
@@ -1120,15 +1977,14 @@ def fget_object(
makedirs(os.path.dirname(file_path))
stat = self.stat_object(
- bucket_name,
- object_name,
- ssec,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ ssec=ssec,
version_id=version_id,
- extra_headers=request_headers,
)
etag = queryencode(cast(str, stat.etag))
- # Write to a temporary file "file_path.part.minio" before saving.
+ # Write to a temporary file "file_path.ETAG.part.minio" before saving.
tmp_file_path = (
tmp_file_path or f"{file_path}.{etag}.part.minio"
)
@@ -1136,11 +1992,17 @@ def fget_object(
response = None
try:
response = self.get_object(
- bucket_name,
- object_name,
- request_headers=request_headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ match_etag=match_etag,
+ not_match_etag=not_match_etag,
+ modified_since=modified_since,
+ unmodified_since=unmodified_since,
+ fetch_checksum=fetch_checksum,
ssec=ssec,
version_id=version_id,
+ region=region,
+ extra_headers=extra_headers,
extra_query_params=extra_query_params,
)
@@ -1165,231 +2027,361 @@ def fget_object(
def get_object(
self,
+ *,
bucket_name: str,
object_name: str,
- offset: int = 0,
- length: int = 0,
- request_headers: Optional[DictType] = None,
- ssec: Optional[SseCustomerKey] = None,
version_id: Optional[str] = None,
- extra_query_params: Optional[DictType] = None,
+ ssec: Optional[SseCustomerKey] = None,
+ offset: int = 0,
+ length: Optional[int] = None,
+ match_etag: Optional[str] = None,
+ not_match_etag: Optional[str] = None,
+ modified_since: Optional[datetime] = None,
+ unmodified_since: Optional[datetime] = None,
+ fetch_checksum: bool = False,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> BaseHTTPResponse:
"""
- Get data of an object. Returned response should be closed after use to
- release network resources. To reuse the connection, it's required to
- call `response.release_conn()` explicitly.
-
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param offset: Start byte position of object data.
- :param length: Number of bytes of object data from offset.
- :param request_headers: Any additional headers to be added with GET
- request.
- :param ssec: Server-side encryption customer key.
- :param version_id: Version-ID of the object.
- :param extra_query_params: Extra query parameters for advanced usage.
- :return: :class:`urllib3.response.BaseHTTPResponse` object.
-
- Example::
- # Get data of an object.
- response = None
- try:
- response = client.get_object("my-bucket", "my-object")
- # Read data from response.
- finally:
- if response:
- response.close()
- response.release_conn()
-
- # Get data of an object of version-ID.
- response = None
- try:
- response = client.get_object(
- "my-bucket", "my-object",
- version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
- )
- # Read data from response.
- finally:
- if response:
- response.close()
- response.release_conn()
-
- # Get data of an object from offset and length.
- response = None
- try:
- response = client.get_object(
- "my-bucket", "my-object", offset=512, length=1024,
- )
- # Read data from response.
- finally:
- if response:
- response.close()
- response.release_conn()
-
- # Get data of an SSE-C encrypted object.
- response = None
- try:
- response = client.get_object(
- "my-bucket", "my-object",
- ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
- )
- # Read data from response.
- finally:
- if response:
- response.close()
- response.release_conn()
+ Get object data from a bucket.
+
+ Data is read starting at the specified offset up to the given length.
+ The returned response must be closed after use to release network
+ resources. To reuse the connection, explicitly call
+ ``response.release_conn()``.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ version_id (Optional[str], default=None):
+ Version ID of the object.
+
+ ssec (Optional[SseCustomerKey], default=None):
+ Server-side encryption customer key.
+
+ offset (int, default=0):
+ Start byte position of object data.
+
+ length (Optional[int], default=None):
+ Number of bytes of object data to read from offset.
+
+ match_etag (Optional[str], default=None):
+ Match ETag of the object.
+
+ not_match_etag (Optional[str], default=None):
+ Not-match ETag of the object (sets If-None-Match).
+
+ modified_since (Optional[datetime], default=None):
+ Condition to fetch object modified since the given date.
+
+ unmodified_since (Optional[datetime], default=None):
+ Condition to fetch object unmodified since the given date.
+
+ fetch_checksum (bool, default=False):
+ Flag to fetch object checksum.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ BaseHTTPResponse:
+ An :class:`urllib3.response.BaseHTTPResponse` or
+ :class:`urllib3.response.HTTPResponse` object containing
+ the object data.
+
+ Example:
+ >>> # Get data of an object
+ >>> try:
+ ... response = client.get_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... )
+ ... # Read data from response
+ ... finally:
+ ... response.close()
+ ... response.release_conn()
+ >>>
+ >>> # Get specific version of an object
+ >>> try:
+ ... response = client.get_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
+ ... )
+ ... finally:
+ ... response.close()
+ ... response.release_conn()
+ >>>
+ >>> # Get object data from offset and length
+ >>> try:
+ ... response = client.get_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... offset=512,
+ ... length=1024,
+ ... )
+ ... finally:
+ ... response.close()
+ ... response.release_conn()
+ >>>
+ >>> # Get SSE-C encrypted object
+ >>> try:
+ ... response = client.get_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... ssec=SseCustomerKey(
+ ... b"32byteslongsecretkeymustprovided"
+ ... ),
+ ... )
+ ... finally:
+ ... response.close()
+ ... response.release_conn()
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
check_ssec(ssec)
- headers = cast(DictType, ssec.headers() if ssec else {})
- headers.update(request_headers or {})
-
- if offset or length:
- end = (offset + length - 1) if length else ""
- headers['Range'] = f"bytes={offset}-{end}"
-
+ headers = self._gen_read_headers(
+ ssec=ssec,
+ offset=offset,
+ length=length,
+ match_etag=match_etag,
+ not_match_etag=not_match_etag,
+ modified_since=modified_since,
+ unmodified_since=unmodified_since,
+ fetch_checksum=fetch_checksum,
+ )
+ query_params = HTTPQueryDict()
if version_id:
- extra_query_params = extra_query_params or {}
- extra_query_params["versionId"] = version_id
+ query_params["versionId"] = version_id
return self._execute(
- "GET",
- bucket_name,
- object_name,
- headers=cast(DictType, headers),
- query_params=extra_query_params,
+ method="GET",
+ bucket_name=bucket_name,
+ object_name=object_name,
+ headers=headers,
+ query_params=query_params,
preload_content=False,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
def prompt_object(
self,
+ *,
bucket_name: str,
object_name: str,
prompt: str,
lambda_arn: Optional[str] = None,
- request_headers: Optional[DictType] = None,
ssec: Optional[SseCustomerKey] = None,
version_id: Optional[str] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
**kwargs: Optional[Any],
) -> BaseHTTPResponse:
"""
Prompt an object using natural language.
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param prompt: Prompt the Object to interact with the AI model.
- request.
- :param lambda_arn: Lambda ARN to use for prompt.
- :param request_headers: Any additional headers to be added with POST
- :param ssec: Server-side encryption customer key.
- :param version_id: Version-ID of the object.
- :param kwargs: Extra parameters for advanced usage.
- :return: :class:`urllib3.response.BaseHTTPResponse` object.
-
- Example::
- # prompt an object.
- response = None
- try:
- response = client.get_object(
- "my-bucket", "my-object",
- "Describe the object for me")
- # Read data from response.
- finally:
- if response:
- response.close()
- response.release_conn()
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ prompt (str):
+ Natural language prompt to interact with the object using
+ the AI model.
+
+ lambda_arn (Optional[str], default=None):
+ AWS Lambda ARN to use for processing the prompt.
+
+ ssec (Optional[SseCustomerKey], default=None):
+ Server-side encryption customer key.
+
+ version_id (Optional[str], default=None):
+ Version ID of the object.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ **kwargs (Optional[Any]):
+ Additional parameters for advanced usage.
+
+ Returns:
+ BaseHTTPResponse:
+ An :class:`urllib3.response.BaseHTTPResponse` object.
+
+ Example:
+ >>> response = None
+ >>> try:
+ ... response = client.prompt_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... prompt="Describe the object for me",
+ ... )
+ ... # Read data from response
+ ... finally:
+ ... if response:
+ ... response.close()
+ ... response.release_conn()
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
check_ssec(ssec)
- headers = cast(DictType, ssec.headers() if ssec else {})
- headers.update(request_headers or {})
-
- extra_query_params = {"lambdaArn": lambda_arn or ""}
-
+ query_params = HTTPQueryDict()
if version_id:
- extra_query_params["versionId"] = version_id
+ query_params["versionId"] = version_id
+ query_params["lambdaArn"] = lambda_arn or ""
prompt_body = kwargs
prompt_body["prompt"] = prompt
body = json.dumps(prompt_body)
return self._execute(
- "POST",
- bucket_name,
- object_name,
- headers=cast(DictType, headers),
- query_params=cast(DictType, extra_query_params),
+ method="POST",
+ bucket_name=bucket_name,
+ object_name=object_name,
+ headers=HTTPHeaderDict(ssec.headers()) if ssec else None,
+ query_params=query_params,
body=body.encode(),
preload_content=False,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
def copy_object(
self,
+ *,
bucket_name: str,
object_name: str,
source: CopySource,
sse: Optional[Sse] = None,
- metadata: Optional[DictType] = None,
+ user_metadata: Optional[HTTPHeaderDict] = None,
tags: Optional[Tags] = None,
retention: Optional[Retention] = None,
legal_hold: bool = False,
metadata_directive: Optional[str] = None,
tagging_directive: Optional[str] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> ObjectWriteResult:
"""
Create an object by server-side copying data from another object.
- In this API maximum supported source object size is 5GiB.
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param source: :class:`CopySource` object.
- :param sse: Server-side encryption of destination object.
- :param metadata: Any user-defined metadata to be copied along with
- destination object.
- :param tags: Tags for destination object.
- :param retention: :class:`Retention` configuration object.
- :param legal_hold: Flag to set legal hold for destination object.
- :param metadata_directive: Directive used to handle user metadata for
- destination object.
- :param tagging_directive: Directive used to handle tags for destination
- object.
- :return: :class:`ObjectWriteResult ` object.
-
- Example::
- # copy an object from a bucket to another.
- result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource("my-sourcebucket", "my-sourceobject"),
- )
- print(result.object_name, result.version_id)
-
- # copy an object with condition.
- result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource(
- "my-sourcebucket",
- "my-sourceobject",
- modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc),
- ),
- )
- print(result.object_name, result.version_id)
-
- # copy an object from a bucket with replacing metadata.
- metadata = {"test_meta_key": "test_meta_value"}
- result = client.copy_object(
- "my-bucket",
- "my-object",
- CopySource("my-sourcebucket", "my-sourceobject"),
- metadata=metadata,
- metadata_directive=REPLACE,
- )
- print(result.object_name, result.version_id)
+ The maximum supported source object size for this API is 5 GiB.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ source (CopySource):
+ Source object information.
+
+ sse (Optional[Sse], default=None):
+ Server-side encryption configuration for the destination
+ object.
+
+ user_metadata (Optional[HTTPHeaderDict], default=None):
+ User-defined metadata to be applied to the destination
+ object.
+
+ tags (Optional[Tags], default=None):
+ Tags for the destination object.
+
+ retention (Optional[Retention], default=None):
+ Retention configuration for the destination object.
+
+ legal_hold (bool, default=False):
+ Flag to enable legal hold on the destination object.
+
+ metadata_directive (Optional[str], default=None):
+ Directive for handling user metadata on the destination
+ object.
+
+ tagging_directive (Optional[str], default=None):
+ Directive for handling tags on the destination object.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ ObjectWriteResult:
+ The result of the copy operation.
+
+ Example:
+ >>> from datetime import datetime, timezone
+ >>> from minio.commonconfig import REPLACE, CopySource
+ >>>
+ >>> # Copy an object from a bucket to another
+ >>> result = client.copy_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... source=CopySource(
+ ... bucket_name="my-sourcebucket",
+ ... object_name="my-sourceobject",
+ ... ),
+ ... )
+ >>> print(result.object_name, result.version_id)
+ >>>
+ >>> # Copy an object with condition
+ >>> result = client.copy_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... source=CopySource(
+ ... bucket_name="my-sourcebucket",
+ ... object_name="my-sourceobject",
+ ... modified_since=datetime(
+ ... 2014, 4, 1, tzinfo=timezone.utc,
+ ... ),
+ ... ),
+ ... )
+ >>> print(result.object_name, result.version_id)
+ >>>
+ >>> # Copy an object with replacing metadata
+ >>> user_metadata = {"test_meta_key": "test_meta_value"}
+ >>> result = client.copy_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... source=CopySource(
+ ... bucket_name="my-sourcebucket",
+ ... object_name="my-sourceobject",
+ ... ),
+ ... user_metadata=user_metadata,
+ ... metadata_directive=REPLACE,
+ ... )
+ >>> print(result.object_name, result.version_id)
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
@@ -1414,8 +2406,8 @@ def copy_object(
size = -1
if source.offset is None and source.length is None:
stat = self.stat_object(
- source.bucket_name,
- source.object_name,
+ bucket_name=source.bucket_name,
+ object_name=source.object_name,
version_id=source.version_id,
ssec=source.ssec,
)
@@ -1437,36 +2429,43 @@ def copy_object(
"object size greater than 5 GiB"
)
return self.compose_object(
- bucket_name, object_name, [ComposeSource.of(source)],
- sse=sse, metadata=metadata, tags=tags, retention=retention,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ sources=[ComposeSource.of(source)],
+ sse=sse,
+ user_metadata=user_metadata,
+ tags=tags,
+ retention=retention,
legal_hold=legal_hold,
)
- headers = genheaders(
- metadata,
- sse,
- tags,
- retention,
- legal_hold,
+ headers = self._gen_write_headers(
+ user_metadata=user_metadata,
+ sse=sse,
+ tags=tags,
+ retention=retention,
+ legal_hold=legal_hold,
)
if metadata_directive:
headers["x-amz-metadata-directive"] = metadata_directive
if tagging_directive:
headers["x-amz-tagging-directive"] = tagging_directive
- headers.update(source.gen_copy_headers())
+ headers.extend(source.gen_copy_headers())
response = self._execute(
- "PUT",
- bucket_name,
+ method="PUT",
+ bucket_name=bucket_name,
object_name=object_name,
headers=headers,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
etag, last_modified = parse_copy_object(response)
- return ObjectWriteResult(
- bucket_name,
- object_name,
- response.headers.get("x-amz-version-id"),
- etag,
- response.headers,
+ return ObjectWriteResult.new(
+ headers=response.headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ etag=etag,
last_modified=last_modified,
)
@@ -1478,8 +2477,8 @@ def _calc_part_count(self, sources: list[ComposeSource]) -> int:
for src in sources:
i += 1
stat = self.stat_object(
- src.bucket_name,
- src.object_name,
+ bucket_name=src.bucket_name,
+ object_name=src.object_name,
version_id=src.version_id,
ssec=src.ssec,
)
@@ -1537,79 +2536,138 @@ def _calc_part_count(self, sources: list[ComposeSource]) -> int:
def _upload_part_copy(
self,
+ *,
bucket_name: str,
object_name: str,
upload_id: str,
part_number: int,
- headers: DictType,
+ headers: HTTPHeaderDict,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> tuple[str, Optional[datetime]]:
"""Execute UploadPartCopy S3 API."""
- response = self._execute(
- "PUT",
- bucket_name,
- object_name,
- headers=headers,
- query_params={
+ query_params = HTTPQueryDict(
+ {
"partNumber": str(part_number),
"uploadId": upload_id,
},
)
+ response = self._execute(
+ method="PUT",
+ bucket_name=bucket_name,
+ object_name=object_name,
+ headers=headers,
+ query_params=query_params,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
return parse_copy_object(response)
def compose_object(
self,
+ *,
bucket_name: str,
object_name: str,
sources: list[ComposeSource],
sse: Optional[Sse] = None,
- metadata: Optional[DictType] = None,
+ user_metadata: Optional[HTTPHeaderDict] = None,
tags: Optional[Tags] = None,
retention: Optional[Retention] = None,
legal_hold: bool = False,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> ObjectWriteResult:
"""
- Create an object by combining data from different source objects using
+ Create an object by combining data from multiple source objects using
server-side copy.
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param sources: List of :class:`ComposeSource` object.
- :param sse: Server-side encryption of destination object.
- :param metadata: Any user-defined metadata to be copied along with
- destination object.
- :param tags: Tags for destination object.
- :param retention: :class:`Retention` configuration object.
- :param legal_hold: Flag to set legal hold for destination object.
- :return: :class:`ObjectWriteResult ` object.
-
- Example::
- sources = [
- ComposeSource("my-job-bucket", "my-object-part-one"),
- ComposeSource("my-job-bucket", "my-object-part-two"),
- ComposeSource("my-job-bucket", "my-object-part-three"),
- ]
-
- # Create my-bucket/my-object by combining source object
- # list.
- result = client.compose_object("my-bucket", "my-object", sources)
- print(result.object_name, result.version_id)
-
- # Create my-bucket/my-object with user metadata by combining
- # source object list.
- result = client.compose_object(
- "my-bucket",
- "my-object",
- sources,
- metadata={"test_meta_key": "test_meta_value"},
- )
- print(result.object_name, result.version_id)
-
- # Create my-bucket/my-object with user metadata and
- # server-side encryption by combining source object list.
- client.compose_object(
- "my-bucket", "my-object", sources, sse=SseS3(),
- )
- print(result.object_name, result.version_id)
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ sources (list[ComposeSource]):
+ List of source objects to be combined.
+
+ sse (Optional[Sse], default=None):
+ Server-side encryption configuration for the destination
+ object.
+
+ user_metadata (Optional[HTTPHeaderDict], default=None):
+ User-defined metadata to be applied to the destination
+ object.
+
+ tags (Optional[Tags], default=None):
+ Tags for the destination object.
+
+ retention (Optional[Retention], default=None):
+ Retention configuration for the destination object.
+
+ legal_hold (bool, default=False):
+ Flag to enable legal hold on the destination object.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ ObjectWriteResult:
+ The result of the compose operation.
+
+ Example:
+ >>> from minio.commonconfig import ComposeSource
+ >>> from minio.sse import SseS3
+ >>>
+ >>> sources = [
+ ... ComposeSource(
+ ... bucket_name="my-job-bucket",
+ ... object_name="my-object-part-one",
+ ... ),
+ ... ComposeSource(
+ ... bucket_name="my-job-bucket",
+ ... object_name="my-object-part-two",
+ ... ),
+ ... ComposeSource(
+ ... bucket_name="my-job-bucket",
+ ... object_name="my-object-part-three",
+ ... ),
+ ... ]
+ >>>
+ >>> # Create object by combining sources
+ >>> result = client.compose_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... sources=sources,
+ ... )
+ >>> print(result.object_name, result.version_id)
+ >>>
+ >>> # With user metadata
+ >>> result = client.compose_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... sources=sources,
+ ... user_metadata={"test_meta_key": "test_meta_value"},
+ ... )
+ >>> print(result.object_name, result.version_id)
+ >>>
+ >>> # With user metadata and SSE
+ >>> result = client.compose_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... sources=sources,
+ ... sse=SseS3(),
+ ... )
+ >>> print(result.object_name, result.version_id)
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
@@ -1633,18 +2691,37 @@ def compose_object(
sources[0].length is None
):
return self.copy_object(
- bucket_name, object_name, CopySource.of(sources[0]),
- sse=sse, metadata=metadata, tags=tags, retention=retention,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ source=CopySource.of(sources[0]),
+ sse=sse,
+ user_metadata=user_metadata,
+ tags=tags,
+ retention=retention,
legal_hold=legal_hold,
- metadata_directive=REPLACE if metadata else None,
+ metadata_directive=REPLACE if user_metadata else None,
tagging_directive=REPLACE if tags else None,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- headers = genheaders(metadata, sse, tags, retention, legal_hold)
+ headers = self._gen_write_headers(
+ user_metadata=user_metadata,
+ sse=sse,
+ tags=tags,
+ retention=retention,
+ legal_hold=legal_hold,
+ )
upload_id = self._create_multipart_upload(
- bucket_name, object_name, headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ headers=headers,
+ )
+ ssec_headers = (
+ sse.headers() if isinstance(sse, SseCustomerKey)
+ else HTTPHeaderDict()
)
- ssec_headers = sse.headers() if isinstance(sse, SseCustomerKey) else {}
try:
part_number = 0
total_parts = []
@@ -1655,8 +2732,8 @@ def compose_object(
elif src.offset is not None:
size -= src.offset
offset = src.offset or 0
- headers = cast(DictType, src.headers)
- headers.update(ssec_headers)
+ headers = cast(HTTPHeaderDict, src.headers)
+ headers.extend(ssec_headers)
if size <= MAX_PART_SIZE:
part_number += 1
if src.length is not None:
@@ -1668,11 +2745,11 @@ def compose_object(
f"bytes={offset}-{offset + size - 1}"
)
etag, _ = self._upload_part_copy(
- bucket_name,
- object_name,
- upload_id,
- part_number,
- headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ upload_id=upload_id,
+ part_number=part_number,
+ headers=headers,
)
total_parts.append(Part(part_number, etag))
continue
@@ -1685,53 +2762,69 @@ def compose_object(
f"bytes={offset}-{end_bytes}"
)
etag, _ = self._upload_part_copy(
- bucket_name,
- object_name,
- upload_id,
- part_number,
- headers_copy,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ upload_id=upload_id,
+ part_number=part_number,
+ headers=headers_copy,
)
total_parts.append(Part(part_number, etag))
offset += length
size -= length
result = self._complete_multipart_upload(
- bucket_name, object_name, upload_id, total_parts,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ upload_id=upload_id,
+ parts=total_parts,
)
- return ObjectWriteResult(
- cast(str, result.bucket_name),
- cast(str, result.object_name),
- result.version_id,
- result.etag,
- result.http_headers,
+ return ObjectWriteResult.new(
+ headers=result.headers,
+ bucket_name=cast(str, result.bucket_name),
+ object_name=cast(str, result.object_name),
+ version_id=result.version_id,
+ etag=result.etag,
location=result.location,
)
except Exception as exc:
if upload_id:
self._abort_multipart_upload(
- bucket_name, object_name, upload_id,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ upload_id=upload_id,
)
raise exc
def _abort_multipart_upload(
self,
+ *,
bucket_name: str,
object_name: str,
upload_id: str,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
):
"""Execute AbortMultipartUpload S3 API."""
self._execute(
- "DELETE",
- bucket_name,
- object_name,
- query_params={'uploadId': upload_id},
+ method="DELETE",
+ bucket_name=bucket_name,
+ object_name=object_name,
+ query_params=HTTPQueryDict({'uploadId': upload_id}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
def _complete_multipart_upload(
self,
+ *,
bucket_name: str,
object_name: str,
upload_id: str,
parts: list[Part],
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> CompleteMultipartUploadResult:
"""Execute CompleteMultipartUpload S3 API."""
element = Element("CompleteMultipartUpload")
@@ -1740,151 +2833,298 @@ def _complete_multipart_upload(
SubElement(tag, "PartNumber", str(part.part_number))
SubElement(tag, "ETag", '"' + part.etag + '"')
body = getbytes(element)
- response = self._execute(
- "POST",
- bucket_name,
- object_name,
- body=body,
- headers={
+ headers = HTTPHeaderDict(
+ {
"Content-Type": 'application/xml',
- "Content-MD5": cast(str, md5sum_hash(body)),
+ "Content-MD5": base64_string(MD5.hash(body)),
},
- query_params={'uploadId': upload_id},
+ )
+ response = self._execute(
+ method="POST",
+ bucket_name=bucket_name,
+ object_name=object_name,
+ body=body,
+ headers=headers,
+ query_params=HTTPQueryDict({'uploadId': upload_id}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return CompleteMultipartUploadResult(response)
def _create_multipart_upload(
self,
+ *,
bucket_name: str,
object_name: str,
- headers: DictType,
+ headers: HTTPHeaderDict,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> str:
"""Execute CreateMultipartUpload S3 API."""
if not headers.get("Content-Type"):
headers["Content-Type"] = "application/octet-stream"
response = self._execute(
- "POST",
- bucket_name,
- object_name,
+ method="POST",
+ bucket_name=bucket_name,
+ object_name=object_name,
headers=headers,
- query_params={"uploads": ""},
+ query_params=HTTPQueryDict({"uploads": ""}),
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
element = ET.fromstring(response.data.decode())
return cast(str, findtext(element, "UploadId", True))
def _put_object(
self,
+ *,
bucket_name: str,
object_name: str,
data: bytes,
- headers: Optional[DictType] = None,
- query_params: Optional[DictType] = None,
+ headers: Optional[HTTPHeaderDict] = None,
+ query_params: Optional[HTTPQueryDict] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> ObjectWriteResult:
"""Execute PutObject S3 API."""
response = self._execute(
- "PUT",
- bucket_name,
- object_name,
+ method="PUT",
+ bucket_name=bucket_name,
+ object_name=object_name,
body=data,
headers=headers,
query_params=query_params,
no_body_trace=True,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
- return ObjectWriteResult(
- bucket_name,
- object_name,
- response.headers.get("x-amz-version-id"),
- response.headers.get("etag", "").replace('"', ""),
- response.headers,
+ return ObjectWriteResult.new(
+ headers=response.headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
)
def _upload_part(
self,
+ *,
bucket_name: str,
object_name: str,
data: bytes,
- headers: Optional[DictType],
+ headers: Optional[HTTPHeaderDict],
upload_id: str,
part_number: int,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> str:
"""Execute UploadPart S3 API."""
+ query_params = HTTPQueryDict({
+ "partNumber": str(part_number),
+ "uploadId": upload_id,
+ })
result = self._put_object(
- bucket_name,
- object_name,
- data,
- headers,
- query_params={
- "partNumber": str(part_number),
- "uploadId": upload_id,
- },
+ bucket_name=bucket_name,
+ object_name=object_name,
+ data=data,
+ headers=headers,
+ query_params=query_params,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
return cast(str, result.etag)
- def _upload_part_task(self, args):
+ def _upload_part_task(self, kwargs):
"""Upload_part task for ThreadPool."""
- return args[5], self._upload_part(*args)
+ return kwargs["part_number"], self._upload_part(**kwargs)
def put_object(
self,
+ *,
bucket_name: str,
object_name: str,
data: BinaryIO,
length: int,
content_type: str = "application/octet-stream",
- metadata: Optional[DictType] = None,
+ headers: Optional[HTTPHeaderDict] = None,
+ user_metadata: Optional[HTTPHeaderDict] = None,
sse: Optional[Sse] = None,
progress: Optional[ProgressType] = None,
part_size: int = 0,
+ checksum: Optional[Algorithm] = None,
num_parallel_uploads: int = 3,
tags: Optional[Tags] = None,
retention: Optional[Retention] = None,
legal_hold: bool = False,
- write_offset: Optional[int] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> ObjectWriteResult:
"""
- Uploads data from a stream to an object in a bucket.
-
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param data: An object having callable read() returning bytes object.
- :param length: Data size; -1 for unknown size and set valid part_size.
- :param content_type: Content type of the object.
- :param metadata: Any additional metadata to be uploaded along
- with your PUT request.
- :param sse: Server-side encryption.
- :param progress: A progress object;
- :param part_size: Multipart part size.
- :param num_parallel_uploads: Number of parallel uploads.
- :param tags: :class:`Tags` for the object.
- :param retention: :class:`Retention` configuration object.
- :param legal_hold: Flag to set legal hold for the object.
- :param write_offset: Offset byte for appending data to existing object.
- :return: :class:`ObjectWriteResult` object.
-
- Example::
- # Upload data.
- result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
- )
-
- # Upload data with metadata.
- result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
- metadata={"My-Project": "one"},
- )
-
- # Upload data with tags, retention and legal-hold.
- date = datetime.utcnow().replace(
- hour=0, minute=0, second=0, microsecond=0,
- ) + timedelta(days=30)
- tags = Tags(for_object=True)
- tags["User"] = "jsmith"
- result = client.put_object(
- "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
- tags=tags,
- retention=Retention(GOVERNANCE, date),
- legal_hold=True,
- )
+ Upload data from a stream to an object in a bucket.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ data (BinaryIO):
+ An object with a callable ``read()`` method that returns a
+ bytes object.
+
+ length (int):
+ Size of the data in bytes. Use -1 for unknown size and set a
+ valid ``part_size``.
+
+ content_type (str, default="application/octet-stream"):
+ Content type of the object.
+
+ headers (Optional[HTTPHeaderDict], default=None):
+ Additional headers.
+
+ user_metadata (Optional[HTTPHeaderDict], default=None):
+ User metadata for the object.
+
+ sse (Optional[Sse], default=None):
+ Server-side encryption configuration.
+
+ progress (Optional[ProgressType], default=None):
+ Progress object to track upload progress.
+
+ part_size (int, default=0):
+ Multipart upload part size in bytes.
+
+ checksum (Optional[Algorithm], default=None):
+ Algorithm for checksum computation.
+
+ num_parallel_uploads (int, default=3):
+ Number of parallel uploads.
+
+ tags (Optional[Tags], default=None):
+ Tags for the object.
+
+ retention (Optional[Retention], default=None):
+ Retention configuration.
+
+ legal_hold (bool, default=False):
+ Flag to enable legal hold on the object.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ ObjectWriteResult:
+ The result of the object upload operation.
+
+ Example:
+ >>> # Upload simple data
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... )
+ >>> print(
+ ... f"created {result.object_name} object; "
+ ... f"etag: {result.etag}, version-id: {result.version_id}",
+ ... )
+ >>>
+ >>> # Upload unknown-sized data with multipart
+ >>> with urlopen("https://cdn.kernel.org/pub/linux/kernel/v5.x/"
+ ... "linux-5.4.81.tar.xz") as data:
+ ... result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=data,
+ ... length=-1,
+ ... part_size=10*1024*1024,
+ ... )
+ >>>
+ >>> # Upload with content type
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... content_type="application/csv",
+ ... )
+ >>>
+ >>> # Upload with metadata
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... user_metadata={"My-Project": "one"},
+ ... )
+ >>>
+ >>> # Upload with customer key SSE
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
+ ... )
+ >>>
+ >>> # Upload with KMS SSE
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... sse=SseKMS(
+ ... "KMS-KEY-ID",
+ ... {"Key1": "Value1", "Key2": "Value2"},
+ ... ),
+ ... )
+ >>>
+ >>> # Upload with S3-managed SSE
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... sse=SseS3(),
+ ... )
+ >>>
+ >>> # Upload with tags, retention, and legal hold
+ >>> date = datetime.utcnow().replace(
+ ... hour=0, minute=0, second=0, microsecond=0,
+ ... ) + timedelta(days=30)
+ >>> tags = Tags(for_object=True)
+ >>> tags["User"] = "jsmith"
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... tags=tags,
+ ... retention=Retention(GOVERNANCE, date),
+ ... legal_hold=True,
+ ... )
+ >>>
+ >>> # Upload with progress bar
+ >>> result = client.put_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=io.BytesIO(b"hello"),
+ ... length=5,
+ ... progress=Progress(),
+ ... )
"""
check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
check_object_name(object_name)
@@ -1895,21 +3135,27 @@ def put_object(
raise ValueError("retention must be Retention type")
if not callable(getattr(data, "read")):
raise ValueError("input data must have callable read()")
- if write_offset is not None:
- if write_offset < 0:
- raise ValueError("write offset should not be negative")
- if length < 0:
- raise ValueError("length must be provided for write offset")
- part_size = length if length > MIN_PART_SIZE else MIN_PART_SIZE
part_size, part_count = get_part_info(length, part_size)
if progress:
# Set progress bar length and object name before upload
progress.set_meta(object_name=object_name, total_length=length)
- headers = genheaders(metadata, sse, tags, retention, legal_hold)
+ add_content_sha256 = self._base_url.is_https
+ algorithms = [checksum or Algorithm.CRC32C]
+ add_sha256_checksum = algorithms[0] == Algorithm.SHA256
+ if add_content_sha256 and not add_sha256_checksum:
+ algorithms.append(Algorithm.SHA256)
+ hashers = new_hashers(algorithms)
+
+ headers = self._gen_write_headers(
+ headers=headers,
+ user_metadata=user_metadata,
+ sse=sse,
+ tags=tags,
+ retention=retention,
+ legal_hold=legal_hold,
+ )
headers["Content-Type"] = content_type or "application/octet-stream"
- if write_offset:
- headers["x-amz-write-offset-bytes"] = str(write_offset)
object_size = length
uploaded_size = 0
@@ -1928,7 +3174,10 @@ def put_object(
part_size = object_size - uploaded_size
stop = True
part_data = read_part_data(
- data, part_size, progress=progress,
+ stream=data,
+ size=part_size,
+ progress=progress,
+ hashers=hashers,
)
if len(part_data) != part_size:
raise IOError(
@@ -1938,7 +3187,11 @@ def put_object(
)
else:
part_data = read_part_data(
- data, part_size + 1, one_byte, progress=progress,
+ stream=data,
+ size=part_size + 1,
+ part_data=one_byte,
+ progress=progress,
+ hashers=hashers,
)
# If part_data_size is less or equal to part_size,
# then we have reached last part.
@@ -1951,36 +3204,61 @@ def put_object(
uploaded_size += len(part_data)
+ checksum_headers = make_headers(
+ hashers, add_content_sha256, add_sha256_checksum,
+ )
+
if part_count == 1:
+ headers.extend(checksum_headers)
return self._put_object(
- bucket_name, object_name, part_data, headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ data=part_data,
+ headers=headers,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
if not upload_id:
+ headers.extend(checksum_headers)
upload_id = self._create_multipart_upload(
- bucket_name, object_name, headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ headers=headers,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
if num_parallel_uploads and num_parallel_uploads > 1:
pool = ThreadPool(num_parallel_uploads)
pool.start_parallel()
- args = (
- bucket_name,
- object_name,
- part_data,
- (
- cast(DictType, sse.headers())
- if isinstance(sse, SseCustomerKey) else None
- ),
- upload_id,
- part_number,
+ headers = HTTPHeaderDict(
+ sse.headers() if isinstance(sse, SseCustomerKey) else None,
)
+ headers.extend(checksum_headers)
if num_parallel_uploads > 1:
+ kwargs = {
+ "bucket_name": bucket_name,
+ "object_name": object_name,
+ "data": part_data,
+ "headers": headers,
+ "upload_id": upload_id,
+ "part_number": part_number,
+ }
cast(ThreadPool, pool).add_task(
- self._upload_part_task, args,
+ self._upload_part_task, kwargs,
)
else:
- etag = self._upload_part(*args)
+ etag = self._upload_part(
+ bucket_name=bucket_name,
+ object_name=object_name,
+ data=part_data,
+ headers=headers,
+ upload_id=upload_id,
+ part_number=part_number,
+ )
parts.append(Part(part_number, etag))
if pool:
@@ -1991,86 +3269,45 @@ def put_object(
parts[part_number - 1] = Part(part_number, etag)
upload_result = self._complete_multipart_upload(
- bucket_name, object_name, cast(str, upload_id), parts,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ upload_id=cast(str, upload_id),
+ parts=parts,
)
- return ObjectWriteResult(
- cast(str, upload_result.bucket_name),
- cast(str, upload_result.object_name),
- upload_result.version_id,
- upload_result.etag,
- upload_result.http_headers,
+ return ObjectWriteResult.new(
+ headers=upload_result.headers,
+ bucket_name=cast(str, upload_result.bucket_name),
+ object_name=cast(str, upload_result.object_name),
+ version_id=upload_result.version_id,
+ etag=upload_result.etag,
location=upload_result.location,
)
except Exception as exc:
if upload_id:
self._abort_multipart_upload(
- bucket_name, object_name, upload_id,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ upload_id=upload_id,
)
raise exc
- def append_object(
+ def _append_object(
self,
+ *,
bucket_name: str,
object_name: str,
- data: BinaryIO,
- length: int,
- chunk_size: Optional[int] = None,
+ stream: BinaryIO,
+ length: Optional[int] = None,
+ chunk_size: int,
progress: Optional[ProgressType] = None,
- extra_headers: Optional[DictType] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
) -> ObjectWriteResult:
- """
- Appends from a stream to existing object in a bucket.
-
- :param bucket_name: Name of the bucket.
- :param object_name: Object name in the bucket.
- :param data: An object having callable read() returning bytes object.
- :param length: Data size; -1 for unknown size.
- :param chunk_size: Chunk size to optimize uploads.
- :return: :class:`ObjectWriteResult` object.
-
- Example::
- # Append data.
- result = client.append_object(
- "my-bucket", "my-object", io.BytesIO(b"world"), 5,
- )
- print(f"appended {result.object_name} object; etag: {result.etag}")
-
- # Append data in chunks.
- data = urlopen(
- "https://www.kernel.org/pub/linux/kernel/v6.x/"
- "linux-6.13.12.tar.xz",
- )
- result = client.append_object(
- "my-bucket", "my-object", data, 148611164, 5*1024*1024,
- )
- print(f"appended {result.object_name} object; etag: {result.etag}")
-
- # Append unknown sized data.
- data = urlopen(
- "https://www.kernel.org/pub/linux/kernel/v6.x/"
- "linux-6.14.3.tar.xz",
- )
- result = client.append_object(
- "my-bucket", "my-object", data, 149426584, 5*1024*1024,
- )
- print(f"appended {result.object_name} object; etag: {result.etag}")
- """
- if length == 0:
- raise ValueError("length should not be zero")
- if chunk_size is not None:
- if chunk_size < MIN_PART_SIZE:
- raise ValueError("chunk size must be minimum of 5 MiB")
- if chunk_size > MAX_PART_SIZE:
- raise ValueError("chunk size must be less than 5 GiB")
- else:
- chunk_size = length if length > MIN_PART_SIZE else MIN_PART_SIZE
-
+ """Do append object."""
chunk_count = -1
- if length > 0:
- chunk_count = int(length / chunk_size)
- if (chunk_count * chunk_size) < length:
- chunk_count += 1
- chunk_count = chunk_count or 1
+ if length is not None:
+ chunk_count = max(int((length + chunk_size - 1) / chunk_size), 1)
object_size = length
uploaded_size = 0
@@ -2078,17 +3315,20 @@ def append_object(
one_byte = b""
stop = False
- stat = self.stat_object(bucket_name, object_name)
+ stat = self.stat_object(
+ bucket_name=bucket_name,
+ object_name=object_name,
+ )
write_offset = cast(int, stat.size)
while not stop:
chunk_number += 1
if chunk_count > 0:
- if chunk_number == chunk_count:
+ if chunk_number == chunk_count and object_size is not None:
chunk_size = object_size - uploaded_size
stop = True
chunk_data = read_part_data(
- data, chunk_size, progress=progress,
+ stream=stream, size=chunk_size, progress=progress,
)
if len(chunk_data) != chunk_size:
raise IOError(
@@ -2098,7 +3338,10 @@ def append_object(
)
else:
chunk_data = read_part_data(
- data, chunk_size + 1, one_byte, progress=progress,
+ stream=stream,
+ size=chunk_size + 1,
+ part_data=one_byte,
+ progress=progress,
)
# If chunk_data_size is less or equal to chunk_size,
# then we have reached last chunk.
@@ -2111,23 +3354,161 @@ def append_object(
uploaded_size += len(chunk_data)
- headers = extra_headers or {}
- headers["x-amz-write-offset-bytes"] = str(write_offset)
+ headers = HTTPHeaderDict(
+ {"x-amz-write-offset-bytes": str(write_offset)},
+ )
upload_result = self._put_object(
- bucket_name, object_name, chunk_data, headers=headers,
+ bucket_name=bucket_name,
+ object_name=object_name,
+ data=chunk_data,
+ headers=headers,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
write_offset += len(chunk_data)
- return ObjectWriteResult(
- cast(str, upload_result.bucket_name),
- cast(str, upload_result.object_name),
- upload_result.version_id,
- upload_result.etag,
- upload_result.http_headers,
- location=upload_result.location,
+ return upload_result
+
+ def append_object(
+ self,
+ *,
+ bucket_name: str,
+ object_name: str,
+ filename: Optional[str | os.PathLike] = None,
+ stream: Optional[BinaryIO] = None,
+ data: Optional[bytes] = None,
+ length: Optional[int] = None,
+ chunk_size: Optional[int] = None,
+ progress: Optional[ProgressType] = None,
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> ObjectWriteResult:
+ """
+ Append data to an existing object in a bucket.
+
+ Only one of ``filename``, ``stream`` or ``data`` must be provided.
+ If ``data`` is supplied, ``length`` must also be provided.
+
+ Args:
+ bucket_name (str):
+ Name of the bucket.
+
+ object_name (str):
+ Object name in the bucket.
+
+ filename (Optional[str | os.PathLike], default=None):
+ Path to a file whose contents will be appended.
+
+ stream (Optional[BinaryIO], default=None):
+ An object with a callable ``read()`` method returning a
+ bytes object.
+
+ data (Optional[bytes], default=None):
+ Raw data in a bytes object.
+
+ length (Optional[int], default=None):
+ Data length of ``data`` or ``stream``.
+
+ chunk_size (Optional[int], default=None):
+ Chunk size to split the data for appending.
+
+ progress (Optional[ProgressType], default=None):
+ Progress object to track upload progress.
+
+ region (Optional[str], default=None):
+ Region of the bucket to skip auto probing.
+
+ extra_headers (Optional[HTTPHeaderDict], default=None):
+ Extra headers for advanced usage.
+
+ extra_query_params (Optional[HTTPQueryDict], default=None):
+ Extra query parameters for advanced usage.
+
+ Returns:
+ ObjectWriteResult:
+ The result of the append operation.
+
+ Example:
+ >>> # Append simple data
+ >>> result = client.append_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... data=b"world",
+ ... length=5,
+ ... )
+ >>> print(f"appended {result.object_name} object; "
+ ... f"etag: {result.etag}")
+ >>>
+ >>> # Append data in chunks
+ >>> with urlopen("https://www.kernel.org/pub/linux/kernel/v6.x/"
+ ... "linux-6.13.12.tar.xz") as stream:
+ ... result = client.append_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... stream=stream,
+ ... length=148611164,
+ ... chunk_size=5*1024*1024,
+ ... )
+ >>> print(f"appended {result.object_name} object; "
+ ... f"etag: {result.etag}")
+ >>>
+ >>> # Append unknown-sized data
+ >>> with urlopen("https://www.kernel.org/pub/linux/kernel/v6.x/"
+ ... "linux-6.14.3.tar.xz") as stream:
+ ... result = client.append_object(
+ ... bucket_name="my-bucket",
+ ... object_name="my-object",
+ ... stream=stream,
+ ... chunk_size=5*1024*1024,
+ ... )
+ >>> print(f"appended {result.object_name} object; "
+ ... f"etag: {result.etag}")
+ """
+ if sum(x is not None for x in (filename, stream, data)) != 1:
+ raise ValueError(
+ "either filename, stream or data must be provided")
+ if (length is not None and length <= 0):
+ raise ValueError("valid length must be provided")
+ if data is not None and length is None:
+ raise ValueError("valid length must be provided for data")
+ if chunk_size is not None:
+ if chunk_size < MIN_PART_SIZE:
+ raise ValueError("chunk size must be minimum of 5 MiB")
+ if chunk_size > MAX_PART_SIZE:
+ raise ValueError("chunk size must be less than 5 GiB")
+ else:
+ chunk_size = max(MIN_PART_SIZE, length or 0)
+
+ if filename:
+ file_size = os.stat(filename).st_size
+ with open(filename, "rb") as file:
+ return self._append_object(
+ bucket_name=bucket_name,
+ object_name=object_name,
+ stream=file,
+ length=file_size,
+ chunk_size=cast(int, chunk_size),
+ progress=progress,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
+ )
+ return self._append_object(
+ bucket_name=bucket_name,
+ object_name=object_name,
+ stream=stream if stream else io.BytesIO(cast(bytes, data)),
+ length=length,
+ chunk_size=cast(int, chunk_size),
+ progress=progress,
+ region=region,
+ extra_headers=extra_headers,
+ extra_query_params=extra_query_params,
)
def list_objects(
self,
+ *,
bucket_name: str,
prefix: Optional[str] = None,
recursive: bool = False,
@@ -2137,61 +3518,95 @@ def list_objects(
use_api_v1: bool = False,
use_url_encoding_type: bool = True,
fetch_owner: bool = False,
- extra_headers: Optional[DictType] = None,
- extra_query_params: Optional[DictType] = None,
- ):
+ region: Optional[str] = None,
+ extra_headers: Optional[HTTPHeaderDict] = None,
+ extra_query_params: Optional[HTTPQueryDict] = None,
+ ) -> Iterator[Object]:
"""
- Lists object information of a bucket.
-
- :param bucket_name: Name of the bucket.
- :param prefix: Object name starts with prefix.
- :param recursive: List recursively than directory structure emulation.
- :param start_after: List objects after this key name.
- :param include_user_meta: MinIO specific flag to control to include
- user metadata.
- :param include_version: Flag to control whether include object
- versions.
- :param use_api_v1: Flag to control to use ListObjectV1 S3 API or not.
- :param use_url_encoding_type: Flag to control whether URL encoding type
- to be used or not.
- :param extra_headers: Extra HTTP headers for advanced usage.
- :param extra_query_params: Extra query parameters for advanced usage.
- :return: Iterator of :class:`Object