diff --git a/README.md b/README.md index 2739e6d1..efd3a3af 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ The MinIO Python Client SDK provides high level APIs to access any MinIO Object This Quickstart Guide covers how to install the MinIO client SDK, connect to the object storage service, and create a sample file uploader. The example below uses: -- [Python version 3.7+](https://www.python.org/downloads/) +- [Python version 3.9+](https://www.python.org/downloads/) - The [MinIO `mc` command line tool](https://min.io/docs/minio/linux/reference/minio-mc.html) - The MinIO `play` test server @@ -17,7 +17,7 @@ For a complete list of APIs and examples, see the [Python Client API Reference]( ## Install the MinIO Python SDK -The Python SDK requires Python version 3.7+. +The Python SDK requires Python version 3.9+. You can install the SDK with `pip` or from the [`minio/minio-py` GitHub repository](https://github.com/minio/minio-py): ### Using `pip` @@ -49,7 +49,8 @@ For example: ```py from minio import Minio -client = Minio("play.min.io", +client = Minio( + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -74,7 +75,8 @@ from minio.error import S3Error def main(): # Create a client with the MinIO server playground, its access key # and secret key. - client = Minio("play.min.io", + client = Minio( + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -87,16 +89,18 @@ def main(): destination_file = "my-test-file.txt" # Make the bucket if it doesn't exist. - found = client.bucket_exists(bucket_name) + found = client.bucket_exists(bucket_name=bucket_name) if not found: - client.make_bucket(bucket_name) + client.make_bucket(bucket_name=bucket_name) print("Created bucket", bucket_name) else: print("Bucket", bucket_name, "already exists") # Upload the file, renaming it in the process client.fput_object( - bucket_name, destination_file, source_file, + bucket_name=bucket_name, + object_name=destination_file, + file_path=source_file, ) print( source_file, "successfully uploaded as object", diff --git a/docs/API.md b/docs/API.md index 22e7e936..4023e7d4 100644 --- a/docs/API.md +++ b/docs/API.md @@ -2,22 +2,22 @@ ## 1. Constructor -### Minio(endpoint, access_key=None, secret_key=None, session_token=None, secure=True, region=None, http_client=None, credentials=None) +### Minio(*, endpoint: str, access_key: Optional[str] = None, secret_key: Optional[str] = None, session_token: Optional[str] = None, secure: bool = True, region: Optional[str] = None, http_client: Optional[urllib3.PoolManager] = None, credentials: Optional[Provider] = None, cert_check: bool = True) Initializes a new client object. __Parameters__ -| Param | Type | Description | -|:----------------|:----------------------------------|:---------------------------------------------------------------------------------| -| `endpoint` | _str_ | Hostname of a S3 service. | -| `access_key` | _str_ | (Optional) Access key (aka user ID) of your account in S3 service. | -| `secret_key` | _str_ | (Optional) Secret Key (aka password) of your account in S3 service. | -| `session_token` | _str_ | (Optional) Session token of your account in S3 service. | -| `secure` | _bool_ | (Optional) Flag to indicate to use secure (TLS) connection to S3 service or not. | -| `region` | _str_ | (Optional) Region name of buckets in S3 service. | -| `http_client` | _urllib3.poolmanager.PoolManager_ | (Optional) Customized HTTP client. 
|
-| `credentials`   | _minio.credentials.Provider_       | (Optional) Credentials provider of your account in S3 service.                    |
-| `cert_check`    | _bool_                             | (Optional) Flag to check on server certificate for HTTPS connection.              |
+| Param           | Type                                           | Description                                                              |
+|:----------------|:-----------------------------------------------|:-------------------------------------------------------------------------|
+| `endpoint`      | _str_                                          | Hostname of an S3 service.                                               |
+| `access_key`    | _Optional[str] = None_                         | (Optional) Access key (aka user ID) of your account in S3 service.       |
+| `secret_key`    | _Optional[str] = None_                         | (Optional) Secret Key (aka password) of your account in S3 service.      |
+| `session_token` | _Optional[str] = None_                         | (Optional) Session token of your account in S3 service.                  |
+| `secure`        | _bool = True_                                  | (Optional) Flag to use a secure (TLS) connection to the S3 service.      |
+| `region`        | _Optional[str] = None_                         | (Optional) Region name of buckets in S3 service.                         |
+| `http_client`   | _Optional[urllib3.PoolManager] = None_         | (Optional) Customized HTTP client.                                       |
+| `credentials`   | _Optional[minio.credentials.Provider] = None_  | (Optional) Credentials provider of your account in S3 service.           |
+| `cert_check`    | _bool = True_                                  | (Optional) Flag to verify the server certificate for HTTPS connections.  |

**NOTE on concurrent usage:** `Minio` object is thread safe when using the Python `threading` library. Specifically, it is **NOT** safe to share it between multiple processes, for example when using `multiprocessing.Pool`. The solution is simply to create a new `Minio` object in each process, and not share it between processes.

@@ -28,14 +28,18 @@ __Example__

```py
from minio import Minio

# Create client with anonymous access.
-client = Minio("play.min.io")
+client = Minio(endpoint="play.min.io")

# Create client with access and secret key.
-client = Minio("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY")
+client = Minio(
+    endpoint="s3.amazonaws.com",
+    access_key="ACCESS-KEY",
+    secret_key="SECRET-KEY",
+)

# Create client with access key and secret key with specific region.
client = Minio(
-    "play.minio.io:9000",
+    endpoint="play.minio.io:9000",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
    region="my-region",
@@ -44,7 +48,7 @@ client = Minio(

# Create client with custom HTTP client using proxy server.
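+# Note: a custom `http_client` replaces the SDK's default connection pool,
+# so timeout, retry, and certificate settings must be configured on the
+# urllib3 manager you supply, as this sketch does.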
import urllib3
client = Minio(
-    "SERVER:PORT",
+    endpoint="SERVER:PORT",
    access_key="ACCESS_KEY",
    secret_key="SECRET_KEY",
    secure=True,
@@ -87,7 +91,7 @@ client = Minio(
| [`set_bucket_notification`](#set_bucket_notification) | [`presigned_post_policy`](#presigned_post_policy) |
| [`listen_bucket_notification`](#listen_bucket_notification) | [`get_presigned_url`](#get_presigned_url) |
| [`delete_bucket_encryption`](#delete_bucket_encryption) | [`upload_snowball_objects`](#upload_snowball_objects) |
-| [`get_bucket_encryption`](#get_bucket_encryption) | |
+| [`get_bucket_encryption`](#get_bucket_encryption) | [`prompt_object`](#prompt_object) |
| [`set_bucket_encryption`](#set_bucket_encryption) | |
| [`delete_object_lock_config`](#delete_object_lock_config) | |
| [`get_object_lock_config`](#get_object_lock_config) | |
@@ -97,42 +101,52 @@ client = Minio(

-### make_bucket(bucket_name, location='us-east-1', object_lock=False)
+### make_bucket(self, *, bucket_name: str, location: Optional[str] = None, object_lock: bool = False, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

Create a bucket with region and object lock.

__Parameters__

-| Param         | Type   | Description                                  |
-|---------------|--------|---------------------------------------------|
-| `bucket_name` | _str_  | Name of the bucket.                          |
-| `location`    | _str_  | Region in which the bucket will be created.  |
-| `object_lock` | _bool_ | Flag to set object-lock feature.             |
+| Param                | Type                                             | Description                                  |
+|----------------------|--------------------------------------------------|----------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                          |
+| `location`           | _Optional[str] = None_                           | Region in which the bucket is to be created. |
+| `object_lock`        | _bool = False_                                   | Flag to set object-lock feature.             |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.            |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.   |

__Example__

```py
# Create bucket.
-client.make_bucket("my-bucket")
+client.make_bucket(bucket_name="my-bucket")

# Create bucket on specific region.
-client.make_bucket("my-bucket", "us-west-1")
+client.make_bucket(bucket_name="my-bucket", location="us-west-1")

# Create bucket with object-lock feature on specific region.
-client.make_bucket("my-bucket", "eu-west-2", object_lock=True)
+client.make_bucket(bucket_name="my-bucket", location="eu-west-2", object_lock=True)
```

-### list_buckets()
+### list_buckets(self, *, bucket_region: Optional[str] = None, max_buckets: int = 10000, prefix: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Iterator[Bucket]

List information of all accessible buckets.

+__Parameters__
+
+| Param                | Type                                             | Description                                       |
+|----------------------|--------------------------------------------------|---------------------------------------------------|
+| `bucket_region`      | _Optional[str] = None_                           | Fetch buckets from the specified region.          |
+| `max_buckets`        | _int = 10000_                                    | Maximum number of buckets to fetch.               |
+| `prefix`             | _Optional[str] = None_                           | Fetch buckets whose names start with the prefix.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.                 |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.        |
|
+
-__Parameters__
+__Return Value__

-| Return           |
-|:-----------------|
-| List of _Bucket_ |
+| Return                                   |
+|:----------------------------------------|
+| An iterator of _minio.datatypes.Bucket_ |

__Example__

@@ -144,20 +158,23 @@

-### bucket_exists(bucket_name)
+### bucket_exists(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> bool

Check if a bucket exists.

__Parameters__

-| Param         | Type  | Description         |
-|:--------------|:------|:--------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.           |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.  |

__Example__

```py
-if client.bucket_exists("my-bucket"):
+if client.bucket_exists(bucket_name="my-bucket"):
    print("my-bucket exists")
else:
    print("my-bucket does not exist")
@@ -165,71 +182,76 @@ else:

-### remove_bucket(bucket_name)
+### remove_bucket(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

Remove an empty bucket.

__Parameters__

-| Param         | Type  | Description         |
-|:--------------|:------|:--------------------|
-| `bucket_name` | _str_ | Name of the bucket. |
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.           |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.  |

__Example__

```py
-client.remove_bucket("my-bucket")
+client.remove_bucket(bucket_name="my-bucket")
```

-### list_objects(bucket_name, prefix=None, recursive=False, start_after=None, include_user_meta=False, include_version=False, use_api_v1=False, use_url_encoding_type=True, extra_headers=None, extra_query_params=None)
+### list_objects(self, *, bucket_name: str, prefix: Optional[str] = None, recursive: bool = False, start_after: Optional[str] = None, include_user_meta: bool = False, include_version: bool = False, use_api_v1: bool = False, use_url_encoding_type: bool = True, fetch_owner: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Iterator[Object]

Lists object information of a bucket.

__Parameters__

-| Param                   | Type   | Description                                                   |
-|:------------------------|:-------|:--------------------------------------------------------------|
-| `bucket_name`           | _str_  | Name of the bucket.                                           |
-| `prefix`                | _str_  | Object name starts with prefix.                               |
-| `recursive`             | _bool_ | List recursively than directory structure emulation.          |
-| `start_after`           | _str_  | List objects after this key name.                             |
-| `include_user_meta`     | _bool_ | MinIO specific flag to control to include user metadata.
|
-| `include_version`       | _bool_ | Flag to control whether include object versions.             |
-| `use_api_v1`            | _bool_ | Flag to control to use ListObjectV1 S3 API or not.           |
-| `use_url_encoding_type` | _bool_ | Flag to control whether URL encoding type to be used or not. |
-| `extra_headers`         | _dict_ | Extra HTTP headers for advanced usage.                       |
-| `extra_query_params`    | _dict_ | Extra query parameters for advanced usage.                   |
+| Param                    | Type                                             | Description                                                        |
+|:-------------------------|:-------------------------------------------------|:--------------------------------------------------------------------|
+| `bucket_name`            | _str_                                            | Name of the bucket.                                                 |
+| `prefix`                 | _Optional[str] = None_                           | List objects whose names start with the prefix.                     |
+| `recursive`              | _bool = False_                                   | List recursively rather than emulating a directory structure.       |
+| `start_after`            | _Optional[str] = None_                           | List objects after this key name.                                   |
+| `include_user_meta`      | _bool = False_                                   | MinIO-specific flag to control whether to include user metadata.    |
+| `include_version`        | _bool = False_                                   | Flag to control whether to include object versions.                 |
+| `use_api_v1`             | _bool = False_                                   | Flag to control whether to use the ListObjectsV1 S3 API.            |
+| `use_url_encoding_type`  | _bool = True_                                    | Flag to control whether URL encoding type is used.                  |
+| `fetch_owner`            | _bool = False_                                   | Flag to control whether to fetch owner information.                 |
+| `region`                 | _Optional[str] = None_                           | Region of the bucket to skip auto probing.                          |
+| `extra_headers`          | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.                                   |
+| `extra_query_params`     | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.                          |

__Return Value__

-| Return                  |
-|:------------------------|
-| An iterator of _Object_ |
+| Return                                   |
+|:----------------------------------------|
+| An iterator of _minio.datatypes.Object_ |

__Example__

```py
# List objects information.
-objects = client.list_objects("my-bucket")
+objects = client.list_objects(bucket_name="my-bucket")
for obj in objects:
    print(obj)

# List objects information whose names starts with "my/prefix/".
-objects = client.list_objects("my-bucket", prefix="my/prefix/")
+objects = client.list_objects(bucket_name="my-bucket", prefix="my/prefix/")
for obj in objects:
    print(obj)

# List objects information recursively.
-objects = client.list_objects("my-bucket", recursive=True)
+objects = client.list_objects(bucket_name="my-bucket", recursive=True)
for obj in objects:
    print(obj)

# List objects information recursively whose names starts with
# "my/prefix/".
objects = client.list_objects(
-    "my-bucket", prefix="my/prefix/", recursive=True,
+    bucket_name="my-bucket", prefix="my/prefix/", recursive=True,
)
for obj in objects:
    print(obj)
@@ -237,7 +259,7 @@ for obj in objects:

# List objects information recursively after object name
# "my/prefix/world/1".
objects = client.list_objects(
-    "my-bucket", recursive=True, start_after="my/prefix/world/1",
+    bucket_name="my-bucket", recursive=True, start_after="my/prefix/world/1",
)
for obj in objects:
    print(obj)

-### get_bucket_policy(bucket_name)
+### get_bucket_policy(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str

Get bucket policy configuration of a bucket.

__Parameters__

-| Param           | Type  | Description         |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket.
| +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ @@ -264,21 +289,24 @@ __Return Value__ __Example__ ```py -policy = client.get_bucket_policy("my-bucket") +policy = client.get_bucket_policy(bucket_name="my-bucket") ``` -### set_bucket_policy(bucket_name, policy) +### set_bucket_policy(self, *, bucket_name: str, policy: str | bytes, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set bucket policy configuration to a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``Policy`` | _str_ | Bucket policy configuration as JSON string. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:--------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `policy` | _str \| bytes_ | Bucket policy configuration as JSON string. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ @@ -301,7 +329,7 @@ policy = { }, ], } -client.set_bucket_policy("my-bucket", json.dumps(policy)) +client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy)) # Example anonymous read-write bucket policy. policy = { @@ -331,63 +359,72 @@ policy = { }, ], } -client.set_bucket_policy("my-bucket", json.dumps(policy)) +client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy)) ``` -### delete_bucket_policy(bucket_name) +### delete_bucket_policy(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Delete bucket policy configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. 
| __Example__ ```py -client.delete_bucket_policy("my-bucket") +client.delete_bucket_policy(bucket_name="my-bucket") ``` -### get_bucket_notification(bucket_name) +### get_bucket_notification(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> NotificationConfig Get notification configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Param | -|:-----------------------------| -| _NotificationConfig_ object. | +| Param | +|:------------------------------------------------------| +| _minio.notificationconfig.NotificationConfig_ object. | __Example__ ```py -config = client.get_bucket_notification("my-bucket") +config = client.get_bucket_notification(bucket_name="my-bucket") ``` -### set_bucket_notification(bucket_name, config) +### set_bucket_notification(self, *, bucket_name: str, config: NotificationConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set notification configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:---------------------|:----------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``config`` | _NotificationConfig_ | Notification configuration. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `config` | _minio.notificationconfig.NotificationConfig_ | Notification configuration. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ @@ -395,58 +432,64 @@ __Example__ config = NotificationConfig( queue_config_list=[ QueueConfig( - "QUEUE-ARN-OF-THIS-BUCKET", - ["s3:ObjectCreated:*"], + queue="QUEUE-ARN-OF-THIS-BUCKET", + events=["s3:ObjectCreated:*"], config_id="1", prefix_filter_rule=PrefixFilterRule("abc"), ), ], ) -client.set_bucket_notification("my-bucket", config) +client.set_bucket_notification(bucket_name="my-bucket", config=config) ``` -### delete_bucket_notification(bucket_name) +### delete_bucket_notification(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Delete notification configuration of a bucket. On success, S3 service stops notification of events previously set of the bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. 
|
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.           |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.  |

__Example__

```py
-client.delete_bucket_notification("my-bucket")
+client.delete_bucket_notification(bucket_name="my-bucket")
```

-### listen_bucket_notification(bucket_name, prefix='', suffix='', events=('s3:ObjectCreated:\*', 's3:ObjectRemoved:\*', 's3:ObjectAccessed:\*'))
+### listen_bucket_notification(self, *, bucket_name: str, prefix: str = "", suffix: str = "", events: tuple[str, ...] = ('s3:ObjectCreated:*', 's3:ObjectRemoved:*', 's3:ObjectAccessed:*'), region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> EventIterable

Listen events of object prefix and suffix of a bucket. Caller should iterate returned iterator to read new events.

__Parameters__

-| Param         | Type   | Description                                  |
-|:--------------|:-------|:---------------------------------------------|
-| `bucket_name` | _str_  | Name of the bucket.                          |
-| `prefix`      | _str_  | Listen events of object starts with prefix.  |
-| `suffix`      | _str_  | Listen events of object ends with suffix.    |
-| `events`      | _list_ | Events to listen.                            |
+| Param                | Type                                                                                     | Description                                                     |
+|:---------------------|:------------------------------------------------------------------------------------------|:-----------------------------------------------------------------|
+| `bucket_name`        | _str_                                                                                    | Name of the bucket.                                             |
+| `prefix`             | _str = ""_                                                                               | Listen to events of objects whose names start with the prefix.  |
+| `suffix`             | _str = ""_                                                                               | Listen to events of objects whose names end with the suffix.    |
+| `events`             | _tuple[str, ...] = ('s3:ObjectCreated:*', 's3:ObjectRemoved:*', 's3:ObjectAccessed:*')_  | Events to listen for.                                           |
+| `region`             | _Optional[str] = None_                                                                   | Region of the bucket to skip auto probing.                      |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_                                          | Extra headers for advanced usage.                               |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_                                           | Extra query parameters for advanced usage.                      |

__Return Value__

-| Param                                |
-|:-------------------------------------|
-| Iterator of event records as _dict_  |
+| Return                                                         |
+|:---------------------------------------------------------------|
+| _minio.datatypes.EventIterable_, an iterator of event records  |

```py
with client.listen_bucket_notification(
-    "my-bucket",
+    bucket_name="my-bucket",
    prefix="my-prefix/",
    events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"],
) as events:
@@ -456,169 +499,198 @@ with client.listen_bucket_notification(

-### get_bucket_encryption(bucket_name)
-
+### get_bucket_encryption(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[SSEConfig]
Get encryption configuration of a bucket.

__Parameters__

-| Param           | Type  | Description         |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.
| +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Param | -|:--------------------| -| _SSEConfig_ object. | +| Param | +|:----------------------------------------------| +| _Optional[minio.sseconfig.SSEConfig]_ object. | __Example__ ```py -config = client.get_bucket_encryption("my-bucket") +config = client.get_bucket_encryption(bucket_name="my-bucket") ``` -### set_bucket_encryption(bucket_name, config) +### set_bucket_encryption(self, *, bucket_name: str, config: SSEConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set encryption configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------------|:--------------------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``config`` | _SSEConfig_ | Server-side encryption configuration. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `config` | _minio.sseconfig.SSEConfig_ | Server-side encryption configuration. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py client.set_bucket_encryption( - "my-bucket", SSEConfig(Rule.new_sse_s3_rule()), + bucket_name="my-bucket", config=SSEConfig(Rule.new_sse_s3_rule()), ) ``` -### delete_bucket_encryption(bucket_name) +### delete_bucket_encryption(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Delete encryption configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.delete_bucket_encryption("my-bucket") +client.delete_bucket_encryption(bucket_name="my-bucket") ``` -### get_bucket_versioning(bucket_name) +### get_bucket_versioning(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> VersioningConfig Get versioning configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. 
| +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | + +__Return Value__ + +| Param | +|:--------------------------------------------------| +| _minio.versioningconfig.VersioningConfig_ object. | __Example__ ```py -config = client.get_bucket_versioning("my-bucket") +config = client.get_bucket_versioning(bucket_name="my-bucket") print(config.status) ``` -### set_bucket_versioning(bucket_name, config) +### set_bucket_versioning(self, *, bucket_name: str, config: VersioningConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set versioning configuration to a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:-------------------|:--------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``config`` | _VersioningConfig_ | Versioning configuration. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `config` | _minio.versioningconfig.VersioningConfig_ | Versioning configuration. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.set_bucket_versioning("my-bucket", VersioningConfig(ENABLED)) +client.set_bucket_versioning(bucket_name="my-bucket", config=VersioningConfig(ENABLED)) ``` -### delete_bucket_replication(bucket_name) +### delete_bucket_replication(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Delete replication configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.delete_bucket_replication("my-bucket") +client.delete_bucket_replication(bucket_name="my-bucket") ``` -### get_bucket_replication(bucket_name) +### get_bucket_replication(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[ReplicationConfig] Get replication configuration of a bucket. 
__Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | -| Return | -|:----------------------------------------| -| _ReplicationConfig_ object. | +| Return | +|:--------------------------------------------------------------| +| _Optional[minio.replicationconfig.ReplicationConfig]_ object. | __Example__ ```py -config = client.get_bucket_replication("my-bucket") +config = client.get_bucket_replication(bucket_name="my-bucket") ``` -### set_bucket_replication(bucket_name, config) +### set_bucket_replication(self, *, bucket_name: str, config: ReplicationConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set replication configuration to a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:--------------------|:---------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``config`` | _ReplicationConfig_ | Replication configuration. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `config` | _minio.replicationconfig.ReplicationConfig_ | Replication configuration. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py config = ReplicationConfig( - "REPLACE-WITH-ACTUAL-ROLE", - [ + role="REPLACE-WITH-ACTUAL-ROLE", + rules=[ Rule( - Destination( + destination=Destination( "REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN", ), - ENABLED, + status=ENABLED, delete_marker_replication=DeleteMarkerReplication( DISABLED, ), @@ -633,62 +705,71 @@ config = ReplicationConfig( ), ], ) -client.set_bucket_replication("my-bucket", config) +client.set_bucket_replication(bucket_name="my-bucket", config=config) ``` -### delete_bucket_lifecycle(bucket_name) +### delete_bucket_lifecycle(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Delete lifecycle configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. 
| +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.delete_bucket_lifecycle("my-bucket") +client.delete_bucket_lifecycle(bucket_name="my-bucket") ``` -### get_bucket_lifecycle(bucket_name) +### get_bucket_lifecycle(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[LifecycleConfig] Get lifecycle configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | -| Return | -|:--------------------------| -| _LifecycleConfig_ object. | +| Return | +|:----------------------------------------------------------| +| _Optional[minio.lifecycleconfig.LifecycleConfig]_ object. | __Example__ ```py -config = client.get_bucket_lifecycle("my-bucket") +config = client.get_bucket_lifecycle(bucket_name="my-bucket") ``` -### set_bucket_lifecycle(bucket_name, config) +### set_bucket_lifecycle(self, *, bucket_name: str, config: LifecycleConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set lifecycle configuration to a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------------------|:-------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``config`` | _LifecycleConfig_ | Lifecycle configuration. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `config` | _minio.lifecycleconfig.LifecycleConfig_ | Lifecycle configuration. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ @@ -696,74 +777,83 @@ __Example__ config = LifecycleConfig( [ Rule( - ENABLED, + status=ENABLED, rule_filter=Filter(prefix="documents/"), rule_id="rule1", transition=Transition(days=30, storage_class="GLACIER"), ), Rule( - ENABLED, + status=ENABLED, rule_filter=Filter(prefix="logs/"), rule_id="rule2", expiration=Expiration(days=365), ), ], ) -client.set_bucket_lifecycle("my-bucket", config) +client.set_bucket_lifecycle(bucket_name="my-bucket", config=config) ``` -### delete_bucket_tags(bucket_name) +### delete_bucket_tags(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Delete tags configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. 
|
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.           |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.  |

__Example__

```py
-client.delete_bucket_tags("my-bucket")
+client.delete_bucket_tags(bucket_name="my-bucket")
```

-### get_bucket_tags(bucket_name)
+### get_bucket_tags(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[Tags]

Get tags configuration of a bucket.

__Parameters__

-| Param           | Type  | Description         |
-|:----------------|:------|:--------------------|
-| ``bucket_name`` | _str_ | Name of the bucket. |
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.           |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.  |

-| Return         |
-|:---------------|
-| _Tags_ object. |
+| Return                                       |
+|:---------------------------------------------|
+| _Optional[minio.commonconfig.Tags]_ object.  |

__Example__

```py
-tags = client.get_bucket_tags("my-bucket")
+tags = client.get_bucket_tags(bucket_name="my-bucket")
```

-### set_bucket_tags(bucket_name, tags)
+### set_bucket_tags(self, *, bucket_name: str, tags: Tags, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

Set tags configuration to a bucket.

__Parameters__

-| Param           | Type   | Description         |
-|:----------------|:-------|:--------------------|
-| ``bucket_name`` | _str_  | Name of the bucket. |
-| ``tags``        | _Tags_ | Tags configuration. |
+| Param                | Type                                             | Description                                 |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                         |
+| `tags`               | _minio.commonconfig.Tags_                        | Tags configuration.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.  |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.           |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.  |

__Example__

@@ -771,153 +861,182 @@ __Example__
tags = Tags.new_bucket_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
-client.set_bucket_tags("my-bucket", tags)
+client.set_bucket_tags(bucket_name="my-bucket", tags=tags)
```

-### delete_object_lock_config(bucket_name)
+### delete_object_lock_config(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

Delete object-lock configuration of a bucket.
__Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.delete_object_lock_config("my-bucket") +client.delete_object_lock_config(bucket_name="my-bucket") ``` -### get_object_lock_config(bucket_name) +### get_object_lock_config(self, *, bucket_name: str, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectLockConfig Get object-lock configuration of a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:--------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | -| Return | -|:---------------------------| -| _ObjectLockConfig_ object. | +| Return | +|:--------------------------------------------------| +| _minio.objectlockconfig.ObjectLockConfig_ object. | __Example__ ```py -config = client.get_object_lock_config("my-bucket") +config = client.get_object_lock_config(bucket_name="my-bucket") ``` -### set_object_lock_config(bucket_name, config) +### set_object_lock_config(self, *, bucket_name: str, config: ObjectLockConfig, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set object-lock configuration to a bucket. __Parameters__ -| Param | Type | Description | -|:----------------|:-------------------|:---------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``config`` | _ObjectLockConfig_ | Object-Lock configuration. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `config` | _minio.objectlockconfig.ObjectLockConfig_ | Object-Lock configuration. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py config = ObjectLockConfig(GOVERNANCE, 15, DAYS) -client.set_object_lock_config("my-bucket", config) +client.set_object_lock_config(bucket_name="my-bucket", config=config) ``` ## 3. 
Object operations

-### append_object(bucket_name, object_name, data, length, content_type="application/octet-stream", metadata=None, sse=None, progress=None, part_size=0, num_parallel_uploads=3, tags=None, retention=None, legal_hold=False)
+### append_object(self, *, bucket_name: str, object_name: str, filename: Optional[str | os.PathLike] = None, stream: Optional[BinaryIO] = None, data: Optional[bytes] = None, length: Optional[int] = None, chunk_size: Optional[int] = None, progress: Optional[ProgressType] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

-Appends from a stream to existing object in a bucket.
+Appends data to an existing object in a bucket. Only one of `filename`, `stream`, or `data` must be provided, and `length` must be provided if `data` is supplied.

__Parameters__

-| Param           | Type        | Description                                               |
-|:----------------|:------------|:----------------------------------------------------------|
-| `bucket_name`   | _str_       | Name of the bucket.                                       |
-| `object_name`   | _str_       | Object name in the bucket.                                |
-| `data`          | _object_    | An object having callable read() returning bytes object.  |
-| `length`        | _int_       | Data size; -1 for unknown size and set valid part_size.   |
-| `part_size`     | _int_       | Chunk size.                                               |
-| `progress`      | _threading_ | A progress object.                                        |
-| `extra_headers` | _dict_      | Extra headers.                                            |
+| Param                | Type                                             | Description                                                  |
+|:---------------------|:-------------------------------------------------|:--------------------------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                                          |
+| `object_name`        | _str_                                            | Object name in the bucket.                                   |
+| `filename`           | _Optional[str \| os.PathLike] = None_            | Name of file to append.                                      |
+| `stream`             | _Optional[io.BinaryIO] = None_                   | An object having callable `read()` returning bytes object.   |
+| `data`               | _Optional[bytes] = None_                         | Data in byte array.                                          |
+| `length`             | _Optional[int] = None_                           | Data length of `data` or `stream`.                           |
+| `chunk_size`         | _Optional[int] = None_                           | Chunk size.                                                  |
+| `progress`           | _Optional[minio.helpers.ProgressType] = None_    | A progress object.                                           |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.                   |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.                            |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.                   |

__Return Value__

-| Return                      |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return                                     |
+|:-------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object.  |

__Example__

```py
# Append data.
result = client.append_object(
-    "my-bucket", "my-object", io.BytesIO(b"world"), 5,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=b"world",
+    length=5,
)
print(f"appended {result.object_name} object; etag: {result.etag}")

# Append data in chunks.
-data = urlopen(
+with urlopen(
    "https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.13.12.tar.xz",
-)
-result = client.append_object(
-    "my-bucket", "my-object", data, 148611164, 5*1024*1024,
-)
-print(f"appended {result.object_name} object; etag: {result.etag}")
+) as stream:
+    result = client.append_object(
+        bucket_name="my-bucket",
+        object_name="my-object",
+        stream=stream,
+        length=148611164,
+        chunk_size=5*1024*1024,
+    )
+    print(f"appended {result.object_name} object; etag: {result.etag}")

# Append unknown sized data.
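+# With no `length` given, the stream is read and appended in `chunk_size`
+# pieces until EOF, so the total size need not be known up front.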
-data = urlopen( +with urlopen( "https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.14.3.tar.xz", -) -result = client.append_object( - "my-bucket", "my-object", data, 149426584, 5*1024*1024, -) -print(f"appended {result.object_name} object; etag: {result.etag}") +) as stream: + result = client.append_object( + bucket_name="my-bucket", + object_name="my-object", + stream=stream, + chunk_size=5*1024*1024, + ) + print(f"appended {result.object_name} object; etag: {result.etag}") ``` -### get_object(bucket_name, object_name, offset=0, length=0, request_headers=None, ssec=None, version_id=None, extra_query_params=None) +### get_object(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, ssec: Optional[SseCustomerKey] = None, offset: int = 0, length: Optional[int] = None, match_etag: Optional[str] = None, not_match_etag: Optional[str] = None, modified_since: Optional[datetime] = None, unmodified_since: Optional[datetime] = None, fetch_checksum: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> BaseHTTPResponse Gets data from offset to length of an object. Returned response should be closed after use to release network resources. To reuse the connection, it's required to call `response.release_conn()` explicitly. __Parameters__ -| Param | Type | Description | -|:---------------------|:-----------------|:-----------------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `offset` | _int_ | Start byte position of object data. | -| `length` | _int_ | Number of bytes of object data from offset. | -| `request_headers` | _dict_ | Any additional headers to be added with GET request. | -| `ssec` | _SseCustomerKey_ | Server-side encryption customer key. | -| `version_id` | _str_ | Version-ID of the object. | -| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:--------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version-ID of the object. | +| `ssec` | _Optional[minio.sse.SseCustomerKey] = None_ | Server-side encryption customer key. | +| `offset` | _int = 0_ | Start byte position of object data. | +| `length` | _Optional[int] = None_ | Number of bytes of object data from offset. | +| `match_etag` | _Optional[str] = None_ | Match ETag of the object. | +| `not_match_etag` | _Optional[str] = None_ | None-match ETag of the object. | +| `modified_since` | _Optional[datetime.datetime] = None_ | Modified-since of the object. | +| `unmodified_since` | _Optional[datetime.datetime] = None_ | Unmodified-since of the object. | +| `fetch_checksum` | _bool = False_ | Fetch object checksum. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Return | -|:----------------------------------------| -| _urllib3.response.HTTPResponse_ object. 
| +| Return | +|:-------------------------------------------------------------------------------| +| _urllib3.response.BaseHTTPResponse_ or _urllib3.response.HTTPResponse_ object. | __Example__ ```py # Get data of an object. try: - response = client.get_object("my-bucket", "my-object") + response = client.get_object(bucket_name="my-bucket", object_name="my-object") # Read data from response. finally: response.close() @@ -926,7 +1045,8 @@ finally: # Get data of an object of version-ID. try: response = client.get_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) # Read data from response. @@ -937,7 +1057,10 @@ finally: # Get data of an object from offset and length. try: response = client.get_object( - "my-bucket", "my-object", offset=512, length=1024, + bucket_name="my-bucket", + object_name="my-object", + offset=512, + length=1024, ) # Read data from response. finally: @@ -947,7 +1070,8 @@ finally: # Get data of an SSE-C encrypted object. try: response = client.get_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) # Read data from response. @@ -958,34 +1082,37 @@ finally: -### select_object_content(bucket_name, object_name, request) +### select_object_content(self, *, bucket_name: str, object_name: str, request: SelectRequest, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> SelectObjectReader Select content of an object by SQL expression. __Parameters__ -| Param | Type | Description | -|:--------------|:----------------|:---------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `request` | _SelectRequest_ | Select request. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:--------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `request` | _minio.select.SelectRequest_ | Select request. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Return | -|:-------------------------------------------------------------------------------------| -| A reader contains requested records and progress information as _SelectObjectReader_ | +| Return | +|:------------------------------------------| +| _minio.select.SelectObjectReader_ object. 
| __Example__ ```py with client.select_object_content( - "my-bucket", - "my-object.csv", - SelectRequest( - "select * from S3Object", - CSVInputSerialization(), - CSVOutputSerialization(), + bucket_name="my-bucket", + object_name="my-object.csv", + request=SelectRequest( + expression="select * from S3Object", + input_serialization=CSVInputSerialization(), + output_serialization=CSVOutputSerialization(), request_progress=True, ), ) as result: @@ -996,74 +1123,89 @@ with client.select_object_content( -### fget_object(bucket_name, object_name, file_path, request_headers=None, ssec=None, version_id=None, extra_query_params=None, tmp_file_path=None) +### fget_object(self, *, bucket_name: str, object_name: str, file_path: str, match_etag: Optional[str] = None, not_match_etag: Optional[str] = None, modified_since: Optional[datetime] = None, unmodified_since: Optional[datetime] = None, fetch_checksum: bool = False, ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, tmp_file_path: Optional[str] = None, progress: Optional[ProgressType] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) + Downloads data of an object to file. __Parameters__ -| Param | Type | Description | -|:---------------------|:-----------------|:-----------------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `file_path` | _str_ | Name of file to download. | -| `request_headers` | _dict_ | Any additional headers to be added with GET request. | -| `ssec` | _SseCustomerKey_ | Server-side encryption customer key. | -| `version_id` | _str_ | Version-ID of the object. | -| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. | -| `tmp_file_path` | _str_ | Path to a temporary file. | - -__Return Value__ - -| Return | -|:-------------------------------| -| Object information as _Object_ | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:--------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `file_path` | _str_ | Name of file to download. | +| `version_id` | _Optional[str] = None_ | Version-ID of the object. | +| `ssec` | _Optional[minio.sse.SseCustomerKey] = None_ | Server-side encryption customer key. | +| `offset` | _int = 0_ | Start byte position of object data. | +| `length` | _Optional[int] = None_ | Number of bytes of object data from offset. | +| `tmp_file_path` | _Optional[str] = None_ | Path to a temporary file. | +| `progress` | _Optional[minio.helpers.ProgressType] = None_ | A progress object. | +| `match_etag` | _Optional[str] = None_ | Match ETag of the object. | +| `not_match_etag` | _Optional[str] = None_ | None-match ETag of the object. | +| `modified_since` | _Optional[datetime.datetime] = None_ | Modified-since of the object. | +| `unmodified_since` | _Optional[datetime.datetime] = None_ | Unmodified-since of the object. | +| `fetch_checksum` | _bool = False_ | Fetch object checksum. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py # Download data of an object. 
-client.fget_object("my-bucket", "my-object", "my-filename") +client.fget_object( + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", +) # Download data of an object of version-ID. client.fget_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) # Download data of an SSE-C encrypted object. client.fget_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) ``` -### copy_object(bucket_name, object_name, source, sse=None, metadata=None, tags=None, retention=None, legal_hold=False, metadata_directive=None, tagging_directive=None) +### copy_object(self, *, bucket_name: str, object_name: str, source: CopySource, sse: Optional[Sse] = None, user_metadata: Optional[HTTPHeaderDict] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, metadata_directive: Optional[str] = None, tagging_directive: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult Create an object by server-side copying data from another object. In this API maximum supported source object size is 5GiB. __Parameters__ -| Param | Type | Description | -|:---------------------|:-------------|:----------------------------------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `source` | _CopySource_ | Source object information. | -| `sse` | _Sse_ | Server-side encryption of destination object. | -| `metadata` | _dict_ | Any user-defined metadata to be copied along with destination object. | -| `tags` | _Tags_ | Tags for destination object. | -| `retention` | _Retention_ | Retention configuration. | -| `legal_hold` | _bool_ | Flag to set legal hold for destination object. | -| `metadata_directive` | _str_ | Directive used to handle user metadata for destination object. | -| `tagging_directive` | _str_ | Directive used to handle tags for destination object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:----------------------------------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `source` | _minio.commonconfig.CopySource_ | Source object information. | +| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption of destination object. | +| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Any user-defined metadata to be copied along with destination object. | +| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for destination object. | +| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. | +| `legal_hold` | _bool = False_ | Flag to set legal hold for destination object. | +| `metadata_directive` | _Optional[str] = None_ | Directive used to handle user metadata for destination object. | +| `tagging_directive` | _Optional[str] = None_ | Directive used to handle tags for destination object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. 
|
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage.                                      |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_  | Extra query parameters for advanced usage.                             |

__Return Value__

-| Return                      |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return                                     |
+|:-------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |

__Example__

@@ -1073,31 +1215,37 @@ from minio.commonconfig import REPLACE, CopySource

# copy an object from a bucket to another.
result = client.copy_object(
-    "my-bucket",
-    "my-object",
-    CopySource("my-sourcebucket", "my-sourceobject"),
+    bucket_name="my-bucket",
+    object_name="my-object",
+    source=CopySource(
+        bucket_name="my-sourcebucket",
+        object_name="my-sourceobject",
+    ),
)
print(result.object_name, result.version_id)

# copy an object with condition.
result = client.copy_object(
-    "my-bucket",
-    "my-object",
+    bucket_name="my-bucket",
+    object_name="my-object",
-    CopySource(
-        "my-sourcebucket",
-        "my-sourceobject",
+    source=CopySource(
+        bucket_name="my-sourcebucket",
+        object_name="my-sourceobject",
        modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc),
    ),
)
print(result.object_name, result.version_id)

# copy an object from a bucket with replacing metadata.
-metadata = {"test_meta_key": "test_meta_value"}
+user_metadata = {"test_meta_key": "test_meta_value"}
result = client.copy_object(
-    "my-bucket",
-    "my-object",
-    CopySource("my-sourcebucket", "my-sourceobject"),
-    metadata=metadata,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    source=CopySource(
+        bucket_name="my-sourcebucket",
+        object_name="my-sourceobject",
+    ),
+    user_metadata=user_metadata,
    metadata_directive=REPLACE,
)
print(result.object_name, result.version_id)
@@ -1105,29 +1253,32 @@ print(result.object_name, result.version_id)



-### compose_object(bucket_name, object_name, sources, sse=None, metadata=None, tags=None, retention=None, legal_hold=False)
+### compose_object(self, *, bucket_name: str, object_name: str, sources: list[ComposeSource], sse: Optional[Sse] = None, user_metadata: Optional[HTTPHeaderDict] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult

Create an object by combining data from different source objects using server-side copy.

__Parameters__

-| Param         | Type        | Description                                                            |
-|:--------------|:------------|:----------------------------------------------------------------------|
-| `bucket_name` | _str_       | Name of the bucket.                                                    |
-| `object_name` | _str_       | Object name in the bucket.                                             |
-| `sources`     | _list_      | List of _ComposeSource_ object.                                        |
-| `sse`         | _Sse_       | Server-side encryption of destination object.                          |
-| `metadata`    | _dict_      | Any user-defined metadata to be copied along with destination object.  |
-| `tags`        | _Tags_      | Tags for destination object.                                           |
-| `retention`   | _Retention_ | Retention configuration.                                               |
-| `legal_hold`  | _bool_      | Flag to set legal hold for destination object.                         |
+| Param                | Type                                             | Description                                                            |
+|:---------------------|:-------------------------------------------------|:-----------------------------------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                                                    |
+| `object_name`        | _str_                                            | Object name in the bucket.                                             |
+| `sources`            | _list[minio.commonconfig.ComposeSource]_         | List of _ComposeSource_ objects.                                       |
+| `sse`                | _Optional[minio.sse.Sse] = None_                 | Server-side encryption of destination object.                          |
+| `user_metadata`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Any user-defined metadata to be copied along with destination object.  |
+| `tags`               | _Optional[minio.commonconfig.Tags] = None_       | Tags for destination object.                                           |
+| `retention`          | _Optional[minio.retention.Retention] = None_     | Retention configuration.                                               |
+| `legal_hold`         | _bool = False_                                   | Flag to set legal hold for destination object.                         |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.                             |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.                                      |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.                             |

__Return Value__

-| Return                      |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return                                     |
+|:-------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |

__Example__

@@ -1136,139 +1287,178 @@ from minio.commonconfig import ComposeSource
from minio.sse import SseS3

sources = [
-    ComposeSource("my-job-bucket", "my-object-part-one"),
-    ComposeSource("my-job-bucket", "my-object-part-two"),
-    ComposeSource("my-job-bucket", "my-object-part-three"),
+    ComposeSource(
+        bucket_name="my-job-bucket",
+        object_name="my-object-part-one",
+    ),
+    ComposeSource(
+        bucket_name="my-job-bucket",
+        object_name="my-object-part-two",
+    ),
+    ComposeSource(
+        bucket_name="my-job-bucket",
+        object_name="my-object-part-three",
+    ),
]

# Create my-bucket/my-object by combining source object
# list.
-result = client.compose_object("my-bucket", "my-object", sources)
+result = client.compose_object(
+    bucket_name="my-bucket",
+    object_name="my-object",
+    sources=sources,
+)
print(result.object_name, result.version_id)

# Create my-bucket/my-object with user metadata by combining
# source object list.
result = client.compose_object(
-    "my-bucket",
-    "my-object",
-    sources,
-    metadata={"test_meta_key": "test_meta_value"},
+    bucket_name="my-bucket",
+    object_name="my-object",
+    sources=sources,
+    user_metadata={"test_meta_key": "test_meta_value"},
)
print(result.object_name, result.version_id)

# Create my-bucket/my-object with user metadata and
# server-side encryption by combining source object list.
-client.compose_object("my-bucket", "my-object", sources, sse=SseS3())
+result = client.compose_object(
+    bucket_name="my-bucket",
+    object_name="my-object",
+    sources=sources,
+    sse=SseS3(),
+)
print(result.object_name, result.version_id)
```



-### put_object(bucket_name, object_name, data, length, content_type="application/octet-stream", metadata=None, sse=None, progress=None, part_size=0, num_parallel_uploads=3, tags=None, retention=None, legal_hold=False)
+### put_object(self, *, bucket_name: str, object_name: str, data: BinaryIO, length: int, content_type: str = "application/octet-stream", headers: Optional[HTTPHeaderDict] = None, user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, progress: Optional[ProgressType] = None, part_size: int = 0, checksum: Optional[Algorithm] = None, num_parallel_uploads: int = 3, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult

Uploads data from a stream to an object in a bucket.
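Any binary stream with a callable `read()` returning bytes can serve as `data`; a plain file handle works directly. A minimal sketch under the keyword-only signature above (bucket, object, and file names are illustrative, and `client` is assumed to be constructed as in the earlier examples):

```py
import os

# Stream a local file without buffering it fully in memory;
# the exact length is taken from the filesystem.
size = os.stat("my-filename").st_size
with open("my-filename", "rb") as data:
    result = client.put_object(
        bucket_name="my-bucket",
        object_name="my-object",
        data=data,
        length=size,
    )
print(f"created {result.object_name} object; etag: {result.etag}")
```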
__Parameters__

-| Param          | Type        | Description                                                          |
-|:---------------|:------------|:---------------------------------------------------------------------|
-| `bucket_name`  | _str_       | Name of the bucket.                                                  |
-| `object_name`  | _str_       | Object name in the bucket.                                           |
-| `data`         | _object_    | An object having callable read() returning bytes object.             |
-| `length`       | _int_       | Data size; -1 for unknown size and set valid part_size.              |
-| `content_type` | _str_       | Content type of the object.                                          |
-| `metadata`     | _dict_      | Any additional metadata to be uploaded along with your PUT request.  |
-| `sse`          | _Sse_       | Server-side encryption.                                              |
-| `progress`     | _threading_ | A progress object.                                                   |
-| `part_size`    | _int_       | Multipart part size.                                                 |
-| `tags`         | _Tags_      | Tags for the object.                                                 |
-| `retention`    | _Retention_ | Retention configuration.                                             |
-| `legal_hold`   | _bool_      | Flag to set legal hold for the object.                               |
+| Param                  | Type                                             | Description                                                 |
+|:-----------------------|:-------------------------------------------------|:-------------------------------------------------------------|
+| `bucket_name`          | _str_                                            | Name of the bucket.                                         |
+| `object_name`          | _str_                                            | Object name in the bucket.                                  |
+| `data`                 | _typing.BinaryIO_                                | An object with a callable read() returning bytes.           |
+| `length`               | _int_                                            | Data size; pass -1 for unknown size with a valid `part_size`. |
+| `content_type`         | _str = "application/octet-stream"_               | Content type of the object.                                 |
+| `headers`              | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Additional headers.                                         |
+| `user_metadata`        | _Optional[minio.helpers.HTTPHeaderDict] = None_  | User metadata of the object.                                |
+| `sse`                  | _Optional[minio.sse.Sse] = None_                 | Server-side encryption.                                     |
+| `progress`             | _Optional[minio.helpers.ProgressType] = None_    | A progress object.                                          |
+| `part_size`            | _int = 0_                                        | Multipart part size.                                        |
+| `checksum`             | _Optional[minio.checksum.Algorithm] = None_      | Algorithm for checksum computation.                         |
+| `num_parallel_uploads` | _int = 3_                                        | Number of parallel uploads.                                 |
+| `tags`                 | _Optional[minio.commonconfig.Tags] = None_       | Tags for the object.                                        |
+| `retention`            | _Optional[minio.retention.Retention] = None_     | Retention configuration.                                    |
+| `legal_hold`           | _bool = False_                                   | Flag to set legal hold for the object.                      |
+| `region`               | _Optional[str] = None_                           | Region of the bucket to skip auto probing.                  |
+| `extra_headers`        | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.                           |
+| `extra_query_params`   | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.                  |

__Return Value__

-| Return                      |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return                                     |
+|:-------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |

__Example__

```py
# Upload data.
result = client.put_object(
-    "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=io.BytesIO(b"hello"),
+    length=5,
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload unknown sized data.
-data = urlopen(
+with urlopen(
    "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.81.tar.xz",
-)
-result = client.put_object(
-    "my-bucket", "my-object", data, length=-1, part_size=10*1024*1024,
-)
-print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
-)
+) as data:
+    result = client.put_object(
+        bucket_name="my-bucket",
+        object_name="my-object",
+        data=data,
+        length=-1,
+        part_size=10*1024*1024,
+    )
+    print(
+        f"created {result.object_name} object; etag: {result.etag}, "
+        f"version-id: {result.version_id}",
+    )

# Upload data with content-type.
result = client.put_object(
-    "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=io.BytesIO(b"hello"),
+    length=5,
    content_type="application/csv",
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

-# Upload data with metadata.
+# Upload data with user metadata.
result = client.put_object(
-    "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
-    metadata={"My-Project": "one"},
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=io.BytesIO(b"hello"),
+    length=5,
+    user_metadata={"My-Project": "one"},
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with customer key type of server-side encryption.
result = client.put_object(
-    "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=io.BytesIO(b"hello"),
+    length=5,
    sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with KMS type of server-side encryption.
result = client.put_object(
-    "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=io.BytesIO(b"hello"),
+    length=5,
    sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}),
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with S3 type of server-side encryption.
result = client.put_object(
-    "my-bucket", "my-object", io.BytesIO(b"hello"), 5,
+    bucket_name="my-bucket",
+    object_name="my-object",
+    data=io.BytesIO(b"hello"),
+    length=5,
    sse=SseS3(),
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with tags, retention and legal-hold.
@@ -1278,121 +1468,151 @@ date = datetime.utcnow().replace( tags = Tags(for_object=True) tags["User"] = "jsmith" result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, tags=tags, retention=Retention(GOVERNANCE, date), legal_hold=True, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with progress bar. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, progress=Progress(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) ``` -### fput_object(bucket_name, object_name, file_path, content_type="application/octet-stream", metadata=None, sse=None, progress=None, part_size=0, num_parallel_uploads=3, tags=None, retention=None, legal_hold=False) +### fput_object(self, *, bucket_name: str, object_name: str, file_path: str, content_type: str = "application/octet-stream", headers: Optional[HTTPHeaderDict] = None, user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, progress: Optional[ProgressType] = None, part_size: int = 0, checksum: Optional[Algorithm] = None, num_parallel_uploads: int = 3, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult Uploads data from a file to an object in a bucket. -| Param | Type | Description | -|:---------------|:------------|:--------------------------------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `file_path` | _str_ | Name of file to upload. | -| `content_type` | _str_ | Content type of the object. | -| `metadata` | _dict_ | Any additional metadata to be uploaded along with your PUT request. | -| `sse` | _Sse_ | Server-side encryption. | -| `progress` | _threading_ | A progress object. | -| `part_size` | _int_ | Multipart part size. | -| `tags` | _Tags_ | Tags for the object. | -| `retention` | _Retention_ | Retention configuration. | -| `legal_hold` | _bool_ | Flag to set legal hold for the object. | +__Parameters__ + +| Param | Type | Description | +|:-----------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `file_path` | _str_ | Name of file to upload. | +| `content_type` | _str = "application/octet-stream"_ | Content type of the object. | +| `headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Additional headers. | +| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | User metadata of the object. | +| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption. | +| `progress` | _Optional[minio.helpers.ProgressType] = None_ | A progress object. | +| `part_size` | _int = 0_ | Multipart part size. 
|
+| `checksum`             | _Optional[minio.checksum.Algorithm] = None_     | Algorithm for checksum computation.        |
+| `num_parallel_uploads` | _int = 3_                                        | Number of parallel uploads.                |
+| `tags`                 | _Optional[minio.commonconfig.Tags] = None_      | Tags for the object.                       |
+| `retention`            | _Optional[minio.retention.Retention] = None_    | Retention configuration.                   |
+| `legal_hold`           | _bool = False_                                   | Flag to set legal hold for the object.     |
+| `region`               | _Optional[str] = None_                           | Region of the bucket to skip auto probing. |
+| `extra_headers`        | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage.          |
+| `extra_query_params`   | _Optional[minio.helpers.HTTPQueryDict] = None_  | Extra query parameters for advanced usage. |

__Return Value__

-| Return                      |
-|:----------------------------|
-| _ObjectWriteResult_ object. |
+| Return                                     |
+|:-------------------------------------------|
+| _minio.helpers.ObjectWriteResult_ object. |

__Example__

```py
# Upload data.
result = client.fput_object(
-    "my-bucket", "my-object", "my-filename",
+    bucket_name="my-bucket",
+    object_name="my-object",
+    file_path="my-filename",
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
+)
+
+# Upload data with part size.
+result = client.fput_object(
+    bucket_name="my-bucket",
+    object_name="my-object",
+    file_path="my-filename",
+    part_size=10*1024*1024,
+)
+print(
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with content-type.
result = client.fput_object(
-    "my-bucket", "my-object", "my-filename",
+    bucket_name="my-bucket",
+    object_name="my-object",
+    file_path="my-filename",
    content_type="application/csv",
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

-# Upload data with metadata.
+# Upload data with user metadata.
result = client.fput_object(
-    "my-bucket", "my-object", "my-filename",
-    metadata={"My-Project": "one"},
+    bucket_name="my-bucket",
+    object_name="my-object",
+    file_path="my-filename",
+    user_metadata={"My-Project": "one"},
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with customer key type of server-side encryption.
result = client.fput_object(
-    "my-bucket", "my-object", "my-filename",
+    bucket_name="my-bucket",
+    object_name="my-object",
+    file_path="my-filename",
    sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
)
print(
-    "created {0} object; etag: {1}, version-id: {2}".format(
-        result.object_name, result.etag, result.version_id,
-    ),
+    f"created {result.object_name} object; etag: {result.etag}, "
+    f"version-id: {result.version_id}",
)

# Upload data with KMS type of server-side encryption.
result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with S3 type of server-side encryption. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", sse=SseS3(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with tags, retention and legal-hold. @@ -1402,143 +1622,159 @@ date = datetime.utcnow().replace( tags = Tags(for_object=True) tags["User"] = "jsmith" result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", tags=tags, retention=Retention(GOVERNANCE, date), legal_hold=True, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with progress bar. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", progress=Progress(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) ``` -### stat_object(bucket_name, object_name, ssec=None, version_id=None, extra_headers=None, extra_query_params=None) +### stat_object(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, ssec: Optional[SseCustomerKey] = None, offset: int = 0, length: Optional[int] = None, match_etag: Optional[str] = None, not_match_etag: Optional[str] = None, modified_since: Optional[datetime] = None, unmodified_since: Optional[datetime] = None, fetch_checksum: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Object: Get object information and metadata of an object. __Parameters__ -| Param | Type | Description | -|:---------------------|:-----------------|:-------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `ssec` | _SseCustomerKey_ | Server-side encryption customer key. | -| `version_id` | _str_ | Version ID of the object. | -| `extra_headers` | _dict_ | Extra HTTP headers for advanced usage. | -| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:--------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. 
| +| `ssec` | _Optional[minio.sse.SseCustomerKey] = None_ | Server-side encryption customer key. | +| `offset` | _int = 0_ | Start byte position of object data. | +| `length` | _Optional[int] = None_ | Number of bytes of object data from offset. | +| `match_etag` | _Optional[str] = None_ | Match ETag of the object. | +| `not_match_etag` | _Optional[str] = None_ | None-match ETag of the object. | +| `modified_since` | _Optional[datetime.datetime] = None_ | Modified-since of the object. | +| `unmodified_since` | _Optional[datetime.datetime] = None_ | Unmodified-since of the object. | +| `fetch_checksum` | _bool = False_ | Fetch object checksum. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Return | -|:-------------------------------| -| Object information as _Object_ | +| Return | +|:---------------------------------| +| _minio.datatypes.Object_ object. | __Example__ ```py # Get object information. -result = client.stat_object("my-bucket", "my-object") -print( - "last-modified: {0}, size: {1}".format( - result.last_modified, result.size, - ), +result = client.stat_object( + bucket_name="my-bucket", + object_name="my-object", ) +print(f"last-modified: {result.last_modified}, size: {result.size}") # Get object information of version-ID. result = client.stat_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) -print( - "last-modified: {0}, size: {1}".format( - result.last_modified, result.size, - ), -) +print(f"last-modified: {result.last_modified}, size: {result.size}") # Get SSE-C encrypted object information. result = client.stat_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) -print( - "last-modified: {0}, size: {1}".format( - result.last_modified, result.size, - ), -) +print(f"last-modified: {result.last_modified}, size: {result.size}") ``` -### remove_object(bucket_name, object_name, version_id=None) +### remove_object(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Remove an object. __Parameters__ -| Param | Type | Description | -|:--------------|:------|:---------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `version_id` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py # Remove object. 
-client.remove_object("my-bucket", "my-object")
+client.remove_object(
+    bucket_name="my-bucket",
+    object_name="my-object",
+)

# Remove version of an object.
client.remove_object(
-    "my-bucket", "my-object",
+    bucket_name="my-bucket",
+    object_name="my-object",
    version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
```



-### remove_objects(bucket_name, delete_object_list, bypass_governance_mode=False)
+### remove_objects(self, *, bucket_name: str, delete_object_list: Iterable[DeleteObject], bypass_governance_mode: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Iterator[DeleteError]

Remove multiple objects.

__Parameters__

-| Param                    | Type       | Description                                                          |
-|:-------------------------|:-----------|:--------------------------------------------------------------------|
-| `bucket_name`            | _str_      | Name of the bucket.                                                  |
-| `delete_object_list`     | _iterable_ | An iterable containing :class:`DeleteObject ` object.                |
-| `bypass_governance_mode` | _bool_     | Bypass Governance retention mode.                                    |
+| Param                    | Type                                             | Description                                |
+|:-------------------------|:-------------------------------------------------|:-------------------------------------------|
+| `bucket_name`            | _str_                                            | Name of the bucket.                        |
+| `delete_object_list`     | _Iterable[minio.deleteobjects.DeleteObject]_     | An iterable of _DeleteObject_ objects.     |
+| `bypass_governance_mode` | _bool = False_                                   | Bypass Governance retention mode.          |
+| `region`                 | _Optional[str] = None_                           | Region of the bucket to skip auto probing. |
+| `extra_headers`          | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.          |
+| `extra_query_params`     | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage. |

__Return Value__

-| Return                                                            |
-|:-------------------------------------------------------------------|
-| An iterator containing :class:`DeleteError ` object                |
+| Return                                               |
+|:------------------------------------------------------|
+| _Iterator[minio.deleteobjects.DeleteError]_ object.   |

__Example__

```py
# Remove list of objects.
errors = client.remove_objects(
-    "my-bucket",
-    [
-        DeleteObject("my-object1"),
-        DeleteObject("my-object2"),
-        DeleteObject("my-object3", "13f88b18-8dcd-4c83-88f2-8631fdb6250c"),
+    bucket_name="my-bucket",
+    delete_object_list=[
+        DeleteObject(name="my-object1"),
+        DeleteObject(name="my-object2"),
+        DeleteObject(
+            name="my-object3",
+            version_id="13f88b18-8dcd-4c83-88f2-8631fdb6250c",
+        ),
    ],
)
for error in errors:
@@ -1547,71 +1783,87 @@ for error in errors:

# Remove a prefix recursively.
delete_object_list = map(
-    lambda x: DeleteObject(x.object_name),
-    client.list_objects("my-bucket", "my/prefix/", recursive=True),
+    lambda x: DeleteObject(name=x.object_name),
+    client.list_objects(
+        bucket_name="my-bucket",
+        prefix="my/prefix/",
+        recursive=True,
+    ),
+)
+errors = client.remove_objects(
+    bucket_name="my-bucket",
+    delete_object_list=delete_object_list,
)
-errors = client.remove_objects("my-bucket", delete_object_list)
for error in errors:
    print("error occurred when deleting object", error)
```



-### delete_object_tags(bucket_name, object_name, version_id=None)
+### delete_object_tags(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

Delete tags configuration of an object.
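Because the method accepts `version_id`, tags of a single object version can be removed on a versioned bucket; a minimal sketch (the version ID shown is illustrative):

```py
# Delete tags of one specific version of the object.
client.delete_object_tags(
    bucket_name="my-bucket",
    object_name="my-object",
    version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
)
```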
__Parameters__ -| Param | Type | Description | -|:----------------|:------|:---------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``object_name`` | _str_ | Object name in the bucket. | -| ``version_id`` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.delete_object_tags("my-bucket", "my-object") +client.delete_object_tags(bucket_name="my-bucket", object_name="my-object") ``` -### get_object_tags(bucket_name, object_name, version_id=None) +### get_object_tags(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[Tags] Get tags configuration of an object. __Parameters__ -| Param | Type | Description | -|:----------------|:------|:---------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``object_name`` | _str_ | Object name in the bucket. | -| ``version_id`` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | -| Return | -|:---------------| -| _Tags_ object. | +| Return | +|:--------------------------------------------| +| _Optional[minio.commonconfig.Tags]_ object. | __Example__ ```py -tags = client.get_object_tags("my-bucket", "my-object") +tags = client.get_object_tags(bucket_name="my-bucket", object_name="my-object") ``` -### set_object_tags(bucket_name, object_name, tags, version_id=None) +### set_object_tags(self, *, bucket_name: str, object_name: str, tags: Tags, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Set tags configuration to an object. __Parameters__ -| Param | Type | Description | -|:----------------|:-------|:---------------------------| -| ``bucket_name`` | _str_ | Name of the bucket. | -| ``object_name`` | _str_ | Object name in the bucket. | -| ``tags`` | _Tags_ | Tags configuration. | -| ``version_id`` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. 
| +| `object_name` | _str_ | Object name in the bucket. | +| `tags` | _minio.commonconfig.Tags_ | Tags configuration. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ @@ -1619,67 +1871,79 @@ __Example__ tags = Tags.new_object_tags() tags["Project"] = "Project One" tags["User"] = "jsmith" -client.set_object_tags("my-bucket", "my-object", tags) +client.set_object_tags(bucket_name="my-bucket", object_name="my-object", tags=tags) ``` -### enable_object_legal_hold(bucket_name, object_name, version_id=None) +### enable_object_legal_hold(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Enable legal hold on an object. __Parameters__ -| Param | Type | Description | -|:--------------|:------|:---------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `version_id` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -client.enable_object_legal_hold("my-bucket", "my-object") +client.enable_object_legal_hold(bucket_name="my-bucket", object_name="my-object") ``` -### disable_object_legal_hold(bucket_name, object_name, version_id=None) +### disable_object_legal_hold(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) Disable legal hold on an object. __Parameters__ -| Param | Type | Description | -|:--------------|:------|:---------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `version_id` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. 
| __Example__ ```py -client.disable_object_legal_hold("my-bucket", "my-object") +client.disable_object_legal_hold(bucket_name="my-bucket", object_name="my-object") ``` -### is_object_legal_hold_enabled(bucket_name, object_name, version_id=None) +### is_object_legal_hold_enabled(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> bool Returns true if legal hold is enabled on an object. __Parameters__ -| Param | Type | Description | -|:--------------|:------|:---------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `version_id` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Example__ ```py -if client.is_object_legal_hold_enabled("my-bucket", "my-object"): +if client.is_object_legal_hold_enabled( + bucket_name="my-bucket", + object_name="my-object", +): print("legal hold is enabled on my-object") else: print("legal hold is not enabled on my-object") @@ -1687,70 +1951,127 @@ else: -### get_object_retention(bucket_name, object_name, version_id=None) +### get_object_retention(self, *, bucket_name: str, object_name: str, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> Optional[Retention] Get retention information of an object. __Parameters__ -| Param | Type | Description | -|:---------------------|:-----------------|:-------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `version_id` | _str_ | Version ID of the object. | +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Return | -|:-------------------| -| _Retention_ object | +| Return | +|:----------------------------------------------| +| _Optional[minio.retention.Retention]_ object. 
|

__Example__

```py
-config = client.get_object_retention("my-bucket", "my-object")
+config = client.get_object_retention(
+    bucket_name="my-bucket",
+    object_name="my-object",
+)
```



-### set_object_retention(bucket_name, object_name, config, version_id=None)
+### set_object_retention(self, *, bucket_name: str, object_name: str, config: Retention, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None)

-Set retention information to an object.
+Set retention information on an object.

__Parameters__

-| Param         | Type        | Description                |
-|:--------------|:------------|:---------------------------|
-| `bucket_name` | _str_       | Name of the bucket.        |
-| `object_name` | _str_       | Object name in the bucket. |
-| `config`      | _Retention_ | Retention configuration.   |
-| `version_id`  | _str_       | Version ID of the object.  |
+| Param                | Type                                             | Description                                |
+|:---------------------|:-------------------------------------------------|:-------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                        |
+| `object_name`        | _str_                                            | Object name in the bucket.                 |
+| `config`             | _minio.retention.Retention_                      | Retention configuration.                   |
+| `version_id`         | _Optional[str] = None_                           | Version ID of the object.                  |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing. |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.          |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage. |

__Example__

```py
config = Retention(GOVERNANCE, datetime.utcnow() + timedelta(days=10))
-client.set_object_retention("my-bucket", "my-object", config)
+client.set_object_retention(
+    bucket_name="my-bucket",
+    object_name="my-object",
+    config=config,
+)
+```
+
+
+
+### prompt_object(self, *, bucket_name: str, object_name: str, prompt: str, lambda_arn: Optional[str] = None, ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None, **kwargs: Optional[Any]) -> BaseHTTPResponse
+
+Query the content of an object using a natural language prompt.
+
+__Parameters__
+
+| Param                | Type                                             | Description                                                              |
+|:---------------------|:--------------------------------------------------|:--------------------------------------------------------------------------|
+| `bucket_name`        | _str_                                            | Name of the bucket.                                                      |
+| `object_name`        | _str_                                            | Object name in the bucket.                                               |
+| `prompt`             | _str_                                            | Natural language prompt to interact with the object using the AI model.  |
+| `lambda_arn`         | _Optional[str] = None_                           | AWS Lambda ARN to use for processing the prompt.                         |
+| `ssec`               | _Optional[minio.sse.SseCustomerKey] = None_      | Server-side encryption customer key.                                     |
+| `version_id`         | _Optional[str] = None_                           | Version ID of the object.                                                |
+| `region`             | _Optional[str] = None_                           | Region of the bucket to skip auto probing.                               |
+| `extra_headers`      | _Optional[minio.helpers.HTTPHeaderDict] = None_  | Extra headers for advanced usage.                                        |
+| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_   | Extra query parameters for advanced usage.                               |
+| `**kwargs`           | _Optional[Any]_                                  | Additional parameters for advanced usage.                                |
+
+__Return Value__
+
+| Return                                                                          |
+|:--------------------------------------------------------------------------------|
+| _urllib3.response.BaseHTTPResponse_ or _urllib3.response.HTTPResponse_ object.
| + +__Example__ + +```py +response = None +try: + response = client.prompt_object( + bucket_name="my-bucket", + object_name="my-object", + prompt="Describe the object for me", + ) + # Read data from response +finally: + if response: + response.close() + response.release_conn() ``` -### presigned_get_object(bucket_name, object_name, expires=timedelta(days=7), response_headers=None, request_date=None, version_id=None, extra_query_params=None) +### presigned_get_object(self, *, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), request_date: Optional[datetime] = None, version_id: Optional[str] = None, region: Optional[str] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str Get presigned URL of an object to download its data with expiry time and custom request parameters. __Parameters__ -| Param | Type | Description | -|:---------------------|:---------------------|:---------------------------------------------------------------------------------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `expires` | _datetime.timedelta_ | Expiry in seconds; defaults to 7 days. | -| `response_headers` | _dict_ | Optional response_headers argument to specify response fields like date, size, type of file, data about server, etc. | -| `request_date` | _datetime.datetime_ | Optional request_date argument to specify a different request date. Default is current date. | -| `version_id` | _str_ | Version ID of the object. | -| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. | +| Param | Type | Description | +|:---------------------|:--------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `expires` | _datetime.timedelta = datetime.timedelta(days=7)_ | Expiry in seconds. | +| `request_date` | _Optional[datetime.datetime] = None_ | Request time instead of current time. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ @@ -1763,30 +2084,37 @@ __Example__ ```py # Get presigned URL string to download 'my-object' in # 'my-bucket' with default expiry (i.e. 7 days). -url = client.presigned_get_object("my-bucket", "my-object") +url = client.presigned_get_object( + bucket_name="my-bucket", + object_name="my-object", +) print(url) # Get presigned URL string to download 'my-object' in # 'my-bucket' with two hours expiry. url = client.presigned_get_object( - "my-bucket", "my-object", expires=timedelta(hours=2), + bucket_name="my-bucket", + object_name="my-object", + expires=timedelta(hours=2), ) print(url) ``` -### presigned_put_object(bucket_name, object_name, expires=timedelta(days=7)) +### presigned_put_object(self, *, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), region: Optional[str] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str Get presigned URL of an object to upload data with expiry time and custom request parameters. __Parameters__ -| Param | Type | Description | -|:--------------|:---------------------|:---------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. 
| -| `object_name` | _str_ | Object name in the bucket. | -| `expires` | _datetime.timedelta_ | Expiry in seconds; defaults to 7 days. | +| Param | Type | Description | +|:---------------------|:--------------------------------------------------|:-------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `expires` | _datetime.timedelta = datetime.timedelta(days=7)_ | Expiry in seconds. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ @@ -1799,34 +2127,39 @@ __Example__ ```py # Get presigned URL string to upload data to 'my-object' in # 'my-bucket' with default expiry (i.e. 7 days). -url = client.presigned_put_object("my-bucket", "my-object") +url = client.presigned_put_object( + bucket_name="my-bucket", + object_name="my-object", +) print(url) # Get presigned URL string to upload data to 'my-object' in # 'my-bucket' with two hours expiry. url = client.presigned_put_object( - "my-bucket", "my-object", expires=timedelta(hours=2), + bucket_name="my-bucket", + object_name="my-object", + expires=timedelta(hours=2), ) print(url) ``` -### presigned_post_policy(policy) +### presigned_post_policy(policy: PostPolicy) -> dict[str, str] Get form-data of PostPolicy of an object to upload its data using POST method. __Parameters__ -| Param | Type | Description | -|:---------|:-------------|:-------------| -| `policy` | _PostPolicy_ | Post policy. | +| Param | Type | Description | +|:---------|:-----------------------------|:-------------| +| `policy` | _minio.datatypes.PostPolicy_ | Post policy. | __Return Value__ -| Return | -|:----------------------------| -| Form-data containing _dict_ | +| Return | +|:----------------------------------------------| +| _dict[str, str]_ object containing form-data. | __Example__ @@ -1843,22 +2176,22 @@ form_data = client.presigned_post_policy(policy) -### get_presigned_url(method, bucket_name, object_name, expires=timedelta(days=7), response_headers=None, request_date=None, version_id=None, extra_query_params=None) +### get_presigned_url(self, *, method: str, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), request_date: Optional[datetime] = None, version_id: Optional[str] = None, region: Optional[str] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> str Get presigned URL of an object for HTTP method, expiry time and custom request parameters. __Parameters__ -| Param | Type | Description | -|:---------------------|:---------------------|:---------------------------------------------------------------------------------------------------------------------| -| `method` | _str_ | HTTP method. | -| `bucket_name` | _str_ | Name of the bucket. | -| `object_name` | _str_ | Object name in the bucket. | -| `expires` | _datetime.timedelta_ | Expiry in seconds; defaults to 7 days. | -| `response_headers` | _dict_ | Optional response_headers argument to specify response fields like date, size, type of file, data about server, etc. | -| `request_date` | _datetime.datetime_ | Optional request_date argument to specify a different request date. Default is current date. | -| `version_id` | _str_ | Version ID of the object. | -| `extra_query_params` | _dict_ | Extra query parameters for advanced usage. 
| +| Param | Type | Description | +|:---------------------|:--------------------------------------------------|:-------------------------------------------| +| `method` | _str_ | HTTP method. | +| `bucket_name` | _str_ | Name of the bucket. | +| `object_name` | _str_ | Object name in the bucket. | +| `expires` | _datetime.timedelta = datetime.timedelta(days=7)_ | Expiry in seconds. | +| `request_date` | _Optional[datetime.datetime] = None_ | Request time instead of current time. | +| `version_id` | _Optional[str] = None_ | Version ID of the object. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ @@ -1872,9 +2205,9 @@ __Example__ # Get presigned URL string to delete 'my-object' in # 'my-bucket' with one day expiry. url = client.get_presigned_url( - "DELETE", - "my-bucket", - "my-object", + method="DELETE", + bucket_name="my-bucket", + object_name="my-object", expires=timedelta(days=1), ) print(url) @@ -1883,20 +2216,20 @@ print(url) # 'my-bucket' with response-content-type as application/json # and one day expiry. url = client.get_presigned_url( - "PUT", - "my-bucket", - "my-object", + method="PUT", + bucket_name="my-bucket", + object_name="my-object", expires=timedelta(days=1), - response_headers={"response-content-type": "application/json"}, + extra_query_params=HTTPQueryDict({"response-content-type": "application/json"}), ) print(url) # Get presigned URL string to download 'my-object' in # 'my-bucket' with two hours expiry. url = client.get_presigned_url( - "GET", - "my-bucket", - "my-object", + method="GET", + bucket_name="my-bucket", + object_name="my-object", expires=timedelta(hours=2), ) print(url) @@ -1904,43 +2237,54 @@ print(url) -### upload_snowball_objects(bucket_name, object_list, metadata=None, sse=None, tags=None, retention=None, legal_hold=False, staging_filename=None, compression=False) +### upload_snowball_objects(self, *, bucket_name: str, objects: Iterable[SnowballObject], headers: Optional[HTTPHeaderDict] = None, user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, staging_filename: Optional[str] = None, compression: bool = False, region: Optional[str] = None, extra_headers: Optional[HTTPHeaderDict] = None, extra_query_params: Optional[HTTPQueryDict] = None) -> ObjectWriteResult Uploads multiple objects in a single put call. It is done by creating intermediate TAR file optionally compressed which is uploaded to S3 service. __Parameters__ -| Param | Type | Description | -|:-------------------|:------------|:------------------------------------------------------------------------| -| `bucket_name` | _str_ | Name of the bucket. | -| `object_list` | _iterable_ | An iterable containing :class:`SnowballObject ` object. | -| `metadata` | _dict_ | Any additional metadata to be uploaded along with your PUT request. | -| `sse` | _Sse_ | Server-side encryption. | -| `tags` | _Tags_ | Tags for the object. | -| `retention` | _Retention_ | Retention configuration. | -| `legal_hold` | _bool_ | Flag to set legal hold for the object. | -| `staging_filename` | _str_ | A staging filename to create intermediate tarball. | -| `compression` | _bool_ | Flag to compress tarball. 
| +| Param | Type | Description | +|:---------------------|:------------------------------------------------|:---------------------------------------------------| +| `bucket_name` | _str_ | Name of the bucket. | +| `objects` | _Iterable[minio.commonconfig.SnowballObject]_ | An iterable contain snowball object. | +| `headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Additional headers. | +| `user_metadata` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | User metadata. | +| `sse` | _Optional[minio.sse.Sse] = None_ | Server-side encryption. | +| `tags` | _Optional[minio.commonconfig.Tags] = None_ | Tags for the object. | +| `retention` | _Optional[minio.retention.Retention] = None_ | Retention configuration. | +| `legal_hold` | _bool = False_ | Flag to set legal hold for the object. | +| `staging_filename` | _Optional[str] = None_ | A staging filename to create intermediate tarball. | +| `compression` | _bool = False_ | Flag to compress tarball. | +| `region` | _Optional[str] = None_ | Region of the bucket to skip auto probing. | +| `extra_headers` | _Optional[minio.helpers.HTTPHeaderDict] = None_ | Extra headers for advanced usage. | +| `extra_query_params` | _Optional[minio.helpers.HTTPQueryDict] = None_ | Extra query parameters for advanced usage. | __Return Value__ -| Return | -|:----------------------------| -| _ObjectWriteResult_ object. | +| Return | +|:------------------------------------------| +| _minio.helpers.ObjectWriteResult_ object. | __Example__ ```py # Upload snowball object. client.upload_snowball_objects( - "my-bucket", - [ - SnowballObject("my-object1", filename="/etc/hostname"), + bucket_name="my-bucket", + objects=[ + SnowballObject( + object_name="my-object1", + filename="/etc/hostname", + ), SnowballObject( - "my-object2", data=io.BytesIO("hello"), length=5, + object_name="my-object2", + data=io.BytesIO(b"hello"), + length=5, ), SnowballObject( - "my-object3", data=io.BytesIO("world"), length=5, + object_name="my-object3", + data=io.BytesIO(b"world"), + length=5, mod_time=datetime.now(), ), ], diff --git a/examples/append_object.py b/examples/append_object.py index b1d340e5..90b4569d 100644 --- a/examples/append_object.py +++ b/examples/append_object.py @@ -17,41 +17,53 @@ import io from urllib.request import urlopen -from examples.progress import Progress from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Upload data. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello, "), 7, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello, "), + length=7, ) print(f"created {result.object_name} object; etag: {result.etag}") # Append data. result = client.append_object( - "my-bucket", "my-object", io.BytesIO(b"world"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"world"), + length=5, ) print(f"appended {result.object_name} object; etag: {result.etag}") # Append data in chunks. 
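# (A hedged note on the keyword form used below: the client reads the
# stream in `chunk_size`-byte pieces and appends piece by piece; pass
# `length` when the total size is known, or omit it to append until EOF.)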
-data = urlopen( +with urlopen( "https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.13.12.tar.xz", -) -result = client.append_object( - "my-bucket", "my-object", data, 148611164, 5*1024*1024, -) -print(f"appended {result.object_name} object; etag: {result.etag}") +) as stream: + result = client.append_object( + bucket_name="my-bucket", + object_name="my-object", + stream=stream, + length=148611164, + chunk_size=5*1024*1024, + ) + print(f"appended {result.object_name} object; etag: {result.etag}") # Append unknown sized data. -data = urlopen( +with urlopen( "https://www.kernel.org/pub/linux/kernel/v6.x/linux-6.14.3.tar.xz", -) -result = client.append_object( - "my-bucket", "my-object", data, 149426584, 5*1024*1024, -) -print(f"appended {result.object_name} object; etag: {result.etag}") +) as stream: + result = client.append_object( + bucket_name="my-bucket", + object_name="my-object", + stream=stream, + chunk_size=5*1024*1024, + ) + print(f"appended {result.object_name} object; etag: {result.etag}") diff --git a/examples/bucket_exists.py b/examples/bucket_exists.py index 13a7df88..eb3bd269 100644 --- a/examples/bucket_exists.py +++ b/examples/bucket_exists.py @@ -17,12 +17,12 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -if client.bucket_exists("my-bucket"): +if client.bucket_exists(bucket_name="my-bucket"): print("my-bucket exists") else: print("my-bucket does not exist") diff --git a/examples/compose_object.py b/examples/compose_object.py index 265f2e6c..9d092a44 100644 --- a/examples/compose_object.py +++ b/examples/compose_object.py @@ -19,33 +19,48 @@ from minio.sse import SseS3 client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) sources = [ - ComposeSource("my-job-bucket", "my-object-part-one"), - ComposeSource("my-job-bucket", "my-object-part-two"), - ComposeSource("my-job-bucket", "my-object-part-three"), + ComposeSource( + bucket_name="my-job-bucket", object_name="my-object-part-one", + ), + ComposeSource( + bucket_name="my-job-bucket", object_name="my-object-part-two", + ), + ComposeSource( + bucket_name="my-job-bucket", object_name="my-object-part-three", + ), ] # Create my-bucket/my-object by combining source object # list. -result = client.compose_object("my-bucket", "my-object", sources) +result = client.compose_object( + bucket_name="my-bucket", + object_name="my-object", + sources=sources, +) print(result.object_name, result.version_id) # Create my-bucket/my-object with user metadata by combining # source object list. result = client.compose_object( - "my-bucket", - "my-object", - sources, - metadata={"test_meta_key": "test_meta_value"}, + bucket_name="my-bucket", + object_name="my-object", + sources=sources, + user_metadata={"test_meta_key": "test_meta_value"}, ) print(result.object_name, result.version_id) # Create my-bucket/my-object with user metadata and # server-side encryption by combining source object list. 
-client.compose_object("my-bucket", "my-object", sources, sse=SseS3()) +client.compose_object( + bucket_name="my-bucket", + object_name="my-object", + sources=sources, + sse=SseS3(), +) print(result.object_name, result.version_id) diff --git a/examples/copy_object.py b/examples/copy_object.py index 06f6d136..9314743b 100644 --- a/examples/copy_object.py +++ b/examples/copy_object.py @@ -20,38 +20,41 @@ from minio.commonconfig import REPLACE, CopySource client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # copy an object from a bucket to another. result = client.copy_object( - "my-bucket", - "my-object", - CopySource("my-sourcebucket", "my-sourceobject"), + bucket_name="my-bucket", + object_name="my-object", + source=CopySource( + bucket_name="my-sourcebucket", object_name="my-sourceobject", + ), ) print(result.object_name, result.version_id) # copy an object with condition. result = client.copy_object( - "my-bucket", - "my-object", - CopySource( - "my-sourcebucket", - "my-sourceobject", + bucket_name="my-bucket", + object_name="my-object", + source=CopySource( + bucket_name="my-sourcebucket", + object_name="my-sourceobject", modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc), ), ) print(result.object_name, result.version_id) # copy an object from a bucket with replacing metadata. -metadata = {"test_meta_key": "test_meta_value"} result = client.copy_object( - "my-bucket", - "my-object", - CopySource("my-sourcebucket", "my-sourceobject"), - metadata=metadata, + bucket_name="my-bucket", + object_name="my-object", + source=CopySource( + bucket_name="my-sourcebucket", object_name="my-sourceobject", + ), + user_metadata={"test_meta_key": "test_meta_value"}, metadata_directive=REPLACE, ) print(result.object_name, result.version_id) diff --git a/examples/delete_bucket_encryption.py b/examples/delete_bucket_encryption.py index 324ea7c9..c616d4de 100644 --- a/examples/delete_bucket_encryption.py +++ b/examples/delete_bucket_encryption.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_bucket_encryption("my-bucket") +client.delete_bucket_encryption(bucket_name="my-bucket") diff --git a/examples/delete_bucket_lifecycle.py b/examples/delete_bucket_lifecycle.py index 82e3d28a..972ec411 100644 --- a/examples/delete_bucket_lifecycle.py +++ b/examples/delete_bucket_lifecycle.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_bucket_lifecycle("my-bucket") +client.delete_bucket_lifecycle(bucket_name="my-bucket") diff --git a/examples/delete_bucket_notification.py b/examples/delete_bucket_notification.py index b1e3f5ed..a8d2ebcc 100644 --- a/examples/delete_bucket_notification.py +++ b/examples/delete_bucket_notification.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_bucket_notification("my-bucket") +client.delete_bucket_notification(bucket_name="my-bucket") diff --git a/examples/delete_bucket_policy.py b/examples/delete_bucket_policy.py index 0c985383..c0a126ce 100644 --- a/examples/delete_bucket_policy.py +++ 
b/examples/delete_bucket_policy.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_bucket_policy("my-bucket") +client.delete_bucket_policy(bucket_name="my-bucket") diff --git a/examples/delete_bucket_replication.py b/examples/delete_bucket_replication.py index a7ebaeb2..2a3573fe 100644 --- a/examples/delete_bucket_replication.py +++ b/examples/delete_bucket_replication.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_bucket_replication("my-bucket") +client.delete_bucket_replication(bucket_name="my-bucket") diff --git a/examples/delete_bucket_tags.py b/examples/delete_bucket_tags.py index ab4dbbcd..26414764 100644 --- a/examples/delete_bucket_tags.py +++ b/examples/delete_bucket_tags.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_bucket_tags("my-bucket") +client.delete_bucket_tags(bucket_name="my-bucket") diff --git a/examples/delete_object_lock_config.py b/examples/delete_object_lock_config.py index 7bb333b8..6274c7c6 100644 --- a/examples/delete_object_lock_config.py +++ b/examples/delete_object_lock_config.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_object_lock_config("my-bucket") +client.delete_object_lock_config(bucket_name="my-bucket") diff --git a/examples/delete_object_tags.py b/examples/delete_object_tags.py index 66a22edf..6f92aefb 100644 --- a/examples/delete_object_tags.py +++ b/examples/delete_object_tags.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.delete_object_tags("my-bucket", "my-object") +client.delete_object_tags(bucket_name="my-bucket", object_name="my-object") diff --git a/examples/disable_object_legal_hold.py b/examples/disable_object_legal_hold.py index ea07c452..92060fcd 100644 --- a/examples/disable_object_legal_hold.py +++ b/examples/disable_object_legal_hold.py @@ -17,9 +17,11 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.disable_object_legal_hold("my-bucket", "my-object") +client.disable_object_legal_hold( + bucket_name="my-bucket", object_name="my-object", +) diff --git a/examples/enable_object_legal_hold.py b/examples/enable_object_legal_hold.py index d78706bd..ef29cafb 100644 --- a/examples/enable_object_legal_hold.py +++ b/examples/enable_object_legal_hold.py @@ -17,9 +17,11 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.enable_object_legal_hold("my-bucket", "my-object") +client.enable_object_legal_hold( + bucket_name="my-bucket", object_name="my-object", +) diff --git a/examples/fget_object.py b/examples/fget_object.py index eb157fd5..3245bac3 100644 --- a/examples/fget_object.py +++ 
b/examples/fget_object.py @@ -18,22 +18,30 @@ from minio.sse import SseCustomerKey client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Download data of an object. -client.fget_object("my-bucket", "my-object", "my-filename") +client.fget_object( + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", +) # Download data of an object of version-ID. client.fget_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) # Download data of an SSE-C encrypted object. client.fget_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) diff --git a/examples/fput_object.py b/examples/fput_object.py index 8d79fd47..cfe6d36f 100644 --- a/examples/fput_object.py +++ b/examples/fput_object.py @@ -23,74 +23,80 @@ from minio.sse import SseCustomerKey, SseKMS, SseS3 client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Upload data. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with content-type. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", content_type="application/csv", ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with metadata. result = client.fput_object( - "my-bucket", "my-object", "my-filename", - metadata={"My-Project": "one"}, + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", + user_metadata={"My-Project": "one"}, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with customer key type of server-side encryption. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with KMS type of server-side encryption. 
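# (SseKMS is built from a KMS key ID plus a dict of encryption-context
# key/value pairs, exactly as in the call below; the server encrypts the
# object with the referenced KMS key.)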
result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with S3 type of server-side encryption. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", sse=SseS3(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with tags, retention and legal-hold. @@ -100,24 +106,26 @@ tags = Tags(for_object=True) tags["User"] = "jsmith" result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", tags=tags, retention=Retention(GOVERNANCE, date), legal_hold=True, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with progress bar. result = client.fput_object( - "my-bucket", "my-object", "my-filename", + bucket_name="my-bucket", + object_name="my-object", + file_path="my-filename", progress=Progress(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) diff --git a/examples/get_bucket_encryption.py b/examples/get_bucket_encryption.py index 8dd8efde..997863b4 100644 --- a/examples/get_bucket_encryption.py +++ b/examples/get_bucket_encryption.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_bucket_encryption("my-bucket") +config = client.get_bucket_encryption(bucket_name="my-bucket") diff --git a/examples/get_bucket_lifecycle.py b/examples/get_bucket_lifecycle.py index 1cb89d9f..cc9e3381 100644 --- a/examples/get_bucket_lifecycle.py +++ b/examples/get_bucket_lifecycle.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_bucket_lifecycle("my-bucket") +config = client.get_bucket_lifecycle(bucket_name="my-bucket") diff --git a/examples/get_bucket_notification.py b/examples/get_bucket_notification.py index ddf2a8af..226a8159 100644 --- a/examples/get_bucket_notification.py +++ b/examples/get_bucket_notification.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_bucket_notification("my-bucket") +config = client.get_bucket_notification(bucket_name="my-bucket") diff --git a/examples/get_bucket_policy.py b/examples/get_bucket_policy.py index 67b95e17..378f9d41 100644 --- 
a/examples/get_bucket_policy.py +++ b/examples/get_bucket_policy.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -policy = client.get_bucket_policy("my-bucket") +policy = client.get_bucket_policy(bucket_name="my-bucket") diff --git a/examples/get_bucket_replication.py b/examples/get_bucket_replication.py index 0c29597d..ec2dc373 100644 --- a/examples/get_bucket_replication.py +++ b/examples/get_bucket_replication.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_bucket_replication("my-bucket") +config = client.get_bucket_replication(bucket_name="my-bucket") diff --git a/examples/get_bucket_tags.py b/examples/get_bucket_tags.py index 95c7b1f9..89aebeb2 100644 --- a/examples/get_bucket_tags.py +++ b/examples/get_bucket_tags.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -tags = client.get_bucket_tags("my-bucket") +tags = client.get_bucket_tags(bucket_name="my-bucket") diff --git a/examples/get_bucket_versioning.py b/examples/get_bucket_versioning.py index 4ef41e2a..20e1e2a7 100644 --- a/examples/get_bucket_versioning.py +++ b/examples/get_bucket_versioning.py @@ -17,10 +17,10 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_bucket_versioning("my-bucket") +config = client.get_bucket_versioning(bucket_name="my-bucket") print(config.status) diff --git a/examples/get_object.py b/examples/get_object.py index aa534984..0a247a7b 100644 --- a/examples/get_object.py +++ b/examples/get_object.py @@ -18,7 +18,7 @@ from minio.sse import SseCustomerKey client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -26,7 +26,10 @@ # Get data of an object. response = None try: - response = client.get_object("my-bucket", "my-object") + response = client.get_object( + bucket_name="my-bucket", + object_name="my-object", + ) # Read data from response. finally: if response: @@ -37,7 +40,8 @@ response = None try: response = client.get_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) # Read data from response. @@ -50,7 +54,10 @@ response = None try: response = client.get_object( - "my-bucket", "my-object", offset=512, length=1024, + bucket_name="my-bucket", + object_name="my-object", + offset=512, + length=1024, ) # Read data from response. finally: @@ -62,7 +69,8 @@ response = None try: response = client.get_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) # Read data from response. 
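The `get_object` variants above all repeat the same `try`/`finally` cleanup. As a minimal sketch (not part of this patch; the helper name `object_stream` is hypothetical), the pattern can be wrapped in a context manager:

```py
from contextlib import contextmanager

from minio import Minio

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)


@contextmanager
def object_stream(minio_client, **kwargs):
    # Yield the HTTP response for an object and guarantee close() and
    # release_conn() on exit, mirroring the try/finally used above.
    response = minio_client.get_object(**kwargs)
    try:
        yield response
    finally:
        response.close()
        response.release_conn()


with object_stream(
    client, bucket_name="my-bucket", object_name="my-object",
) as response:
    data = response.read()  # Read data from response.
```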
diff --git a/examples/get_object_lock_config.py b/examples/get_object_lock_config.py index 2255edc4..8a4493c4 100644 --- a/examples/get_object_lock_config.py +++ b/examples/get_object_lock_config.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_object_lock_config("my-bucket") +config = client.get_object_lock_config(bucket_name="my-bucket") diff --git a/examples/get_object_retention.py b/examples/get_object_retention.py index d060bffd..5a6d786c 100644 --- a/examples/get_object_retention.py +++ b/examples/get_object_retention.py @@ -17,9 +17,11 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = client.get_object_retention("my-bucket", "my-object") +config = client.get_object_retention( + bucket_name="my-bucket", object_name="my-object", +) diff --git a/examples/get_object_tags.py b/examples/get_object_tags.py index 3631acc8..47b030c9 100644 --- a/examples/get_object_tags.py +++ b/examples/get_object_tags.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -tags = client.get_object_tags("my-bucket", "my-object") +tags = client.get_object_tags(bucket_name="my-bucket", object_name="my-object") diff --git a/examples/get_presigned_url.py b/examples/get_presigned_url.py index 5532317b..545c300e 100644 --- a/examples/get_presigned_url.py +++ b/examples/get_presigned_url.py @@ -17,9 +17,10 @@ from datetime import timedelta from minio import Minio +from minio.helpers import HTTPQueryDict client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -27,9 +28,9 @@ # Get presigned URL string to delete 'my-object' in # 'my-bucket' with one day expiry. url = client.get_presigned_url( - "DELETE", - "my-bucket", - "my-object", + method="DELETE", + bucket_name="my-bucket", + object_name="my-object", expires=timedelta(days=1), ) print(url) @@ -38,20 +39,22 @@ # 'my-bucket' with response-content-type as application/json # and one day expiry. url = client.get_presigned_url( - "PUT", - "my-bucket", - "my-object", + method="PUT", + bucket_name="my-bucket", + object_name="my-object", expires=timedelta(days=1), - response_headers={"response-content-type": "application/json"}, + extra_query_params=HTTPQueryDict( + {"response-content-type": "application/json"}, + ), ) print(url) # Get presigned URL string to download 'my-object' in # 'my-bucket' with two hours expiry. 
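# (The returned URL is a plain HTTPS link: until it expires, any HTTP
# client can use it without the SDK or credentials.)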
url = client.get_presigned_url( - "GET", - "my-bucket", - "my-object", + method="GET", + bucket_name="my-bucket", + object_name="my-object", expires=timedelta(hours=2), ) print(url) diff --git a/examples/is_object_legal_hold_enabled.py b/examples/is_object_legal_hold_enabled.py index 82b66539..c22b3aa5 100644 --- a/examples/is_object_legal_hold_enabled.py +++ b/examples/is_object_legal_hold_enabled.py @@ -17,12 +17,14 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -if client.is_object_legal_hold_enabled("my-bucket", "my-object"): +if client.is_object_legal_hold_enabled( + bucket_name="my-bucket", object_name="my-object", +): print("legal hold is enabled on my-object") else: print("legal hold is not enabled on my-object") diff --git a/examples/list_buckets.py b/examples/list_buckets.py index 373b3555..88bfa99c 100644 --- a/examples/list_buckets.py +++ b/examples/list_buckets.py @@ -17,7 +17,7 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) diff --git a/examples/list_objects.py b/examples/list_objects.py index fcabe4b5..498597cc 100644 --- a/examples/list_objects.py +++ b/examples/list_objects.py @@ -17,30 +17,30 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # List objects information. -objects = client.list_objects("my-bucket") +objects = client.list_objects(bucket_name="my-bucket") for obj in objects: print(obj) # List objects information whose names starts with "my/prefix/". -objects = client.list_objects("my-bucket", prefix="my/prefix/") +objects = client.list_objects(bucket_name="my-bucket", prefix="my/prefix/") for obj in objects: print(obj) # List objects information recursively. -objects = client.list_objects("my-bucket", recursive=True) +objects = client.list_objects(bucket_name="my-bucket", recursive=True) for obj in objects: print(obj) # List objects information recursively whose names starts with # "my/prefix/". objects = client.list_objects( - "my-bucket", prefix="my/prefix/", recursive=True, + bucket_name="my-bucket", prefix="my/prefix/", recursive=True, ) for obj in objects: print(obj) @@ -48,7 +48,7 @@ # List objects information recursively after object name # "my/prefix/world/1". 
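# (`start_after` makes the listing begin with the first key that sorts
# strictly after the given name.)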
objects = client.list_objects( - "my-bucket", recursive=True, start_after="my/prefix/world/1", + bucket_name="my-bucket", recursive=True, start_after="my/prefix/world/1", ) for obj in objects: print(obj) diff --git a/examples/listen_bucket_notification.py b/examples/listen_bucket_notification.py index 28cb8a9a..f48b2e2d 100644 --- a/examples/listen_bucket_notification.py +++ b/examples/listen_bucket_notification.py @@ -17,13 +17,13 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) with client.listen_bucket_notification( - "my-bucket", + bucket_name="my-bucket", prefix="my-prefix/", events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"], ) as events: diff --git a/examples/make_bucket.py b/examples/make_bucket.py index 3e7720e5..63c42677 100644 --- a/examples/make_bucket.py +++ b/examples/make_bucket.py @@ -17,16 +17,18 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Create bucket. -client.make_bucket("my-bucket") +client.make_bucket(bucket_name="my-bucket") # Create bucket on specific region. -client.make_bucket("my-bucket", "us-west-1") +client.make_bucket(bucket_name="my-bucket", location="us-west-1") # Create bucket with object-lock feature on specific region. -client.make_bucket("my-bucket", "eu-west-2", object_lock=True) +client.make_bucket( + bucket_name="my-bucket", location="eu-west-2", object_lock=True, +) diff --git a/examples/minio_with_assume_role_provider.py b/examples/minio_with_assume_role_provider.py index ac352e22..9c539f5f 100644 --- a/examples/minio_with_assume_role_provider.py +++ b/examples/minio_with_assume_role_provider.py @@ -43,9 +43,9 @@ region = "REGION" provider = AssumeRoleProvider( - sts_endpoint, - access_key, - secret_key, + sts_endpoint=sts_endpoint, + access_key=access_key, + secret_key=secret_key, policy=policy, region=region, role_arn=role_arn, @@ -53,8 +53,8 @@ external_id=external_id, ) -client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider) +client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_aws_config_provider.py b/examples/minio_with_aws_config_provider.py index ac134699..2315e098 100644 --- a/examples/minio_with_aws_config_provider.py +++ b/examples/minio_with_aws_config_provider.py @@ -18,8 +18,8 @@ from minio import Minio from minio.credentials import AWSConfigProvider -client = Minio("s3.amazonaws.com", credentials=AWSConfigProvider()) +client = Minio(endpoint="s3.amazonaws.com", credentials=AWSConfigProvider()) # Get information of an object. 
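# (AWSConfigProvider reads the credentials from the standard AWS
# credential file, e.g. ~/.aws/credentials, so none appear in code.)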
-stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_certificate_identity_provider.py b/examples/minio_with_certificate_identity_provider.py index b00f2ce3..a9229f74 100644 --- a/examples/minio_with_certificate_identity_provider.py +++ b/examples/minio_with_certificate_identity_provider.py @@ -28,11 +28,13 @@ key_file = "/path/to/client.key" provider = CertificateIdentityProvider( - sts_endpoint, cert_file=cert_file, key_file=key_file, + sts_endpoint=sts_endpoint, + cert_file=cert_file, + key_file=key_file, ) -client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider) +client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_chained_provider.py b/examples/minio_with_chained_provider.py index d68b04b6..3886693f 100644 --- a/examples/minio_with_chained_provider.py +++ b/examples/minio_with_chained_provider.py @@ -23,7 +23,7 @@ EnvAWSProvider, IamAwsProvider) client = Minio( - "s3.amazonaws.com", + endpoint="s3.amazonaws.com", credentials=ChainedProvider( [ IamAwsProvider(), @@ -34,5 +34,5 @@ ) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_client_grants_provider.py b/examples/minio_with_client_grants_provider.py index 958126e6..5045e666 100644 --- a/examples/minio_with_client_grants_provider.py +++ b/examples/minio_with_client_grants_provider.py @@ -52,11 +52,12 @@ def get_jwt(client_id, client_secret, idp_endpoint): sts_endpoint = "http://STS-HOST:STS-PORT/" provider = ClientGrantsProvider( - lambda: get_jwt(client_id, client_secret, idp_endpoint), sts_endpoint, + jwt_provider_func=lambda: get_jwt(client_id, client_secret, idp_endpoint), + sts_endpoint=sts_endpoint, ) -client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider) +client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_env_aws_provider.py b/examples/minio_with_env_aws_provider.py index 69898149..32dfe32b 100644 --- a/examples/minio_with_env_aws_provider.py +++ b/examples/minio_with_env_aws_provider.py @@ -18,8 +18,8 @@ from minio import Minio from minio.credentials import EnvAWSProvider -client = Minio("s3.amazonaws.com", credentials=EnvAWSProvider()) +client = Minio(endpoint="s3.amazonaws.com", credentials=EnvAWSProvider()) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_env_minio_provider.py b/examples/minio_with_env_minio_provider.py index f0f985b5..a31d67d4 100644 --- a/examples/minio_with_env_minio_provider.py +++ b/examples/minio_with_env_minio_provider.py @@ -18,8 +18,11 @@ from minio import Minio from minio.credentials import EnvMinioProvider -client = Minio("MINIO-HOST:MINIO-PORT", credentials=EnvMinioProvider()) +client = Minio( + endpoint="MINIO-HOST:MINIO-PORT", + credentials=EnvMinioProvider(), +) # Get information of an object. 
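# (EnvMinioProvider picks the credentials up from environment variables
# such as MINIO_ACCESS_KEY and MINIO_SECRET_KEY.)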
-stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_iam_aws_provider.py b/examples/minio_with_iam_aws_provider.py index 5c9e5713..6b745364 100644 --- a/examples/minio_with_iam_aws_provider.py +++ b/examples/minio_with_iam_aws_provider.py @@ -18,8 +18,8 @@ from minio import Minio from minio.credentials import IamAwsProvider -client = Minio("s3.amazonaws.com", credentials=IamAwsProvider()) +client = Minio(endpoint="s3.amazonaws.com", credentials=IamAwsProvider()) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_ldap_identity_provider.py b/examples/minio_with_ldap_identity_provider.py index 55b8468a..1ccdb82e 100644 --- a/examples/minio_with_ldap_identity_provider.py +++ b/examples/minio_with_ldap_identity_provider.py @@ -27,10 +27,14 @@ # LDAP password. ldap_password = "LDAP-PASSWORD" -provider = LdapIdentityProvider(sts_endpoint, ldap_username, ldap_password) +provider = LdapIdentityProvider( + sts_endpoint=sts_endpoint, + ldap_username=ldap_username, + ldap_password=ldap_password, +) -client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider) +client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_minio_client_config_provider.py b/examples/minio_with_minio_client_config_provider.py index 283c7e17..9ae5d9f1 100644 --- a/examples/minio_with_minio_client_config_provider.py +++ b/examples/minio_with_minio_client_config_provider.py @@ -19,9 +19,9 @@ from minio.credentials import MinioClientConfigProvider client = Minio( - "MINIO-HOST:MINIO-PORT", credentials=MinioClientConfigProvider(), + endpoint="MINIO-HOST:MINIO-PORT", credentials=MinioClientConfigProvider(), ) # Get information of an object. -stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/minio_with_web_identity_provider.py b/examples/minio_with_web_identity_provider.py index 0135f8ff..57d624b1 100644 --- a/examples/minio_with_web_identity_provider.py +++ b/examples/minio_with_web_identity_provider.py @@ -63,14 +63,16 @@ def get_jwt(client_id, client_secret, idp_client_id, idp_endpoint): role_session_name = "ROLE-SESSION-NAME" provider = WebIdentityProvider( - lambda: get_jwt(client_id, client_secret, idp_client_id, idp_endpoint), - sts_endpoint, + jwt_provider_func=lambda: get_jwt( + client_id, client_secret, idp_client_id, idp_endpoint, + ), + sts_endpoint=sts_endpoint, role_arn=role_arn, role_session_name=role_session_name, ) -client = Minio("MINIO-HOST:MINIO-PORT", credentials=provider) +client = Minio(endpoint="MINIO-HOST:MINIO-PORT", credentials=provider) # Get information of an object. 
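# (The provider exchanges the JWT returned by get_jwt for temporary STS
# credentials via AssumeRoleWithWebIdentity and refreshes them on expiry.)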
-stat = client.stat_object("my-bucket", "my-object") +stat = client.stat_object(bucket_name="my-bucket", object_name="my-object") print(stat) diff --git a/examples/presigned_get_object.py b/examples/presigned_get_object.py index fbf8a90e..7ce66e7c 100644 --- a/examples/presigned_get_object.py +++ b/examples/presigned_get_object.py @@ -19,19 +19,24 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Get presigned URL string to download 'my-object' in # 'my-bucket' with default expiry (i.e. 7 days). -url = client.presigned_get_object("my-bucket", "my-object") +url = client.presigned_get_object( + bucket_name="my-bucket", + object_name="my-object", +) print(url) # Get presigned URL string to download 'my-object' in # 'my-bucket' with two hours expiry. url = client.presigned_get_object( - "my-bucket", "my-object", expires=timedelta(hours=2), + bucket_name="my-bucket", + object_name="my-object", + expires=timedelta(hours=2), ) print(url) diff --git a/examples/presigned_post_policy.py b/examples/presigned_post_policy.py index b7c7c0b7..8decea86 100644 --- a/examples/presigned_post_policy.py +++ b/examples/presigned_post_policy.py @@ -20,24 +20,20 @@ from minio.datatypes import PostPolicy client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -policy = PostPolicy( - "my-bucket", datetime.utcnow() + timedelta(days=10), -) +policy = PostPolicy("my-bucket", datetime.utcnow() + timedelta(days=10)) policy.add_starts_with_condition("key", "my/object/prefix/") policy.add_content_length_range_condition(1*1024*1024, 10*1024*1024) form_data = client.presigned_post_policy(policy) +args = " ".join([f"-F {k}={v}" for k, v in form_data.items()]) curl_cmd = ( - "curl -X POST " - "https://play.min.io/my-bucket " - "{0} -F file=@ -F key=" -).format( - " ".join(["-F {0}={1}".format(k, v) for k, v in form_data.items()]), + "curl -X POST https://play.min.io/my-bucket " + f"{args} -F file=@ -F key=" ) print(curl_cmd) diff --git a/examples/presigned_put_object.py b/examples/presigned_put_object.py index bcfdfb28..0ddd2717 100644 --- a/examples/presigned_put_object.py +++ b/examples/presigned_put_object.py @@ -19,19 +19,24 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Get presigned URL string to upload data to 'my-object' in # 'my-bucket' with default expiry (i.e. 7 days). -url = client.presigned_put_object("my-bucket", "my-object") +url = client.presigned_put_object( + bucket_name="my-bucket", + object_name="my-object", +) print(url) # Get presigned URL string to upload data to 'my-object' in # 'my-bucket' with two hours expiry. 
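# (Anyone holding the URL can upload the object with a plain HTTP PUT of
# the request body until the expiry passes.)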
url = client.presigned_put_object( - "my-bucket", "my-object", expires=timedelta(hours=2), + bucket_name="my-bucket", + object_name="my-object", + expires=timedelta(hours=2), ) print(url) diff --git a/examples/progress.py b/examples/progress.py index ab35a48e..f4551e44 100644 --- a/examples/progress.py +++ b/examples/progress.py @@ -69,6 +69,7 @@ def __init__(self, interval=1, stdout=sys.stdout): self.display_queue = Queue() self.initial_time = time.time() self.stdout = stdout + self.prefix = None self.start() def set_meta(self, total_length, object_name): @@ -89,9 +90,9 @@ def run(self): # display every interval secs task = self.display_queue.get(timeout=self.interval) except Empty: - elapsed_time = time.time() - self.initial_time - if elapsed_time > displayed_time: - displayed_time = elapsed_time + displayed_time = max( + displayed_time, time.time() - self.initial_time, + ) self.print_status(current_size=self.current_size, total_length=self.total_length, displayed_time=displayed_time, @@ -117,8 +118,8 @@ def update(self, size): bytes. """ if not isinstance(size, int): - raise ValueError('{} type can not be displayed. ' - 'Please change it to Int.'.format(type(size))) + raise ValueError(f"{type(size)} type can not be displayed. " + "Please change it to Int.") self.current_size += size self.display_queue.put((self.current_size, self.total_length)) @@ -147,8 +148,7 @@ def seconds_to_time(seconds): hours, m = divmod(minutes, 60) if hours: return _HOURS_OF_ELAPSED % (hours, m, seconds) - else: - return _MINUTES_OF_ELAPSED % (m, seconds) + return _MINUTES_OF_ELAPSED % (m, seconds) def format_string(current_size, total_length, elapsed_time): diff --git a/examples/prompt_object.py b/examples/prompt_object.py new file mode 100644 index 00000000..0ffd723d --- /dev/null +++ b/examples/prompt_object.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# MinIO Python Library for Amazon S3 Compatible Cloud Storage, +# (C) 2025 MinIO, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from minio import Minio + +client = Minio( + endpoint="play.min.io", + access_key="Q3AM3UQ867SPQQA43P2F", + secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", +) + +response = None +try: + response = client.prompt_object( + bucket_name="my-bucket", + object_name="my-object", + prompt="Describe the object for me", + ) + # Read data from response +finally: + if response: + response.close() + response.release_conn() diff --git a/examples/put_object.py b/examples/put_object.py index 92395dc2..afab26f4 100644 --- a/examples/put_object.py +++ b/examples/put_object.py @@ -25,87 +25,102 @@ from minio.sse import SseCustomerKey, SseKMS, SseS3 client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Upload data. 
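# (`length` must match the number of bytes readable from `data`; the
# unknown-size variant below instead passes length=-1 with a part_size.)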
result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload unknown sized data. -data = urlopen( +with urlopen( "https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.4.81.tar.xz", -) -result = client.put_object( - "my-bucket", "my-object", data, length=-1, part_size=10*1024*1024, -) -print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), -) +) as data: + result = client.put_object( + bucket_name="my-bucket", + object_name="my-object", + data=data, + length=-1, + part_size=10*1024*1024, + ) + print( + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", + ) # Upload data with content-type. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, content_type="application/csv", ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with metadata. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, - metadata={"My-Project": "one"}, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, + user_metadata={"My-Project": "one"}, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with customer key type of server-side encryption. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with KMS type of server-side encryption. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, sse=SseKMS("KMS-KEY-ID", {"Key1": "Value1", "Key2": "Value2"}), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with S3 type of server-side encryption. 
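# (SseS3 requests SSE-S3: the server encrypts the object with keys it
# manages itself, so the client supplies no key material.)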
result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, sse=SseS3(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with tags, retention and legal-hold. @@ -115,24 +130,28 @@ tags = Tags(for_object=True) tags["User"] = "jsmith" result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, tags=tags, retention=Retention(GOVERNANCE, date), legal_hold=True, ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) # Upload data with progress bar. result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, + bucket_name="my-bucket", + object_name="my-object", + data=io.BytesIO(b"hello"), + length=5, progress=Progress(), ) print( - "created {0} object; etag: {1}, version-id: {2}".format( - result.object_name, result.etag, result.version_id, - ), + f"created {result.object_name} object; etag: {result.etag}, " + f"version-id: {result.version_id}", ) diff --git a/examples/remove_bucket.py b/examples/remove_bucket.py index a3c60795..bbe4fd74 100644 --- a/examples/remove_bucket.py +++ b/examples/remove_bucket.py @@ -17,9 +17,9 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.remove_bucket("my-bucket") +client.remove_bucket(bucket_name="my-bucket") diff --git a/examples/remove_object.py b/examples/remove_object.py index 8828509f..aff25033 100644 --- a/examples/remove_object.py +++ b/examples/remove_object.py @@ -17,16 +17,17 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Remove object. -client.remove_object("my-bucket", "my-object") +client.remove_object(bucket_name="my-bucket", object_name="my-object") # Remove version of an object. client.remove_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) diff --git a/examples/remove_objects.py b/examples/remove_objects.py index 1369132a..60d1b7f3 100644 --- a/examples/remove_objects.py +++ b/examples/remove_objects.py @@ -18,18 +18,21 @@ from minio.deleteobjects import DeleteObject client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Remove list of objects. errors = client.remove_objects( - "my-bucket", - [ - DeleteObject("my-object1"), - DeleteObject("my-object2"), - DeleteObject("my-object3", "13f88b18-8dcd-4c83-88f2-8631fdb6250c"), + bucket_name="my-bucket", + delete_object_list=[ + DeleteObject(name="my-object1"), + DeleteObject(name="my-object2"), + DeleteObject( + name="my-object3", + version_id="13f88b18-8dcd-4c83-88f2-8631fdb6250c", + ), ], ) for error in errors: @@ -38,8 +41,15 @@ # Remove a prefix recursively. 
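# (remove_objects works lazily: objects are deleted only as the returned
# errors iterator is consumed, as the loop below does.)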
delete_object_list = map( lambda x: DeleteObject(x.object_name), - client.list_objects("my-bucket", "my/prefix/", recursive=True), + client.list_objects( + bucket_name="my-bucket", + prefix="my/prefix/", + recursive=True, + ), +) +errors = client.remove_objects( + bucket_name="my-bucket", + delete_object_list=delete_object_list, ) -errors = client.remove_objects("my-bucket", delete_object_list) for error in errors: print("error occurred when deleting object", error) diff --git a/examples/select_object_content.py b/examples/select_object_content.py index 59f5a4bc..3e6fff75 100644 --- a/examples/select_object_content.py +++ b/examples/select_object_content.py @@ -20,18 +20,18 @@ SelectRequest) client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) with client.select_object_content( - "my-bucket", - "my-object.csv", - SelectRequest( - "select * from S3Object", - CSVInputSerialization(), - CSVOutputSerialization(), + bucket_name="my-bucket", + object_name="my-object.csv", + request=SelectRequest( + expression="select * from S3Object", + input_serialization=CSVInputSerialization(), + output_serialization=CSVOutputSerialization(), request_progress=True, ), ) as result: diff --git a/examples/set_bucket_encryption.py b/examples/set_bucket_encryption.py index fe840c5c..33652684 100644 --- a/examples/set_bucket_encryption.py +++ b/examples/set_bucket_encryption.py @@ -18,11 +18,11 @@ from minio.sseconfig import Rule, SSEConfig client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) client.set_bucket_encryption( - "my-bucket", SSEConfig(Rule.new_sse_s3_rule()), + bucket_name="my-bucket", config=SSEConfig(Rule.new_sse_s3_rule()), ) diff --git a/examples/set_bucket_lifecycle.py b/examples/set_bucket_lifecycle.py index ec053b8c..4ea26e52 100644 --- a/examples/set_bucket_lifecycle.py +++ b/examples/set_bucket_lifecycle.py @@ -19,7 +19,7 @@ from minio.lifecycleconfig import Expiration, LifecycleConfig, Rule, Transition client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -27,17 +27,17 @@ config = LifecycleConfig( [ Rule( - ENABLED, + status=ENABLED, rule_filter=Filter(prefix="documents/"), rule_id="rule1", transition=Transition(days=30, storage_class="GLACIER"), ), Rule( - ENABLED, + status=ENABLED, rule_filter=Filter(prefix="logs/"), rule_id="rule2", expiration=Expiration(days=365), ), ], ) -client.set_bucket_lifecycle("my-bucket", config) +client.set_bucket_lifecycle(bucket_name="my-bucket", config=config) diff --git a/examples/set_bucket_notification.py b/examples/set_bucket_notification.py index 89c43f45..7d20ab75 100644 --- a/examples/set_bucket_notification.py +++ b/examples/set_bucket_notification.py @@ -19,7 +19,7 @@ QueueConfig) client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -27,11 +27,11 @@ config = NotificationConfig( queue_config_list=[ QueueConfig( - "QUEUE-ARN-OF-THIS-BUCKET", - ["s3:ObjectCreated:*"], + queue="QUEUE-ARN-OF-THIS-BUCKET", + events=["s3:ObjectCreated:*"], config_id="1", prefix_filter_rule=PrefixFilterRule("abc"), ), ], ) -client.set_bucket_notification("my-bucket", config) +client.set_bucket_notification(bucket_name="my-bucket", config=config) diff --git a/examples/set_bucket_policy.py 
b/examples/set_bucket_policy.py index f1c938f6..bc25b73e 100644 --- a/examples/set_bucket_policy.py +++ b/examples/set_bucket_policy.py @@ -19,7 +19,7 @@ from minio import Minio client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -42,7 +42,7 @@ }, ], } -client.set_bucket_policy("my-bucket", json.dumps(policy)) +client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy)) # Example anonymous read-write bucket policy. policy = { @@ -72,4 +72,4 @@ }, ], } -client.set_bucket_policy("my-bucket", json.dumps(policy)) +client.set_bucket_policy(bucket_name="my-bucket", policy=json.dumps(policy)) diff --git a/examples/set_bucket_replication.py b/examples/set_bucket_replication.py index de5d38c6..76b6025c 100644 --- a/examples/set_bucket_replication.py +++ b/examples/set_bucket_replication.py @@ -20,19 +20,19 @@ ReplicationConfig, Rule) client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) config = ReplicationConfig( - "REPLACE-WITH-ACTUAL-ROLE", - [ + role="REPLACE-WITH-ACTUAL-ROLE", + rules=[ Rule( - Destination( + destination=Destination( "REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN", ), - ENABLED, + status=ENABLED, delete_marker_replication=DeleteMarkerReplication( DISABLED, ), @@ -47,4 +47,4 @@ ), ], ) -client.set_bucket_replication("my-bucket", config) +client.set_bucket_replication(bucket_name="my-bucket", config=config) diff --git a/examples/set_bucket_tags.py b/examples/set_bucket_tags.py index 0d759765..af513667 100644 --- a/examples/set_bucket_tags.py +++ b/examples/set_bucket_tags.py @@ -18,7 +18,7 @@ from minio.commonconfig import Tags client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -26,4 +26,4 @@ tags = Tags.new_bucket_tags() tags["Project"] = "Project One" tags["User"] = "jsmith" -client.set_bucket_tags("my-bucket", tags) +client.set_bucket_tags(bucket_name="my-bucket", tags=tags) diff --git a/examples/set_bucket_versioning.py b/examples/set_bucket_versioning.py index 418616a7..5f164e82 100644 --- a/examples/set_bucket_versioning.py +++ b/examples/set_bucket_versioning.py @@ -19,9 +19,12 @@ from minio.versioningconfig import VersioningConfig client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -client.set_bucket_versioning("my-bucket", VersioningConfig(ENABLED)) +client.set_bucket_versioning( + bucket_name="my-bucket", + config=VersioningConfig(ENABLED), +) diff --git a/examples/set_object_lock_config.py b/examples/set_object_lock_config.py index 6a77a190..317bc4a4 100644 --- a/examples/set_object_lock_config.py +++ b/examples/set_object_lock_config.py @@ -19,10 +19,10 @@ from minio.objectlockconfig import DAYS, ObjectLockConfig client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) -config = ObjectLockConfig(GOVERNANCE, 15, DAYS) -client.set_object_lock_config("my-bucket", config) +config = ObjectLockConfig(mode=GOVERNANCE, duration=15, duration_unit=DAYS) +client.set_object_lock_config(bucket_name="my-bucket", config=config) diff --git a/examples/set_object_retention.py b/examples/set_object_retention.py index a0afc8b8..189fad04 100644 --- a/examples/set_object_retention.py +++ 
b/examples/set_object_retention.py @@ -21,10 +21,14 @@ from minio.retention import Retention client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) config = Retention(GOVERNANCE, datetime.utcnow() + timedelta(days=10)) -client.set_object_retention("my-bucket", "my-object", config) +client.set_object_retention( + bucket_name="my-bucket", + object_name="my-object", + config=config, +) diff --git a/examples/set_object_tags.py b/examples/set_object_tags.py index edcd5382..36e9dbe2 100644 --- a/examples/set_object_tags.py +++ b/examples/set_object_tags.py @@ -18,7 +18,7 @@ from minio.commonconfig import Tags client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) @@ -26,4 +26,8 @@ tags = Tags.new_object_tags() tags["Project"] = "Project One" tags["User"] = "jsmith" -client.set_object_tags("my-bucket", "my-object", tags) +client.set_object_tags( + bucket_name="my-bucket", + object_name="my-object", + tags=tags, +) diff --git a/examples/stat_object.py b/examples/stat_object.py index 3a38ff02..707685cc 100644 --- a/examples/stat_object.py +++ b/examples/stat_object.py @@ -18,37 +18,27 @@ from minio.sse import SseCustomerKey client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) # Get object information. -result = client.stat_object("my-bucket", "my-object") -print( - "last-modified: {0}, size: {1}".format( - result.last_modified, result.size, - ), -) +result = client.stat_object(bucket_name="my-bucket", object_name="my-object") +print(f"last-modified: {result.last_modified}, size: {result.size}") # Get object information of version-ID. result = client.stat_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", ) -print( - "last-modified: {0}, size: {1}".format( - result.last_modified, result.size, - ), -) +print(f"last-modified: {result.last_modified}, size: {result.size}") # Get SSE-C encrypted object information. 
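A note on the `examples/set_object_retention.py` hunk above: it keeps `datetime.utcnow()`, which returns a naive timestamp and is deprecated since Python 3.12. Below is a minimal sketch of the same call with an aware UTC datetime instead, assuming the SDK accepts timezone-aware values for the retain-until date:

```py
from datetime import datetime, timedelta, timezone

from minio import Minio
from minio.commonconfig import GOVERNANCE
from minio.retention import Retention

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

# Aware UTC timestamp; datetime.utcnow() is deprecated in Python 3.12+.
retain_until = datetime.now(timezone.utc) + timedelta(days=10)
client.set_object_retention(
    bucket_name="my-bucket",
    object_name="my-object",
    config=Retention(GOVERNANCE, retain_until),
)
```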
result = client.stat_object( - "my-bucket", "my-object", + bucket_name="my-bucket", + object_name="my-object", ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), ) -print( - "last-modified: {0}, size: {1}".format( - result.last_modified, result.size, - ), -) +print(f"last-modified: {result.last_modified}, size: {result.size}") diff --git a/examples/upload_snowball_objects.py b/examples/upload_snowball_objects.py index c73aa0a0..6270a2d4 100644 --- a/examples/upload_snowball_objects.py +++ b/examples/upload_snowball_objects.py @@ -21,20 +21,27 @@ from minio.commonconfig import SnowballObject client = Minio( - "play.min.io", + endpoint="play.min.io", access_key="Q3AM3UQ867SPQQA43P2F", secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ) client.upload_snowball_objects( - "my-bucket", - [ - SnowballObject("my-object1", filename="/etc/hostname"), + bucket_name="my-bucket", + objects=[ SnowballObject( - "my-object2", data=io.BytesIO(b"hello"), length=5, + object_name="my-object1", + filename="/etc/hostname", ), SnowballObject( - "my-object3", data=io.BytesIO(b"world"), length=5, + object_name="my-object2", + data=io.BytesIO(b"hello"), + length=5, + ), + SnowballObject( + object_name="my-object3", + data=io.BytesIO(b"world"), + length=5, mod_time=datetime.now(), ), ], diff --git a/minio/api.py b/minio/api.py index 285920ef..4c583140 100644 --- a/minio/api.py +++ b/minio/api.py @@ -21,7 +21,6 @@ # pylint: disable=too-many-public-methods # pylint: disable=too-many-statements # pylint: disable=too-many-locals -# pylint: disable=too-many-positional-arguments """ Simple Storage Service (aka S3) client to perform bucket and object operations. @@ -29,6 +28,7 @@ from __future__ import absolute_import, annotations +import io import itertools import json import os @@ -37,9 +37,8 @@ from datetime import datetime, timedelta from io import BytesIO from random import random -from typing import (Any, BinaryIO, Iterator, Optional, TextIO, Tuple, Union, - cast) -from urllib.parse import urlunsplit +from typing import Any, BinaryIO, Iterator, Optional, TextIO, Union, cast +from urllib.parse import quote, urlencode, urlunsplit from xml.etree import ElementTree as ET import certifi @@ -55,9 +54,13 @@ from urllib3.util import Timeout from . 
import time +from .checksum import (MD5, SHA256, UNSIGNED_PAYLOAD, ZERO_MD5_HASH, + ZERO_SHA256_HASH, Algorithm, base64_string, + base64_string_to_sum, hex_string, make_headers, + new_hashers) from .commonconfig import (COPY, REPLACE, ComposeSource, CopySource, SnowballObject, Tags) -from .credentials import Credentials, StaticProvider +from .credentials import StaticProvider from .credentials.providers import Provider from .datatypes import (Bucket, CompleteMultipartUploadResult, EventIterable, ListAllMyBucketsResult, ListMultipartUploadsResult, @@ -68,11 +71,11 @@ from .error import InvalidResponseError, S3Error, ServerError from .helpers import (_DEFAULT_USER_AGENT, MAX_MULTIPART_COUNT, MAX_MULTIPART_OBJECT_SIZE, MAX_PART_SIZE, MIN_PART_SIZE, - BaseURL, DictType, ObjectWriteResult, ProgressType, - ThreadPool, check_bucket_name, check_object_name, - check_sse, check_ssec, genheaders, get_part_info, + BaseURL, HTTPQueryDict, ObjectWriteResult, ProgressType, + RegionMap, ThreadPool, check_bucket_name, + check_object_name, check_sse, check_ssec, get_part_info, headers_to_strings, is_valid_policy_type, makedirs, - md5sum_hash, queryencode, read_part_data, sha256_hash) + normalize_headers, queryencode, read_part_data) from .legalhold import LegalHold from .lifecycleconfig import LifecycleConfig from .notificationconfig import NotificationConfig @@ -84,6 +87,7 @@ from .sse import Sse, SseCustomerKey from .sseconfig import SSEConfig from .tagging import Tagging +from .time import to_http_header, to_iso8601utc from .versioningconfig import VersioningConfig from .xml import Element, SubElement, findtext, getbytes, marshal, unmarshal @@ -92,42 +96,8 @@ class Minio: """ Simple Storage Service (aka S3) client to perform bucket and object operations. - - :param endpoint: Hostname of a S3 service. - :param access_key: Access key (aka user ID) of your account in S3 service. - :param secret_key: Secret Key (aka password) of your account in S3 service. - :param session_token: Session token of your account in S3 service. - :param secure: Flag to indicate to use secure (TLS) connection to S3 - service or not. - :param region: Region name of buckets in S3 service. - :param http_client: Customized HTTP client. - :param credentials: Credentials provider of your account in S3 service. - :param cert_check: Flag to indicate to verify SSL certificate or not. - :return: :class:`Minio ` object - - Example:: - # Create client with anonymous access. - client = Minio("play.min.io") - - # Create client with access and secret key. - client = Minio("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY") - - # Create client with access key and secret key with specific region. - client = Minio( - "play.minio.io:9000", - access_key="Q3AM3UQ867SPQQA43P2F", - secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", - region="my-region", - ) - - **NOTE on concurrent usage:** `Minio` object is thread safe when using - the Python `threading` library. Specifically, it is **NOT** safe to share - it between multiple processes, for example when using - `multiprocessing.Pool`. The solution is simply to create a new `Minio` - object in each process, and not share it between processes. 
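Stepping back to the `examples/remove_object*.py` hunk near the top of this diff: `remove_objects()` returns a lazy iterator, so no deletion is sent until the caller drains it. A minimal sketch under the keyword-only API introduced here (bucket and object names are placeholders):

```py
from minio import Minio
from minio.deleteobjects import DeleteObject

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

errors = client.remove_objects(
    bucket_name="my-bucket",
    delete_object_list=[
        DeleteObject("my-object1"),
        DeleteObject("my-object2"),
    ],
)
# Nothing is deleted until the iterator is consumed.
for error in errors:
    print("error occurred when deleting object", error)
```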
- """ - _region_map: dict[str, str] + _region_map: RegionMap _base_url: BaseURL _user_agent: str _trace_stream: Optional[TextIO] @@ -136,6 +106,7 @@ class Minio: def __init__( self, + *, endpoint: str, access_key: Optional[str] = None, secret_key: Optional[str] = None, @@ -146,13 +117,94 @@ def __init__( credentials: Optional[Provider] = None, cert_check: bool = True, ): + """ + Initializes a new Minio client object. + + Args: + endpoint (str): + Hostname of an S3 service. + + access_key (Optional[str], default=None): + Access key (aka user ID) of your account in the S3 service. + + secret_key (Optional[str], default=None): + Secret key (aka password) of your account in the S3 service. + + session_token (Optional[str], default=None): + Session token of your account in the S3 service. + + secure (bool, default=True): + Flag to indicate whether to use a secure (TLS) connection + to the S3 service. + + region (Optional[str], default=None): + Region name of buckets in the S3 service. + + http_client (Optional[urllib3.PoolManager], default=None): + Customized HTTP client. + + credentials (Optional[Provider], default=None): + Credentials provider of your account in the S3 service. + + cert_check (bool, default=True): + Flag to enable/disable server certificate validation + for HTTPS connections. + + Notes: + The `Minio` object is thread-safe when used with the Python + `threading` library. However, it is **not** safe to share it + between multiple processes, for example when using + `multiprocessing.Pool`. To avoid issues, create a new `Minio` + object in each process instead of sharing it. + + Example: + >>> from minio import Minio + >>> + >>> # Create client with anonymous access + >>> client = Minio(endpoint="play.min.io") + >>> + >>> # Create client with access and secret key + >>> client = Minio( + ... endpoint="s3.amazonaws.com", + ... access_key="ACCESS-KEY", + ... secret_key="SECRET-KEY", + ... ) + >>> + >>> # Create client with specific region + >>> client = Minio( + ... endpoint="play.minio.io:9000", + ... access_key="Q3AM3UQ867SPQQA43P2F", + ... secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + ... region="my-region", + ... ) + >>> + >>> # Create client with custom HTTP client using proxy + >>> import urllib3 + >>> client = Minio( + ... endpoint="SERVER:PORT", + ... access_key="ACCESS_KEY", + ... secret_key="SECRET_KEY", + ... secure=True, + ... http_client=urllib3.ProxyManager( + ... "https://PROXYSERVER:PROXYPORT/", + ... timeout=urllib3.Timeout.DEFAULT_TIMEOUT, + ... cert_reqs="CERT_REQUIRED", + ... retries=urllib3.Retry( + ... total=5, + ... backoff_factor=0.2, + ... status_forcelist=[500, 502, 503, 504], + ... ), + ... ), + ... ) + """ # Validate http client has correct base class. 
if http_client and not isinstance(http_client, urllib3.PoolManager): - raise ValueError( - "HTTP client should be instance of `urllib3.PoolManager`" + raise TypeError( + "HTTP client should be urllib3.PoolManager like object, " + f"got {type(http_client).__name__}", ) - self._region_map = {} + self._region_map = RegionMap() self._base_url = BaseURL( ("https://" if secure else "http://") + endpoint, region, @@ -183,6 +235,67 @@ def __del__(self): if hasattr(self, "_http"): # Only required for unit test run self._http.clear() + @staticmethod + def _gen_read_headers( + *, + ssec: Optional[SseCustomerKey] = None, + offset: int = 0, + length: Optional[int] = None, + match_etag: Optional[str] = None, + not_match_etag: Optional[str] = None, + modified_since: Optional[datetime] = None, + unmodified_since: Optional[datetime] = None, + fetch_checksum: bool = False, + ) -> HTTPHeaderDict: + """Generates conditional headers for get/head object.""" + headers = HTTPHeaderDict() + if ssec: + headers.extend(ssec.headers()) + if offset or length: + end = (offset + length - 1) if length else "" + headers['Range'] = f"bytes={offset}-{end}" + if match_etag: + headers["if-match"] = match_etag + if not_match_etag: + headers["if-none-match"] = not_match_etag + if modified_since: + headers["if-modified-since"] = to_http_header(modified_since) + if unmodified_since: + headers["if-unmodified-since"] = to_http_header(unmodified_since) + if fetch_checksum: + headers["x-amz-checksum-mode"] = "ENABLED" + return headers + + @staticmethod + def _gen_write_headers( + *, + headers: Optional[HTTPHeaderDict] = None, + user_metadata: Optional[HTTPHeaderDict] = None, + sse: Optional[Sse] = None, + tags: Optional[Tags] = None, + retention: Optional[Retention] = None, + legal_hold: bool = False, + ) -> HTTPHeaderDict: + """Generate headers for given parameters.""" + headers = headers.copy() if headers else HTTPHeaderDict() + if user_metadata: + headers.extend(user_metadata) + headers = normalize_headers(headers) + if sse: + headers.extend(sse.headers()) + if tags: + headers["x-amz-tagging"] = urlencode( + list(tags.items()), quote_via=quote, + ) + if retention and retention.mode: + headers["x-amz-object-lock-mode"] = retention.mode + headers["x-amz-object-lock-retain-until-date"] = cast( + str, to_iso8601utc(retention.retain_until_date), + ) + if legal_hold: + headers["x-amz-object-lock-legal-hold"] = "ON" + return headers + def _handle_redirect_response( self, method: str, @@ -211,71 +324,76 @@ def _handle_redirect_response( return code, message - def _build_headers( - self, - host: str, - headers: Optional[DictType] = None, - body: Optional[bytes] = None, - creds: Optional[Credentials] = None, - ) -> tuple[DictType, datetime]: - """Build headers with given parameters.""" - headers = headers or {} - md5sum_added = headers.get("Content-MD5") - headers["Host"] = host - headers["User-Agent"] = self._user_agent - sha256 = None - md5sum = None - - if body: - headers["Content-Length"] = str(len(body)) - if creds: - if self._base_url.is_https: - sha256 = "UNSIGNED-PAYLOAD" - md5sum = None if md5sum_added else md5sum_hash(body) - else: - sha256 = sha256_hash(body) - else: - md5sum = None if md5sum_added else md5sum_hash(body) - if md5sum: - headers["Content-MD5"] = md5sum - if sha256: - headers["x-amz-content-sha256"] = sha256 - if creds and creds.session_token: - headers["X-Amz-Security-Token"] = creds.session_token - date = time.utcnow() - headers["x-amz-date"] = time.to_amz_date(date) - return headers, date - def _url_open( self, + 
*, method: str, region: str, bucket_name: Optional[str] = None, object_name: Optional[str] = None, body: Optional[bytes] = None, - headers: Optional[DictType] = None, - query_params: Optional[DictType] = None, + headers: Optional[HTTPHeaderDict] = None, + query_params: Optional[HTTPQueryDict] = None, preload_content: bool = True, no_body_trace: bool = False, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> BaseHTTPResponse: """Execute HTTP request.""" - creds = self._provider.retrieve() if self._provider else None url = self._base_url.build( method=method, region=region, bucket_name=bucket_name, object_name=object_name, query_params=query_params, + extra_query_params=extra_query_params, ) - headers, date = self._build_headers(url.netloc, headers, body, creds) - if creds: + + headers = headers.copy() if headers else HTTPHeaderDict() + if extra_headers: + headers.extend(extra_headers) + + headers["Host"] = url.netloc + headers["User-Agent"] = self._user_agent + content_sha256 = headers.get("x-amz-content-sha256") + content_md5 = headers.get("Content-MD5") + if method in ["PUT", "POST"]: + headers["Content-Length"] = str(len(body or b"")) + if not headers.get("Content-Type"): + headers["Content-Type"] = "application/octet-stream" + if body is None: + content_sha256 = content_sha256 or ZERO_SHA256_HASH + content_md5 = content_md5 or ZERO_MD5_HASH + else: + if not content_sha256: + if self._base_url.is_https: + content_sha256 = UNSIGNED_PAYLOAD + else: + sha256_checksum = headers.get("x-amz-checksum-sha256") + content_sha256 = hex_string( + base64_string_to_sum(sha256_checksum) if sha256_checksum + else SHA256.hash(body), + ) + if not content_md5 and content_sha256 == UNSIGNED_PAYLOAD: + content_md5 = base64_string(MD5.hash(body)) + if not headers.get("x-amz-content-sha256"): + headers["x-amz-content-sha256"] = cast(str, content_sha256) + if not headers.get("Content-MD5") and content_md5: + headers["Content-MD5"] = content_md5 + date = time.utcnow() + headers["x-amz-date"] = time.to_amz_date(date) + + if self._provider is not None: + creds = self._provider.retrieve() + if creds.session_token: + headers["X-Amz-Security-Token"] = creds.session_token headers = sign_v4_s3( method=method, url=url, region=region, headers=headers, credentials=creds, - content_sha256=cast(str, headers.get("x-amz-content-sha256")), + content_sha256=cast(str, content_sha256), date=date, ) @@ -295,19 +413,11 @@ def _url_open( self._trace_stream.write("\n") self._trace_stream.write("\n") - http_headers = HTTPHeaderDict() - for key, value in (headers or {}).items(): - if isinstance(value, (list, tuple)): - for val in value: - http_headers.add(key, val) - else: - http_headers.add(key, value) - response = self._http.urlopen( method, urlunsplit(url), body=body, - headers=http_headers, + headers=headers, preload_content=preload_content, ) @@ -422,28 +532,35 @@ def _url_open( if response_error.code in ["NoSuchBucket", "RetryHead"]: if bucket_name is not None: - self._region_map.pop(bucket_name, None) + self._region_map.remove(bucket_name) raise response_error def _execute( self, + *, method: str, bucket_name: Optional[str] = None, object_name: Optional[str] = None, body: Optional[bytes] = None, - headers: Optional[DictType] = None, - query_params: Optional[DictType] = None, + headers: Optional[HTTPHeaderDict] = None, + query_params: Optional[HTTPQueryDict] = None, preload_content: bool = True, no_body_trace: bool = False, + region: Optional[str] = None, + extra_headers: 
Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> BaseHTTPResponse: """Execute HTTP request.""" - region = self._get_region(bucket_name) + region = self._get_region( + bucket_name=bucket_name, + region=region, + ) try: return self._url_open( - method, - region, + method=method, + region=region, bucket_name=bucket_name, object_name=object_name, body=body, @@ -451,6 +568,8 @@ def _execute( query_params=query_params, preload_content=preload_content, no_body_trace=no_body_trace, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) except S3Error as exc: if exc.code != "RetryHead": @@ -459,8 +578,8 @@ def _execute( # Retry only once on RetryHead error. try: return self._url_open( - method, - region, + method=method, + region=region, bucket_name=bucket_name, object_name=object_name, body=body, @@ -468,6 +587,8 @@ def _execute( query_params=query_params, preload_content=preload_content, no_body_trace=no_body_trace, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) except S3Error as exc: if exc.code != "RetryHead": @@ -478,13 +599,31 @@ def _execute( ) raise exc.copy(cast(str, code), cast(str, message)) - def _get_region(self, bucket_name: Optional[str] = None) -> str: + def _get_region( + self, + *, + bucket_name: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> str: """ Return region of given bucket either from region cache or set in constructor. """ - if self._base_url.region: + if ( + region is not None and self._base_url.region is not None and + region != self._base_url.region + ): + raise ValueError( + f"region must be {self._base_url.region}, but passed {region}", + ) + + if region is not None: + return region + + if self._base_url.region is not None: return self._base_url.region if not bucket_name or not self._provider: @@ -496,10 +635,12 @@ def _get_region(self, bucket_name: Optional[str] = None) -> str: # Execute GetBucketLocation REST API to get region of the bucket. response = self._url_open( - "GET", - "us-east-1", + method="GET", + region="us-east-1", bucket_name=bucket_name, - query_params={"location": ""}, + query_params=HTTPQueryDict({"location": ""}), + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) element = ET.fromstring(response.data.decode()) @@ -510,18 +651,22 @@ def _get_region(self, bucket_name: Optional[str] = None) -> str: else: region = element.text - self._region_map[bucket_name] = region + self._region_map.set(bucket_name, region) return region def set_app_info(self, app_name: str, app_version: str): """ Set your application name and version to user agent header. - :param app_name: Application name. - :param app_version: Application version. + Args: + app_name (str): + Application name. - Example:: - client.set_app_info('my_app', '1.0.2') + app_version (str): + Application version. + + Example: + >>> client.set_app_info("my_app", "1.0.2") """ if not (app_name and app_version): raise ValueError("Application name/version cannot be empty.") @@ -531,7 +676,12 @@ def trace_on(self, stream: TextIO): """ Enable http trace. - :param stream: Stream for writing HTTP call tracing. + Args: + stream (TextIO): + Stream for writing HTTP call tracing. 
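The reworked `_get_region()` above resolves a bucket's region in a fixed order: an explicitly passed `region` (validated against the constructor's region), then the constructor's region, then the cache, then a GetBucketLocation probe. A standalone sketch of that precedence, not the SDK's code, just the same decision order:

```py
from typing import Optional

def resolve_region(
    passed: Optional[str],
    constructor: Optional[str],
    cached: Optional[str],
) -> str:
    # An explicitly passed region must agree with a region fixed
    # in the constructor.
    if (passed is not None and constructor is not None
            and passed != constructor):
        raise ValueError(
            f"region must be {constructor}, but passed {passed}",
        )
    if passed is not None:
        return passed
    if constructor is not None:
        return constructor
    # The real client issues GetBucketLocation here and caches the
    # answer; the sketch just falls back to the cache.
    return cached or "us-east-1"

print(resolve_region(None, None, None))          # us-east-1
print(resolve_region("eu-west-1", None, None))   # eu-west-1
```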
+ + Example: + >>> client.trace_on(sys.stdout) """ if not stream: raise ValueError('Input stream for trace output is invalid.') @@ -539,9 +689,7 @@ def trace_on(self, stream: TextIO): self._trace_stream = stream def trace_off(self): - """ - Disable HTTP trace. - """ + """Disable HTTP trace.""" self._trace_stream = None def enable_accelerate_endpoint(self): @@ -570,71 +718,122 @@ def disable_virtual_style_endpoint(self): def select_object_content( self, + *, bucket_name: str, object_name: str, request: SelectRequest, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> SelectObjectReader: """ Select content of an object by SQL expression. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param request: :class:`SelectRequest ` object. - :return: A reader contains requested records and progress information. - - Example:: - with client.select_object_content( - "my-bucket", - "my-object.csv", - SelectRequest( - "select * from S3Object", - CSVInputSerialization(), - CSVOutputSerialization(), - request_progress=True, - ), - ) as result: - for data in result.stream(): - print(data.decode()) - print(result.stats()) + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + request (SelectRequest): + Select request. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + SelectObjectReader: + A reader object representing the results of the select + operation. + + Example: + >>> with client.select_object_content( + ... bucket_name="my-bucket", + ... object_name="my-object.csv", + ... request=SelectRequest( + ... expression="select * from S3Object", + ... input_serialization=CSVInputSerialization(), + ... output_serialization=CSVOutputSerialization(), + ... request_progress=True, + ... ), + ... ) as result: + ... for data in result.stream(): + ... print(data.decode()) + ... print(result.stats()) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) if not isinstance(request, SelectRequest): raise ValueError("request must be SelectRequest type") body = marshal(request) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) response = self._execute( - "POST", + method="POST", bucket_name=bucket_name, object_name=object_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"select": "", "select-type": "2"}, + headers=headers, + query_params=HTTPQueryDict({"select": "", "select-type": "2"}), preload_content=False, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return SelectObjectReader(response) def make_bucket( self, + *, bucket_name: str, location: Optional[str] = None, object_lock: bool = False, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Create a bucket with region and object lock. - - :param bucket_name: Name of the bucket. - :param location: Region in which the bucket will be created. - :param object_lock: Flag to set object-lock feature. - - Examples:: - # Create bucket. - client.make_bucket("my-bucket") - - # Create bucket on specific region. 
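The `select_object_content()` docstring above shows only CSV serialization; the same call also works for JSON-lines objects. A hedged sketch, assuming the `JSONInputSerialization`/`JSONOutputSerialization` classes in `minio.select` keep their current names and accept these keyword arguments:

```py
from minio import Minio
from minio.select import (JSONInputSerialization, JSONOutputSerialization,
                          SelectRequest)

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

with client.select_object_content(
    bucket_name="my-bucket",
    object_name="my-object.ndjson",
    request=SelectRequest(
        expression="select * from S3Object",
        input_serialization=JSONInputSerialization(json_type="LINES"),
        output_serialization=JSONOutputSerialization(),
        request_progress=True,
    ),
) as result:
    for data in result.stream():
        print(data.decode())
```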
- client.make_bucket("my-bucket", "us-west-1") - - # Create bucket with object-lock feature on specific region. - client.make_bucket("my-bucket", "eu-west-2", object_lock=True) + Create a bucket with region and optional object lock. + + Args: + bucket_name (str): + Name of the bucket. + + location (Optional[str], default=None): + Region in which the bucket is to be created. + + object_lock (bool, default=False): + Flag to enable the object-lock feature. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> # Create bucket + >>> client.make_bucket(bucket_name="my-bucket") + >>> + >>> # Create bucket in a specific region + >>> client.make_bucket( + ... bucket_name="my-bucket", + ... location="eu-west-1", + ... ) + >>> + >>> # Create bucket with object-lock in a region + >>> client.make_bucket( + ... bucket_name="my-bucket", + ... location="eu-west-2", + ... object_lock=True, + ... ) """ check_bucket_name(bucket_name, True, s3_check=self._base_url.is_aws_host) @@ -647,228 +846,624 @@ def make_bucket( f"but passed {location}" ) location = self._base_url.region or location or "us-east-1" - headers: Optional[DictType] = ( - {"x-amz-bucket-object-lock-enabled": "true"} - if object_lock else None - ) - + headers = HTTPHeaderDict() + if object_lock: + headers["x-amz-bucket-object-lock-enabled"] = "true" body = None if location != "us-east-1": element = Element("CreateBucketConfiguration") SubElement(element, "LocationConstraint", location) body = getbytes(element) self._url_open( - "PUT", - location, + method="PUT", + region=location, bucket_name=bucket_name, body=body, headers=headers, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) + self._region_map.set(bucket_name, location) + + def _list_buckets( + self, + *, + bucket_region: Optional[str] = None, + max_buckets: int = 10000, + prefix: Optional[str] = None, + continuation_token: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> ListAllMyBucketsResult: + """Do ListBuckets S3 API.""" + query_params = HTTPQueryDict() + query_params["max-buckets"] = str( + max_buckets if max_buckets > 0 else 10000, + ) + if bucket_region is not None: + query_params["bucket-region"] = bucket_region + if prefix: + query_params["prefix"] = prefix + if continuation_token: + query_params["continuation-token"] = continuation_token + + response = self._execute( + method="GET", + query_params=query_params, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - self._region_map[bucket_name] = location + return unmarshal(ListAllMyBucketsResult, response.data.decode()) - def list_buckets(self) -> list[Bucket]: + def list_buckets( + self, + *, + bucket_region: Optional[str] = None, + max_buckets: int = 10000, + prefix: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> Iterator[Bucket]: """ List information of all accessible buckets. - :return: List of :class:`Bucket ` object. + Args: + bucket_region (Optional[str], default=None): + Fetch buckets from the specified region. - Example:: - buckets = client.list_buckets() - for bucket in buckets: - print(bucket.name, bucket.creation_date) - """ + max_buckets (int, default=10000): + Maximum number of buckets to fetch. 
+ + prefix (Optional[str], default=None): + Return only buckets whose names start with this prefix. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. - response = self._execute("GET") - result = unmarshal(ListAllMyBucketsResult, response.data.decode()) - return result.buckets + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. - def bucket_exists(self, bucket_name: str) -> bool: + Returns: + Iterator[Bucket]: + An iterator of :class:`minio.datatypes.Bucket` objects. + + Example: + >>> buckets = client.list_buckets() + >>> for bucket in buckets: + ... print(bucket.name, bucket.creation_date) + """ + continuation_token: Optional[str] = "" + while continuation_token is not None: + result = self._list_buckets( + bucket_region=bucket_region, + max_buckets=max_buckets, + prefix=prefix, + continuation_token=continuation_token, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) + continuation_token = result.continuation_token + yield from result.buckets + + def bucket_exists( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> bool: """ Check if a bucket exists. - :param bucket_name: Name of the bucket. - :return: True if the bucket exists. + Args: + bucket_name (str): + Name of the bucket. - Example:: - if client.bucket_exists("my-bucket"): - print("my-bucket exists") - else: - print("my-bucket does not exist") + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + bool: + True if the bucket exists, False otherwise. + + Example: + >>> if client.bucket_exists(bucket_name="my-bucket"): + ... print("my-bucket exists") + ... else: + ... print("my-bucket does not exist") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) try: - self._execute("HEAD", bucket_name) + self._execute( + method="HEAD", + bucket_name=bucket_name, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) return True except S3Error as exc: if exc.code != "NoSuchBucket": raise return False - def remove_bucket(self, bucket_name: str): + def remove_bucket( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ Remove an empty bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. 
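`list_buckets()` now pages through ListBuckets responses transparently: it seeds an empty continuation token and stops when the service returns none, while the new `prefix` and `max_buckets` arguments flow into each page request. First, a usage sketch (the prefix value is a placeholder):

```py
from minio import Minio

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

# Server-side name filtering and a smaller page size.
for bucket in client.list_buckets(prefix="prod-", max_buckets=100):
    print(bucket.name, bucket.creation_date)
```

And the pagination shape itself, reduced to a standalone toy generator:

```py
from typing import Callable, Iterator, Optional, Tuple

Page = Tuple[list, Optional[str]]

def paginate(fetch_page: Callable[[str], Page]) -> Iterator[object]:
    # Same loop as list_buckets(): seed with an empty token and
    # stop once no continuation token comes back.
    token: Optional[str] = ""
    while token is not None:
        items, token = fetch_page(token)
        yield from items

# Toy fetcher: two pages, then no continuation token.
pages = {"": (["a", "b"], "next"), "next": (["c"], None)}
print(list(paginate(lambda tok: pages[tok])))  # ['a', 'b', 'c']
```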
- Example:: - client.remove_bucket("my-bucket") + Example: + >>> client.remove_bucket(bucket_name="my-bucket") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) - self._execute("DELETE", bucket_name) - self._region_map.pop(bucket_name, None) + self._execute( + method="DELETE", + bucket_name=bucket_name, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) + self._region_map.remove(bucket_name) - def get_bucket_policy(self, bucket_name: str) -> str: + def get_bucket_policy( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> str: """ - Get bucket policy configuration of a bucket. + Get the bucket policy configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: Bucket policy configuration as JSON string. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. - Example:: - policy = client.get_bucket_policy("my-bucket") + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + str: + Bucket policy configuration as a JSON string. + + Example: + >>> policy = client.get_bucket_policy(bucket_name="my-bucket") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) response = self._execute( - "GET", bucket_name, query_params={"policy": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"policy": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return response.data.decode() - def delete_bucket_policy(self, bucket_name: str): + def _execute_delete_bucket( + self, + *, + bucket_name: str, + query_params: HTTPQueryDict, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): + """ Delete any bucket API. """ + check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) + self._execute( + method="DELETE", + bucket_name=bucket_name, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) + + def delete_bucket_policy( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete bucket policy configuration of a bucket. + Delete the bucket policy configuration of a bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. - Example:: - client.delete_bucket_policy("my-bucket") - """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) - self._execute("DELETE", bucket_name, query_params={"policy": ""}) + region (Optional[str], default=None): + Region of the bucket to skip auto probing. - def set_bucket_policy(self, bucket_name: str, policy: str | bytes): - """ - Set bucket policy configuration to a bucket. + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. - :param bucket_name: Name of the bucket. - :param policy: Bucket policy configuration as JSON string. + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. 
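One behavioral note on `get_bucket_policy()` above: unlike `get_bucket_encryption()` later in this file, it does not swallow the not-found case, so a bucket without a policy surfaces as an `S3Error`. A hedged sketch of the usual guard, using the standard S3 error code for a missing policy:

```py
from minio import Minio
from minio.error import S3Error

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

try:
    policy = client.get_bucket_policy(bucket_name="my-bucket")
    print(policy)
except S3Error as exc:
    # S3 reports a missing policy as an error, not an empty document.
    if exc.code != "NoSuchBucketPolicy":
        raise
    print("no policy set on my-bucket")
```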
+ + Example: + >>> client.delete_bucket_policy(bucket_name="my-bucket") + """ + self._execute_delete_bucket( + bucket_name=bucket_name, + query_params=HTTPQueryDict({"policy": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) - Example:: - client.set_bucket_policy("my-bucket", policy) + def set_bucket_policy( + self, + *, + bucket_name: str, + policy: str | bytes, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): + """ + Set the bucket policy configuration for a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + policy (str | bytes): + Bucket policy configuration as a JSON string. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> # Example anonymous read-only bucket policy + >>> policy = { + ... "Version": "2012-10-17", + ... "Statement": [ + ... { + ... "Effect": "Allow", + ... "Principal": {"AWS": "*"}, + ... "Action": ["s3:GetBucketLocation", "s3:ListBucket"], + ... "Resource": "arn:aws:s3:::my-bucket", + ... }, + ... { + ... "Effect": "Allow", + ... "Principal": {"AWS": "*"}, + ... "Action": "s3:GetObject", + ... "Resource": "arn:aws:s3:::my-bucket/*", + ... }, + ... ], + ... } + >>> client.set_bucket_policy( + ... bucket_name="my-bucket", + ... policy=json.dumps(policy), + ... ) + >>> # Example anonymous read-write bucket policy + >>> policy = { + ... "Version": "2012-10-17", + ... "Statement": [ + ... { + ... "Effect": "Allow", + ... "Principal": {"AWS": "*"}, + ... "Action": [ + ... "s3:GetBucketLocation", + ... "s3:ListBucket", + ... "s3:ListBucketMultipartUploads", + ... ], + ... "Resource": "arn:aws:s3:::my-bucket", + ... }, + ... { + ... "Effect": "Allow", + ... "Principal": {"AWS": "*"}, + ... "Action": [ + ... "s3:GetObject", + ... "s3:PutObject", + ... "s3:DeleteObject", + ... "s3:ListMultipartUploadParts", + ... "s3:AbortMultipartUpload", + ... ], + ... "Resource": "arn:aws:s3:::my-bucket/images/*", + ... }, + ... ], + ... } + >>> client.set_bucket_policy( + ... bucket_name="my-bucket", + ... policy=json.dumps(policy), + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) is_valid_policy_type(policy) + body = policy if isinstance(policy, bytes) else policy.encode() + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, - body=policy if isinstance(policy, bytes) else policy.encode(), - headers={"Content-MD5": cast(str, md5sum_hash(policy))}, - query_params={"policy": ""}, + method="PUT", + bucket_name=bucket_name, + body=body, + headers=headers, + query_params=HTTPQueryDict({"policy": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def get_bucket_notification(self, bucket_name: str) -> NotificationConfig: + def get_bucket_notification( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> NotificationConfig: """ - Get notification configuration of a bucket. + Get the notification configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`NotificationConfig ` object. 
+ Args: + bucket_name (str): + Name of the bucket. - Example:: - config = client.get_bucket_notification("my-bucket") - """ + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + NotificationConfig: + The notification configuration of the bucket. + + Example: + >>> config = client.get_bucket_notification(bucket_name="my-bucket") + """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) response = self._execute( - "GET", bucket_name, query_params={"notification": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"notification": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(NotificationConfig, response.data.decode()) def set_bucket_notification( self, + *, bucket_name: str, config: NotificationConfig, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set notification configuration of a bucket. - - :param bucket_name: Name of the bucket. - :param config: class:`NotificationConfig ` object. - - Example:: - config = NotificationConfig( - queue_config_list=[ - QueueConfig( - "QUEUE-ARN-OF-THIS-BUCKET", - ["s3:ObjectCreated:*"], - config_id="1", - prefix_filter_rule=PrefixFilterRule("abc"), - ), - ], - ) - client.set_bucket_notification("my-bucket", config) + Set the notification configuration of a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + config (NotificationConfig): + Notification configuration. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> config = NotificationConfig( + ... queue_config_list=[ + ... QueueConfig( + ... queue="QUEUE-ARN-OF-THIS-BUCKET", + ... events=["s3:ObjectCreated:*"], + ... config_id="1", + ... prefix_filter_rule=PrefixFilterRule("abc"), + ... ), + ... ], + ... ) + >>> client.set_bucket_notification( + ... bucket_name="my-bucket", + ... config=config, + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(config, NotificationConfig): raise ValueError("config must be NotificationConfig type") body = marshal(config) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"notification": ""}, + headers=headers, + query_params=HTTPQueryDict({"notification": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def delete_bucket_notification(self, bucket_name: str): + def delete_bucket_notification( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete notification configuration of a bucket. On success, S3 service - stops notification of events previously set of the bucket. + Delete the notification configuration of a bucket. 
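To verify a notification setup like the one in the docstring above, the configuration can be read back. A hedged sketch, assuming `NotificationConfig` and `QueueConfig` expose their constructor arguments (`queue_config_list`, `queue`, `events`) as same-named accessors:

```py
from minio import Minio

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

config = client.get_bucket_notification(bucket_name="my-bucket")
for queue_config in config.queue_config_list or []:
    print(queue_config.queue, queue_config.events)
```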
- :param bucket_name: Name of the bucket. + On success, the S3 service stops sending event notifications + that were previously configured for the bucket. + + Args: + bucket_name (str): + Name of the bucket. - Example:: - client.delete_bucket_notification("my-bucket") + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.delete_bucket_notification(bucket_name="my-bucket") """ - self.set_bucket_notification(bucket_name, NotificationConfig()) + self.set_bucket_notification( + bucket_name=bucket_name, + config=NotificationConfig(), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) - def set_bucket_encryption(self, bucket_name: str, config: SSEConfig): + def set_bucket_encryption( + self, + *, + bucket_name: str, + config: SSEConfig, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Set encryption configuration of a bucket. + Set the encryption configuration of a bucket. - :param bucket_name: Name of the bucket. - :param config: :class:`SSEConfig ` object. + Args: + bucket_name (str): + Name of the bucket. - Example:: - client.set_bucket_encryption( - "my-bucket", SSEConfig(Rule.new_sse_s3_rule()), - ) + config (SSEConfig): + Server-side encryption configuration. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.set_bucket_encryption( + ... bucket_name="my-bucket", + ... config=SSEConfig(Rule.new_sse_s3_rule()), + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(config, SSEConfig): raise ValueError("config must be SSEConfig type") body = marshal(config) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"encryption": ""}, + headers=headers, + query_params=HTTPQueryDict({"encryption": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def get_bucket_encryption(self, bucket_name: str) -> Optional[SSEConfig]: + def get_bucket_encryption( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> Optional[SSEConfig]: """ - Get encryption configuration of a bucket. + Get the encryption configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`SSEConfig ` object. + Args: + bucket_name (str): + Name of the bucket. - Example:: - config = client.get_bucket_encryption("my-bucket") - """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) - try: + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. 
+ + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Optional[SSEConfig]: + The server-side encryption configuration of the bucket, or + None if no encryption configuration is set. + + Example: + >>> config = client.get_bucket_encryption(bucket_name="my-bucket") + """ + check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) + try: response = self._execute( - "GET", - bucket_name, - query_params={"encryption": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"encryption": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(SSEConfig, response.data.decode()) except S3Error as exc: @@ -876,21 +1471,40 @@ def get_bucket_encryption(self, bucket_name: str) -> Optional[SSEConfig]: raise return None - def delete_bucket_encryption(self, bucket_name: str): + def delete_bucket_encryption( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete encryption configuration of a bucket. + Delete the encryption configuration of a bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. - Example:: - client.delete_bucket_encryption("my-bucket") + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.delete_bucket_encryption(bucket_name="my-bucket") """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) try: - self._execute( - "DELETE", - bucket_name, - query_params={"encryption": ""}, + self._execute_delete_bucket( + bucket_name=bucket_name, + query_params=HTTPQueryDict({"encryption": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) except S3Error as exc: if exc.code != "ServerSideEncryptionConfigurationNotFoundError": @@ -898,31 +1512,61 @@ def delete_bucket_encryption(self, bucket_name: str): def listen_bucket_notification( self, + *, bucket_name: str, prefix: str = "", suffix: str = "", - events: tuple[str, ...] = ('s3:ObjectCreated:*', - 's3:ObjectRemoved:*', - 's3:ObjectAccessed:*'), + events: tuple[str, ...] = ( + 's3:ObjectCreated:*', + 's3:ObjectRemoved:*', + 's3:ObjectAccessed:*', + ), + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> EventIterable: """ - Listen events of object prefix and suffix of a bucket. Caller should - iterate returned iterator to read new events. + Listen for events on objects in a bucket matching prefix and/or suffix. - :param bucket_name: Name of the bucket. - :param prefix: Listen events of object starts with prefix. - :param suffix: Listen events of object ends with suffix. - :param events: Events to listen. - :return: Iterator of event records as :dict:. - - Example:: - with client.listen_bucket_notification( - "my-bucket", - prefix="my-prefix/", - events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"], - ) as events: - for event in events: - print(event) + The caller should iterate over the returned iterator to read new events + as they occur. + + Args: + bucket_name (str): + Name of the bucket. 
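As the `get_bucket_encryption()` hunk above shows, the method returns `None` when no default encryption is configured (the `ServerSideEncryptionConfigurationNotFoundError` case is swallowed), so callers should branch on that. A hedged sketch; the `rule.sse_algorithm` accessor is an assumption based on `minio.sseconfig`:

```py
from minio import Minio

client = Minio(
    endpoint="play.min.io",
    access_key="Q3AM3UQ867SPQQA43P2F",
    secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)

config = client.get_bucket_encryption(bucket_name="my-bucket")
if config is None:
    print("no default encryption configured")
else:
    print("encryption algorithm:", config.rule.sse_algorithm)
```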
+ + prefix (str, default=""): + Listen for events on objects whose names start with this prefix. + + suffix (str, default=""): + Listen for events on objects whose names end with this suffix. + + events (tuple[str, ...], default=("s3:ObjectCreated:*", + "s3:ObjectRemoved:*", "s3:ObjectAccessed:*")): + Events to listen for. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + EventIterable: + An iterator of :class:`minio.datatypes.EventIterable` containing + event records. + + Example: + >>> with client.listen_bucket_notification( + ... bucket_name="my-bucket", + ... prefix="my-prefix/", + ... events=["s3:ObjectCreated:*", "s3:ObjectRemoved:*"], + ... ) as events: + ... for event in events: + ... print(event) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if self._base_url.is_aws_host: @@ -930,185 +1574,398 @@ def listen_bucket_notification( "ListenBucketNotification API is not supported in Amazon S3", ) + query_params = HTTPQueryDict({ + "prefix": prefix or "", + "suffix": suffix or "", + "events": events, + }) return EventIterable( lambda: self._execute( - "GET", - bucket_name, - query_params={ - "prefix": prefix or "", - "suffix": suffix or "", - "events": cast(Tuple[str], events), - }, + method="GET", + bucket_name=bucket_name, + query_params=query_params, preload_content=False, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ), ) def set_bucket_versioning( self, + *, bucket_name: str, config: VersioningConfig, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set versioning configuration to a bucket. + Set the versioning configuration for a bucket. - :param bucket_name: Name of the bucket. - :param config: :class:`VersioningConfig `. + Args: + bucket_name (str): + Name of the bucket. - Example:: - client.set_bucket_versioning( - "my-bucket", VersioningConfig(ENABLED), - ) + config (VersioningConfig): + Versioning configuration. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.set_bucket_versioning( + ... bucket_name="my-bucket", + ... config=VersioningConfig(ENABLED), + ... 
) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(config, VersioningConfig): raise ValueError("config must be VersioningConfig type") body = marshal(config) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"versioning": ""}, + headers=headers, + query_params=HTTPQueryDict({"versioning": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def get_bucket_versioning(self, bucket_name: str) -> VersioningConfig: + def get_bucket_versioning( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> VersioningConfig: """ - Get versioning configuration of a bucket. + Get the versioning configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`VersioningConfig `. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. - Example:: - config = client.get_bucket_versioning("my-bucket") - print(config.status) + Returns: + VersioningConfig: + The versioning configuration of the bucket. + + Example: + >>> config = client.get_bucket_versioning(bucket_name="my-bucket") + >>> print(config.status) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) response = self._execute( - "GET", - bucket_name, - query_params={"versioning": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"versioning": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(VersioningConfig, response.data.decode()) def fput_object( self, + *, bucket_name: str, object_name: str, file_path: str, content_type: str = "application/octet-stream", - metadata: Optional[DictType] = None, + headers: Optional[HTTPHeaderDict] = None, + user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, progress: Optional[ProgressType] = None, part_size: int = 0, + checksum: Optional[Algorithm] = None, num_parallel_uploads: int = 3, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ObjectWriteResult: """ - Uploads data from a file to an object in a bucket. - - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param file_path: Name of file to upload. - :param content_type: Content type of the object. - :param metadata: Any additional metadata to be uploaded along - with your PUT request. - :param sse: Server-side encryption. - :param progress: A progress object - :param part_size: Multipart part size - :param num_parallel_uploads: Number of parallel uploads. - :param tags: :class:`Tags` for the object. - :param retention: :class:`Retention` configuration object. - :param legal_hold: Flag to set legal hold for the object. - :return: :class:`ObjectWriteResult` object. - - Example:: - # Upload data. 
-        result = client.fput_object(
-            "my-bucket", "my-object", "my-filename",
-        )
-
-        # Upload data with metadata.
-        result = client.fput_object(
-            "my-bucket", "my-object", "my-filename",
-            metadata={"My-Project": "one"},
-        )
-
-        # Upload data with tags, retention and legal-hold.
-        date = datetime.utcnow().replace(
-            hour=0, minute=0, second=0, microsecond=0,
-        ) + timedelta(days=30)
-        tags = Tags(for_object=True)
-        tags["User"] = "jsmith"
-        result = client.fput_object(
-            "my-bucket", "my-object", "my-filename",
-            tags=tags,
-            retention=Retention(GOVERNANCE, date),
-            legal_hold=True,
-        )
+        Upload data from a file to an object in a bucket.
+
+        Args:
+            bucket_name (str):
+                Name of the bucket.
+
+            object_name (str):
+                Object name in the bucket.
+
+            file_path (str):
+                Path to the file to upload.
+
+            content_type (str, default="application/octet-stream"):
+                Content type of the object.
+
+            headers (Optional[HTTPHeaderDict], default=None):
+                Additional headers.
+
+            user_metadata (Optional[HTTPHeaderDict], default=None):
+                User metadata of the object.
+
+            sse (Optional[Sse], default=None):
+                Server-side encryption configuration.
+
+            progress (Optional[ProgressType], default=None):
+                Progress object to track upload progress.
+
+            part_size (int, default=0):
+                Multipart upload part size in bytes.
+
+            checksum (Optional[Algorithm], default=None):
+                Algorithm for checksum computation.
+
+            num_parallel_uploads (int, default=3):
+                Number of parallel uploads.
+
+            tags (Optional[Tags], default=None):
+                Tags for the object.
+
+            retention (Optional[Retention], default=None):
+                Retention configuration.
+
+            legal_hold (bool, default=False):
+                Flag to set legal hold for the object.
+
+            region (Optional[str], default=None):
+                Region of the bucket to skip auto probing.
+
+            extra_headers (Optional[HTTPHeaderDict], default=None):
+                Extra headers for advanced usage.
+
+            extra_query_params (Optional[HTTPQueryDict], default=None):
+                Extra query parameters for advanced usage.
+
+        Returns:
+            ObjectWriteResult:
+                The result of the object upload operation.
+
+        Example:
+            >>> # Upload data
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ... )
+            >>> print(
+            ...     f"created {result.object_name} object; "
+            ...     f"etag: {result.etag}, version-id: {result.version_id}",
+            ... )
+
+            >>> # Upload with part size
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     part_size=10*1024*1024,
+            ... )
+
+            >>> # Upload with content type
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     content_type="application/csv",
+            ... )
+
+            >>> # Upload with user metadata
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     user_metadata={"My-Project": "one"},
+            ... )
+
+            >>> # Upload with customer key encryption
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
+            ... )
+
+            >>> # Upload with KMS encryption
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     sse=SseKMS(
+            ...         "KMS-KEY-ID",
+            ...         {"Key1": "Value1", "Key2": "Value2"},
+            ...     ),
+            ... )
+
+            >>> # Upload with S3-managed encryption
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     sse=SseS3(),
+            ... )
+
+            >>> # Upload with tags, retention and legal hold
+            >>> date = datetime.utcnow().replace(
+            ...     hour=0, minute=0, second=0, microsecond=0,
+            ... ) + timedelta(days=30)
+            >>> tags = Tags(for_object=True)
+            >>> tags["User"] = "jsmith"
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     tags=tags,
+            ...     retention=Retention(GOVERNANCE, date),
+            ...     legal_hold=True,
+            ... )
+
+            >>> # Upload with progress bar
+            >>> result = client.fput_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     file_path="my-filename",
+            ...     progress=Progress(),
+            ... )
         """
         file_size = os.stat(file_path).st_size
         with open(file_path, "rb") as file_data:
             return self.put_object(
-                bucket_name,
-                object_name,
-                file_data,
-                file_size,
+                bucket_name=bucket_name,
+                object_name=object_name,
+                data=file_data,
+                length=file_size,
                 content_type=content_type,
-                metadata=cast(Union[DictType, None], metadata),
+                headers=headers,
+                user_metadata=user_metadata,
                 sse=sse,
+                checksum=checksum,
                 progress=progress,
                 part_size=part_size,
                 num_parallel_uploads=num_parallel_uploads,
                 tags=tags,
                 retention=retention,
                 legal_hold=legal_hold,
+                region=region,
+                extra_headers=extra_headers,
+                extra_query_params=extra_query_params,
             )

     def fget_object(
         self,
+        *,
         bucket_name: str,
         object_name: str,
         file_path: str,
-        request_headers: Optional[DictType] = None,
+        match_etag: Optional[str] = None,
+        not_match_etag: Optional[str] = None,
+        modified_since: Optional[datetime] = None,
+        unmodified_since: Optional[datetime] = None,
+        fetch_checksum: bool = False,
         ssec: Optional[SseCustomerKey] = None,
         version_id: Optional[str] = None,
-        extra_query_params: Optional[DictType] = None,
         tmp_file_path: Optional[str] = None,
         progress: Optional[ProgressType] = None,
+        region: Optional[str] = None,
+        extra_headers: Optional[HTTPHeaderDict] = None,
+        extra_query_params: Optional[HTTPQueryDict] = None,
     ):
         """
-        Downloads data of an object to file.
-
-        :param bucket_name: Name of the bucket.
-        :param object_name: Object name in the bucket.
-        :param file_path: Name of file to download.
-        :param request_headers: Any additional headers to be added with GET
-            request.
-        :param ssec: Server-side encryption customer key.
-        :param version_id: Version-ID of the object.
-        :param extra_query_params: Extra query parameters for advanced usage.
-        :param tmp_file_path: Path to a temporary file.
-        :param progress: A progress object
-        :return: Object information.
-
-        Example::
-            # Download data of an object.
-            client.fget_object("my-bucket", "my-object", "my-filename")
-
-            # Download data of an object of version-ID.
-            client.fget_object(
-                "my-bucket", "my-object", "my-filename",
-                version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
-            )
-
-            # Download data of an SSE-C encrypted object.
-            client.fget_object(
-                "my-bucket", "my-object", "my-filename",
-                ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
-            )
+        Download an object to a file.
+
+        Args:
+            bucket_name (str):
+                Name of the bucket.
+
+            object_name (str):
+                Object name in the bucket.
+
+            file_path (str):
+                Path to the file where data will be downloaded.
+
+            match_etag (Optional[str], default=None):
+                Match ETag of the object.
+
+            not_match_etag (Optional[str], default=None):
+                Non-match ETag of the object.
+ + modified_since (Optional[datetime], default=None): + Condition to fetch object modified since the given date. + + unmodified_since (Optional[datetime], default=None): + Condition to fetch object unmodified since the given date. + + fetch_checksum (bool, default=False): + Flag to fetch object checksum. + + ssec (Optional[SseCustomerKey], default=None): + Server-side encryption customer key. + + version_id (Optional[str], default=None): + Version ID of the object. + + tmp_file_path (Optional[str], default=None): + Path to a temporary file used during download. + + progress (Optional[ProgressType], default=None): + Progress object to track download progress. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> # Download object + >>> client.fget_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... file_path="my-filename", + ... ) + >>> + >>> # Download specific version of object + >>> client.fget_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... file_path="my-filename", + ... version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", + ... ) + >>> + >>> # Download SSE-C encrypted object + >>> client.fget_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... file_path="my-filename", + ... ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) @@ -1120,15 +1977,14 @@ def fget_object( makedirs(os.path.dirname(file_path)) stat = self.stat_object( - bucket_name, - object_name, - ssec, + bucket_name=bucket_name, + object_name=object_name, + ssec=ssec, version_id=version_id, - extra_headers=request_headers, ) etag = queryencode(cast(str, stat.etag)) - # Write to a temporary file "file_path.part.minio" before saving. + # Write to a temporary file "file_path.ETAG.part.minio" before saving. tmp_file_path = ( tmp_file_path or f"{file_path}.{etag}.part.minio" ) @@ -1136,11 +1992,17 @@ def fget_object( response = None try: response = self.get_object( - bucket_name, - object_name, - request_headers=request_headers, + bucket_name=bucket_name, + object_name=object_name, + match_etag=match_etag, + not_match_etag=not_match_etag, + modified_since=modified_since, + unmodified_since=unmodified_since, + fetch_checksum=fetch_checksum, ssec=ssec, version_id=version_id, + region=region, + extra_headers=extra_headers, extra_query_params=extra_query_params, ) @@ -1165,231 +2027,361 @@ def fget_object( def get_object( self, + *, bucket_name: str, object_name: str, - offset: int = 0, - length: int = 0, - request_headers: Optional[DictType] = None, - ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, - extra_query_params: Optional[DictType] = None, + ssec: Optional[SseCustomerKey] = None, + offset: int = 0, + length: Optional[int] = None, + match_etag: Optional[str] = None, + not_match_etag: Optional[str] = None, + modified_since: Optional[datetime] = None, + unmodified_since: Optional[datetime] = None, + fetch_checksum: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> BaseHTTPResponse: """ - Get data of an object. 
Returned response should be closed after use to
-        release network resources. To reuse the connection, it's required to
-        call `response.release_conn()` explicitly.
-
-        :param bucket_name: Name of the bucket.
-        :param object_name: Object name in the bucket.
-        :param offset: Start byte position of object data.
-        :param length: Number of bytes of object data from offset.
-        :param request_headers: Any additional headers to be added with GET
-            request.
-        :param ssec: Server-side encryption customer key.
-        :param version_id: Version-ID of the object.
-        :param extra_query_params: Extra query parameters for advanced usage.
-        :return: :class:`urllib3.response.BaseHTTPResponse` object.
-
-        Example::
-            # Get data of an object.
-            response = None
-            try:
-                response = client.get_object("my-bucket", "my-object")
-                # Read data from response.
-            finally:
-                if response:
-                    response.close()
-                    response.release_conn()
-
-            # Get data of an object of version-ID.
-            response = None
-            try:
-                response = client.get_object(
-                    "my-bucket", "my-object",
-                    version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d",
-                )
-                # Read data from response.
-            finally:
-                if response:
-                    response.close()
-                    response.release_conn()
-
-            # Get data of an object from offset and length.
-            response = None
-            try:
-                response = client.get_object(
-                    "my-bucket", "my-object", offset=512, length=1024,
-                )
-                # Read data from response.
-            finally:
-                if response:
-                    response.close()
-                    response.release_conn()
-
-            # Get data of an SSE-C encrypted object.
-            response = None
-            try:
-                response = client.get_object(
-                    "my-bucket", "my-object",
-                    ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
-                )
-                # Read data from response.
-            finally:
-                if response:
-                    response.close()
-                    response.release_conn()
+        Get object data from a bucket.
+
+        Data is read starting at the specified offset up to the given length.
+        The returned response must be closed after use to release network
+        resources. To reuse the connection, explicitly call
+        ``response.release_conn()``.
+
+        Args:
+            bucket_name (str):
+                Name of the bucket.
+
+            object_name (str):
+                Object name in the bucket.
+
+            version_id (Optional[str], default=None):
+                Version ID of the object.
+
+            ssec (Optional[SseCustomerKey], default=None):
+                Server-side encryption customer key.
+
+            offset (int, default=0):
+                Start byte position of object data.
+
+            length (Optional[int], default=None):
+                Number of bytes of object data to read from offset.
+
+            match_etag (Optional[str], default=None):
+                Match ETag of the object.
+
+            not_match_etag (Optional[str], default=None):
+                Non-match ETag of the object.
+
+            modified_since (Optional[datetime], default=None):
+                Condition to fetch object modified since the given date.
+
+            unmodified_since (Optional[datetime], default=None):
+                Condition to fetch object unmodified since the given date.
+
+            fetch_checksum (bool, default=False):
+                Flag to fetch object checksum.
+
+            region (Optional[str], default=None):
+                Region of the bucket to skip auto probing.
+
+            extra_headers (Optional[HTTPHeaderDict], default=None):
+                Extra headers for advanced usage.
+
+            extra_query_params (Optional[HTTPQueryDict], default=None):
+                Extra query parameters for advanced usage.
+
+        Returns:
+            BaseHTTPResponse:
+                An :class:`urllib3.response.BaseHTTPResponse` or
+                :class:`urllib3.response.HTTPResponse` object containing
+                the object data.
+
+        Example:
+            >>> # Get data of an object
+            >>> try:
+            ...     response = client.get_object(
+            ...         bucket_name="my-bucket",
+            ...         object_name="my-object",
+            ...     )
+            ...
# Read data from response + ... finally: + ... response.close() + ... response.release_conn() + >>> + >>> # Get specific version of an object + >>> try: + ... response = client.get_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", + ... ) + ... finally: + ... response.close() + ... response.release_conn() + >>> + >>> # Get object data from offset and length + >>> try: + ... response = client.get_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... offset=512, + ... length=1024, + ... ) + ... finally: + ... response.close() + ... response.release_conn() + >>> + >>> # Get SSE-C encrypted object + >>> try: + ... response = client.get_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ssec=SseCustomerKey( + ... b"32byteslongsecretkeymustprovided" + ... ), + ... ) + ... finally: + ... response.close() + ... response.release_conn() """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) check_ssec(ssec) - headers = cast(DictType, ssec.headers() if ssec else {}) - headers.update(request_headers or {}) - - if offset or length: - end = (offset + length - 1) if length else "" - headers['Range'] = f"bytes={offset}-{end}" - + headers = self._gen_read_headers( + ssec=ssec, + offset=offset, + length=length, + match_etag=match_etag, + not_match_etag=not_match_etag, + modified_since=modified_since, + unmodified_since=unmodified_since, + fetch_checksum=fetch_checksum, + ) + query_params = HTTPQueryDict() if version_id: - extra_query_params = extra_query_params or {} - extra_query_params["versionId"] = version_id + query_params["versionId"] = version_id return self._execute( - "GET", - bucket_name, - object_name, - headers=cast(DictType, headers), - query_params=extra_query_params, + method="GET", + bucket_name=bucket_name, + object_name=object_name, + headers=headers, + query_params=query_params, preload_content=False, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def prompt_object( self, + *, bucket_name: str, object_name: str, prompt: str, lambda_arn: Optional[str] = None, - request_headers: Optional[DictType] = None, ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, **kwargs: Optional[Any], ) -> BaseHTTPResponse: """ Prompt an object using natural language. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param prompt: Prompt the Object to interact with the AI model. - request. - :param lambda_arn: Lambda ARN to use for prompt. - :param request_headers: Any additional headers to be added with POST - :param ssec: Server-side encryption customer key. - :param version_id: Version-ID of the object. - :param kwargs: Extra parameters for advanced usage. - :return: :class:`urllib3.response.BaseHTTPResponse` object. - - Example:: - # prompt an object. - response = None - try: - response = client.get_object( - "my-bucket", "my-object", - "Describe the object for me") - # Read data from response. - finally: - if response: - response.close() - response.release_conn() + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + prompt (str): + Natural language prompt to interact with the object using + the AI model. 
+ + lambda_arn (Optional[str], default=None): + AWS Lambda ARN to use for processing the prompt. + + ssec (Optional[SseCustomerKey], default=None): + Server-side encryption customer key. + + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + **kwargs (Optional[Any]): + Additional parameters for advanced usage. + + Returns: + BaseHTTPResponse: + An :class:`urllib3.response.BaseHTTPResponse` object. + + Example: + >>> response = None + >>> try: + ... response = client.prompt_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... prompt="Describe the object for me", + ... ) + ... # Read data from response + ... finally: + ... if response: + ... response.close() + ... response.release_conn() """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) check_ssec(ssec) - headers = cast(DictType, ssec.headers() if ssec else {}) - headers.update(request_headers or {}) - - extra_query_params = {"lambdaArn": lambda_arn or ""} - + query_params = HTTPQueryDict() if version_id: - extra_query_params["versionId"] = version_id + query_params["versionId"] = version_id + query_params["lambdaArn"] = lambda_arn or "" prompt_body = kwargs prompt_body["prompt"] = prompt body = json.dumps(prompt_body) return self._execute( - "POST", - bucket_name, - object_name, - headers=cast(DictType, headers), - query_params=cast(DictType, extra_query_params), + method="POST", + bucket_name=bucket_name, + object_name=object_name, + headers=HTTPHeaderDict(ssec.headers()) if ssec else None, + query_params=query_params, body=body.encode(), preload_content=False, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def copy_object( self, + *, bucket_name: str, object_name: str, source: CopySource, sse: Optional[Sse] = None, - metadata: Optional[DictType] = None, + user_metadata: Optional[HTTPHeaderDict] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, metadata_directive: Optional[str] = None, tagging_directive: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ObjectWriteResult: """ Create an object by server-side copying data from another object. - In this API maximum supported source object size is 5GiB. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param source: :class:`CopySource` object. - :param sse: Server-side encryption of destination object. - :param metadata: Any user-defined metadata to be copied along with - destination object. - :param tags: Tags for destination object. - :param retention: :class:`Retention` configuration object. - :param legal_hold: Flag to set legal hold for destination object. - :param metadata_directive: Directive used to handle user metadata for - destination object. - :param tagging_directive: Directive used to handle tags for destination - object. - :return: :class:`ObjectWriteResult ` object. - - Example:: - # copy an object from a bucket to another. 
- result = client.copy_object( - "my-bucket", - "my-object", - CopySource("my-sourcebucket", "my-sourceobject"), - ) - print(result.object_name, result.version_id) - - # copy an object with condition. - result = client.copy_object( - "my-bucket", - "my-object", - CopySource( - "my-sourcebucket", - "my-sourceobject", - modified_since=datetime(2014, 4, 1, tzinfo=timezone.utc), - ), - ) - print(result.object_name, result.version_id) - - # copy an object from a bucket with replacing metadata. - metadata = {"test_meta_key": "test_meta_value"} - result = client.copy_object( - "my-bucket", - "my-object", - CopySource("my-sourcebucket", "my-sourceobject"), - metadata=metadata, - metadata_directive=REPLACE, - ) - print(result.object_name, result.version_id) + The maximum supported source object size for this API is 5 GiB. + + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + source (CopySource): + Source object information. + + sse (Optional[Sse], default=None): + Server-side encryption configuration for the destination + object. + + user_metadata (Optional[HTTPHeaderDict], default=None): + User-defined metadata to be applied to the destination + object. + + tags (Optional[Tags], default=None): + Tags for the destination object. + + retention (Optional[Retention], default=None): + Retention configuration for the destination object. + + legal_hold (bool, default=False): + Flag to enable legal hold on the destination object. + + metadata_directive (Optional[str], default=None): + Directive for handling user metadata on the destination + object. + + tagging_directive (Optional[str], default=None): + Directive for handling tags on the destination object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + ObjectWriteResult: + The result of the copy operation. + + Example: + >>> from datetime import datetime, timezone + >>> from minio.commonconfig import REPLACE, CopySource + >>> + >>> # Copy an object from a bucket to another + >>> result = client.copy_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... source=CopySource( + ... bucket_name="my-sourcebucket", + ... object_name="my-sourceobject", + ... ), + ... ) + >>> print(result.object_name, result.version_id) + >>> + >>> # Copy an object with condition + >>> result = client.copy_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... source=CopySource( + ... bucket_name="my-sourcebucket", + ... object_name="my-sourceobject", + ... modified_since=datetime( + ... 2014, 4, 1, tzinfo=timezone.utc, + ... ), + ... ), + ... ) + >>> print(result.object_name, result.version_id) + >>> + >>> # Copy an object with replacing metadata + >>> user_metadata = {"test_meta_key": "test_meta_value"} + >>> result = client.copy_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... source=CopySource( + ... bucket_name="my-sourcebucket", + ... object_name="my-sourceobject", + ... ), + ... user_metadata=user_metadata, + ... metadata_directive=REPLACE, + ... 
) + >>> print(result.object_name, result.version_id) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) @@ -1414,8 +2406,8 @@ def copy_object( size = -1 if source.offset is None and source.length is None: stat = self.stat_object( - source.bucket_name, - source.object_name, + bucket_name=source.bucket_name, + object_name=source.object_name, version_id=source.version_id, ssec=source.ssec, ) @@ -1437,36 +2429,43 @@ def copy_object( "object size greater than 5 GiB" ) return self.compose_object( - bucket_name, object_name, [ComposeSource.of(source)], - sse=sse, metadata=metadata, tags=tags, retention=retention, + bucket_name=bucket_name, + object_name=object_name, + sources=[ComposeSource.of(source)], + sse=sse, + user_metadata=user_metadata, + tags=tags, + retention=retention, legal_hold=legal_hold, ) - headers = genheaders( - metadata, - sse, - tags, - retention, - legal_hold, + headers = self._gen_write_headers( + user_metadata=user_metadata, + sse=sse, + tags=tags, + retention=retention, + legal_hold=legal_hold, ) if metadata_directive: headers["x-amz-metadata-directive"] = metadata_directive if tagging_directive: headers["x-amz-tagging-directive"] = tagging_directive - headers.update(source.gen_copy_headers()) + headers.extend(source.gen_copy_headers()) response = self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, object_name=object_name, headers=headers, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) etag, last_modified = parse_copy_object(response) - return ObjectWriteResult( - bucket_name, - object_name, - response.headers.get("x-amz-version-id"), - etag, - response.headers, + return ObjectWriteResult.new( + headers=response.headers, + bucket_name=bucket_name, + object_name=object_name, + etag=etag, last_modified=last_modified, ) @@ -1478,8 +2477,8 @@ def _calc_part_count(self, sources: list[ComposeSource]) -> int: for src in sources: i += 1 stat = self.stat_object( - src.bucket_name, - src.object_name, + bucket_name=src.bucket_name, + object_name=src.object_name, version_id=src.version_id, ssec=src.ssec, ) @@ -1537,79 +2536,138 @@ def _calc_part_count(self, sources: list[ComposeSource]) -> int: def _upload_part_copy( self, + *, bucket_name: str, object_name: str, upload_id: str, part_number: int, - headers: DictType, + headers: HTTPHeaderDict, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> tuple[str, Optional[datetime]]: """Execute UploadPartCopy S3 API.""" - response = self._execute( - "PUT", - bucket_name, - object_name, - headers=headers, - query_params={ + query_params = HTTPQueryDict( + { "partNumber": str(part_number), "uploadId": upload_id, }, ) + response = self._execute( + method="PUT", + bucket_name=bucket_name, + object_name=object_name, + headers=headers, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) return parse_copy_object(response) def compose_object( self, + *, bucket_name: str, object_name: str, sources: list[ComposeSource], sse: Optional[Sse] = None, - metadata: Optional[DictType] = None, + user_metadata: Optional[HTTPHeaderDict] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> 
ObjectWriteResult: """ - Create an object by combining data from different source objects using + Create an object by combining data from multiple source objects using server-side copy. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param sources: List of :class:`ComposeSource` object. - :param sse: Server-side encryption of destination object. - :param metadata: Any user-defined metadata to be copied along with - destination object. - :param tags: Tags for destination object. - :param retention: :class:`Retention` configuration object. - :param legal_hold: Flag to set legal hold for destination object. - :return: :class:`ObjectWriteResult ` object. - - Example:: - sources = [ - ComposeSource("my-job-bucket", "my-object-part-one"), - ComposeSource("my-job-bucket", "my-object-part-two"), - ComposeSource("my-job-bucket", "my-object-part-three"), - ] - - # Create my-bucket/my-object by combining source object - # list. - result = client.compose_object("my-bucket", "my-object", sources) - print(result.object_name, result.version_id) - - # Create my-bucket/my-object with user metadata by combining - # source object list. - result = client.compose_object( - "my-bucket", - "my-object", - sources, - metadata={"test_meta_key": "test_meta_value"}, - ) - print(result.object_name, result.version_id) - - # Create my-bucket/my-object with user metadata and - # server-side encryption by combining source object list. - client.compose_object( - "my-bucket", "my-object", sources, sse=SseS3(), - ) - print(result.object_name, result.version_id) + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + sources (list[ComposeSource]): + List of source objects to be combined. + + sse (Optional[Sse], default=None): + Server-side encryption configuration for the destination + object. + + user_metadata (Optional[HTTPHeaderDict], default=None): + User-defined metadata to be applied to the destination + object. + + tags (Optional[Tags], default=None): + Tags for the destination object. + + retention (Optional[Retention], default=None): + Retention configuration for the destination object. + + legal_hold (bool, default=False): + Flag to enable legal hold on the destination object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + ObjectWriteResult: + The result of the compose operation. + + Example: + >>> from minio.commonconfig import ComposeSource + >>> from minio.sse import SseS3 + >>> + >>> sources = [ + ... ComposeSource( + ... bucket_name="my-job-bucket", + ... object_name="my-object-part-one", + ... ), + ... ComposeSource( + ... bucket_name="my-job-bucket", + ... object_name="my-object-part-two", + ... ), + ... ComposeSource( + ... bucket_name="my-job-bucket", + ... object_name="my-object-part-three", + ... ), + ... ] + >>> + >>> # Create object by combining sources + >>> result = client.compose_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... sources=sources, + ... ) + >>> print(result.object_name, result.version_id) + >>> + >>> # With user metadata + >>> result = client.compose_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... sources=sources, + ... user_metadata={"test_meta_key": "test_meta_value"}, + ... 
) + >>> print(result.object_name, result.version_id) + >>> + >>> # With user metadata and SSE + >>> result = client.compose_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... sources=sources, + ... sse=SseS3(), + ... ) + >>> print(result.object_name, result.version_id) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) @@ -1633,18 +2691,37 @@ def compose_object( sources[0].length is None ): return self.copy_object( - bucket_name, object_name, CopySource.of(sources[0]), - sse=sse, metadata=metadata, tags=tags, retention=retention, + bucket_name=bucket_name, + object_name=object_name, + source=CopySource.of(sources[0]), + sse=sse, + user_metadata=user_metadata, + tags=tags, + retention=retention, legal_hold=legal_hold, - metadata_directive=REPLACE if metadata else None, + metadata_directive=REPLACE if user_metadata else None, tagging_directive=REPLACE if tags else None, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - headers = genheaders(metadata, sse, tags, retention, legal_hold) + headers = self._gen_write_headers( + user_metadata=user_metadata, + sse=sse, + tags=tags, + retention=retention, + legal_hold=legal_hold, + ) upload_id = self._create_multipart_upload( - bucket_name, object_name, headers, + bucket_name=bucket_name, + object_name=object_name, + headers=headers, + ) + ssec_headers = ( + sse.headers() if isinstance(sse, SseCustomerKey) + else HTTPHeaderDict() ) - ssec_headers = sse.headers() if isinstance(sse, SseCustomerKey) else {} try: part_number = 0 total_parts = [] @@ -1655,8 +2732,8 @@ def compose_object( elif src.offset is not None: size -= src.offset offset = src.offset or 0 - headers = cast(DictType, src.headers) - headers.update(ssec_headers) + headers = cast(HTTPHeaderDict, src.headers) + headers.extend(ssec_headers) if size <= MAX_PART_SIZE: part_number += 1 if src.length is not None: @@ -1668,11 +2745,11 @@ def compose_object( f"bytes={offset}-{offset + size - 1}" ) etag, _ = self._upload_part_copy( - bucket_name, - object_name, - upload_id, - part_number, - headers, + bucket_name=bucket_name, + object_name=object_name, + upload_id=upload_id, + part_number=part_number, + headers=headers, ) total_parts.append(Part(part_number, etag)) continue @@ -1685,53 +2762,69 @@ def compose_object( f"bytes={offset}-{end_bytes}" ) etag, _ = self._upload_part_copy( - bucket_name, - object_name, - upload_id, - part_number, - headers_copy, + bucket_name=bucket_name, + object_name=object_name, + upload_id=upload_id, + part_number=part_number, + headers=headers_copy, ) total_parts.append(Part(part_number, etag)) offset += length size -= length result = self._complete_multipart_upload( - bucket_name, object_name, upload_id, total_parts, + bucket_name=bucket_name, + object_name=object_name, + upload_id=upload_id, + parts=total_parts, ) - return ObjectWriteResult( - cast(str, result.bucket_name), - cast(str, result.object_name), - result.version_id, - result.etag, - result.http_headers, + return ObjectWriteResult.new( + headers=result.headers, + bucket_name=cast(str, result.bucket_name), + object_name=cast(str, result.object_name), + version_id=result.version_id, + etag=result.etag, location=result.location, ) except Exception as exc: if upload_id: self._abort_multipart_upload( - bucket_name, object_name, upload_id, + bucket_name=bucket_name, + object_name=object_name, + upload_id=upload_id, ) raise exc def _abort_multipart_upload( self, + *, bucket_name: str, 
object_name: str, upload_id: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """Execute AbortMultipartUpload S3 API.""" self._execute( - "DELETE", - bucket_name, - object_name, - query_params={'uploadId': upload_id}, + method="DELETE", + bucket_name=bucket_name, + object_name=object_name, + query_params=HTTPQueryDict({'uploadId': upload_id}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def _complete_multipart_upload( self, + *, bucket_name: str, object_name: str, upload_id: str, parts: list[Part], + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> CompleteMultipartUploadResult: """Execute CompleteMultipartUpload S3 API.""" element = Element("CompleteMultipartUpload") @@ -1740,151 +2833,298 @@ def _complete_multipart_upload( SubElement(tag, "PartNumber", str(part.part_number)) SubElement(tag, "ETag", '"' + part.etag + '"') body = getbytes(element) - response = self._execute( - "POST", - bucket_name, - object_name, - body=body, - headers={ + headers = HTTPHeaderDict( + { "Content-Type": 'application/xml', - "Content-MD5": cast(str, md5sum_hash(body)), + "Content-MD5": base64_string(MD5.hash(body)), }, - query_params={'uploadId': upload_id}, + ) + response = self._execute( + method="POST", + bucket_name=bucket_name, + object_name=object_name, + body=body, + headers=headers, + query_params=HTTPQueryDict({'uploadId': upload_id}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return CompleteMultipartUploadResult(response) def _create_multipart_upload( self, + *, bucket_name: str, object_name: str, - headers: DictType, + headers: HTTPHeaderDict, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> str: """Execute CreateMultipartUpload S3 API.""" if not headers.get("Content-Type"): headers["Content-Type"] = "application/octet-stream" response = self._execute( - "POST", - bucket_name, - object_name, + method="POST", + bucket_name=bucket_name, + object_name=object_name, headers=headers, - query_params={"uploads": ""}, + query_params=HTTPQueryDict({"uploads": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) element = ET.fromstring(response.data.decode()) return cast(str, findtext(element, "UploadId", True)) def _put_object( self, + *, bucket_name: str, object_name: str, data: bytes, - headers: Optional[DictType] = None, - query_params: Optional[DictType] = None, + headers: Optional[HTTPHeaderDict] = None, + query_params: Optional[HTTPQueryDict] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ObjectWriteResult: """Execute PutObject S3 API.""" response = self._execute( - "PUT", - bucket_name, - object_name, + method="PUT", + bucket_name=bucket_name, + object_name=object_name, body=data, headers=headers, query_params=query_params, no_body_trace=True, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - return ObjectWriteResult( - bucket_name, - object_name, - response.headers.get("x-amz-version-id"), - response.headers.get("etag", "").replace('"', ""), - response.headers, + return ObjectWriteResult.new( + headers=response.headers, + bucket_name=bucket_name, + 
object_name=object_name, ) def _upload_part( self, + *, bucket_name: str, object_name: str, data: bytes, - headers: Optional[DictType], + headers: Optional[HTTPHeaderDict], upload_id: str, part_number: int, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> str: """Execute UploadPart S3 API.""" + query_params = HTTPQueryDict({ + "partNumber": str(part_number), + "uploadId": upload_id, + }) result = self._put_object( - bucket_name, - object_name, - data, - headers, - query_params={ - "partNumber": str(part_number), - "uploadId": upload_id, - }, + bucket_name=bucket_name, + object_name=object_name, + data=data, + headers=headers, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return cast(str, result.etag) - def _upload_part_task(self, args): + def _upload_part_task(self, kwargs): """Upload_part task for ThreadPool.""" - return args[5], self._upload_part(*args) + return kwargs["part_number"], self._upload_part(**kwargs) def put_object( self, + *, bucket_name: str, object_name: str, data: BinaryIO, length: int, content_type: str = "application/octet-stream", - metadata: Optional[DictType] = None, + headers: Optional[HTTPHeaderDict] = None, + user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, progress: Optional[ProgressType] = None, part_size: int = 0, + checksum: Optional[Algorithm] = None, num_parallel_uploads: int = 3, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, - write_offset: Optional[int] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ObjectWriteResult: """ - Uploads data from a stream to an object in a bucket. - - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param data: An object having callable read() returning bytes object. - :param length: Data size; -1 for unknown size and set valid part_size. - :param content_type: Content type of the object. - :param metadata: Any additional metadata to be uploaded along - with your PUT request. - :param sse: Server-side encryption. - :param progress: A progress object; - :param part_size: Multipart part size. - :param num_parallel_uploads: Number of parallel uploads. - :param tags: :class:`Tags` for the object. - :param retention: :class:`Retention` configuration object. - :param legal_hold: Flag to set legal hold for the object. - :param write_offset: Offset byte for appending data to existing object. - :return: :class:`ObjectWriteResult` object. - - Example:: - # Upload data. - result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, - ) - - # Upload data with metadata. - result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, - metadata={"My-Project": "one"}, - ) - - # Upload data with tags, retention and legal-hold. - date = datetime.utcnow().replace( - hour=0, minute=0, second=0, microsecond=0, - ) + timedelta(days=30) - tags = Tags(for_object=True) - tags["User"] = "jsmith" - result = client.put_object( - "my-bucket", "my-object", io.BytesIO(b"hello"), 5, - tags=tags, - retention=Retention(GOVERNANCE, date), - legal_hold=True, - ) + Upload data from a stream to an object in a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. 
+
+            data (BinaryIO):
+                An object with a callable ``read()`` method that returns a
+                bytes object.
+
+            length (int):
+                Size of the data in bytes. Use -1 for unknown size and set a
+                valid ``part_size``.
+
+            content_type (str, default="application/octet-stream"):
+                Content type of the object.
+
+            headers (Optional[HTTPHeaderDict], default=None):
+                Additional headers.
+
+            user_metadata (Optional[HTTPHeaderDict], default=None):
+                User metadata for the object.
+
+            sse (Optional[Sse], default=None):
+                Server-side encryption configuration.
+
+            progress (Optional[ProgressType], default=None):
+                Progress object to track upload progress.
+
+            part_size (int, default=0):
+                Multipart upload part size in bytes.
+
+            checksum (Optional[Algorithm], default=None):
+                Algorithm for checksum computation.
+
+            num_parallel_uploads (int, default=3):
+                Number of parallel uploads.
+
+            tags (Optional[Tags], default=None):
+                Tags for the object.
+
+            retention (Optional[Retention], default=None):
+                Retention configuration.
+
+            legal_hold (bool, default=False):
+                Flag to enable legal hold on the object.
+
+            region (Optional[str], default=None):
+                Region of the bucket to skip auto probing.
+
+            extra_headers (Optional[HTTPHeaderDict], default=None):
+                Extra headers for advanced usage.
+
+            extra_query_params (Optional[HTTPQueryDict], default=None):
+                Extra query parameters for advanced usage.
+
+        Returns:
+            ObjectWriteResult:
+                The result of the object upload operation.
+
+        Example:
+            >>> # Upload simple data
+            >>> result = client.put_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     data=io.BytesIO(b"hello"),
+            ...     length=5,
+            ... )
+            >>> print(
+            ...     f"created {result.object_name} object; "
+            ...     f"etag: {result.etag}, version-id: {result.version_id}",
+            ... )
+            >>>
+            >>> # Upload unknown-sized data with multipart
+            >>> with urlopen("https://cdn.kernel.org/pub/linux/kernel/v5.x/"
+            ...              "linux-5.4.81.tar.xz") as data:
+            ...     result = client.put_object(
+            ...         bucket_name="my-bucket",
+            ...         object_name="my-object",
+            ...         data=data,
+            ...         length=-1,
+            ...         part_size=10*1024*1024,
+            ...     )
+            >>>
+            >>> # Upload with content type
+            >>> result = client.put_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     data=io.BytesIO(b"hello"),
+            ...     length=5,
+            ...     content_type="application/csv",
+            ... )
+            >>>
+            >>> # Upload with user metadata
+            >>> result = client.put_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     data=io.BytesIO(b"hello"),
+            ...     length=5,
+            ...     user_metadata={"My-Project": "one"},
+            ... )
+            >>>
+            >>> # Upload with customer key SSE
+            >>> result = client.put_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     data=io.BytesIO(b"hello"),
+            ...     length=5,
+            ...     sse=SseCustomerKey(b"32byteslongsecretkeymustprovided"),
+            ... )
+            >>>
+            >>> # Upload with KMS SSE
+            >>> result = client.put_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     data=io.BytesIO(b"hello"),
+            ...     length=5,
+            ...     sse=SseKMS(
+            ...         "KMS-KEY-ID",
+            ...         {"Key1": "Value1", "Key2": "Value2"},
+            ...     ),
+            ... )
+            >>>
+            >>> # Upload with S3-managed SSE
+            >>> result = client.put_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ...     data=io.BytesIO(b"hello"),
+            ...     length=5,
+            ...     sse=SseS3(),
+            ... )
+            >>>
+            >>> # Upload with tags, retention, and legal hold
+            >>> date = datetime.utcnow().replace(
+            ...     hour=0, minute=0, second=0, microsecond=0,
+            ...
) + timedelta(days=30) + >>> tags = Tags(for_object=True) + >>> tags["User"] = "jsmith" + >>> result = client.put_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... data=io.BytesIO(b"hello"), + ... length=5, + ... tags=tags, + ... retention=Retention(GOVERNANCE, date), + ... legal_hold=True, + ... ) + >>> + >>> # Upload with progress bar + >>> result = client.put_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... data=io.BytesIO(b"hello"), + ... length=5, + ... progress=Progress(), + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) @@ -1895,21 +3135,27 @@ def put_object( raise ValueError("retention must be Retention type") if not callable(getattr(data, "read")): raise ValueError("input data must have callable read()") - if write_offset is not None: - if write_offset < 0: - raise ValueError("write offset should not be negative") - if length < 0: - raise ValueError("length must be provided for write offset") - part_size = length if length > MIN_PART_SIZE else MIN_PART_SIZE part_size, part_count = get_part_info(length, part_size) if progress: # Set progress bar length and object name before upload progress.set_meta(object_name=object_name, total_length=length) - headers = genheaders(metadata, sse, tags, retention, legal_hold) + add_content_sha256 = self._base_url.is_https + algorithms = [checksum or Algorithm.CRC32C] + add_sha256_checksum = algorithms[0] == Algorithm.SHA256 + if add_content_sha256 and not add_sha256_checksum: + algorithms.append(Algorithm.SHA256) + hashers = new_hashers(algorithms) + + headers = self._gen_write_headers( + headers=headers, + user_metadata=user_metadata, + sse=sse, + tags=tags, + retention=retention, + legal_hold=legal_hold, + ) headers["Content-Type"] = content_type or "application/octet-stream" - if write_offset: - headers["x-amz-write-offset-bytes"] = str(write_offset) object_size = length uploaded_size = 0 @@ -1928,7 +3174,10 @@ def put_object( part_size = object_size - uploaded_size stop = True part_data = read_part_data( - data, part_size, progress=progress, + stream=data, + size=part_size, + progress=progress, + hashers=hashers, ) if len(part_data) != part_size: raise IOError( @@ -1938,7 +3187,11 @@ def put_object( ) else: part_data = read_part_data( - data, part_size + 1, one_byte, progress=progress, + stream=data, + size=part_size + 1, + part_data=one_byte, + progress=progress, + hashers=hashers, ) # If part_data_size is less or equal to part_size, # then we have reached last part. 
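# --- Illustrative sketch (not part of the patch) ----------------------------
# The hunk above reads `part_size + 1` bytes per iteration: receiving fewer
# than `part_size + 1` bytes proves the stream is exhausted, which is how the
# final part is detected when `length` is -1. A minimal standalone version of
# that read-ahead idea, assuming only the standard library (read_part_data and
# its hashers are internal helpers and are not reproduced here):
import io

def read_chunks(stream, chunk_size):
    """Yield chunks of `chunk_size` bytes, detecting EOF via one extra byte."""
    carry = b""
    while True:
        data = carry + stream.read(chunk_size + 1 - len(carry))
        if len(data) <= chunk_size:  # short read: this is the final chunk
            if data:
                yield data
            return
        yield data[:chunk_size]      # full chunk; keep the look-ahead byte
        carry = data[chunk_size:]

assert [len(c) for c in read_chunks(io.BytesIO(b"x" * 12), 5)] == [5, 5, 2]
# -----------------------------------------------------------------------------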
@@ -1951,36 +3204,61 @@ def put_object( uploaded_size += len(part_data) + checksum_headers = make_headers( + hashers, add_content_sha256, add_sha256_checksum, + ) + if part_count == 1: + headers.extend(checksum_headers) return self._put_object( - bucket_name, object_name, part_data, headers, + bucket_name=bucket_name, + object_name=object_name, + data=part_data, + headers=headers, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) if not upload_id: + headers.extend(checksum_headers) upload_id = self._create_multipart_upload( - bucket_name, object_name, headers, + bucket_name=bucket_name, + object_name=object_name, + headers=headers, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) if num_parallel_uploads and num_parallel_uploads > 1: pool = ThreadPool(num_parallel_uploads) pool.start_parallel() - args = ( - bucket_name, - object_name, - part_data, - ( - cast(DictType, sse.headers()) - if isinstance(sse, SseCustomerKey) else None - ), - upload_id, - part_number, + headers = HTTPHeaderDict( + sse.headers() if isinstance(sse, SseCustomerKey) else None, ) + headers.extend(checksum_headers) if num_parallel_uploads > 1: + kwargs = { + "bucket_name": bucket_name, + "object_name": object_name, + "data": part_data, + "headers": headers, + "upload_id": upload_id, + "part_number": part_number, + } cast(ThreadPool, pool).add_task( - self._upload_part_task, args, + self._upload_part_task, kwargs, ) else: - etag = self._upload_part(*args) + etag = self._upload_part( + bucket_name=bucket_name, + object_name=object_name, + data=part_data, + headers=headers, + upload_id=upload_id, + part_number=part_number, + ) parts.append(Part(part_number, etag)) if pool: @@ -1991,86 +3269,45 @@ def put_object( parts[part_number - 1] = Part(part_number, etag) upload_result = self._complete_multipart_upload( - bucket_name, object_name, cast(str, upload_id), parts, + bucket_name=bucket_name, + object_name=object_name, + upload_id=cast(str, upload_id), + parts=parts, ) - return ObjectWriteResult( - cast(str, upload_result.bucket_name), - cast(str, upload_result.object_name), - upload_result.version_id, - upload_result.etag, - upload_result.http_headers, + return ObjectWriteResult.new( + headers=upload_result.headers, + bucket_name=cast(str, upload_result.bucket_name), + object_name=cast(str, upload_result.object_name), + version_id=upload_result.version_id, + etag=upload_result.etag, location=upload_result.location, ) except Exception as exc: if upload_id: self._abort_multipart_upload( - bucket_name, object_name, upload_id, + bucket_name=bucket_name, + object_name=object_name, + upload_id=upload_id, ) raise exc - def append_object( + def _append_object( self, + *, bucket_name: str, object_name: str, - data: BinaryIO, - length: int, - chunk_size: Optional[int] = None, + stream: BinaryIO, + length: Optional[int] = None, + chunk_size: int, progress: Optional[ProgressType] = None, - extra_headers: Optional[DictType] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ObjectWriteResult: - """ - Appends from a stream to existing object in a bucket. - - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param data: An object having callable read() returning bytes object. - :param length: Data size; -1 for unknown size. - :param chunk_size: Chunk size to optimize uploads. 
- :return: :class:`ObjectWriteResult` object. - - Example:: - # Append data. - result = client.append_object( - "my-bucket", "my-object", io.BytesIO(b"world"), 5, - ) - print(f"appended {result.object_name} object; etag: {result.etag}") - - # Append data in chunks. - data = urlopen( - "https://www.kernel.org/pub/linux/kernel/v6.x/" - "linux-6.13.12.tar.xz", - ) - result = client.append_object( - "my-bucket", "my-object", data, 148611164, 5*1024*1024, - ) - print(f"appended {result.object_name} object; etag: {result.etag}") - - # Append unknown sized data. - data = urlopen( - "https://www.kernel.org/pub/linux/kernel/v6.x/" - "linux-6.14.3.tar.xz", - ) - result = client.append_object( - "my-bucket", "my-object", data, 149426584, 5*1024*1024, - ) - print(f"appended {result.object_name} object; etag: {result.etag}") - """ - if length == 0: - raise ValueError("length should not be zero") - if chunk_size is not None: - if chunk_size < MIN_PART_SIZE: - raise ValueError("chunk size must be minimum of 5 MiB") - if chunk_size > MAX_PART_SIZE: - raise ValueError("chunk size must be less than 5 GiB") - else: - chunk_size = length if length > MIN_PART_SIZE else MIN_PART_SIZE - + """Do append object.""" chunk_count = -1 - if length > 0: - chunk_count = int(length / chunk_size) - if (chunk_count * chunk_size) < length: - chunk_count += 1 - chunk_count = chunk_count or 1 + if length is not None: + chunk_count = max(int((length + chunk_size - 1) / chunk_size), 1) object_size = length uploaded_size = 0 @@ -2078,17 +3315,20 @@ def append_object( one_byte = b"" stop = False - stat = self.stat_object(bucket_name, object_name) + stat = self.stat_object( + bucket_name=bucket_name, + object_name=object_name, + ) write_offset = cast(int, stat.size) while not stop: chunk_number += 1 if chunk_count > 0: - if chunk_number == chunk_count: + if chunk_number == chunk_count and object_size is not None: chunk_size = object_size - uploaded_size stop = True chunk_data = read_part_data( - data, chunk_size, progress=progress, + stream=stream, size=chunk_size, progress=progress, ) if len(chunk_data) != chunk_size: raise IOError( @@ -2098,7 +3338,10 @@ def append_object( ) else: chunk_data = read_part_data( - data, chunk_size + 1, one_byte, progress=progress, + stream=stream, + size=chunk_size + 1, + part_data=one_byte, + progress=progress, ) # If chunk_data_size is less or equal to chunk_size, # then we have reached last chunk. 
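# --- Illustrative sketch (not part of the patch) ----------------------------
# The expression `chunk_count = max(int((length + chunk_size - 1) /
# chunk_size), 1)` above is ceiling division written with integers; for
# non-negative lengths it matches math.ceil(length / chunk_size), clamped to
# at least one chunk:
import math

def chunk_count(length: int, chunk_size: int) -> int:
    return max((length + chunk_size - 1) // chunk_size, 1)

assert chunk_count(10, 5) == 2 == math.ceil(10 / 5)
assert chunk_count(11, 5) == 3 == math.ceil(11 / 5)
assert chunk_count(0, 5) == 1  # zero-length input clamped to a single chunk
# -----------------------------------------------------------------------------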
@@ -2111,23 +3354,161 @@ def append_object( uploaded_size += len(chunk_data) - headers = extra_headers or {} - headers["x-amz-write-offset-bytes"] = str(write_offset) + headers = HTTPHeaderDict( + {"x-amz-write-offset-bytes": str(write_offset)}, + ) upload_result = self._put_object( - bucket_name, object_name, chunk_data, headers=headers, + bucket_name=bucket_name, + object_name=object_name, + data=chunk_data, + headers=headers, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) write_offset += len(chunk_data) - return ObjectWriteResult( - cast(str, upload_result.bucket_name), - cast(str, upload_result.object_name), - upload_result.version_id, - upload_result.etag, - upload_result.http_headers, - location=upload_result.location, + return upload_result + + def append_object( + self, + *, + bucket_name: str, + object_name: str, + filename: Optional[str | os.PathLike] = None, + stream: Optional[BinaryIO] = None, + data: Optional[bytes] = None, + length: Optional[int] = None, + chunk_size: Optional[int] = None, + progress: Optional[ProgressType] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> ObjectWriteResult: + """ + Append data to an existing object in a bucket. + + Only one of ``filename``, ``stream`` or ``data`` must be provided. + If ``data`` is supplied, ``length`` must also be provided. + + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + filename (Optional[str | os.PathLike], default=None): + Path to a file whose contents will be appended. + + stream (Optional[BinaryIO], default=None): + An object with a callable ``read()`` method returning a + bytes object. + + data (Optional[bytes], default=None): + Raw data in a bytes object. + + length (Optional[int], default=None): + Data length of ``data`` or ``stream``. + + chunk_size (Optional[int], default=None): + Chunk size to split the data for appending. + + progress (Optional[ProgressType], default=None): + Progress object to track upload progress. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + ObjectWriteResult: + The result of the append operation. + + Example: + >>> # Append simple data + >>> result = client.append_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... data=io.BytesIO(b"world"), + ... length=5, + ... ) + >>> print(f"appended {result.object_name} object; " + ... f"etag: {result.etag}") + >>> + >>> # Append data in chunks + >>> with urlopen("https://www.kernel.org/pub/linux/kernel/v6.x/" + ... "linux-6.13.12.tar.xz") as stream: + ... result = client.append_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... stream=stream, + ... length=148611164, + ... chunk_size=5*1024*1024, + ... ) + >>> print(f"appended {result.object_name} object; " + ... f"etag: {result.etag}") + >>> + >>> # Append unknown-sized data + >>> with urlopen("https://www.kernel.org/pub/linux/kernel/v6.x/" + ... "linux-6.14.3.tar.xz") as stream: + ... result = client.append_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... stream=stream, + ... chunk_size=5*1024*1024, + ... ) + >>> print(f"appended {result.object_name} object; " + ... 
f"etag: {result.etag}") + """ + if sum(x is not None for x in (filename, stream, data)) != 1: + raise ValueError( + "either filename, stream or data must be provided") + if (length is not None and length <= 0): + raise ValueError("valid length must be provided") + if data is not None and length is None: + raise ValueError("valid length must be provided for data") + if chunk_size is not None: + if chunk_size < MIN_PART_SIZE: + raise ValueError("chunk size must be minimum of 5 MiB") + if chunk_size > MAX_PART_SIZE: + raise ValueError("chunk size must be less than 5 GiB") + else: + chunk_size = max(MIN_PART_SIZE, length or 0) + + if filename: + file_size = os.stat(filename).st_size + with open(filename, "rb") as file: + return self._append_object( + bucket_name=bucket_name, + object_name=object_name, + stream=file, + length=file_size, + chunk_size=cast(int, chunk_size), + progress=progress, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) + return self._append_object( + bucket_name=bucket_name, + object_name=object_name, + stream=stream if stream else io.BytesIO(cast(bytes, data)), + length=length, + chunk_size=cast(int, chunk_size), + progress=progress, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def list_objects( self, + *, bucket_name: str, prefix: Optional[str] = None, recursive: bool = False, @@ -2137,61 +3518,95 @@ def list_objects( use_api_v1: bool = False, use_url_encoding_type: bool = True, fetch_owner: bool = False, - extra_headers: Optional[DictType] = None, - extra_query_params: Optional[DictType] = None, - ): + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> Iterator[Object]: """ - Lists object information of a bucket. - - :param bucket_name: Name of the bucket. - :param prefix: Object name starts with prefix. - :param recursive: List recursively than directory structure emulation. - :param start_after: List objects after this key name. - :param include_user_meta: MinIO specific flag to control to include - user metadata. - :param include_version: Flag to control whether include object - versions. - :param use_api_v1: Flag to control to use ListObjectV1 S3 API or not. - :param use_url_encoding_type: Flag to control whether URL encoding type - to be used or not. - :param extra_headers: Extra HTTP headers for advanced usage. - :param extra_query_params: Extra query parameters for advanced usage. - :return: Iterator of :class:`Object `. - - Example:: - # List objects information. - objects = client.list_objects("my-bucket") - for obj in objects: - print(obj) - - # List objects information whose names starts with "my/prefix/". - objects = client.list_objects("my-bucket", prefix="my/prefix/") - for obj in objects: - print(obj) - - # List objects information recursively. - objects = client.list_objects("my-bucket", recursive=True) - for obj in objects: - print(obj) - - # List objects information recursively whose names starts with - # "my/prefix/". - objects = client.list_objects( - "my-bucket", prefix="my/prefix/", recursive=True, - ) - for obj in objects: - print(obj) - - # List objects information recursively after object name - # "my/prefix/world/1". - objects = client.list_objects( - "my-bucket", recursive=True, start_after="my/prefix/world/1", - ) - for obj in objects: - print(obj) + List object information of a bucket. + + Args: + bucket_name (str): + Name of the bucket. 
+ + prefix (Optional[str], default=None): + Return objects whose names start with this prefix. + + recursive (bool, default=False): + List objects recursively instead of emulating directory + structure. + + start_after (Optional[str], default=None): + List objects after this key name. + + include_user_meta (bool, default=False): + MinIO-specific flag to include user metadata. + + include_version (bool, default=False): + Flag to include object versions in the listing. + + use_api_v1 (bool, default=False): + Flag to use ListObjectsV1 S3 API instead of V2. + + use_url_encoding_type (bool, default=True): + Flag to enable URL encoding for object names. + + fetch_owner (bool, default=False): + Flag to fetch owner information of objects. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Iterator[Object]: + An iterator of :class:`minio.datatypes.Object`. + + Example: + >>> # List all objects in a bucket + >>> objects = client.list_objects(bucket_name="my-bucket") + >>> for obj in objects: + ... print(obj) + >>> + >>> # List objects with a prefix + >>> objects = client.list_objects( + ... bucket_name="my-bucket", prefix="my/prefix/", + ... ) + >>> for obj in objects: + ... print(obj) + >>> + >>> # List objects recursively + >>> objects = client.list_objects( + ... bucket_name="my-bucket", recursive=True, + ... ) + >>> for obj in objects: + ... print(obj) + >>> + >>> # Recursively list objects with a prefix + >>> objects = client.list_objects( + ... bucket_name="my-bucket", + ... prefix="my/prefix/", + ... recursive=True, + ... ) + >>> for obj in objects: + ... print(obj) + >>> + >>> # Recursively list objects after a specific key + >>> objects = client.list_objects( + ... bucket_name="my-bucket", + ... recursive=True, + ... start_after="my/prefix/world/1", + ... ) + >>> for obj in objects: + ... print(obj) """ return self._list_objects( - bucket_name, + bucket_name=bucket_name, delimiter=None if recursive else "/", include_user_meta=include_user_meta, prefix=prefix, @@ -2200,63 +3615,135 @@ def list_objects( include_version=include_version, encoding_type="url" if use_url_encoding_type else None, fetch_owner=fetch_owner, + region=region, extra_headers=extra_headers, extra_query_params=extra_query_params, ) def stat_object( self, + *, bucket_name: str, object_name: str, - ssec: Optional[SseCustomerKey] = None, version_id: Optional[str] = None, - extra_headers: Optional[DictType] = None, - extra_query_params: Optional[DictType] = None, + ssec: Optional[SseCustomerKey] = None, + offset: int = 0, + length: Optional[int] = None, + match_etag: Optional[str] = None, + not_match_etag: Optional[str] = None, + modified_since: Optional[datetime] = None, + unmodified_since: Optional[datetime] = None, + fetch_checksum: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Object: """ Get object information and metadata of an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param ssec: Server-side encryption customer key. - :param version_id: Version ID of the object. - :param extra_headers: Extra HTTP headers for advanced usage. - :param extra_query_params: Extra query parameters for advanced usage. 
- :return: :class:`Object `. - - Example:: - # Get object information. - result = client.stat_object("my-bucket", "my-object") - - # Get object information of version-ID. - result = client.stat_object( - "my-bucket", "my-object", - version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", - ) - - # Get SSE-C encrypted object information. - result = client.stat_object( - "my-bucket", "my-object", - ssec=SseCustomerKey(b"32byteslongsecretkeymustprovided"), - ) + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + version_id (Optional[str], default=None): + Version ID of the object. + + ssec (Optional[SseCustomerKey], default=None): + Server-side encryption customer key. + + offset (int, default=0): + Start byte position of object data. + + length (Optional[int], default=None): + Number of bytes of object data from offset. + + match_etag (Optional[str], default=None): + Fetch only if the ETag of the object matches. + + not_match_etag (Optional[str], default=None): + Fetch only if the ETag of the object does not match. + + modified_since (Optional[datetime], default=None): + Fetch only if the object was modified since this date. + + unmodified_since (Optional[datetime], default=None): + Fetch only if the object was unmodified since this date. + + fetch_checksum (bool, default=False): + Flag to fetch the checksum of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Object: + A :class:`minio.datatypes.Object` object containing metadata + and information about the object. + + Example: + >>> # Get object information + >>> result = client.stat_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ) + >>> print(f"last-modified: {result.last_modified}, " + ... f"size: {result.size}") + >>> + >>> # Get specific version of an object + >>> result = client.stat_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", + ... ) + >>> print(f"last-modified: {result.last_modified}, " + ... f"size: {result.size}") + >>> + >>> # Get SSE-C encrypted object information + >>> result = client.stat_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ssec=SseCustomerKey( + ... b"32byteslongsecretkeymustprovided" + ... ), + ... ) + >>> print(f"last-modified: {result.last_modified}, " + ... 
f"size: {result.size}") """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) check_ssec(ssec) - headers = cast(DictType, ssec.headers() if ssec else {}) - if extra_headers: - headers.update(extra_headers) - - query_params = extra_query_params or {} - query_params.update({"versionId": version_id} if version_id else {}) + headers = self._gen_read_headers( + ssec=ssec, + offset=offset, + length=length, + match_etag=match_etag, + not_match_etag=not_match_etag, + modified_since=modified_since, + unmodified_since=unmodified_since, + fetch_checksum=fetch_checksum, + ) + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id response = self._execute( - "HEAD", - bucket_name, - object_name, + method="HEAD", + bucket_name=bucket_name, + object_name=object_name, headers=headers, query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) value = response.headers.get("last-modified") @@ -2278,42 +3765,75 @@ def stat_object( def remove_object( self, + *, bucket_name: str, object_name: str, - version_id: Optional[str] = None + version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Remove an object. - - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the object. - - Example:: - # Remove object. - client.remove_object("my-bucket", "my-object") - - # Remove version of an object. - client.remove_object( - "my-bucket", "my-object", - version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", - ) + Remove an object from a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> # Remove object + >>> client.remove_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ) + >>> + >>> # Remove a specific version of an object + >>> client.remove_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... version_id="dfbd25b3-abec-4184-a4e8-5a35a5c1174d", + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id self._execute( - "DELETE", - bucket_name, - object_name, - query_params={"versionId": version_id} if version_id else None, + method="DELETE", + bucket_name=bucket_name, + object_name=object_name, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def _delete_objects( self, + *, bucket_name: str, delete_object_list: list[DeleteObject], quiet: bool = False, bypass_governance_mode: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> DeleteResult: """ Delete multiple objects. @@ -2326,17 +3846,20 @@ def _delete_objects( :return: :class:`DeleteResult ` object. 
""" body = marshal(DeleteRequest(delete_object_list, quiet=quiet)) - headers: DictType = { - "Content-MD5": cast(str, md5sum_hash(body)), - } + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) if bypass_governance_mode: headers["x-amz-bypass-governance-retention"] = "true" response = self._execute( - "POST", - bucket_name, + method="POST", + bucket_name=bucket_name, body=body, headers=headers, - query_params={"delete": ""}, + query_params=HTTPQueryDict({"delete": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) element = ET.fromstring(response.data.decode()) @@ -2348,49 +3871,73 @@ def _delete_objects( def remove_objects( self, + *, bucket_name: str, delete_object_list: Iterable[DeleteObject], bypass_governance_mode: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Iterator[DeleteError]: """ - Remove multiple objects. - - :param bucket_name: Name of the bucket. - :param delete_object_list: An iterable containing - :class:`DeleteObject ` object. - :param bypass_governance_mode: Bypass Governance retention mode. - :return: An iterator containing :class:`DeleteError ` - object. - - Example:: - # Remove list of objects. - errors = client.remove_objects( - "my-bucket", - [ - DeleteObject("my-object1"), - DeleteObject("my-object2"), - DeleteObject( - "my-object3", "13f88b18-8dcd-4c83-88f2-8631fdb6250c", - ), - ], - ) - for error in errors: - print("error occurred when deleting object", error) - - # Remove a prefix recursively. - delete_object_list = list( - map( - lambda x: DeleteObject(x.object_name), - client.list_objects( - "my-bucket", - "my/prefix/", - recursive=True, - ), - ) - ) - errors = client.remove_objects("my-bucket", delete_object_list) - for error in errors: - print("error occurred when deleting object", error) + Remove multiple objects from a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + delete_object_list (Iterable[DeleteObject]): + Iterable of :class:`minio.deleteobjects.DeleteObject` + instances to be deleted. + + bypass_governance_mode (bool, default=False): + Flag to bypass Governance retention mode. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Iterator[DeleteError]: + An iterator of :class:`minio.deleteobjects.DeleteError` + objects for any failures. + + Example: + >>> # Remove a list of objects + >>> errors = client.remove_objects( + ... bucket_name="my-bucket", + ... delete_object_list=[ + ... DeleteObject(name="my-object1"), + ... DeleteObject(name="my-object2"), + ... DeleteObject( + ... name="my-object3", + ... version_id="13f88b18-8dcd-4c83-88f2-8631fdb6250c", + ... ), + ... ], + ... ) + >>> for error in errors: + ... print("error occurred when deleting object", error) + >>> + >>> # Remove objects under a prefix recursively + >>> delete_object_list = map( + ... lambda x: DeleteObject(x.object_name), + ... client.list_objects( + ... bucket_name="my-bucket", + ... prefix="my/prefix/", + ... recursive=True, + ... ), + ... ) + >>> errors = client.remove_objects( + ... bucket_name="my-bucket", + ... delete_object_list=delete_object_list, + ... ) + >>> for error in errors: + ... 
print("error occurred when deleting object", error) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) @@ -2409,10 +3956,13 @@ def remove_objects( break result = self._delete_objects( - bucket_name, - objects, + bucket_name=bucket_name, + delete_object_list=objects, quiet=True, bypass_governance_mode=bypass_governance_mode, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) for error in result.error_list: @@ -2424,53 +3974,93 @@ def remove_objects( def get_presigned_url( self, + *, method: str, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), - response_headers: Optional[DictType] = None, request_date: Optional[datetime] = None, version_id: Optional[str] = None, - extra_query_params: Optional[DictType] = None, + region: Optional[str] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> str: """ - Get presigned URL of an object for HTTP method, expiry time and custom - request parameters. - - :param method: HTTP method. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param expires: Expiry in seconds; defaults to 7 days. - :param response_headers: Optional response_headers argument to - specify response fields like date, size, - type of file, data about server, etc. - :param request_date: Optional request_date argument to - specify a different request date. Default is - current date. - :param version_id: Version ID of the object. - :param extra_query_params: Extra query parameters for advanced usage. - :return: URL string. - - Example:: - # Get presigned URL string to delete 'my-object' in - # 'my-bucket' with one day expiry. - url = client.get_presigned_url( - "DELETE", - "my-bucket", - "my-object", - expires=timedelta(days=1), - ) - print(url) + Get a presigned URL for an object. + + The presigned URL can be used to perform the specified HTTP method + on an object, with a custom expiry time and optional query + parameters. + + Args: + method (str): + HTTP method to allow (e.g., "GET", "PUT", "DELETE"). + + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + expires (timedelta, default=timedelta(days=7)): + Expiry duration for the presigned URL. + + request_date (Optional[datetime], default=None): + Request time to base the URL on, instead of the current + time. + + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + str: + A presigned URL string. + + Example: + >>> # Generate presigned URL to delete object + >>> url = client.get_presigned_url( + ... method="DELETE", + ... bucket_name="my-bucket", + ... object_name="my-object", + ... expires=timedelta(days=1), + ... ) + >>> print(url) + >>> + >>> # Generate presigned URL to upload object with response type + >>> url = client.get_presigned_url( + ... method="PUT", + ... bucket_name="my-bucket", + ... object_name="my-object", + ... expires=timedelta(days=1), + ... extra_query_params=HTTPQueryDict( + ... {"response-content-type": "application/json"} + ... ), + ... ) + >>> print(url) + >>> + >>> # Generate presigned URL to download object + >>> url = client.get_presigned_url( + ... method="GET", + ... bucket_name="my-bucket", + ... object_name="my-object", + ... expires=timedelta(hours=2), + ... 
+            >>> print(url)
        """
        check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host)
        check_object_name(object_name)
        if expires.total_seconds() < 1 or expires.total_seconds() > 604800:
            raise ValueError("expires must be between 1 second to 7 days")
 
-        region = self._get_region(bucket_name)
-        query_params = extra_query_params or {}
-        query_params.update({"versionId": version_id} if version_id else {})
-        query_params.update(response_headers or {})
+        region = region or self._get_region(bucket_name=bucket_name)
+        query_params = HTTPQueryDict()
+        if version_id:
+            query_params["versionId"] = version_id
        creds = self._provider.retrieve() if self._provider else None
        if creds and creds.session_token:
            query_params["X-Amz-Security-Token"] = creds.session_token
@@ -2480,6 +4070,7 @@ def get_presigned_url(
            bucket_name=bucket_name,
            object_name=object_name,
            query_params=query_params,
+            extra_query_params=extra_query_params,
        )
 
        if creds:
@@ -2495,104 +4086,157 @@ def get_presigned_url(
 
    def presigned_get_object(
        self,
+        *,
        bucket_name: str,
        object_name: str,
        expires: timedelta = timedelta(days=7),
-        response_headers: Optional[DictType] = None,
        request_date: Optional[datetime] = None,
        version_id: Optional[str] = None,
-        extra_query_params: Optional[DictType] = None,
+        region: Optional[str] = None,
+        extra_query_params: Optional[HTTPQueryDict] = None,
    ) -> str:
        """
-        Get presigned URL of an object to download its data with expiry time
-        and custom request parameters.
-
-        :param bucket_name: Name of the bucket.
-        :param object_name: Object name in the bucket.
-        :param expires: Expiry in seconds; defaults to 7 days.
-        :param response_headers: Optional response_headers argument to
-                                 specify response fields like date, size,
-                                 type of file, data about server, etc.
-        :param request_date: Optional request_date argument to
-                             specify a different request date. Default is
-                             current date.
-        :param version_id: Version ID of the object.
-        :param extra_query_params: Extra query parameters for advanced usage.
-        :return: URL string.
-
-        Example::
-            # Get presigned URL string to download 'my-object' in
-            # 'my-bucket' with default expiry (i.e. 7 days).
-            url = client.presigned_get_object("my-bucket", "my-object")
-            print(url)
-
-            # Get presigned URL string to download 'my-object' in
-            # 'my-bucket' with two hours expiry.
-            url = client.presigned_get_object(
-                "my-bucket", "my-object", expires=timedelta(hours=2),
-            )
-            print(url)
+        Get a presigned URL to download an object.
+
+        The presigned URL allows downloading an object's data with a custom
+        expiry time and optional query parameters.
+
+        Args:
+            bucket_name (str):
+                Name of the bucket.
+
+            object_name (str):
+                Object name in the bucket.
+
+            expires (timedelta, default=timedelta(days=7)):
+                Expiry duration for the presigned URL.
+
+            request_date (Optional[datetime], default=None):
+                Request time to base the URL on, instead of the current
+                time.
+
+            version_id (Optional[str], default=None):
+                Version ID of the object.
+
+            region (Optional[str], default=None):
+                Region of the bucket to skip auto probing.
+
+            extra_query_params (Optional[HTTPQueryDict], default=None):
+                Extra query parameters for advanced usage.
+
+        Returns:
+            str:
+                A presigned URL string.
+
+        Example:
+            >>> # Get presigned URL to download with default expiry (7 days)
+            >>> url = client.presigned_get_object(
+            ...     bucket_name="my-bucket",
+            ...     object_name="my-object",
+            ... )
+            >>> print(url)
+            >>>
+            >>> # Get presigned URL to download with 2-hour expiry
+            >>> url = client.presigned_get_object(
+            ...
bucket_name="my-bucket", + ... object_name="my-object", + ... expires=timedelta(hours=2), + ... ) + >>> print(url) """ return self.get_presigned_url( - "GET", - bucket_name, - object_name, - expires, - response_headers=response_headers, + method="GET", + bucket_name=bucket_name, + object_name=object_name, + expires=expires, request_date=request_date, version_id=version_id, + region=region, extra_query_params=extra_query_params, ) def presigned_put_object( self, + *, bucket_name: str, object_name: str, expires: timedelta = timedelta(days=7), + region: Optional[str] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> str: """ - Get presigned URL of an object to upload data with expiry time and - custom request parameters. - - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param expires: Expiry in seconds; defaults to 7 days. - :return: URL string. - - Example:: - # Get presigned URL string to upload data to 'my-object' in - # 'my-bucket' with default expiry (i.e. 7 days). - url = client.presigned_put_object("my-bucket", "my-object") - print(url) - - # Get presigned URL string to upload data to 'my-object' in - # 'my-bucket' with two hours expiry. - url = client.presigned_put_object( - "my-bucket", "my-object", expires=timedelta(hours=2), - ) - print(url) + Get a presigned URL to upload an object. + + The presigned URL allows uploading data to an object with a custom + expiry time and optional query parameters. + + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + expires (timedelta, default=timedelta(days=7)): + Expiry duration for the presigned URL. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + str: + A presigned URL string. + + Example: + >>> # Get presigned URL to upload with default expiry (7 days) + >>> url = client.presigned_put_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ) + >>> print(url) + >>> + >>> # Get presigned URL to upload with 2-hour expiry + >>> url = client.presigned_put_object( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... expires=timedelta(hours=2), + ... ) + >>> print(url) """ return self.get_presigned_url( - "PUT", bucket_name, object_name, expires, + method="PUT", + bucket_name=bucket_name, + object_name=object_name, + expires=expires, + region=region, + extra_query_params=extra_query_params, ) def presigned_post_policy(self, policy: PostPolicy) -> dict[str, str]: """ - Get form-data of PostPolicy of an object to upload its data using POST - method. - - :param policy: :class:`PostPolicy `. - :return: :dict: contains form-data. - - Example:: - policy = PostPolicy( - "my-bucket", datetime.utcnow() + timedelta(days=10), - ) - policy.add_starts_with_condition("key", "my/object/prefix/") - policy.add_content_length_range_condition( - 1*1024*1024, 10*1024*1024, - ) - form_data = client.presigned_post_policy(policy) + Get form-data for a PostPolicy to upload an object using POST. + + Args: + policy (PostPolicy): + Post policy that defines conditions for the upload. + + Returns: + dict[str, str]: + A dictionary containing the form-data required for the POST + request. + + Example: + >>> policy = PostPolicy( + ... "my-bucket", datetime.utcnow() + timedelta(days=10), + ... 
) + >>> policy.add_starts_with_condition("key", "my/object/prefix/") + >>> policy.add_content_length_range_condition( + ... 1*1024*1024, 10*1024*1024, + ... ) + >>> form_data = client.presigned_post_policy(policy) """ if not isinstance(policy, PostPolicy): raise ValueError("policy must be PostPolicy type") @@ -2604,38 +4248,85 @@ def presigned_post_policy(self, policy: PostPolicy) -> dict[str, str]: policy.bucket_name, s3_check=self._base_url.is_aws_host) return policy.form_data( self._provider.retrieve(), - self._get_region(policy.bucket_name), + self._get_region(bucket_name=policy.bucket_name), ) - def delete_bucket_replication(self, bucket_name: str): + def delete_bucket_replication( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete replication configuration of a bucket. + Delete the replication configuration of a bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. - Example:: - client.delete_bucket_replication("my-bucket") + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.delete_bucket_replication(bucket_name="my-bucket") """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) - self._execute("DELETE", bucket_name, query_params={"replication": ""}) + self._execute_delete_bucket( + bucket_name=bucket_name, + query_params=HTTPQueryDict({"replication": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) def get_bucket_replication( self, + *, bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Optional[ReplicationConfig]: """ - Get bucket replication configuration of a bucket. + Get the replication configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`ReplicationConfig ` object. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. - Example:: - config = client.get_bucket_replication("my-bucket") + Returns: + Optional[ReplicationConfig]: + A :class:`minio.replicationconfig.ReplicationConfig` object + if replication is configured, otherwise ``None``. 
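+
+        Note:
+            A bucket without a replication configuration is reported by the
+            S3 service as an error; that case is translated to ``None``
+            instead of being raised.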
+ + Example: + >>> config = client.get_bucket_replication(bucket_name="my-bucket") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) try: response = self._execute( - "GET", bucket_name, query_params={"replication": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"replication": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(ReplicationConfig, response.data.decode()) except S3Error as exc: @@ -2645,81 +4336,154 @@ def get_bucket_replication( def set_bucket_replication( self, + *, bucket_name: str, config: ReplicationConfig, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set bucket replication configuration to a bucket. - - :param bucket_name: Name of the bucket. - :param config: :class:`ReplicationConfig ` object. - - Example:: - config = ReplicationConfig( - "REPLACE-WITH-ACTUAL-ROLE", - [ - Rule( - Destination( - "REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN", - ), - ENABLED, - delete_marker_replication=DeleteMarkerReplication( - DISABLED, - ), - rule_filter=Filter( - AndOperator( - "TaxDocs", - {"key1": "value1", "key2": "value2"}, - ), - ), - rule_id="rule1", - priority=1, - ), - ], - ) - client.set_bucket_replication("my-bucket", config) + Set the replication configuration of a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + config (ReplicationConfig): + Replication configuration to apply to the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> config = ReplicationConfig( + ... role="REPLACE-WITH-ACTUAL-ROLE", + ... rules=[ + ... Rule( + ... destination=Destination( + ... "REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN", + ... ), + ... status=ENABLED, + ... delete_marker_replication=DeleteMarkerReplication( + ... DISABLED, + ... ), + ... rule_filter=Filter( + ... AndOperator( + ... "TaxDocs", + ... {"key1": "value1", "key2": "value2"}, + ... ), + ... ), + ... rule_id="rule1", + ... priority=1, + ... ), + ... ], + ... ) + >>> client.set_bucket_replication( + ... bucket_name="my-bucket", + ... config=config, + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(config, ReplicationConfig): raise ValueError("config must be ReplicationConfig type") body = marshal(config) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"replication": ""}, + headers=headers, + query_params=HTTPQueryDict({"replication": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def delete_bucket_lifecycle(self, bucket_name: str): + def delete_bucket_lifecycle( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete notification configuration of a bucket. + Delete the lifecycle configuration of a bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. 
- Example:: - client.delete_bucket_lifecycle("my-bucket") + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.delete_bucket_lifecycle(bucket_name="my-bucket") """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) - self._execute("DELETE", bucket_name, query_params={"lifecycle": ""}) + self._execute_delete_bucket( + bucket_name=bucket_name, + query_params=HTTPQueryDict({"lifecycle": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) def get_bucket_lifecycle( self, + *, bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Optional[LifecycleConfig]: """ - Get bucket lifecycle configuration of a bucket. + Get the lifecycle configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`LifecycleConfig ` object. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. - Example:: - config = client.get_bucket_lifecycle("my-bucket") + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Optional[LifecycleConfig]: + A :class:`minio.lifecycleconfig.LifecycleConfig` object if + configured, otherwise ``None``. + + Example: + >>> config = client.get_bucket_lifecycle(bucket_name="my-bucket") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) try: response = self._execute( - "GET", bucket_name, query_params={"lifecycle": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"lifecycle": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(LifecycleConfig, response.data.decode()) except S3Error as exc: @@ -2729,74 +4493,151 @@ def get_bucket_lifecycle( def set_bucket_lifecycle( self, + *, bucket_name: str, config: LifecycleConfig, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set bucket lifecycle configuration to a bucket. - - :param bucket_name: Name of the bucket. - :param config: :class:`LifecycleConfig ` object. - - Example:: - config = LifecycleConfig( - [ - Rule( - ENABLED, - rule_filter=Filter(prefix="documents/"), - rule_id="rule1", - transition=Transition( - days=30, storage_class="GLACIER", - ), - ), - Rule( - ENABLED, - rule_filter=Filter(prefix="logs/"), - rule_id="rule2", - expiration=Expiration(days=365), - ), - ], - ) - client.set_bucket_lifecycle("my-bucket", config) + Set the lifecycle configuration of a bucket. + + Args: + bucket_name (str): + Name of the bucket. + + config (LifecycleConfig): + Lifecycle configuration to apply. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> config = LifecycleConfig( + ... [ + ... Rule( + ... 
status=ENABLED, + ... rule_filter=Filter(prefix="documents/"), + ... rule_id="rule1", + ... transition=Transition( + ... days=30, + ... storage_class="GLACIER", + ... ), + ... ), + ... Rule( + ... status=ENABLED, + ... rule_filter=Filter(prefix="logs/"), + ... rule_id="rule2", + ... expiration=Expiration(days=365), + ... ), + ... ], + ... ) + >>> client.set_bucket_lifecycle( + ... bucket_name="my-bucket", + ... config=config, + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(config, LifecycleConfig): raise ValueError("config must be LifecycleConfig type") body = marshal(config) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"lifecycle": ""}, + headers=headers, + query_params=HTTPQueryDict({"lifecycle": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def delete_bucket_tags(self, bucket_name: str): + def delete_bucket_tags( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete tags configuration of a bucket. + Delete the tags configuration of a bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. - Example:: - client.delete_bucket_tags("my-bucket") + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.delete_bucket_tags(bucket_name="my-bucket") """ - check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) - self._execute("DELETE", bucket_name, query_params={"tagging": ""}) + self._execute_delete_bucket( + bucket_name=bucket_name, + query_params=HTTPQueryDict({"tagging": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, + ) - def get_bucket_tags(self, bucket_name: str) -> Optional[Tags]: + def get_bucket_tags( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> Optional[Tags]: """ - Get tags configuration of a bucket. + Get the tags configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`Tags ` object. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Optional[Tags]: + A :class:`minio.commonconfig.Tags` object if tags are + configured, otherwise ``None``. 
- Example:: - tags = client.get_bucket_tags("my-bucket") + Example: + >>> tags = client.get_bucket_tags(bucket_name="my-bucket") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) try: response = self._execute( - "GET", bucket_name, query_params={"tagging": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"tagging": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) tagging = unmarshal(Tagging, response.data.decode()) return tagging.tags @@ -2805,85 +4646,171 @@ def get_bucket_tags(self, bucket_name: str) -> Optional[Tags]: raise return None - def set_bucket_tags(self, bucket_name: str, tags: Tags): + def set_bucket_tags( + self, + *, + bucket_name: str, + tags: Tags, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Set tags configuration to a bucket. + Set the tags configuration for a bucket. - :param bucket_name: Name of the bucket. - :param tags: :class:`Tags ` object. + Args: + bucket_name (str): + Name of the bucket. + + tags (Tags): + Tags configuration as a + :class:`minio.commonconfig.Tags` object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. - Example:: - tags = Tags.new_bucket_tags() - tags["Project"] = "Project One" - tags["User"] = "jsmith" - client.set_bucket_tags("my-bucket", tags) + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> tags = Tags.new_bucket_tags() + >>> tags["Project"] = "Project One" + >>> tags["User"] = "jsmith" + >>> client.set_bucket_tags(bucket_name="my-bucket", tags=tags) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(tags, Tags): raise ValueError("tags must be Tags type") body = marshal(Tagging(tags)) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"tagging": ""}, + headers=headers, + query_params=HTTPQueryDict({"tagging": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def delete_object_tags( self, + *, bucket_name: str, object_name: str, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Delete tags configuration of an object. + Delete the tags configuration of an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the Object. + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. - Example:: - client.delete_object_tags("my-bucket", "my-object") + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.delete_object_tags( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... 
) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) - query_params = {"versionId": version_id} if version_id else {} + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["tagging"] = "" self._execute( - "DELETE", - bucket_name, + method="DELETE", + bucket_name=bucket_name, object_name=object_name, - query_params=cast(DictType, query_params), + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def get_object_tags( self, + *, bucket_name: str, object_name: str, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Optional[Tags]: """ - Get tags configuration of a object. + Get the tags configuration of an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the Object. - :return: :class:`Tags ` object. + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + version_id (Optional[str], default=None): + Version ID of the object. - Example:: - tags = client.get_object_tags("my-bucket", "my-object") + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Optional[Tags]: + A :class:`minio.commonconfig.Tags` object if tags are + configured, otherwise ``None``. + + Example: + >>> tags = client.get_object_tags( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) - query_params = {"versionId": version_id} if version_id else {} + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["tagging"] = "" try: response = self._execute( - "GET", - bucket_name, + method="GET", + bucket_name=bucket_name, object_name=object_name, - query_params=cast(DictType, query_params), + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) tagging = unmarshal(Tagging, response.data.decode()) return tagging.tags @@ -2894,130 +4821,255 @@ def get_object_tags( def set_object_tags( self, + *, bucket_name: str, object_name: str, tags: Tags, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set tags configuration to an object. + Set the tags configuration for an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the Object. - :param tags: :class:`Tags ` object. + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + tags (Tags): + Tags configuration as a + :class:`minio.commonconfig.Tags` object. + + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. 
- Example:: - tags = Tags.new_object_tags() - tags["Project"] = "Project One" - tags["User"] = "jsmith" - client.set_object_tags("my-bucket", "my-object", tags) + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> tags = Tags.new_object_tags() + >>> tags["Project"] = "Project One" + >>> tags["User"] = "jsmith" + >>> client.set_object_tags( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... tags=tags, + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) if not isinstance(tags, Tags): raise ValueError("tags must be Tags type") body = marshal(Tagging(tags)) - query_params = {"versionId": version_id} if version_id else {} + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["tagging"] = "" self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, object_name=object_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params=cast(DictType, query_params), + headers=headers, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def enable_object_legal_hold( self, + *, bucket_name: str, object_name: str, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ Enable legal hold on an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the object. + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + version_id (Optional[str], default=None): + Version ID of the object. - Example:: - client.enable_object_legal_hold("my-bucket", "my-object") + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.enable_object_legal_hold( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... 
) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) body = marshal(LegalHold(True)) - query_params = {"versionId": version_id} if version_id else {} + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["legal-hold"] = "" self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, object_name=object_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params=cast(DictType, query_params), + headers=headers, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def disable_object_legal_hold( self, + *, bucket_name: str, object_name: str, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ Disable legal hold on an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the object. + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. - Example:: - client.disable_object_legal_hold("my-bucket", "my-object") + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> client.disable_object_legal_hold( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) body = marshal(LegalHold(False)) - query_params = {"versionId": version_id} if version_id else {} + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["legal-hold"] = "" self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, object_name=object_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params=cast(DictType, query_params), + headers=headers, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def is_object_legal_hold_enabled( self, + *, bucket_name: str, object_name: str, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> bool: """ - Returns true if legal hold is enabled on an object. + Check if legal hold is enabled on an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the object. + Args: + bucket_name (str): + Name of the bucket. - Example:: - if client.is_object_legal_hold_enabled("my-bucket", "my-object"): - print("legal hold is enabled on my-object") - else: - print("legal hold is not enabled on my-object") + object_name (str): + Object name in the bucket. + + version_id (Optional[str], default=None): + Version ID of the object. 
+ + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + bool: + True if legal hold is enabled, False otherwise. + + Example: + >>> if client.is_object_legal_hold_enabled( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ): + ... print("legal hold is enabled on my-object") + ... else: + ... print("legal hold is not enabled on my-object") """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) - query_params = {"versionId": version_id} if version_id else {} + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["legal-hold"] = "" try: response = self._execute( - "GET", - bucket_name, + method="GET", + bucket_name=bucket_name, object_name=object_name, - query_params=cast(DictType, query_params), + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) legal_hold = unmarshal(LegalHold, response.data.decode()) return legal_hold.status @@ -3026,89 +5078,198 @@ def is_object_legal_hold_enabled( raise return False - def delete_object_lock_config(self, bucket_name: str): + def delete_object_lock_config( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ): """ - Delete object-lock configuration of a bucket. + Delete the object-lock configuration of a bucket. - :param bucket_name: Name of the bucket. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. - Example:: - client.delete_object_lock_config("my-bucket") + Example: + >>> client.delete_object_lock_config(bucket_name="my-bucket") """ self.set_object_lock_config( - bucket_name, ObjectLockConfig(None, None, None) + bucket_name=bucket_name, + config=ObjectLockConfig(None, None, None), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) - def get_object_lock_config(self, bucket_name: str) -> ObjectLockConfig: + def get_object_lock_config( + self, + *, + bucket_name: str, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, + ) -> ObjectLockConfig: """ - Get object-lock configuration of a bucket. + Get the object-lock configuration of a bucket. - :param bucket_name: Name of the bucket. - :return: :class:`ObjectLockConfig ` object. + Args: + bucket_name (str): + Name of the bucket. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + ObjectLockConfig: + A :class:`minio.objectlockconfig.ObjectLockConfig` + object representing the bucket's object-lock + configuration. 
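+
+        Note:
+            No error filtering is performed here: if the bucket has no
+            object-lock configuration, the underlying
+            :class:`minio.error.S3Error` is raised rather than ``None``
+            being returned.
+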
- Example:: - config = client.get_object_lock_config("my-bucket") + Example: + >>> config = client.get_object_lock_config( + ... bucket_name="my-bucket", + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) response = self._execute( - "GET", bucket_name, query_params={"object-lock": ""}, + method="GET", + bucket_name=bucket_name, + query_params=HTTPQueryDict({"object-lock": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(ObjectLockConfig, response.data.decode()) def set_object_lock_config( self, + *, bucket_name: str, config: ObjectLockConfig, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set object-lock configuration to a bucket. + Set the object-lock configuration for a bucket. - :param bucket_name: Name of the bucket. - :param config: :class:`ObjectLockConfig ` object. + Args: + bucket_name (str): + Name of the bucket. + + config (ObjectLockConfig): + The object-lock configuration to apply. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. - Example:: - config = ObjectLockConfig(GOVERNANCE, 15, DAYS) - client.set_object_lock_config("my-bucket", config) + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> config = ObjectLockConfig(GOVERNANCE, 15, DAYS) + >>> client.set_object_lock_config( + ... bucket_name="my-bucket", + ... config=config, + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) if not isinstance(config, ObjectLockConfig): raise ValueError("config must be ObjectLockConfig type") body = marshal(config) + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params={"object-lock": ""}, + headers=headers, + query_params=HTTPQueryDict({"object-lock": ""}), + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def get_object_retention( self, + *, bucket_name: str, object_name: str, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Optional[Retention]: """ - Get retention configuration of an object. + Get the retention information of an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the object. - :return: :class:`Retention ` object. + Args: + bucket_name (str): + Name of the bucket. + + object_name (str): + Object name in the bucket. + + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. - Example:: - config = client.get_object_retention("my-bucket", "my-object") + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + Optional[Retention]: + A :class:`minio.retention.Retention` object if retention + is set, otherwise ``None``. 
+ + Example: + >>> config = client.get_object_retention( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) - query_params = {"versionId": version_id} if version_id else {} + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["retention"] = "" try: response = self._execute( - "GET", - bucket_name, + method="GET", + bucket_name=bucket_name, object_name=object_name, - query_params=cast(DictType, query_params), + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return unmarshal(Retention, response.data.decode()) except S3Error as exc: @@ -3118,104 +5279,180 @@ def get_object_retention( def set_object_retention( self, + *, bucket_name: str, object_name: str, config: Retention, version_id: Optional[str] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ): """ - Set retention configuration on an object. + Set the retention information for an object. - :param bucket_name: Name of the bucket. - :param object_name: Object name in the bucket. - :param version_id: Version ID of the object. - :param config: :class:`Retention ` object. + Args: + bucket_name (str): + Name of the bucket. - Example:: - config = Retention( - GOVERNANCE, datetime.utcnow() + timedelta(days=10), - ) - client.set_object_retention("my-bucket", "my-object", config) + object_name (str): + Object name in the bucket. + + config (Retention): + Retention configuration. + + version_id (Optional[str], default=None): + Version ID of the object. + + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Example: + >>> config = Retention( + ... GOVERNANCE, + ... datetime.utcnow() + timedelta(days=10), + ... ) + >>> client.set_object_retention( + ... bucket_name="my-bucket", + ... object_name="my-object", + ... config=config, + ... 
) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) check_object_name(object_name) if not isinstance(config, Retention): raise ValueError("config must be Retention type") body = marshal(config) - query_params = {"versionId": version_id} if version_id else {} + headers = HTTPHeaderDict( + {"Content-MD5": base64_string(MD5.hash(body))}, + ) + query_params = HTTPQueryDict() + if version_id: + query_params["versionId"] = version_id query_params["retention"] = "" self._execute( - "PUT", - bucket_name, + method="PUT", + bucket_name=bucket_name, object_name=object_name, body=body, - headers={"Content-MD5": cast(str, md5sum_hash(body))}, - query_params=cast(DictType, query_params), + headers=headers, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def upload_snowball_objects( self, + *, bucket_name: str, - object_list: Iterable[SnowballObject], - metadata: Optional[DictType] = None, + objects: Iterable[SnowballObject], + headers: Optional[HTTPHeaderDict] = None, + user_metadata: Optional[HTTPHeaderDict] = None, sse: Optional[Sse] = None, tags: Optional[Tags] = None, retention: Optional[Retention] = None, legal_hold: bool = False, staging_filename: Optional[str] = None, compression: bool = False, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ObjectWriteResult: """ - Uploads multiple objects in a single put call. It is done by creating - intermediate TAR file optionally compressed which is uploaded to S3 - service. - - :param bucket_name: Name of the bucket. - :param object_list: An iterable containing - :class:`SnowballObject ` object. - :param metadata: Any additional metadata to be uploaded along - with your PUT request. - :param sse: Server-side encryption. - :param tags: :class:`Tags` for the object. - :param retention: :class:`Retention` configuration object. - :param legal_hold: Flag to set legal hold for the object. - :param staging_filename: A staging filename to create intermediate - tarball. - :param compression: Flag to compress TAR ball. - :return: :class:`ObjectWriteResult` object. - - Example:: - # Upload snowball object. - result = client.upload_snowball_objects( - "my-bucket", - [ - SnowballObject("my-object1", filename="/etc/hostname"), - SnowballObject( - "my-object2", data=io.BytesIO("hello"), length=5, - ), - SnowballObject( - "my-object3", data=io.BytesIO("world"), length=5, - mod_time=datetime.now(), - ), - ], - ) + Upload multiple objects in a single PUT call. + + This method creates an intermediate TAR file, optionally compressed, + that is uploaded to the S3 service. + + Args: + bucket_name (str): + Name of the bucket. + + objects (Iterable[SnowballObject]): + An iterable containing Snowball objects. + + headers (Optional[HTTPHeaderDict], default=None): + Additional headers. + + user_metadata (Optional[HTTPHeaderDict], default=None): + User metadata. + + sse (Optional[Sse], default=None): + Server-side encryption. + + tags (Optional[Tags], default=None): + Tags for the object. + + retention (Optional[Retention], default=None): + Retention configuration. + + legal_hold (bool, default=False): + Flag to set legal hold for the object. + + staging_filename (Optional[str], default=None): + A staging filename to create the intermediate tarball. + + compression (bool, default=False): + Flag to compress the tarball. 
+ + region (Optional[str], default=None): + Region of the bucket to skip auto probing. + + extra_headers (Optional[HTTPHeaderDict], default=None): + Extra headers for advanced usage. + + extra_query_params (Optional[HTTPQueryDict], default=None): + Extra query parameters for advanced usage. + + Returns: + ObjectWriteResult: + A :class:`minio.helpers.ObjectWriteResult` object. + + Example: + >>> client.upload_snowball_objects( + ... bucket_name="my-bucket", + ... objects=[ + ... SnowballObject( + ... object_name="my-object1", + ... filename="/etc/hostname", + ... ), + ... SnowballObject( + ... object_name="my-object2", + ... data=io.BytesIO(b"hello"), + ... length=5, + ... ), + ... SnowballObject( + ... object_name="my-object3", + ... data=io.BytesIO(b"world"), + ... length=5, + ... mod_time=datetime.now(), + ... ), + ... ], + ... ) """ check_bucket_name(bucket_name, s3_check=self._base_url.is_aws_host) object_name = f"snowball.{random()}.tar" # turn list like objects into an iterator. - object_list = itertools.chain(object_list) + objects = itertools.chain(objects) - metadata = metadata or {} - metadata["X-Amz-Meta-Snowball-Auto-Extract"] = "true" + headers = HTTPHeaderDict() if headers is None else headers.copy() + headers["X-Amz-Meta-Snowball-Auto-Extract"] = "true" name = staging_filename fileobj = None if name else BytesIO() with tarfile.open( name=name, mode="w:gz" if compression else "w", fileobj=fileobj, ) as tar: - for obj in object_list: + for obj in objects: if obj.filename: tar.add(obj.filename, obj.object_name) else: @@ -3236,31 +5473,40 @@ def upload_snowball_objects( if name: return self.fput_object( - bucket_name, - object_name, - cast(str, staging_filename), - metadata=metadata, + bucket_name=bucket_name, + object_name=object_name, + file_path=cast(str, staging_filename), + headers=headers, + user_metadata=user_metadata, sse=sse, tags=tags, retention=retention, legal_hold=legal_hold, part_size=part_size, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return self.put_object( - bucket_name, - object_name, - cast(BinaryIO, fileobj), - length, - metadata=cast(Union[DictType, None], metadata), + bucket_name=bucket_name, + object_name=object_name, + data=cast(BinaryIO, fileobj), + length=length, + headers=headers, + user_metadata=user_metadata, sse=sse, tags=tags, retention=retention, legal_hold=legal_hold, part_size=part_size, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) def _list_objects( self, + *, bucket_name: str, continuation_token: Optional[str] = None, # listV2 only delimiter: Optional[str] = None, # all @@ -3274,8 +5520,9 @@ def _list_objects( version_id_marker: Optional[str] = None, # versioned use_api_v1: bool = False, include_version: bool = False, - extra_headers: Optional[DictType] = None, - extra_query_params: Optional[DictType] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> Iterator[Object]: """ List objects optionally including versions. 
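The snowball upload above boils down to two moving parts: every member is written into a single tarball (held in memory unless `staging_filename` is given), and the `X-Amz-Meta-Snowball-Auto-Extract: true` header asks the server to unpack that tarball into individual objects after the upload. Below is a minimal standalone sketch of the tar-building step only; it uses nothing but the standard library, and the member names and payloads are made up for illustration:

```py
# Sketch of the intermediate tarball that upload_snowball_objects() builds.
# Assumes in-memory members only; file-backed members would use tar.add().
import io
import tarfile
from datetime import datetime

members = [
    ("my-object1", b"hello"),
    ("my-object2", b"world"),
]

fileobj = io.BytesIO()
with tarfile.open(mode="w", fileobj=fileobj) as tar:
    for name, data in members:
        info = tarfile.TarInfo(name=name)
        info.size = len(data)
        info.mtime = int(datetime.now().timestamp())
        tar.addfile(info, io.BytesIO(data))

# This buffer is what actually gets uploaded, as one object named
# "snowball.<random>.tar", together with the auto-extract header.
print(len(fileobj.getvalue()), "bytes in tarball")
```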
@@ -3292,39 +5539,40 @@ def _list_objects( is_truncated = True while is_truncated: - query = extra_query_params or {} + query_params = HTTPQueryDict() if include_version: - query["versions"] = "" + query_params["versions"] = "" elif not use_api_v1: - query["list-type"] = "2" - + query_params["list-type"] = "2" if not include_version and not use_api_v1: if continuation_token: - query["continuation-token"] = continuation_token + query_params["continuation-token"] = continuation_token if fetch_owner: - query["fetch-owner"] = "true" + query_params["fetch-owner"] = "true" if include_user_meta: - query["metadata"] = "true" - query["delimiter"] = delimiter or "" + query_params["metadata"] = "true" + query_params["delimiter"] = delimiter or "" if encoding_type: - query["encoding-type"] = encoding_type - query["max-keys"] = str(max_keys or 1000) - query["prefix"] = prefix or "" + query_params["encoding-type"] = encoding_type + query_params["max-keys"] = str(max_keys or 1000) + query_params["prefix"] = prefix or "" if start_after: if include_version: - query["key-marker"] = start_after + query_params["key-marker"] = start_after elif use_api_v1: - query["marker"] = start_after + query_params["marker"] = start_after else: - query["start-after"] = start_after + query_params["start-after"] = start_after if version_id_marker: - query["version-id-marker"] = version_id_marker + query_params["version-id-marker"] = version_id_marker response = self._execute( - "GET", - bucket_name, - query_params=cast(DictType, query), - headers=extra_headers, + method="GET", + bucket_name=bucket_name, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) objects, is_truncated, start_after, version_id_marker = ( @@ -3340,6 +5588,7 @@ def _list_objects( def _list_multipart_uploads( self, + *, bucket_name: str, delimiter: Optional[str] = None, encoding_type: Optional[str] = None, @@ -3347,8 +5596,9 @@ def _list_multipart_uploads( max_uploads: Optional[int] = None, prefix: Optional[str] = None, upload_id_marker: Optional[str] = None, - extra_headers: Optional[DictType] = None, - extra_query_params: Optional[DictType] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ListMultipartUploadsResult: """ Execute ListMultipartUploads S3 API. @@ -3368,8 +5618,7 @@ def _list_multipart_uploads( object """ - query_params = extra_query_params or {} - query_params.update( + query_params = HTTPQueryDict( { "uploads": "", "delimiter": delimiter or "", @@ -3386,22 +5635,26 @@ def _list_multipart_uploads( query_params["upload-id-marker"] = upload_id_marker response = self._execute( - "GET", - bucket_name, - query_params=cast(DictType, query_params), - headers=cast(Union[DictType, None], extra_headers), + method="GET", + bucket_name=bucket_name, + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return ListMultipartUploadsResult(response) def _list_parts( self, + *, bucket_name: str, object_name: str, upload_id: str, max_parts: Optional[int] = None, part_number_marker: Optional[str] = None, - extra_headers: Optional[DictType] = None, - extra_query_params: Optional[DictType] = None, + region: Optional[str] = None, + extra_headers: Optional[HTTPHeaderDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> ListPartsResult: """ Execute ListParts S3 API. 
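The listing loop above is plain ListObjects pagination: each response says whether it was truncated, and the marker for the next page depends on the API flavor (`continuation-token` for V2, `marker` for V1, `key-marker` plus `version-id-marker` for versioned listings). A condensed sketch of the V2 variant follows; `fetch_page` is a stand-in for the signed HTTP GET that the internal `_execute()` performs, not a real SDK call:

```py
# Sketch of the ListObjectsV2 pagination pattern used by _list_objects().
from typing import Iterator, List, Optional, Tuple


def fetch_page(params: dict) -> Tuple[List[dict], bool, Optional[str]]:
    """Placeholder: return (objects, is_truncated, next_token)."""
    return [], False, None


def list_objects_v2(prefix: str = "") -> Iterator[dict]:
    continuation_token: Optional[str] = None
    is_truncated = True
    while is_truncated:
        params = {"list-type": "2", "prefix": prefix, "max-keys": "1000"}
        if continuation_token:
            params["continuation-token"] = continuation_token
        objects, is_truncated, continuation_token = fetch_page(params)
        yield from objects
```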
@@ -3417,8 +5670,7 @@ def _list_parts( :return: :class:`ListPartsResult ` object """ - query_params = extra_query_params or {} - query_params.update( + query_params = HTTPQueryDict( { "uploadId": upload_id, "max-parts": str(max_parts or 1000), @@ -3428,10 +5680,12 @@ def _list_parts( query_params["part-number-marker"] = part_number_marker response = self._execute( - "GET", - bucket_name, + method="GET", + bucket_name=bucket_name, object_name=object_name, - query_params=cast(DictType, query_params), - headers=cast(Union[DictType, None], extra_headers), + query_params=query_params, + region=region, + extra_headers=extra_headers, + extra_query_params=extra_query_params, ) return ListPartsResult(response) diff --git a/minio/checksum.py b/minio/checksum.py new file mode 100644 index 00000000..94af7e1e --- /dev/null +++ b/minio/checksum.py @@ -0,0 +1,419 @@ +# -*- coding: utf-8 -*- +# MinIO Python Library for Amazon S3 Compatible Cloud Storage, (C) +# 2025 MinIO, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Checksum functions.""" + +from __future__ import absolute_import, annotations, division, unicode_literals + +import base64 +import binascii +import hashlib +import struct +from abc import ABC, abstractmethod +from enum import Enum +from typing import Dict, List, Optional + +# MD5 hash of zero length byte array. +ZERO_MD5_HASH = "1B2M2Y8AsgTpgAmY7PhCfg==" +# SHA-256 hash of zero length byte array. 
+ZERO_SHA256_HASH = ( + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" +) +UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD" + + +def base64_string(data: bytes) -> str: + """Encodes the specified bytes to Base64 string.""" + return base64.b64encode(data).decode("ascii") + + +def base64_string_to_sum(value: str) -> bytes: + """Decodes the specified Base64 encoded string to bytes.""" + return base64.b64decode(value) + + +def hex_string(data: bytes) -> str: + """Encodes the specified bytes to Base16 (hex) string.""" + return "".join(f"{b:02x}" for b in data) + + +def hex_string_to_sum(value: str) -> bytes: + """Decodes the specified Base16 (hex) encoded string to bytes.""" + if len(value) % 2 != 0: + raise ValueError("Hex string length must be even") + return bytes(int(value[i:i+2], 16) for i in range(0, len(value), 2)) + + +class Hasher(ABC): + """Checksum hasher interface.""" + + @abstractmethod + def update( + self, + data: bytes, + offset: Optional[int] = None, + length: Optional[int] = None, + ) -> None: + """Update the hash with bytes from b[off:off+length].""" + + @abstractmethod + def sum(self) -> bytes: + """Return the final digest.""" + + @abstractmethod + def reset(self) -> None: + """Reset the hasher state.""" + + +class CRC32(Hasher): + """CRC32 Hasher using binascii.crc32.""" + + def __init__(self): + self._crc = 0 + + def update( + self, + data: bytes, + offset: Optional[int] = None, + length: Optional[int] = None, + ) -> None: + offset = offset or 0 + if length is None: + length = len(data) - offset + self._crc = binascii.crc32( + data[offset:offset+length], self._crc, + ) & 0xFFFFFFFF + + def sum(self) -> bytes: + return struct.pack(">I", self._crc) + + def reset(self) -> None: + self._crc = 0 + + +def _generate_crc32c_table(): + """Generates CRC32C table.""" + table = [0] * 256 + for i in range(256): + crc = i + for _ in range(8): + crc = (crc >> 1) ^ (0x82F63B78 if (crc & 1) else 0) + table[i] = crc & 0xFFFFFFFF + return table + + +_CRC32C_TABLE = _generate_crc32c_table() + + +class CRC32C(Hasher): + """CRC32C Hasher.""" + + def __init__(self): + self._crc = 0xFFFFFFFF + + def update( + self, + data: bytes, + offset: Optional[int] = None, + length: Optional[int] = None, + ) -> None: + offset = offset or 0 + if length is None: + length = len(data) - offset + for byte in data[offset:offset+length]: + self._crc = _CRC32C_TABLE[ + (self._crc ^ byte) & 0xFF] ^ (self._crc >> 8) + + def sum(self) -> bytes: + crc_final = (~self._crc) & 0xFFFFFFFF + return crc_final.to_bytes(4, "big") + + def reset(self) -> None: + self._crc = 0xFFFFFFFF + + +def _generate_crc64nvme_table(): + """Generates CRC64NVME table.""" + table = [0] * 256 + slicing8_table = [[0] * 256 for _ in range(8)] + + polynomial = 0x9A6C9329AC4BC9B5 + for i in range(256): + crc = i + for _ in range(8): + if crc & 1: + crc = (crc >> 1) ^ polynomial + else: + crc >>= 1 + table[i] = crc & 0xFFFFFFFFFFFFFFFF + + slicing8_table[0] = table[:] + for i in range(256): + crc = table[i] + for j in range(1, 8): + crc = table[crc & 0xFF] ^ (crc >> 8) + slicing8_table[j][i] = crc & 0xFFFFFFFFFFFFFFFF + + return table, slicing8_table + + +_CRC64NVME_TABLE, _SLICING8_TABLE_NVME = _generate_crc64nvme_table() + + +class CRC64NVME(Hasher): + """CRC64 NVME checksum.""" + + def __init__(self): + self._crc = 0 + + def update( + self, + data: bytes, + offset: Optional[int] = None, + length: Optional[int] = None, + ): + offset = offset or 0 + if length is None: + length = len(data) - offset + data = data[offset:offset + length] + 
self._crc = ~self._crc & 0xFFFFFFFFFFFFFFFF
+        offset = 0
+
+        # Process in 8-byte chunks (little-endian)
+        while len(data) >= 64 and (len(data) - offset) > 8:
+            value = struct.unpack_from("<Q", data, offset)[0]
+            self._crc ^= value
+            self._crc = (
+                _SLICING8_TABLE_NVME[7][self._crc & 0xFF] ^
+                _SLICING8_TABLE_NVME[6][(self._crc >> 8) & 0xFF] ^
+                _SLICING8_TABLE_NVME[5][(self._crc >> 16) & 0xFF] ^
+                _SLICING8_TABLE_NVME[4][(self._crc >> 24) & 0xFF] ^
+                _SLICING8_TABLE_NVME[3][(self._crc >> 32) & 0xFF] ^
+                _SLICING8_TABLE_NVME[2][(self._crc >> 40) & 0xFF] ^
+                _SLICING8_TABLE_NVME[1][(self._crc >> 48) & 0xFF] ^
+                _SLICING8_TABLE_NVME[0][(self._crc >> 56)]
+            ) & 0xFFFFFFFFFFFFFFFF
+            offset += 8
+
+        # Process remaining bytes
+        for i in range(offset, length):
+            self._crc = (
+                _CRC64NVME_TABLE[(self._crc ^ data[i]) & 0xFF] ^
+                (self._crc >> 8)
+            ) & 0xFFFFFFFFFFFFFFFF
+
+        self._crc = ~self._crc & 0xFFFFFFFFFFFFFFFF
+
+    def reset(self):
+        self._crc = 0
+
+    def sum(self) -> bytes:
+        value = self._crc
+        return bytes([
+            (value >> 56) & 0xFF,
+            (value >> 48) & 0xFF,
+            (value >> 40) & 0xFF,
+            (value >> 32) & 0xFF,
+            (value >> 24) & 0xFF,
+            (value >> 16) & 0xFF,
+            (value >> 8) & 0xFF,
+            value & 0xFF
+        ])
+
+
+class HashlibHasher(Hasher, ABC):
+    """Generic wrapper for hashlib algorithms."""
+
+    def __init__(self, name: str):
+        self._name = name
+        self._hasher = hashlib.new(name)
+
+    def update(
+        self,
+        data: bytes,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+    ) -> None:
+        offset = offset or 0
+        if length is None:
+            length = len(data) - offset
+        self._hasher.update(data[offset:offset+length])
+
+    def sum(self) -> bytes:
+        return self._hasher.digest()
+
+    def reset(self) -> None:
+        self._hasher = hashlib.new(self._name)
+
+
+class SHA1(HashlibHasher):
+    """SHA1 checksum."""
+
+    def __init__(self):
+        super().__init__("sha1")
+
+
+class SHA256(HashlibHasher):
+    """SHA256 checksum."""
+
+    def __init__(self):
+        super().__init__("sha256")
+
+    @classmethod
+    def hash(
+        cls,
+        data: str | bytes,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+    ) -> bytes:
+        """Gets sum of given data."""
+        hasher = cls()
+        hasher.update(
+            data if isinstance(data, bytes) else data.encode(),
+            offset,
+            length,
+        )
+        return hasher.sum()
+
+
+class MD5(HashlibHasher):
+    """MD5 checksum."""
+
+    def __init__(self):
+        super().__init__("md5")
+
+    @classmethod
+    def hash(
+        cls,
+        data: bytes,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+    ) -> bytes:
+        """Gets sum of given data."""
+        hasher = cls()
+        hasher.update(data, offset, length)
+        return hasher.sum()
+
+
+class Type(Enum):
+    """Checksum algorithm type."""
+    COMPOSITE = "COMPOSITE"
+    FULL_OBJECT = "FULL_OBJECT"
+
+
+class Algorithm(Enum):
+    """Checksum algorithm."""
+    CRC32 = "crc32"
+    CRC32C = "crc32c"
+    CRC64NVME = "crc64nvme"
+    SHA1 = "sha1"
+    SHA256 = "sha256"
+    MD5 = "md5"
+
+    def __str__(self) -> str:
+        return self.value
+
+    def header(self) -> str:
+        """Gets headers for this algorithm."""
+        return (
+            "Content-MD5" if self == Algorithm.MD5
+            else f"x-amz-checksum-{self.value}"
+        )
+
+    def full_object_support(self) -> bool:
+        """Checks whether this algorithm supports full object."""
+        return self in {
+            Algorithm.CRC32, Algorithm.CRC32C, Algorithm.CRC64NVME,
+        }
+
+    def composite_support(self) -> bool:
+        """Checks whether this algorithm supports composite."""
+        return self in {
+            Algorithm.CRC32, Algorithm.CRC32C, Algorithm.SHA1,
+            Algorithm.SHA256,
+        }
+
+    def validate(self, algo_type: Type):
+        """Validates given algorithm type for this algorithm."""
+        if not (
+            (self.composite_support() and algo_type == Type.COMPOSITE)
+            or (self.full_object_support() and algo_type == Type.FULL_OBJECT)
+        ):
+            raise ValueError(
+                f"algorithm {self.name} does not "
+                f"support {algo_type.name} type",
+            )
+
+    def hasher(self):
+        """Gets hasher for this algorithm."""
+        if self == Algorithm.CRC32:
+            return CRC32()
+        if self == Algorithm.CRC32C:
+            return CRC32C()
+        if self == Algorithm.CRC64NVME:
+            return CRC64NVME()
+        if self == Algorithm.SHA1:
+            return SHA1()
+        if self == Algorithm.SHA256:
+            return SHA256()
+        if self == Algorithm.MD5:
+            return MD5()
+        return None
+
+
+def new_hashers(
+    algorithms: Optional[List[Algorithm]],
+) -> Optional[Dict[Algorithm, "Hasher"]]:
+    """Creates new hasher map for given algorithms."""
+    hashers = {}
+    if algorithms:
+        for algo in algorithms:
+            if algo and algo not in hashers:
+                hashers[algo] = algo.hasher()
+    return hashers if hashers else None
+
+
+def update_hashers(
+    hashers: Optional[Dict[Algorithm, "Hasher"]],
+    data: bytes,
+    length: int,
+):
+    """Updates hashers with given data and length."""
+    if not hashers:
+        return
+    for hasher in hashers.values():
+        hasher.update(data, 0, length)
+
+
+def reset_hashers(hashers: Optional[Dict[Algorithm, "Hasher"]]):
+    """Resets hashers."""
+    if not hashers:
+        return
+    for hasher in hashers.values():
+        hasher.reset()
+
+
+def make_headers(
+    hashers: Optional[Dict[Algorithm, "Hasher"]],
+    add_content_sha256: bool,
+    add_sha256_checksum: bool
+) -> Dict[str, str]:
+    """Makes headers for hashers."""
+    headers = {}
+    if hashers:
+        for algo, hasher in hashers.items():
+            sum_bytes = hasher.sum()
+            if algo == Algorithm.SHA256:
+                if add_content_sha256:
+                    headers["x-amz-content-sha256"] = hex_string(sum_bytes)
+                if not add_sha256_checksum:
+                    continue
+            headers["x-amz-sdk-checksum-algorithm"] = str(algo)
+            headers[algo.header()] = base64_string(sum_bytes)
+    return headers
diff --git a/minio/commonconfig.py b/minio/commonconfig.py
index 88729d12..993e6967 100644
--- a/minio/commonconfig.py
+++ b/minio/commonconfig.py
@@ -264,6 +264,71 @@ def check_status(status: str):
         raise ValueError("status must be 'Enabled' or 'Disabled'")
 
 
+@dataclass(frozen=True)
+class SourceObject:
+    """Source object for copy and compose object."""
+    bucket_name: str
+    object_name: str
+    version_id: Optional[str] = None
+    ssec: Optional[SseCustomerKey] = None
+    offset: int = 0
+    length: int = 0
+    match_etag: Optional[str] = None
+    not_match_etag: Optional[str] = None
+    modified_since: Optional[datetime] = None
+    unmodified_since: Optional[datetime] = None
+    fetch_checksum: bool = False
+    region: Optional[str] = None
+
+    def __post_init__(self):
+        if (
+            self.ssec is not None and
+            not isinstance(self.ssec, SseCustomerKey)
+        ):
+            raise ValueError("ssec must be SseCustomerKey type")
+        if self.offset < 0:
+            raise ValueError("offset should be zero or greater")
+        if self.length < 0:
+            raise ValueError("length should be zero or greater")
+        if self.match_etag is not None and self.match_etag == "":
+            raise ValueError("match_etag must not be empty")
+        if self.not_match_etag is not None and self.not_match_etag == "":
+            raise ValueError("not_match_etag must not be empty")
+        if (
+            self.modified_since is not None and
+            not isinstance(self.modified_since, datetime)
+        ):
+            raise ValueError("modified_since must be datetime type")
+        if (
+            self.unmodified_since is not None and
+            not isinstance(self.unmodified_since, datetime)
+        ):
+            raise ValueError("unmodified_since must be datetime type")
+
+    def gen_copy_headers(self) -> dict[str, str]:
+        """Generate copy source headers."""
+        copy_source = quote("/" + self.bucket_name + "/" + self.object_name)
+        if self.version_id:
+            copy_source += "?versionId=" + quote(self.version_id)
+
+        
headers = {"x-amz-copy-source": copy_source} + if self.ssec: + headers.update(self.ssec.copy_headers()) + if self.match_etag: + headers["x-amz-copy-source-if-match"] = self.match_etag + if self.not_match_etag: + headers["x-amz-copy-source-if-none-match"] = self.not_match_etag + if self.modified_since: + headers["x-amz-copy-source-if-modified-since"] = ( + to_http_header(self.modified_since) + ) + if self.unmodified_since: + headers["x-amz-copy-source-if-unmodified-since"] = ( + to_http_header(self.unmodified_since) + ) + return headers + + @dataclass class ObjectConditionalReadArgs(ABC): """Base argument class holds condition properties for reading object.""" diff --git a/minio/credentials/providers.py b/minio/credentials/providers.py index a5a4faf5..9161e4e9 100644 --- a/minio/credentials/providers.py +++ b/minio/credentials/providers.py @@ -36,6 +36,7 @@ from xml.etree import ElementTree as ET import certifi +from urllib3._collections import HTTPHeaderDict from urllib3.poolmanager import PoolManager try: @@ -76,7 +77,7 @@ def _urlopen( method: str, url: str, body: Optional[str | bytes] = None, - headers: Optional[dict[str, str | list[str] | tuple[str]]] = None, + headers: Optional[HTTPHeaderDict] = None, ) -> BaseHTTPResponse: """Wrapper of urlopen() handles HTTP status code.""" res = http_client.urlopen(method, url, body=body, headers=headers) @@ -173,15 +174,16 @@ def retrieve(self) -> Credentials: return self._credentials utctime = utcnow() + headers = HTTPHeaderDict({ + "Content-Type": "application/x-www-form-urlencoded", + "Host": self._host, + "X-Amz-Date": to_amz_date(utctime), + }) headers = sign_v4_sts( method="POST", url=self._url, region=self._region, - headers={ - "Content-Type": "application/x-www-form-urlencoded", - "Host": self._host, - "X-Amz-Date": to_amz_date(utctime), - }, + headers=headers, credentials=Credentials( access_key=self._access_key, secret_key=self._secret_key, @@ -444,7 +446,7 @@ def __init__( def fetch( self, url: str, - headers: Optional[dict[str, str | list[str] | tuple[str]]] = None, + headers: Optional[HTTPHeaderDict] = None, ) -> Credentials: """Fetch credentials from EC2/ECS.""" res = _urlopen(self._http_client, "GET", url, headers=headers) @@ -490,11 +492,14 @@ def retrieve(self) -> Credentials: self._credentials = provider.retrieve() return cast(Credentials, self._credentials) - headers: Optional[dict[str, str | list[str] | tuple[str]]] = None + headers: Optional[HTTPHeaderDict] = None if self._relative_uri: if not url: url = "http://169.254.170.2" + self._relative_uri - headers = {"Authorization": self._token} if self._token else None + headers = ( + HTTPHeaderDict({"Authorization": self._token}) + if self._token else None + ) elif self._full_uri: token = self._token if self._token_file: @@ -505,20 +510,28 @@ def retrieve(self) -> Credentials: if not url: url = self._full_uri _check_loopback_host(url) - headers = {"Authorization": token} if token else None + headers = ( + HTTPHeaderDict({"Authorization": token}) if token else None + ) else: if not url: url = "http://169.254.169.254" # Get IMDS Token + headers = HTTPHeaderDict( + {"X-aws-ec2-metadata-token-ttl-seconds": "21600"}, + ) res = _urlopen( self._http_client, "PUT", url+"/latest/api/token", - headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"}, + headers=headers, ) token = res.data.decode("utf-8") - headers = {"X-aws-ec2-metadata-token": token} if token else None + headers = ( + HTTPHeaderDict({"X-aws-ec2-metadata-token": token}) + if token else None + ) # Get role name url = 
urlunsplit( diff --git a/minio/datatypes.py b/minio/datatypes.py index 0b6f0307..2e645d37 100644 --- a/minio/datatypes.py +++ b/minio/datatypes.py @@ -41,7 +41,7 @@ from .commonconfig import Tags from .credentials import Credentials -from .helpers import check_bucket_name +from .helpers import HTTPQueryDict, check_bucket_name from .signer import get_credential_string, post_presign_v4 from .time import from_iso8601utc, to_amz_date, to_iso8601utc from .xml import find, findall, findtext @@ -67,10 +67,19 @@ class Bucket: class ListAllMyBucketsResult: """LissBuckets API result.""" buckets: list[Bucket] + prefix: Optional[str] + continuation_token: Optional[str] + owner_id: Optional[str] = None + owner_name: Optional[str] = None @classmethod def fromxml(cls: Type[A], element: ET.Element) -> A: """Create new object with values from XML element.""" + prefix = findtext(element, "Prefix") + continuation_token = findtext(element, "ContinuationToken") + owner = find(element, "Owner") + owner_id = None if owner is None else findtext(owner, "ID") + owner_name = None if owner is None else findtext(owner, "DisplayName") element = cast(ET.Element, find(element, "Buckets", True)) buckets = [] elements = findall(element, "Bucket") @@ -81,7 +90,13 @@ def fromxml(cls: Type[A], element: ET.Element) -> A: name, from_iso8601utc(creation_date) if creation_date else None, )) - return cls(buckets) + return cls( + buckets=buckets, + prefix=prefix, + continuation_token=continuation_token, + owner_id=owner_id, + owner_name=owner_name, + ) B = TypeVar("B", bound="Object") @@ -232,7 +247,7 @@ def parse_list_objects( class CompleteMultipartUploadResult: """CompleteMultipartUpload API result.""" - http_headers: HTTPHeaderDict + headers: HTTPHeaderDict bucket_name: Optional[str] = None object_name: Optional[str] = None location: Optional[str] = None @@ -240,7 +255,7 @@ class CompleteMultipartUploadResult: version_id: Optional[str] = None def __init__(self, response: BaseHTTPResponse): - object.__setattr__(self, "http_headers", response.headers) + object.__setattr__(self, "headers", response.headers) element = ET.fromstring(response.data.decode()) object.__setattr__(self, "bucket_name", findtext(element, "Bucket")) object.__setattr__(self, "object_name", findtext(element, "Key")) @@ -751,16 +766,15 @@ class SiteReplicationStatusOptions: entity: Optional[str] = None entity_value: Optional[str] = None - def to_query_params(self) -> dict[str, str]: + def to_query_params(self) -> HTTPQueryDict: """Convert this options to query parameters.""" - params = { - "buckets": str(self.buckets).lower(), - "policies": str(self.policies).lower(), - "users": str(self.users).lower(), - "groups": str(self.groups).lower(), - "metrics": str(self.metrics).lower(), - "showDeleted": str(self.show_deleted).lower(), - } + params = HTTPQueryDict() + params["buckets"] = str(self.buckets).lower() + params["policies"] = str(self.policies).lower() + params["users"] = str(self.users).lower() + params["groups"] = str(self.groups).lower() + params["metrics"] = str(self.metrics).lower() + params["showDeleted"] = str(self.show_deleted).lower() if self.entity and self.entity_value: params["entity"] = self.entity params["entityvalue"] = self.entity_value diff --git a/minio/helpers.py b/minio/helpers.py index 15d60c5d..aa0203c5 100644 --- a/minio/helpers.py +++ b/minio/helpers.py @@ -29,15 +29,16 @@ from dataclasses import dataclass from datetime import datetime from queue import Queue -from threading import BoundedSemaphore, Thread -from typing import BinaryIO, 
Dict, List, Mapping, Optional, Tuple, Union +from threading import BoundedSemaphore, Lock, Thread +from typing import (BinaryIO, Dict, Iterable, List, Mapping, Optional, Type, + Union) from typing_extensions import Protocol from urllib3._collections import HTTPHeaderDict from . import __title__, __version__ +from .checksum import Algorithm, Hasher, reset_hashers, update_hashers from .sse import Sse, SseCustomerKey -from .time import to_iso8601utc _DEFAULT_USER_AGENT = ( f"MinIO ({platform.system()}; {platform.machine()}) " @@ -53,6 +54,7 @@ r'vpce(-(?!_)[a-z_\d]+(? Optional[str]: + """Get region associated to the bucket.""" + with self._lock: + return self._map.get(bucket_name) + + def set(self, bucket_name: str, region: str): + """Set region for the bucket.""" + with self._lock: + self._map[bucket_name] = region + + def remove(self, bucket_name: str): + """Remove region for the bucket.""" + with self._lock: + self._map.pop(bucket_name, None) + + +class HTTPQueryDict(dict[str, List[str]]): + """Dictionary for HTTP query parameters with multiple values per key.""" + + def __init__( + self, + initial: Optional[ + Union[ + "HTTPQueryDict", + Mapping[str, Union[str, Iterable[str]]], + ] + ] = None + ): + super().__init__() + if initial: + if not isinstance(initial, Mapping): + raise TypeError( + "HTTPQueryDict expects a mapping-like object, " + f"got {type(initial).__name__}", + ) + for key, value in initial.items(): + if isinstance(value, (str, bytes)): + self[key] = [value] + else: + self[key] = list(value) + + def __setitem__(self, key: str, value: Union[str, Iterable[str]]) -> None: + super().__setitem__( + key, + [value] if isinstance(value, (str, bytes)) else list(value), + ) + + def copy(self) -> "HTTPQueryDict": + return HTTPQueryDict(self) + + def extend( + self, + other: Optional[ + Union[ + "HTTPQueryDict", + Mapping[str, Union[str, Iterable[str]]], + ] + ], + ) -> "HTTPQueryDict": + """Merges other keys and values.""" + if other is None: + return self + if not isinstance(other, Mapping): + raise TypeError( + "extend() expects a mapping-like object, " + f"got {type(other).__name__}", + ) + for key, value in other.items(): + normalized = ( + [value] if isinstance(value, (str, bytes)) else list(value) + ) + if key in self: + self[key] += normalized + else: + self[key] = normalized + return self + + def __str__(self) -> str: + """Convert dictionary to a URL-encoded query string.""" + query_list = [(k, v) for k, values in self.items() for v in values] + query_list.sort(key=lambda x: (x[0], x[1])) # Sort by key, then value + return urllib.parse.urlencode(query_list, quote_via=urllib.parse.quote) def quote( @@ -192,13 +283,19 @@ def update(self, length: int): def read_part_data( + *, stream: BinaryIO, size: int, part_data: bytes = b"", progress: Optional[ProgressType] = None, + hashers: Optional[Dict[Algorithm, Hasher]] = None, ) -> bytes: """Read part data of given size from stream.""" - size -= len(part_data) + reset_hashers(hashers) + initial_length = len(part_data) + size -= initial_length + if part_data: + update_hashers(hashers, part_data, initial_length) while size: data = stream.read(size) if not data: @@ -207,6 +304,11 @@ def read_part_data( raise ValueError("read() must return 'bytes' object") part_data += data size -= len(data) + update_hashers( + hashers, + data, + len(data) - (initial_length if size == 0 else 0), + ) if progress: progress.update(len(data)) return part_data @@ -352,90 +454,29 @@ def url_replace( ) -def _metadata_to_headers(metadata: DictType) -> dict[str, 
list[str]]: - """Convert user metadata to headers.""" - def normalize_key(key: str) -> str: - if not key.lower().startswith("x-amz-meta-"): - key = "X-Amz-Meta-" + key - return key - - def to_string(value) -> str: - value = str(value) - try: - value.encode("us-ascii") - except UnicodeEncodeError as exc: - raise ValueError( - f"unsupported metadata value {value}; " - f"only US-ASCII encoded characters are supported" - ) from exc - return value - - def normalize_value(values: str | list[str] | tuple[str]) -> list[str]: - if not isinstance(values, (list, tuple)): - values = [values] - return [to_string(value) for value in values] - - return { - normalize_key(key): normalize_value(value) - for key, value in (metadata or {}).items() - } - - -def normalize_headers(headers: Optional[DictType]) -> DictType: +def normalize_headers(headers: Optional[HTTPHeaderDict]) -> HTTPHeaderDict: """Normalize headers by prefixing 'X-Amz-Meta-' for user metadata.""" - headers = {str(key): value for key, value in (headers or {}).items()} - - def guess_user_metadata(key: str) -> bool: - key = key.lower() - return not ( - key.startswith("x-amz-") or - key in [ - "cache-control", - "content-encoding", - "content-type", - "content-disposition", - "content-language", - ] - ) - - user_metadata = { - key: value for key, value in headers.items() - if guess_user_metadata(key) - } - - # Remove guessed user metadata. - _ = [headers.pop(key) for key in user_metadata] - - headers.update(_metadata_to_headers(user_metadata)) - return headers - - -def genheaders( - headers: Optional[DictType], - sse: Optional[Sse], - tags: Optional[dict[str, str]], - retention, - legal_hold: bool, -) -> DictType: - """Generate headers for given parameters.""" - headers = normalize_headers(headers) - headers.update(sse.headers() if sse else {}) - tagging = "&".join( - [ - queryencode(key) + "=" + queryencode(value) - for key, value in (tags or {}).items() - ], - ) - if tagging: - headers["x-amz-tagging"] = tagging - if retention and retention.mode: - headers["x-amz-object-lock-mode"] = retention.mode - headers["x-amz-object-lock-retain-until-date"] = ( - to_iso8601utc(retention.retain_until_date) or "" - ) - if legal_hold: - headers["x-amz-object-lock-legal-hold"] = "ON" - return headers + allowed_headers = [ + "cache-control", + "content-encoding", + "content-type", + "content-disposition", + "content-language", + ] + + headers = HTTPHeaderDict() if headers is None else headers + normalized_headers = HTTPHeaderDict() + for key in headers: + values = headers.get_all(key) + lower_key = key.lower() + if not ( + lower_key.startswith(("x-amz-", "x-amz-meta-")) or + lower_key in allowed_headers + ): + key = "X-Amz-Meta-" + key + for value in values: + normalized_headers.add(key, value) + return normalized_headers def _get_aws_info( @@ -495,7 +536,7 @@ def _get_aws_info( return ({"s3_prefix": aws_s3_prefix, "domain_suffix": aws_domain_suffix, - "region": region or region_in_host, + "region": region or region_in_host or None, "dualstack": dualstack}, None) @@ -706,7 +747,8 @@ def build( region: str, bucket_name: Optional[str] = None, object_name: Optional[str] = None, - query_params: Optional[DictType] = None, + query_params: Optional[HTTPQueryDict] = None, + extra_query_params: Optional[HTTPQueryDict] = None, ) -> urllib.parse.SplitResult: """Build URL for given information.""" if not bucket_name and object_name: @@ -716,14 +758,10 @@ def build( url = url_replace(url=self._url, path="/") - query = [] - for key, values in sorted((query_params or 
{}).items()): - values = values if isinstance(values, (list, tuple)) else [values] - query += [ - f"{queryencode(key)}={queryencode(value)}" - for value in sorted(values) - ] - url = url_replace(url=url, query="&".join(query)) + query_params = HTTPQueryDict().extend(query_params).extend( + extra_query_params, + ) + url = url_replace(url=url, query=f"{query_params}") if not bucket_name: return self._build_list_buckets_url(url, region) @@ -765,13 +803,48 @@ def build( @dataclass(frozen=True) class ObjectWriteResult: """Result class of any APIs doing object creation.""" + headers: HTTPHeaderDict bucket_name: str object_name: str - version_id: Optional[str] - etag: Optional[str] - http_headers: HTTPHeaderDict + etag: str + version_id: Optional[str] = None last_modified: Optional[datetime] = None location: Optional[str] = None + checksum_crc32: Optional[str] = None + checksum_crc32c: Optional[str] = None + checksum_crc64nvme: Optional[str] = None + checksum_sha1: Optional[str] = None + checksum_sha256: Optional[str] = None + checksum_type: Optional[str] = None + + @classmethod + def new( + cls: Type[ObjectWriteResult], + *, + headers: HTTPHeaderDict, + bucket_name: str, + object_name: str, + etag: Optional[str] = None, + version_id: Optional[str] = None, + last_modified: Optional[datetime] = None, + location: Optional[str] = None, + ) -> ObjectWriteResult: + """Creates object write result.""" + return cls( + headers=headers, + bucket_name=bucket_name, + object_name=object_name, + etag=etag or headers.get("etag", "").replace('"', ""), + version_id=version_id or headers.get("x-amz-version-id"), + last_modified=last_modified, + location=location, + checksum_crc32=headers.get("x-amz-checksum-crc32"), + checksum_crc32c=headers.get("x-amz-checksum-crc32c"), + checksum_crc64nvme=headers.get("x-amz-checksum-crc64nvme"), + checksum_sha1=headers.get("x-amz-checksum-sha1"), + checksum_sha256=headers.get("x-amz-checksum-sha256"), + checksum_type=headers.get("x-amz-checksum-type"), + ) class Worker(Thread): diff --git a/minio/minioadmin.py b/minio/minioadmin.py index 9469ce10..42d0b851 100644 --- a/minio/minioadmin.py +++ b/minio/minioadmin.py @@ -45,9 +45,8 @@ from .crypto import decrypt, encrypt from .datatypes import PeerInfo, PeerSite, SiteReplicationStatusOptions from .error import MinioAdminException -from .helpers import (_DEFAULT_USER_AGENT, _REGION_REGEX, DictType, _parse_url, - headers_to_strings, queryencode, sha256_hash, - url_replace) +from .helpers import (_DEFAULT_USER_AGENT, _REGION_REGEX, HTTPQueryDict, + _parse_url, headers_to_strings, sha256_hash, url_replace) from .signer import sign_v4_s3 @@ -167,7 +166,7 @@ def _url_open( *, method: str, command: _COMMAND, - query_params: Optional[DictType] = None, + query_params: Optional[HTTPQueryDict] = None, body: Optional[bytes] = None, preload_content: bool = True, ) -> BaseHTTPResponse: @@ -175,24 +174,18 @@ def _url_open( creds = self._provider.retrieve() url = url_replace(url=self._url, path="/minio/admin/v3/"+command.value) - query = [] - for key, values in sorted((query_params or {}).items()): - values = values if isinstance(values, (list, tuple)) else [values] - query += [ - f"{queryencode(key)}={queryencode(value)}" - for value in sorted(values) - ] - url = url_replace(url=url, query="&".join(query)) + query = None if query_params is None else str(query_params) + url = url_replace(url=url, query=query) content_sha256 = sha256_hash(body) date = time.utcnow() - headers: DictType = { + headers = HTTPHeaderDict({ "Host": url.netloc, 
"User-Agent": self._user_agent, "x-amz-date": time.to_amz_date(date), "x-amz-content-sha256": content_sha256, "Content-Type": "application/octet-stream" - } + }) if creds.session_token: headers["X-Amz-Security-Token"] = creds.session_token if body: @@ -296,7 +289,7 @@ def service_restart(self) -> str: response = self._url_open( method="POST", command=_COMMAND.SERVICE, - query_params={"action": "restart"} + query_params=HTTPQueryDict({"action": "restart"}), ) return response.data.decode() @@ -305,7 +298,7 @@ def service_stop(self) -> str: response = self._url_open( method="POST", command=_COMMAND.SERVICE, - query_params={"action": "stop"} + query_params=HTTPQueryDict({"action": "stop"}), ) return response.data.decode() @@ -314,7 +307,7 @@ def update(self) -> str: response = self._url_open( method="POST", command=_COMMAND.UPDATE, - query_params={"updateURL": ""} + query_params=HTTPQueryDict({"updateURL": ""}), ) return response.data.decode() @@ -328,10 +321,13 @@ def info(self) -> str: def account_info(self, prefix_usage: bool = False) -> str: """Get usage information for the authenticating account""" + query_params = ( + HTTPQueryDict({"prefix-usage": "true"}) if prefix_usage else None + ) response = self._url_open( method="GET", command=_COMMAND.ACCOUNT_INFO, - query_params={"prefix-usage": "true"} if prefix_usage else None, + query_params=query_params, ) return response.data.decode() @@ -342,26 +338,32 @@ def user_add(self, access_key: str, secret_key: str) -> str: response = self._url_open( method="PUT", command=_COMMAND.ADD_USER, - query_params={"accessKey": access_key}, + query_params=HTTPQueryDict({"accessKey": access_key}), body=encrypt(body, self._provider.retrieve().secret_key), ) return response.data.decode() def user_disable(self, access_key: str) -> str: """Disable user.""" + query_params = HTTPQueryDict( + {"accessKey": access_key, "status": "disabled"}, + ) response = self._url_open( method="PUT", command=_COMMAND.SET_USER_STATUS, - query_params={"accessKey": access_key, "status": "disabled"} + query_params=query_params, ) return response.data.decode() def user_enable(self, access_key: str) -> str: """Enable user.""" + query_params = HTTPQueryDict( + {"accessKey": access_key, "status": "enabled"}, + ) response = self._url_open( method="PUT", command=_COMMAND.SET_USER_STATUS, - query_params={"accessKey": access_key, "status": "enabled"} + query_params=query_params, ) return response.data.decode() @@ -370,7 +372,7 @@ def user_remove(self, access_key: str) -> str: response = self._url_open( method="DELETE", command=_COMMAND.REMOVE_USER, - query_params={"accessKey": access_key}, + query_params=HTTPQueryDict({"accessKey": access_key}), ) return response.data.decode() @@ -379,7 +381,7 @@ def user_info(self, access_key: str) -> str: response = self._url_open( method="GET", command=_COMMAND.USER_INFO, - query_params={"accessKey": access_key}, + query_params=HTTPQueryDict({"accessKey": access_key}), ) return response.data.decode() @@ -395,7 +397,7 @@ def user_list(self) -> str: ) return plain_data.decode() - def group_add(self, group_name: str, members: str) -> str: + def group_add(self, group_name: str, members: list[str]) -> str: """Add users a new or existing group.""" body = json.dumps({ "group": group_name, @@ -411,19 +413,25 @@ def group_add(self, group_name: str, members: str) -> str: def group_disable(self, group_name: str) -> str: """Disable group.""" + query_params = HTTPQueryDict( + {"group": group_name, "status": "disabled"}, + ) response = self._url_open( method="PUT", 
command=_COMMAND.SET_GROUP_STATUS, - query_params={"group": group_name, "status": "disabled"} + query_params=query_params, ) return response.data.decode() def group_enable(self, group_name: str) -> str: """Enable group.""" + query_params = HTTPQueryDict( + {"group": group_name, "status": "enabled"}, + ) response = self._url_open( method="PUT", command=_COMMAND.SET_GROUP_STATUS, - query_params={"group": group_name, "status": "enabled"} + query_params=query_params, ) return response.data.decode() @@ -452,7 +460,7 @@ def group_info(self, group_name: str) -> str: response = self._url_open( method="GET", command=_COMMAND.GROUP_INFO, - query_params={"group": group_name}, + query_params=HTTPQueryDict({"group": group_name}), ) return response.data.decode() @@ -479,7 +487,7 @@ def policy_add(self, response = self._url_open( method="PUT", command=_COMMAND.ADD_CANNED_POLICY, - query_params={"name": policy_name}, + query_params=HTTPQueryDict({"name": policy_name}), body=body, ) return response.data.decode() @@ -489,7 +497,7 @@ def policy_remove(self, policy_name: str) -> str: response = self._url_open( method="DELETE", command=_COMMAND.REMOVE_CANNED_POLICY, - query_params={"name": policy_name}, + query_params=HTTPQueryDict({"name": policy_name}), ) return response.data.decode() @@ -498,7 +506,7 @@ def policy_info(self, policy_name: str) -> str: response = self._url_open( method="GET", command=_COMMAND.CANNED_POLICY_INFO, - query_params={"name": policy_name}, + query_params=HTTPQueryDict({"name": policy_name}), ) return response.data.decode() @@ -518,12 +526,17 @@ def policy_set( ) -> str: """Set IAM policy on a user or group.""" if (user is not None) ^ (group is not None): + query_params = HTTPQueryDict( + { + "userOrGroup": cast(str, user or group), + "isGroup": "true" if group else "false", + "policyName": policy_name, + }, + ) response = self._url_open( method="PUT", command=_COMMAND.SET_USER_OR_GROUP_POLICY, - query_params={"userOrGroup": cast(str, user or group), - "isGroup": "true" if group else "false", - "policyName": policy_name}, + query_params=query_params, ) return response.data.decode() raise ValueError("either user or group must be set") @@ -545,7 +558,7 @@ def config_get(self, key: Optional[str] = None) -> str: response = self._url_open( method="GET", command=_COMMAND.GET_CONFIG, - query_params={"key": key or "", "subSys": ""}, + query_params=HTTPQueryDict({"key": key or "", "subSys": ""}), preload_content=False, ) if key is None: @@ -593,7 +606,7 @@ def config_history(self) -> str: response = self._url_open( method="GET", command=_COMMAND.LIST_CONFIG_HISTORY, - query_params={"count": "10"}, + query_params=HTTPQueryDict({"count": "10"}), preload_content=False, ) plain_text = decrypt( @@ -610,7 +623,7 @@ def config_restore(self, restore_id: str) -> str: response = self._url_open( method="PUT", command=_COMMAND.RESOTRE_CONFIG_HISTORY, - query_params={"restoreId": restore_id} + query_params=HTTPQueryDict({"restoreId": restore_id}), ) return response.data.decode() @@ -622,7 +635,7 @@ def profile_start( response = self._url_open( method="POST", command=_COMMAND.START_PROFILE, - query_params={"profilerType;": ",".join(profilers)}, + query_params=HTTPQueryDict({"profilerType;": ",".join(profilers)}), ) return response.data.decode() @@ -639,7 +652,7 @@ def kms_key_create(self, key: Optional[str] = None) -> str: response = self._url_open( method="POST", command=_COMMAND.CREATE_KMS_KEY, - query_params={"key-id": key or ""}, + query_params=HTTPQueryDict({"key-id": key or ""}), ) return 
response.data.decode() @@ -648,7 +661,7 @@ def kms_key_status(self, key: Optional[str] = None) -> str: response = self._url_open( method="GET", command=_COMMAND.GET_KMS_KEY_STATUS, - query_params={"key-id": key or ""} + query_params=HTTPQueryDict({"key-id": key or ""}), ) return response.data.decode() @@ -659,7 +672,7 @@ def add_site_replication(self, peer_sites: list[PeerSite]) -> str: response = self._url_open( method="PUT", command=_COMMAND.SITE_REPLICATION_ADD, - query_params={"api-version": "1"}, + query_params=HTTPQueryDict({"api-version": "1"}), body=encrypt(body, self._provider.retrieve().secret_key), ) return response.data.decode() @@ -680,7 +693,7 @@ def get_site_replication_status( response = self._url_open( method="GET", command=_COMMAND.SITE_REPLICATION_STATUS, - query_params=cast(DictType, options.to_query_params()), + query_params=options.to_query_params(), ) return response.data.decode() @@ -690,7 +703,7 @@ def edit_site_replication(self, peer_info: PeerInfo) -> str: response = self._url_open( method="PUT", command=_COMMAND.SITE_REPLICATION_EDIT, - query_params={"api-version": "1"}, + query_params=HTTPQueryDict({"api-version": "1"}), body=encrypt(body, self._provider.retrieve().secret_key), ) return response.data.decode() @@ -712,7 +725,7 @@ def remove_site_replication( response = self._url_open( method="PUT", command=_COMMAND.SITE_REPLICATION_REMOVE, - query_params={"api-version": "1"}, + query_params=HTTPQueryDict({"api-version": "1"}), body=encrypt(body, self._provider.retrieve().secret_key), ) return response.data.decode() @@ -723,7 +736,7 @@ def bucket_quota_set(self, bucket: str, size: int) -> str: response = self._url_open( method="PUT", command=_COMMAND.SET_BUCKET_QUOTA, - query_params={"bucket": bucket}, + query_params=HTTPQueryDict({"bucket": bucket}), body=body ) return response.data.decode() @@ -737,7 +750,7 @@ def bucket_quota_get(self, bucket: str) -> str: response = self._url_open( method="GET", command=_COMMAND.GET_BUCKET_QUOTA, - query_params={"bucket": bucket} + query_params=HTTPQueryDict({"bucket": bucket}), ) return response.data.decode() @@ -754,7 +767,7 @@ def get_service_account(self, access_key: str) -> str: response = self._url_open( method="GET", command=_COMMAND.SERVICE_ACCOUNT_INFO, - query_params={"accessKey": access_key}, + query_params=HTTPQueryDict({"accessKey": access_key}), preload_content=False, ) plain_data = decrypt( @@ -767,7 +780,7 @@ def list_service_account(self, user: str) -> str: response = self._url_open( method="GET", command=_COMMAND.SERVICE_ACCOUNT_LIST, - query_params={"user": user}, + query_params=HTTPQueryDict({"user": user}), preload_content=False, ) plain_data = decrypt( @@ -781,6 +794,7 @@ def add_service_account(self, secret_key: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, + target_user: Optional[str] = None, policy: Optional[dict] = None, policy_file: Optional[str | os.PathLike] = None, expiration: Optional[str] = None, @@ -803,6 +817,8 @@ def add_service_account(self, data["name"] = name if description: data["description"] = description + if target_user: + data["targetUser"] = target_user if policy_file: with open(policy_file, encoding="utf-8") as file: data["policy"] = json.load(file) @@ -865,7 +881,7 @@ def update_service_account(self, response = self._url_open( method="POST", command=_COMMAND.SERVICE_ACCOUNT_UPDATE, - query_params={"accessKey": access_key}, + query_params=HTTPQueryDict({"accessKey": access_key}), body=encrypt(body, self._provider.retrieve().secret_key), ) 
return response.data.decode() @@ -875,7 +891,7 @@ def delete_service_account(self, access_key: str) -> str: response = self._url_open( method="DELETE", command=_COMMAND.SERVICE_ACCOUNT_DELETE, - query_params={"accessKey": access_key}, + query_params=HTTPQueryDict({"accessKey": access_key}), ) return response.data.decode() @@ -942,10 +958,13 @@ def list_access_keys_ldap( list_type: str, ) -> str: """List service accounts belonging to the specified user.""" + query_params = HTTPQueryDict( + {"userDN": user_dn, "listType": list_type}, + ) response = self._url_open( method="GET", command=_COMMAND.IDP_LDAP_LIST_ACCESS_KEYS, - query_params={"userDN": user_dn, "listType": list_type}, + query_params=query_params, preload_content=False, ) plain_data = decrypt( @@ -967,7 +986,7 @@ def list_access_keys_ldap_bulk( response = self._url_open( method="GET", command=_COMMAND.IDP_LDAP_LIST_ACCESS_KEYS_BULK, - query_params={"listType": list_type, key: value}, + query_params=HTTPQueryDict({"listType": list_type, key: value}), preload_content=False, ) plain_data = decrypt( @@ -1004,10 +1023,13 @@ def get_policy_entities( policies: list[str], ) -> str: """Get builtin policy entities.""" + query_params = HTTPQueryDict( + {"user": users, "group": groups, "policy": policies}, + ) response = self._url_open( method="GET", command=_COMMAND.IDP_BUILTIN_POLICY_ENTITIES, - query_params={"user": users, "group": groups, "policy": policies}, + query_params=query_params, preload_content=False, ) plain_data = decrypt( diff --git a/minio/signer.py b/minio/signer.py index 58b0dbc5..304d4d8c 100644 --- a/minio/signer.py +++ b/minio/signer.py @@ -33,12 +33,14 @@ import re from collections import OrderedDict from datetime import datetime -from typing import Mapping, cast +from typing import cast from urllib.parse import SplitResult +from urllib3._collections import HTTPHeaderDict + from . 
import time from .credentials import Credentials -from .helpers import DictType, queryencode, sha256_hash +from .helpers import queryencode, sha256_hash SIGN_V4_ALGORITHM = 'AWS4-HMAC-SHA256' _MULTI_SPACE_REGEX = re.compile(r"( +)") @@ -60,21 +62,16 @@ def _get_scope(date: datetime, region: str, service_name: str) -> str: return f"{time.to_signer_date(date)}/{region}/{service_name}/aws4_request" -def _get_canonical_headers( - headers: Mapping[str, str | list[str] | tuple[str]], -) -> tuple[str, str]: +def _get_canonical_headers(headers: HTTPHeaderDict) -> tuple[str, str]: """Get canonical headers.""" ordered_headers = {} - for key, values in headers.items(): + for key in headers: key = key.lower() - if key not in ( - "authorization", - "user-agent", - ): - values = values if isinstance(values, (list, tuple)) else [values] + if key not in ("authorization", "user-agent"): ordered_headers[key] = ",".join([ - _MULTI_SPACE_REGEX.sub(" ", value).strip() for value in values + _MULTI_SPACE_REGEX.sub(" ", value).strip() + for value in headers.get_all(key) ]) ordered_headers = OrderedDict(sorted(ordered_headers.items())) @@ -101,7 +98,7 @@ def _get_canonical_query_string(query: str) -> str: def _get_canonical_request_hash( method: str, url: SplitResult, - headers: Mapping[str, str | list[str] | tuple[str]], + headers: HTTPHeaderDict, content_sha256: str, ) -> tuple[str, str]: """Get canonical request hash.""" @@ -192,11 +189,11 @@ def _sign_v4( method: str, url: SplitResult, region: str, - headers: DictType, + headers: HTTPHeaderDict, credentials: Credentials, content_sha256: str, date: datetime, -) -> DictType: +) -> HTTPHeaderDict: """Do signature V4 of given request for given service name.""" scope = _get_scope(date, region, service_name) @@ -220,11 +217,11 @@ def sign_v4_s3( method: str, url: SplitResult, region: str, - headers: DictType, + headers: HTTPHeaderDict, credentials: Credentials, content_sha256: str, date: datetime, -) -> DictType: +) -> HTTPHeaderDict: """Do signature V4 of given request for S3 service.""" return _sign_v4( service_name="s3", @@ -243,11 +240,11 @@ def sign_v4_sts( method: str, url: SplitResult, region: str, - headers: DictType, + headers: HTTPHeaderDict, credentials: Credentials, content_sha256: str, date: datetime, -) -> DictType: +) -> HTTPHeaderDict: """Do signature V4 of given request for STS service.""" return _sign_v4( service_name="sts", diff --git a/tests/functional/tests.py b/tests/functional/tests.py index 93747787..58935976 100644 --- a/tests/functional/tests.py +++ b/tests/functional/tests.py @@ -39,12 +39,14 @@ import certifi import urllib3 +from urllib3._collections import HTTPHeaderDict from minio import Minio from minio.commonconfig import ENABLED, REPLACE, CopySource, SnowballObject from minio.datatypes import PostPolicy from minio.deleteobjects import DeleteObject from minio.error import S3Error +from minio.helpers import HTTPQueryDict from minio.select import (CSVInputSerialization, CSVOutputSerialization, SelectRequest) from minio.sse import SseCustomerKey @@ -63,6 +65,11 @@ ) +def _serialize(headers: HTTPHeaderDict) -> dict: + """Convert HTTPHeaderDict to dict.""" + return {key: headers.getlist(key) for key in headers} + + def _gen_bucket_name(): """Generate random bucket name.""" return f"minio-py-test-{uuid4()}" @@ -167,11 +174,11 @@ def test_make_bucket_default_region(log_entry): } # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, bucket_name) + _call(log_entry, _CLIENT.make_bucket, bucket_name=bucket_name) 
# Check if bucket was created properly - _call(log_entry, _CLIENT.bucket_exists, bucket_name) + _call(log_entry, _CLIENT.bucket_exists, bucket_name=bucket_name) # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name) + _call(log_entry, _CLIENT.remove_bucket, bucket_name=bucket_name) # Test passes log_entry["method"] = _CLIENT.make_bucket @@ -194,11 +201,16 @@ def test_make_bucket_with_region(log_entry): } - # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, bucket_name, location) + # Create a bucket with the specified region/location + _call( + log_entry, + _CLIENT.make_bucket, + bucket_name=bucket_name, + location=location, + ) # Check if bucket was created properly - _call(log_entry, _CLIENT.bucket_exists, bucket_name) + _call(log_entry, _CLIENT.bucket_exists, bucket_name=bucket_name) # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name) + _call(log_entry, _CLIENT.remove_bucket, bucket_name=bucket_name) # Test passes log_entry["method"] = _CLIENT.make_bucket @@ -223,11 +235,11 @@ def test_negative_make_bucket_invalid_name( # pylint: disable=invalid-name log_entry["args"]["bucket_name"] = name try: # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, name) + _call(log_entry, _CLIENT.make_bucket, bucket_name=name) # Check if bucket was created properly - _call(log_entry, _CLIENT.bucket_exists, name) + _call(log_entry, _CLIENT.bucket_exists, bucket_name=name) # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, name) + _call(log_entry, _CLIENT.remove_bucket, bucket_name=name) except ValueError: pass # Test passes @@ -242,7 +254,7 @@ def test_list_buckets(log_entry): bucket_name = _gen_bucket_name() # Create a bucket with default bucket location - _call(log_entry, _CLIENT.make_bucket, bucket_name) + _call(log_entry, _CLIENT.make_bucket, bucket_name=bucket_name) try: buckets = _CLIENT.list_buckets() raise ValueError('list_bucket api failure') finally: # Remove bucket - _call(log_entry, _CLIENT.remove_bucket, bucket_name) + _call(log_entry, _CLIENT.remove_bucket, bucket_name=bucket_name) def test_select_object_content(log_entry): """Test select_object_content().""" # Get a unique bucket_name and object_name bucket_name = _gen_bucket_name() csvfile = 'test.csv' log_entry["args"] = { "bucket_name": bucket_name, "object_name": csvfile, } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) content = io.BytesIO(b"col1,col2,col3\none,two,three\nX,Y,Z\n") - _CLIENT.put_object(bucket_name, csvfile, content, - len(content.getvalue())) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=csvfile, + data=content, + length=len(content.getvalue()), + ) request = SelectRequest( "select * from s3object", @@ -280,7 +296,11 @@ CSVOutputSerialization(), request_progress=True, ) - data = _CLIENT.select_object_content(bucket_name, csvfile, request) + data = _CLIENT.select_object_content( + bucket_name=bucket_name, + object_name=csvfile, + request=request, + ) # Get the records records = io.BytesIO() for data_bytes in data.stream(16): @@ -294,24 +314,38 @@ records.write(data_bytes) '"col1,col2,col3\none,two,three\nX,Y,Z\n"', f"Received {records.getvalue().decode()}") finally: - _CLIENT.remove_object(bucket_name, csvfile) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=csvfile) + _CLIENT.remove_bucket(bucket_name=bucket_name) def _test_fput_object(bucket_name, object_name, filename, metadata, sse): """Test fput_object().""" try: - _CLIENT.make_bucket(bucket_name) +
_CLIENT.make_bucket(bucket_name=bucket_name) if _IS_AWS: - _CLIENT.fput_object(bucket_name, object_name, filename, - metadata=metadata, sse=sse) + _CLIENT.fput_object( + bucket_name=bucket_name, + object_name=object_name, + file_path=filename, + user_metadata=metadata, + sse=sse, + ) else: - _CLIENT.fput_object(bucket_name, object_name, filename, sse=sse) + _CLIENT.fput_object( + bucket_name=bucket_name, + object_name=object_name, + file_path=filename, + sse=sse, + ) - _CLIENT.stat_object(bucket_name, object_name, ssec=sse) + _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_name, + ssec=sse, + ) finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_fput_object_small_file(log_entry, sse=None): @@ -323,13 +357,13 @@ def test_fput_object_small_file(log_entry, sse=None): # Get a unique bucket_name and object_name bucket_name = _gen_bucket_name() object_name = f"{uuid4()}-f" - metadata = {'x-amz-storage-class': 'STANDARD_IA'} + metadata = HTTPHeaderDict({'x-amz-storage-class': 'STANDARD_IA'}) log_entry["args"] = { "bucket_name": bucket_name, "object_name": object_name, "file_path": _TEST_FILE, - "metadata": metadata, + "metadata": _serialize(metadata), } _test_fput_object(bucket_name, object_name, _TEST_FILE, metadata, sse) @@ -344,13 +378,13 @@ def test_fput_object_large_file(log_entry, sse=None): # Get a unique bucket_name and object_name bucket_name = _gen_bucket_name() object_name = f"{uuid4()}-large" - metadata = {'x-amz-storage-class': 'STANDARD_IA'} + metadata = HTTPHeaderDict({'x-amz-storage-class': 'STANDARD_IA'}) log_entry["args"] = { "bucket_name": bucket_name, "object_name": object_name, "file_path": _LARGE_FILE, - "metadata": metadata, + "metadata": _serialize(metadata), } # upload local large file through multipart. 
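A note on the `HTTPHeaderDict` switch running through these hunks: unlike the plain `dict` it replaces, urllib3's `HTTPHeaderDict` is case-insensitive and can hold several values under one key, which is what the reworked `_get_canonical_headers` (via `headers.get_all(key)`) and the new `_serialize` test helper (via `headers.getlist(key)`) rely on. A minimal sketch of that behavior, imported as in the test diff above; the header names here are illustrative only:

```py
from urllib3._collections import HTTPHeaderDict

headers = HTTPHeaderDict({"x-amz-storage-class": "STANDARD_IA"})
headers.add("X-Amz-Meta-Tag", "one")
headers.add("X-Amz-Meta-Tag", "two")  # a second value under the same key

# Lookups are case-insensitive.
assert headers["X-AMZ-STORAGE-CLASS"] == "STANDARD_IA"

# Both values of the repeated key survive; get_all() returns them in order.
assert headers.get_all("x-amz-meta-tag") == ["one", "two"]

# This is the {key: [values]} shape the _serialize() helper produces
# for the test log entries.
assert {key: headers.get_all(key) for key in headers} == {
    "x-amz-storage-class": ["STANDARD_IA"],
    "X-Amz-Meta-Tag": ["one", "two"],
}
```

The `HTTPQueryDict` helper imported from `minio.helpers` (used later in this patch for `extra_query_params`) appears to play the analogous multi-value role for URL query parameters.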
@@ -364,14 +398,14 @@ def test_fput_object_with_content_type( # pylint: disable=invalid-name # Get a unique bucket_name and object_name bucket_name = _gen_bucket_name() object_name = f"{uuid4()}-f" - metadata = {'x-amz-storage-class': 'STANDARD_IA'} + metadata = HTTPHeaderDict({'x-amz-storage-class': 'STANDARD_IA'}) content_type = 'application/octet-stream' log_entry["args"] = { "bucket_name": bucket_name, "object_name": object_name, "file_path": _TEST_FILE, - "metadata": metadata, + "metadata": _serialize(metadata), "content_type": content_type, } @@ -441,21 +475,40 @@ def test_copy_object_no_copy_condition( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object(bucket_name, object_source, reader, size, sse=ssec) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_source, + data=reader, + length=size, + sse=ssec, + ) _CLIENT.copy_object( - bucket_name, object_copy, sse=ssec, - source=CopySource(bucket_name, object_source, ssec=ssec_copy), + bucket_name=bucket_name, + object_name=object_copy, + sse=ssec, + source=CopySource( + bucket_name=bucket_name, + object_name=object_source, + ssec=ssec_copy, + ), + ) + st_obj = _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_copy, + ssec=ssec, ) - st_obj = _CLIENT.stat_object(bucket_name, object_copy, ssec=ssec) _validate_stat(st_obj, size, {}) finally: - _CLIENT.remove_object(bucket_name, object_source) - _CLIENT.remove_object(bucket_name, object_copy) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_source, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_copy_object_with_metadata(log_entry): @@ -466,40 +519,57 @@ def test_copy_object_with_metadata(log_entry): object_name = f"{uuid4()}" object_source = object_name + "-source" object_copy = object_name + "-copy" - metadata = { + metadata = HTTPHeaderDict({ "testing-string": "string", - "testing-int": 1, - 10: 'value', - } + "testing-int": "1", + "10": 'value', + }) log_entry["args"] = { "bucket_name": bucket_name, "object_source": object_source, "object_name": object_copy, - "metadata": metadata, + "metadata": _serialize(metadata), } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object(bucket_name, object_source, reader, size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_source, + data=reader, + length=size, + ) # Perform a server side copy of an object _CLIENT.copy_object( - bucket_name, object_copy, CopySource(bucket_name, object_source), - metadata=metadata, metadata_directive=REPLACE, + bucket_name=bucket_name, + object_name=object_copy, + source=CopySource( + bucket_name=bucket_name, + object_name=object_source, + ), + user_metadata=metadata, + metadata_directive=REPLACE, ) # Verification - st_obj = _CLIENT.stat_object(bucket_name, object_copy) + st_obj = _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_copy, + ) expected_metadata = {'x-amz-meta-testing-int': '1', 'x-amz-meta-testing-string': 'string', 'x-amz-meta-10': 'value'} _validate_stat(st_obj, size, expected_metadata) finally: - _CLIENT.remove_object(bucket_name, object_source) - 
_CLIENT.remove_object(bucket_name, object_copy) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_source, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_copy_object_etag_match(log_entry): @@ -518,26 +588,47 @@ def test_copy_object_etag_match(log_entry): } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object(bucket_name, object_source, reader, size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_source, + data=reader, + length=size, + ) # Perform a server side copy of an object _CLIENT.copy_object( - bucket_name, object_copy, CopySource(bucket_name, object_source), + bucket_name=bucket_name, + object_name=object_copy, + source=CopySource( + bucket_name=bucket_name, + object_name=object_source, + ), ) # Verification - source_etag = _CLIENT.stat_object(bucket_name, object_source).etag + source_etag = _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_source, + ).etag log_entry["args"]["conditions"] = {'set_match_etag': source_etag} _CLIENT.copy_object( - bucket_name, object_copy, - CopySource(bucket_name, object_source, match_etag=source_etag), + bucket_name=bucket_name, + object_name=object_copy, + source=CopySource( + bucket_name=bucket_name, + object_name=object_source, + match_etag=source_etag, + ), ) finally: - _CLIENT.remove_object(bucket_name, object_source) - _CLIENT.remove_object(bucket_name, object_copy) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_source, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_copy_object_negative_etag_match( # pylint: disable=invalid-name @@ -557,27 +648,40 @@ def test_copy_object_negative_etag_match( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object(bucket_name, object_source, reader, size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_source, + data=reader, + length=size, + ) try: # Perform a server side copy of an object # with incorrect pre-conditions and fail etag = 'test-etag' log_entry["args"]["conditions"] = {'set_match_etag': etag} _CLIENT.copy_object( - bucket_name, object_copy, - CopySource(bucket_name, object_source, match_etag=etag), + bucket_name=bucket_name, + object_name=object_copy, + source=CopySource( + bucket_name=bucket_name, + object_name=object_source, + match_etag=etag, + ), ) except S3Error as exc: if exc.code != "PreconditionFailed": raise finally: - _CLIENT.remove_object(bucket_name, object_source) - _CLIENT.remove_object(bucket_name, object_copy) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_source, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_copy_object_modified_since(log_entry): @@ -596,11 +700,16 @@ def test_copy_object_modified_since(log_entry): } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = 
LimitedRandomReader(size) - _CLIENT.put_object(bucket_name, object_source, reader, size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_source, + data=reader, + length=size, + ) # Set up the 'modified_since' copy condition mod_since = datetime(2014, 4, 1, tzinfo=timezone.utc) log_entry["args"]["conditions"] = { @@ -608,17 +717,21 @@ def test_copy_object_modified_since(log_entry): # Perform a server side copy of an object # and expect the copy to complete successfully _CLIENT.copy_object( - bucket_name, object_copy, - CopySource( + bucket_name=bucket_name, + object_name=object_copy, + source=CopySource( bucket_name=bucket_name, object_name=object_source, modified_since=mod_since, ), ) finally: - _CLIENT.remove_object(bucket_name, object_source) - _CLIENT.remove_object(bucket_name, object_copy) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_source, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_copy_object_unmodified_since( # pylint: disable=invalid-name @@ -638,11 +751,16 @@ def test_copy_object_unmodified_since( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Upload a streaming object of 1 KiB size = 1 * KB reader = LimitedRandomReader(size) - _CLIENT.put_object(bucket_name, object_source, reader, size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_source, + data=reader, + length=size, + ) # Set up the 'unmodified_since' copy condition unmod_since = datetime(2014, 4, 1, tzinfo=timezone.utc) log_entry["args"]["conditions"] = { @@ -652,8 +770,9 @@ def test_copy_object_unmodified_since( # pylint: disable=invalid-name # the copy to fail since the creation/modification # time is now, way later than unmodification time, April 1st, 2014 _CLIENT.copy_object( - bucket_name, object_copy, - CopySource( + bucket_name=bucket_name, + object_name=object_copy, + source=CopySource( bucket_name=bucket_name, object_name=object_source, unmodified_since=unmod_since, @@ -663,9 +782,12 @@ def test_copy_object_unmodified_since( # pylint: disable=invalid-name if exc.code != "PreconditionFailed": raise finally: - _CLIENT.remove_object(bucket_name, object_source) - _CLIENT.remove_object(bucket_name, object_copy) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_source, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_copy) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_put_object(log_entry, sse=None): @@ -687,11 +809,21 @@ def test_put_object(log_entry, sse=None): } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Put/Upload a streaming object of 1 MiB reader = LimitedRandomReader(length) - _CLIENT.put_object(bucket_name, object_name, reader, length, sse=sse) - _CLIENT.stat_object(bucket_name, object_name, ssec=sse) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=reader, + length=length, + sse=sse, + ) + _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_name, + ssec=sse, + ) # Put/Upload a streaming object of 11 MiB log_entry["args"]["length"] = length = 11 * MB @@ -707,13 +839,23 @@ def test_put_object(log_entry, sse=None): log_entry["args"]["content_type"] = content_type = ( "application/octet-stream") log_entry["args"]["object_name"] = object_name + "-metadata" 
- _CLIENT.put_object(bucket_name, object_name + "-metadata", reader, - length, content_type, metadata, sse=sse) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name + "-metadata", + data=reader, + length=length, + content_type=content_type, + user_metadata=metadata, + sse=sse, + ) # Stat on the uploaded object to check if it exists # Fetch saved stat metadata on a previously uploaded object with # metadata. - st_obj = _CLIENT.stat_object(bucket_name, object_name + "-metadata", - ssec=sse) + st_obj = _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_name + "-metadata", + ssec=sse, + ) normalized_meta = { key.lower(): value for key, value in ( st_obj.metadata or {}).items() @@ -726,9 +868,12 @@ def test_put_object(log_entry, sse=None): if 'x-amz-meta-test-key' not in normalized_meta: raise ValueError("Metadata key 'x-amz-meta-test-key' not found") finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_object(bucket_name, object_name+'-metadata') - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_name+'-metadata', + ) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_negative_put_object_with_path_segment( # pylint: disable=invalid-name @@ -748,14 +893,19 @@ def test_negative_put_object_with_path_segment( # pylint: disable=invalid-name } try: - _CLIENT.make_bucket(bucket_name) - _CLIENT.put_object(bucket_name, object_name, io.BytesIO(b''), 0) - _CLIENT.remove_object(bucket_name, object_name) + _CLIENT.make_bucket(bucket_name=bucket_name) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=io.BytesIO(b''), + length=0, + ) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) except S3Error as err: if err.code != 'XMinioInvalidObjectName': raise finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def _test_stat_object(log_entry, sse=None, version_check=False): @@ -779,20 +929,28 @@ def _test_stat_object(log_entry, sse=None, version_check=False): version_id1 = None version_id2 = None - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: if version_check: _CLIENT.set_bucket_versioning( - bucket_name, VersioningConfig(ENABLED), + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), ) # Put/Upload a streaming object of 1 MiB reader = LimitedRandomReader(length) result = _CLIENT.put_object( - bucket_name, object_name, reader, length, sse=sse, + bucket_name=bucket_name, + object_name=object_name, + data=reader, + length=length, + sse=sse, ) version_id1 = result.version_id _CLIENT.stat_object( - bucket_name, object_name, ssec=sse, version_id=version_id1, + bucket_name=bucket_name, + object_name=object_name, + ssec=sse, + version_id=version_id1, ) # Put/Upload a streaming object of 11 MiB @@ -805,27 +963,40 @@ def _test_stat_object(log_entry, sse=None, version_check=False): "application/octet-stream") log_entry["args"]["object_name"] = object_name + "-metadata" result = _CLIENT.put_object( - bucket_name, object_name + "-metadata", reader, - length, content_type, metadata, sse=sse, + bucket_name=bucket_name, + object_name=object_name + "-metadata", + data=reader, + length=length, + content_type=content_type, + user_metadata=metadata, + sse=sse, ) version_id2 = result.version_id # Stat on the uploaded object to check if it exists # Fetch saved stat metadata on a previously 
uploaded object with # metadata. st_obj = _CLIENT.stat_object( - bucket_name, object_name + "-metadata", - ssec=sse, version_id=version_id2, + bucket_name=bucket_name, + object_name=object_name + "-metadata", + ssec=sse, + version_id=version_id2, ) # Verify the collected stat data. _validate_stat( st_obj, length, metadata, version_id=version_id2, ) finally: - _CLIENT.remove_object(bucket_name, object_name, version_id=version_id1) _CLIENT.remove_object( - bucket_name, object_name+'-metadata', version_id=version_id2, + bucket_name=bucket_name, + object_name=object_name, + version_id=version_id1, + ) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_name+'-metadata', + version_id=version_id2, ) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_stat_object(log_entry, sse=None): @@ -851,20 +1022,26 @@ def _test_remove_object(log_entry, version_check=False): "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: if version_check: _CLIENT.set_bucket_versioning( - bucket_name, VersioningConfig(ENABLED), + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), ) result = _CLIENT.put_object( - bucket_name, object_name, LimitedRandomReader(length), length, + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(length), + length=length, ) _CLIENT.remove_object( - bucket_name, object_name, version_id=result.version_id, + bucket_name=bucket_name, + object_name=object_name, + version_id=result.version_id, ) finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_remove_object(log_entry): @@ -893,29 +1070,40 @@ def _test_get_object(log_entry, sse=None, version_check=False): "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) version_id = None try: if version_check: _CLIENT.set_bucket_versioning( - bucket_name, VersioningConfig(ENABLED), + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), ) result = _CLIENT.put_object( - bucket_name, object_name, LimitedRandomReader(length), - length, sse=sse, + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(length), + length=length, + sse=sse, ) version_id = result.version_id # Get/Download a full object, iterate on response to save to disk object_data = _CLIENT.get_object( - bucket_name, object_name, ssec=sse, version_id=version_id, + bucket_name=bucket_name, + object_name=object_name, + ssec=sse, + version_id=version_id, ) newfile = 'newfile جديد' with open(newfile, 'wb') as file_data: shutil.copyfileobj(object_data, file_data) os.remove(newfile) finally: - _CLIENT.remove_object(bucket_name, object_name, version_id=version_id) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_name, + version_id=version_id, + ) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_get_object(log_entry, sse=None): @@ -947,26 +1135,38 @@ def _test_fget_object(log_entry, sse=None, version_check=False): "file_path": tmpfile } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) version_id = None try: if version_check: _CLIENT.set_bucket_versioning( - bucket_name, VersioningConfig(ENABLED), + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), ) result = _CLIENT.put_object( - bucket_name, object_name, LimitedRandomReader(length), - length, sse=sse, + bucket_name=bucket_name, + 
object_name=object_name, + data=LimitedRandomReader(length), + length=length, + sse=sse, ) version_id = result.version_id # Get/Download a full object and save locally at path _CLIENT.fget_object( - bucket_name, object_name, tmpfile, ssec=sse, version_id=version_id, + bucket_name=bucket_name, + object_name=object_name, + file_path=tmpfile, + ssec=sse, + version_id=version_id, ) os.remove(tmpfile) finally: - _CLIENT.remove_object(bucket_name, object_name, version_id=version_id) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_name, + version_id=version_id, + ) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_fget_object(log_entry, sse=None): @@ -999,13 +1199,22 @@ def test_get_object_with_default_length( # pylint: disable=invalid-name "offset": offset } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: - _CLIENT.put_object(bucket_name, object_name, - LimitedRandomReader(size), size, sse=sse) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, + sse=sse, + ) # Get half of the object - object_data = _CLIENT.get_object(bucket_name, object_name, - offset=offset, ssec=sse) + object_data = _CLIENT.get_object( + bucket_name=bucket_name, + object_name=object_name, + offset=offset, + ssec=sse, + ) newfile = 'newfile' with open(newfile, 'wb') as file_data: for data in object_data: @@ -1016,8 +1225,8 @@ def test_get_object_with_default_length( # pylint: disable=invalid-name if new_file_size != length: raise ValueError('Unexpected file size after running ') finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_get_partial_object(log_entry, sse=None): @@ -1039,13 +1248,23 @@ def test_get_partial_object(log_entry, sse=None): "offset": offset } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: - _CLIENT.put_object(bucket_name, object_name, - LimitedRandomReader(size), size, sse=sse) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, + sse=sse, + ) # Get half of the object - object_data = _CLIENT.get_object(bucket_name, object_name, - offset=offset, length=length, ssec=sse) + object_data = _CLIENT.get_object( + bucket_name=bucket_name, + object_name=object_name, + offset=offset, + length=length, + ssec=sse, + ) newfile = 'newfile' with open(newfile, 'wb') as file_data: for data in object_data: @@ -1056,8 +1275,8 @@ def test_get_partial_object(log_entry, sse=None): if new_file_size != length: raise ValueError('Unexpected file size after running ') finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def _test_list_objects(log_entry, use_api_v1=False, version_check=False): @@ -1074,26 +1293,36 @@ def _test_list_objects(log_entry, use_api_v1=False, version_check=False): "recursive": is_recursive, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) version_id1 = None version_id2 = None try: if version_check: _CLIENT.set_bucket_versioning( - bucket_name, VersioningConfig(ENABLED), + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), ) size = 1 * KB 
result = _CLIENT.put_object( - bucket_name, object_name + "-1", LimitedRandomReader(size), size, + bucket_name=bucket_name, + object_name=object_name + "-1", + data=LimitedRandomReader(size), + length=size, ) version_id1 = result.version_id result = _CLIENT.put_object( - bucket_name, object_name + "-2", LimitedRandomReader(size), size, + bucket_name=bucket_name, + object_name=object_name + "-2", + data=LimitedRandomReader(size), + length=size, ) version_id2 = result.version_id # List all object paths in bucket. objects = _CLIENT.list_objects( - bucket_name, '', is_recursive, include_version=version_check, + bucket_name=bucket_name, + prefix='', + recursive=is_recursive, + include_version=version_check, use_api_v1=use_api_v1, ) for obj in objects: @@ -1107,12 +1336,16 @@ def _test_list_objects(log_entry, use_api_v1=False, version_check=False): ) finally: _CLIENT.remove_object( - bucket_name, object_name + "-1", version_id=version_id1, + bucket_name=bucket_name, + object_name=object_name + "-1", + version_id=version_id1, ) _CLIENT.remove_object( - bucket_name, object_name + "-2", version_id=version_id2, + bucket_name=bucket_name, + object_name=object_name + "-2", + version_id=version_id2, ) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_list_objects_v1(log_entry): @@ -1125,12 +1358,12 @@ def test_list_object_v1_versions(log_entry): _test_list_objects(log_entry, use_api_v1=True, version_check=True) -def _test_list_objects_api(bucket_name, expected_no, *argv): +def _test_list_objects_api(bucket_name, expected_no, **kwargs): """Test list_objects().""" - # argv is composed of prefix and recursive arguments of - # list_objects api. They are both supposed to be passed as strings. - objects = _CLIENT.list_objects(bucket_name, *argv) + # kwargs may carry the prefix and recursive arguments of the + # list_objects api. + objects = _CLIENT.list_objects(bucket_name=bucket_name, **kwargs) # expect all objects to be listed no_of_files = 0 @@ -1158,15 +1391,19 @@ def test_list_objects_with_prefix(log_entry): "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: size = 1 * KB no_of_created_files = 4 path_prefix = "" # Create files and directories for i in range(no_of_created_files): - _CLIENT.put_object(bucket_name, f"{path_prefix}{i}_{object_name}", - LimitedRandomReader(size), size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=f"{path_prefix}{i}_{object_name}", + data=LimitedRandomReader(size), + length=size, + ) path_prefix = f"{path_prefix}{i}/" # Created files and directory structure @@ -1184,7 +1421,9 @@ def test_list_objects_with_prefix(log_entry): # List objects recursively with NO prefix log_entry["args"]["prefix"] = prefix = "" # no prefix log_entry["args"]["recursive"] = recursive = "" - _test_list_objects_api(bucket_name, no_of_created_files, prefix, True) + _test_list_objects_api( + bucket_name, no_of_created_files, prefix=prefix, recursive=True, + ) # List objects at the top level with no prefix and no recursive option # Expect only the top 2 objects to be listed @@ -1193,32 +1432,35 @@ def test_list_objects_with_prefix(log_entry): # List objects for '0' directory/prefix without recursive option # Expect 2 objects (directory '0' and '0_' object) to be listed log_entry["args"]["prefix"] = prefix = "0" - _test_list_objects_api(bucket_name, 2, prefix) + _test_list_objects_api(bucket_name, 2, prefix=prefix) # List objects for '0/' directory/prefix without recursive option # Expect only 2 objects under directory '0/' to be listed, # non-recursive
log_entry["args"]["prefix"] = prefix = "0/" - _test_list_objects_api(bucket_name, 2, prefix) + _test_list_objects_api(bucket_name, 2, prefix=prefix) # List objects for '0/' directory/prefix, recursively # Expect 2 objects to be listed log_entry["args"]["prefix"] = prefix = "0/" log_entry["args"]["recursive"] = recursive = "True" - _test_list_objects_api(bucket_name, 3, prefix, recursive) + _test_list_objects_api( + bucket_name, 3, prefix=prefix, recursive=recursive, + ) # List object with '0/1/2/' directory/prefix, non-recursive # Expect the single object under directory '0/1/2/' to be listed log_entry["args"]["prefix"] = prefix = "0/1/2/" - _test_list_objects_api(bucket_name, 1, prefix) + _test_list_objects_api(bucket_name, 1, prefix=prefix) finally: path_prefix = "" for i in range(no_of_created_files): _CLIENT.remove_object( - bucket_name, f"{path_prefix}{i}_{object_name}", + bucket_name=bucket_name, + object_name=f"{path_prefix}{i}_{object_name}", ) path_prefix = f"{path_prefix}{i}/" - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) # Test passes log_entry["args"]["prefix"] = ( "Several prefix/recursive combinations are tested") @@ -1239,21 +1481,28 @@ def test_list_objects_with_1001_files( # pylint: disable=invalid-name "object_name": f"{object_name}_0 ~ {0}_1000", } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: size = 1 * KB no_of_created_files = 2000 # Create files and directories for i in range(no_of_created_files): - _CLIENT.put_object(bucket_name, f"{object_name}_{i}", - LimitedRandomReader(size), size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=f"{object_name}_{i}", + data=LimitedRandomReader(size), + length=size, + ) # List objects and check if 1001 files are returned _test_list_objects_api(bucket_name, no_of_created_files) finally: for i in range(no_of_created_files): - _CLIENT.remove_object(bucket_name, f"{object_name}_{i}") - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=f"{object_name}_{i}", + ) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_list_objects(log_entry): @@ -1279,13 +1528,19 @@ def test_presigned_get_object_default_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: size = 1 * KB - _CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), - size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, + ) presigned_get_object_url = _CLIENT.presigned_get_object( - bucket_name, object_name) + bucket_name=bucket_name, + object_name=object_name, + ) response = HTTP.urlopen('GET', presigned_get_object_url) if response.status != 200: raise Exception( @@ -1293,8 +1548,8 @@ def test_presigned_get_object_default_expiry( # pylint: disable=invalid-name f"code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_expiry( # pylint: disable=invalid-name @@ -1310,13 +1565,20 @@ def test_presigned_get_object_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: size = 1 * KB - 
_CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), - size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, + ) presigned_get_object_url = _CLIENT.presigned_get_object( - bucket_name, object_name, timedelta(seconds=120)) + bucket_name=bucket_name, + object_name=object_name, + expires=timedelta(seconds=120), + ) response = HTTP.urlopen('GET', presigned_get_object_url) if response.status != 200: raise Exception( @@ -1343,7 +1605,10 @@ def test_presigned_get_object_expiry( # pylint: disable=invalid-name ) presigned_get_object_url = _CLIENT.presigned_get_object( - bucket_name, object_name, timedelta(seconds=1)) + bucket_name=bucket_name, + object_name=object_name, + expires=timedelta(seconds=1), + ) # Wait for 2 seconds for the presigned url to expire time.sleep(2) @@ -1359,8 +1624,8 @@ def test_presigned_get_object_expiry( # pylint: disable=invalid-name if response.status == 200: raise ValueError('Presigned get url failed to expire!') finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_response_headers( # pylint: disable=invalid-name @@ -1380,17 +1645,25 @@ def test_presigned_get_object_response_headers( # pylint: disable=invalid-name "content_language": content_language, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: size = 1 * KB - _CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), - size) - response_headers = { + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, + ) + extra_query_params = HTTPQueryDict({ 'response-content-type': content_type, - 'response-content-language': content_language - } + 'response-content-language': content_language, + }) presigned_get_object_url = _CLIENT.presigned_get_object( - bucket_name, object_name, timedelta(seconds=120), response_headers) + bucket_name=bucket_name, + object_name=object_name, + expires=timedelta(seconds=120), + extra_query_params=extra_query_params, + ) log_entry["args"]["presigned_get_object_url"] = ( presigned_get_object_url) @@ -1417,8 +1690,8 @@ def test_presigned_get_object_response_headers( # pylint: disable=invalid-name "code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_range( # pylint: disable=invalid-name @@ -1434,14 +1707,21 @@ def test_presigned_get_object_range( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: size = 556433 # on purpose its unaligned - _CLIENT.put_object(bucket_name, object_name, LimitedRandomReader(size), - size) + _CLIENT.put_object( + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, + ) presigned_get_object_url = _CLIENT.presigned_get_object( - bucket_name, object_name, timedelta(seconds=120)) + bucket_name=bucket_name, + object_name=object_name, + expires=timedelta(seconds=120), + ) log_entry["args"]["presigned_get_object_url"] = ( presigned_get_object_url) @@ -1462,8 +1742,8 @@ def 
test_presigned_get_object_range( # pylint: disable=invalid-name "code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_get_object_version( # pylint: disable=invalid-name @@ -1479,17 +1759,25 @@ def test_presigned_get_object_version( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) version_id = None try: - _CLIENT.set_bucket_versioning(bucket_name, VersioningConfig(ENABLED)) + _CLIENT.set_bucket_versioning( + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), + ) size = 1 * KB result = _CLIENT.put_object( - bucket_name, object_name, LimitedRandomReader(size), size, + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, ) version_id = result.version_id presigned_get_object_url = _CLIENT.presigned_get_object( - bucket_name, object_name, version_id=version_id, + bucket_name=bucket_name, + object_name=object_name, + version_id=version_id, ) response = HTTP.urlopen('GET', presigned_get_object_url) if response.status != 200: @@ -1498,8 +1786,12 @@ def test_presigned_get_object_version( # pylint: disable=invalid-name f"code: {response.code}, error: {response.data}" ) finally: - _CLIENT.remove_object(bucket_name, object_name, version_id=version_id) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object( + bucket_name=bucket_name, + object_name=object_name, + version_id=version_id, + ) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name @@ -1515,10 +1807,12 @@ def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: presigned_put_object_url = _CLIENT.presigned_put_object( - bucket_name, object_name) + bucket_name=bucket_name, + object_name=object_name, + ) response = HTTP.urlopen('PUT', presigned_put_object_url, LimitedRandomReader(1 * KB)) @@ -1527,10 +1821,13 @@ def test_presigned_put_object_default_expiry( # pylint: disable=invalid-name f"Presigned PUT object URL {presigned_put_object_url} failed; " f"code: {response.code}, error: {response.data}" ) - _CLIENT.stat_object(bucket_name, object_name) + _CLIENT.stat_object( + bucket_name=bucket_name, + object_name=object_name, + ) finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_put_object_expiry( # pylint: disable=invalid-name @@ -1546,10 +1843,13 @@ def test_presigned_put_object_expiry( # pylint: disable=invalid-name "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: presigned_put_object_url = _CLIENT.presigned_put_object( - bucket_name, object_name, timedelta(seconds=1)) + bucket_name=bucket_name, + object_name=object_name, + expires=timedelta(seconds=1), + ) # Wait for 2 seconds for the presigned url to expire time.sleep(2) response = HTTP.urlopen('PUT', @@ -1558,8 +1858,8 @@ def test_presigned_put_object_expiry( # pylint: disable=invalid-name if response.status == 200: raise ValueError('Presigned put url 
failed to expire!') finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_presigned_post_policy(log_entry): @@ -1572,7 +1872,7 @@ def test_presigned_post_policy(log_entry): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: no_of_days = 10 prefix = 'objectPrefix/' @@ -1589,9 +1889,9 @@ def test_presigned_post_policy(log_entry): "content_length_range": "64KiB to 10MiB", "Content-Type": "image/", } - _CLIENT.presigned_post_policy(policy) + _CLIENT.presigned_post_policy(policy=policy) finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_thread_safe(log_entry): @@ -1602,7 +1902,7 @@ def test_thread_safe(log_entry): "bucket_name": bucket_name, "object_name": object_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) test_file_sha256sum = _get_sha256sum(_LARGE_FILE) exceptions = [] @@ -1610,7 +1910,11 @@ def test_thread_safe(log_entry): def get_object_and_check(index): local_file = f"copied_file_{index}" try: - _CLIENT.fget_object(bucket_name, object_name, local_file) + _CLIENT.fget_object( + bucket_name=bucket_name, + object_name=object_name, + file_path=local_file, + ) if _get_sha256sum(local_file) != test_file_sha256sum: raise ValueError( "checksum mismatch on multi-threaded put/get objects") @@ -1620,14 +1924,13 @@ def get_object_and_check(index): _ = os.path.isfile(local_file) and os.remove(local_file) try: - thread_count = 5 + _CLIENT.fput_object( + bucket_name=bucket_name, + object_name=object_name, + file_path=_LARGE_FILE, + ) - # Start threads for put object. - for _ in range(thread_count): - thread = Thread(target=_CLIENT.fput_object, - args=(bucket_name, object_name, _LARGE_FILE)) - thread.start() - thread.join() + thread_count = 5 # Start threads for get object. 
threads = [] @@ -1641,8 +1944,8 @@ def get_object_and_check(index): if exceptions: raise exceptions[0] finally: - _CLIENT.remove_object(bucket_name, object_name) - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, object_name=object_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_get_bucket_policy(log_entry): @@ -1653,14 +1956,14 @@ def test_get_bucket_policy(log_entry): log_entry["args"] = { "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: - _CLIENT.get_bucket_policy(bucket_name) + _CLIENT.get_bucket_policy(bucket_name=bucket_name) except S3Error as exc: if exc.code != "NoSuchBucketPolicy": raise finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def _get_policy_actions(stat): @@ -1678,7 +1981,8 @@ def listit(value): def _validate_policy(bucket_name, policy): """Validate policy.""" - policy_dict = json.loads(_CLIENT.get_bucket_policy(bucket_name)) + policy_dict = json.loads( + _CLIENT.get_bucket_policy(bucket_name=bucket_name)) actions = _get_policy_actions(policy_dict.get('Statement')) expected_actions = _get_policy_actions(policy.get('Statement')) return expected_actions == actions @@ -1693,16 +1997,16 @@ def test_get_bucket_notification(log_entry): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: - config = _CLIENT.get_bucket_notification(bucket_name) + config = _CLIENT.get_bucket_notification(bucket_name=bucket_name) if ( config.cloud_func_config_list or config.queue_config_list or config.topic_config_list ): raise ValueError("Failed to receive an empty bucket notification") finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_set_bucket_policy_readonly(log_entry): @@ -1714,7 +2018,7 @@ def test_set_bucket_policy_readonly(log_entry): "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: # read-only policy policy = { @@ -1744,12 +2048,15 @@ def test_set_bucket_policy_readonly(log_entry): ] } # Set read-only policy - _CLIENT.set_bucket_policy(bucket_name, json.dumps(policy)) + _CLIENT.set_bucket_policy( + bucket_name=bucket_name, + policy=json.dumps(policy), + ) # Validate if the policy is set correctly if not _validate_policy(bucket_name, policy): raise ValueError('Failed to set ReadOnly bucket policy') finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name @@ -1762,7 +2069,7 @@ def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name "bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: # Read-write policy policy = { @@ -1803,12 +2110,15 @@ def test_set_bucket_policy_readwrite( # pylint: disable=invalid-name ] } # Set read-write policy - _CLIENT.set_bucket_policy(bucket_name, json.dumps(policy)) + _CLIENT.set_bucket_policy( + bucket_name=bucket_name, + policy=json.dumps(policy), + ) # Validate if the policy is set correctly if not _validate_policy(bucket_name, policy): - raise ValueError('Failed to set ReadOnly bucket policy') + raise ValueError('Failed to set ReadWrite bucket policy') finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def _test_remove_objects(log_entry, version_check=False): @@ -1820,20 +2130,24 @@
"bucket_name": bucket_name, } - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) object_names = [] delete_object_list = [] try: if version_check: _CLIENT.set_bucket_versioning( - bucket_name, VersioningConfig(ENABLED), + bucket_name=bucket_name, + config=VersioningConfig(ENABLED), ) size = 1 * KB # Upload some new objects to prepare for multi-object delete test. for i in range(10): object_name = f"prefix-{i}" result = _CLIENT.put_object( - bucket_name, object_name, LimitedRandomReader(size), size, + bucket_name=bucket_name, + object_name=object_name, + data=LimitedRandomReader(size), + length=size, ) object_names.append( (object_name, result.version_id) if version_check @@ -1847,15 +2161,21 @@ def _test_remove_objects(log_entry, version_check=False): else DeleteObject(args[0], args[1]) ) # delete the objects in a single library call. - errs = _CLIENT.remove_objects(bucket_name, delete_object_list) + errs = _CLIENT.remove_objects( + bucket_name=bucket_name, + delete_object_list=delete_object_list, + ) for err in errs: raise ValueError(f"Remove objects err: {err}") finally: # Try to clean everything to keep our server intact - errs = _CLIENT.remove_objects(bucket_name, delete_object_list) + errs = _CLIENT.remove_objects( + bucket_name=bucket_name, + delete_object_list=delete_object_list, + ) for err in errs: raise ValueError(f"Remove objects err: {err}") - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def test_remove_objects(log_entry): @@ -1882,12 +2202,12 @@ def test_remove_bucket(log_entry): if _IS_AWS: log_entry["args"]["location"] = location = "us-east-1" - _CLIENT.make_bucket(bucket_name, location) + _CLIENT.make_bucket(bucket_name=bucket_name, location=location) else: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) # Removing bucket. This operation will only work if your bucket is empty. 
- _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def _test_upload_snowball_objects(log_entry, staging_filename=None): @@ -1901,13 +2221,13 @@ def _test_upload_snowball_objects(log_entry, staging_filename=None): } try: - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) size = 3 * MB reader1 = LimitedRandomReader(size) reader2 = LimitedRandomReader(size) _CLIENT.upload_snowball_objects( - bucket_name, - [ + bucket_name=bucket_name, + objects=[ SnowballObject("my-object1", data=io.BytesIO(b"py"), length=2), SnowballObject( "my-object2", data=reader1, length=size, @@ -1921,10 +2241,13 @@ def _test_upload_snowball_objects(log_entry, staging_filename=None): ) _test_list_objects_api(bucket_name, 3) finally: - _CLIENT.remove_object(bucket_name, "my-object1") - _CLIENT.remove_object(bucket_name, "my-object2") - _CLIENT.remove_object(bucket_name, "my-object3") - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_object(bucket_name=bucket_name, + object_name="my-object1") + _CLIENT.remove_object(bucket_name=bucket_name, + object_name="my-object2") + _CLIENT.remove_object(bucket_name=bucket_name, + object_name="my-object3") + _CLIENT.remove_bucket(bucket_name=bucket_name) if staging_filename and os.path.exists(staging_filename): os.remove(staging_filename) @@ -1953,18 +2276,18 @@ def test_set_get_bucket_versioning(log_entry): excl_prefixes = ['prefix1', 'prefix2'] - _CLIENT.make_bucket(bucket_name) + _CLIENT.make_bucket(bucket_name=bucket_name) try: # Test all fields of versioning configuration _CLIENT.set_bucket_versioning( - bucket_name, - VersioningConfig(status=ENABLED, - exclude_folders=True, - excluded_prefixes=excl_prefixes), + bucket_name=bucket_name, + config=VersioningConfig(status=ENABLED, + exclude_folders=True, + excluded_prefixes=excl_prefixes), ) - vcfg = _CLIENT.get_bucket_versioning(bucket_name) + vcfg = _CLIENT.get_bucket_versioning(bucket_name=bucket_name) if vcfg.status != ENABLED: raise ValueError(f'(1) unexpected get_bucket_versioning result: ' f'status: {vcfg.status}') @@ -1977,11 +2300,11 @@ def test_set_get_bucket_versioning(log_entry): # Disable all fields of versioning configuration _CLIENT.set_bucket_versioning( - bucket_name, - VersioningConfig(status=SUSPENDED), + bucket_name=bucket_name, + config=VersioningConfig(status=SUSPENDED), ) - vcfg = _CLIENT.get_bucket_versioning(bucket_name) + vcfg = _CLIENT.get_bucket_versioning(bucket_name=bucket_name) if vcfg.status != SUSPENDED: raise ValueError(f'(2) unexpected get_bucket_versioning result: ' f'status: {vcfg.status}') @@ -1993,7 +2316,7 @@ def test_set_get_bucket_versioning(log_entry): f'excluded_prefixes: {vcfg.excluded_prefixes}') finally: - _CLIENT.remove_bucket(bucket_name) + _CLIENT.remove_bucket(bucket_name=bucket_name) def main(): @@ -2013,7 +2336,12 @@ def main(): secret_key = 'zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG' secure = True - _CLIENT = Minio(server_endpoint, access_key, secret_key, secure=secure) + _CLIENT = Minio( + endpoint=server_endpoint, + access_key=access_key, + secret_key=secret_key, + secure=secure, + ) _IS_AWS = ".amazonaws.com" in server_endpoint # Check if we are running in the mint environment. 
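One pattern worth calling out before the unit-test diffs below: with the client methods now keyword-only, the old `self.assertRaises(Err, client.method, positional_arg)` style can no longer pass arguments positionally, so the rewritten tests either use `assertRaises` as a context manager or forward a kwargs dict. A runnable sketch of the two equivalent styles, using a hypothetical keyword-only stand-in for `Minio.bucket_exists`:

```py
import unittest


def bucket_exists(*, bucket_name: str) -> bool:
    """Hypothetical keyword-only API standing in for Minio.bucket_exists."""
    if not isinstance(bucket_name, str):
        raise TypeError("bucket_name must be a string")
    return True


class KeywordOnlyCallStyles(unittest.TestCase):
    def test_context_manager_form(self):
        # Preferred: the failing call reads like ordinary code.
        with self.assertRaises(TypeError):
            bucket_exists(bucket_name=1234)

    def test_kwargs_form(self):
        # Equivalent: assertRaises forwards **kwargs to the callable.
        kwargs = {"bucket_name": 1234}
        self.assertRaises(TypeError, bucket_exists, **kwargs)


if __name__ == "__main__":
    unittest.main()
```

Both forms appear in `bucket_exist_test.py` below; the context-manager form is generally easier to read when the call takes several keyword arguments.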
diff --git a/tests/unit/bucket_exist_test.py b/tests/unit/bucket_exist_test.py index 7e446605..cf5f954e 100644 --- a/tests/unit/bucket_exist_test.py +++ b/tests/unit/bucket_exist_test.py @@ -26,16 +26,19 @@ class BucketExists(TestCase): def test_bucket_is_string(self): - client = Minio('localhost:9000') - self.assertRaises(TypeError, client.bucket_exists, 1234) + client = Minio(endpoint='localhost:9000') + with self.assertRaises(TypeError): + client.bucket_exists(bucket_name=1234) def test_bucket_is_not_empty_string(self): - client = Minio('localhost:9000') - self.assertRaises(ValueError, client.bucket_exists, ' \t \n ') + client = Minio(endpoint='localhost:9000') + with self.assertRaises(ValueError): + client.bucket_exists(bucket_name=' \t \n ') def test_bucket_exists_invalid_name(self): - client = Minio('localhost:9000') - self.assertRaises(ValueError, client.bucket_exists, 'AB*CD') + client = Minio(endpoint='localhost:9000') + with self.assertRaises(ValueError): + client.bucket_exists(bucket_name='AB*CD') @mock.patch('urllib3.PoolManager') def test_bucket_exists_bad_request(self, mock_connection): @@ -47,8 +50,9 @@ def test_bucket_exists_bad_request(self, mock_connection): {'User-Agent': _DEFAULT_USER_AGENT}, 400) ) - client = Minio('localhost:9000') - self.assertRaises(S3Error, client.bucket_exists, 'hello') + client = Minio(endpoint='localhost:9000') + kwargs = {"bucket_name": 'hello'} + self.assertRaises(S3Error, client.bucket_exists, **kwargs) @mock.patch('urllib3.PoolManager') def test_bucket_exists_works(self, mock_connection): @@ -60,8 +64,8 @@ def test_bucket_exists_works(self, mock_connection): {'User-Agent': _DEFAULT_USER_AGENT}, 200) ) - client = Minio('localhost:9000') - result = client.bucket_exists('hello') + client = Minio(endpoint='localhost:9000') + result = client.bucket_exists(bucket_name='hello') self.assertTrue(result) mock_server.mock_add_request( MockResponse('HEAD', @@ -69,5 +73,5 @@ def test_bucket_exists_works(self, mock_connection): {'User-Agent': _DEFAULT_USER_AGENT}, 404) ) - false_result = client.bucket_exists('goodbye') + false_result = client.bucket_exists(bucket_name='goodbye') self.assertFalse(false_result) diff --git a/tests/unit/copy_object_test.py b/tests/unit/copy_object_test.py index 1ea66459..ab7ad408 100644 --- a/tests/unit/copy_object_test.py +++ b/tests/unit/copy_object_test.py @@ -22,11 +22,13 @@ class CopyObjectTest(TestCase): def test_valid_copy_source(self): - client = Minio('localhost:9000') - self.assertRaises( - ValueError, - client.copy_object, 'hello', '1', '/testbucket/object' - ) + client = Minio(endpoint='localhost:9000') + with self.assertRaises(ValueError): + client.copy_object( + bucket_name='hello', + object_name='1', + source='/testbucket/object', + ) def test_valid_match_etag(self): self.assertRaises( diff --git a/tests/unit/get_bucket_policy_test.py b/tests/unit/get_bucket_policy_test.py index 05b72abf..c6ab6a45 100644 --- a/tests/unit/get_bucket_policy_test.py +++ b/tests/unit/get_bucket_policy_test.py @@ -48,8 +48,9 @@ def test_get_policy_for_non_existent_bucket(self, mock_connection): content=error.encode() ) ) - client = Minio('localhost:9000') - self.assertRaises(S3Error, client.get_bucket_policy, bucket_name) + client = Minio(endpoint='localhost:9000') + with self.assertRaises(S3Error): + client.get_bucket_policy(bucket_name=bucket_name) @mock.patch('urllib3.PoolManager') def test_get_policy_for_existent_bucket(self, mock_connection): @@ -91,6 +92,6 @@ def test_get_policy_for_existent_bucket(self, mock_connection): 
content=mock_data ) ) - client = Minio('localhost:9000') - response = client.get_bucket_policy(bucket_name) + client = Minio(endpoint='localhost:9000') + response = client.get_bucket_policy(bucket_name=bucket_name) self.assertEqual(response, mock_data.decode()) diff --git a/tests/unit/get_object_test.py b/tests/unit/get_object_test.py index d1110d67..7bb09384 100644 --- a/tests/unit/get_object_test.py +++ b/tests/unit/get_object_test.py @@ -27,12 +27,14 @@ class GetObjectTest(TestCase): def test_object_is_string(self): - client = Minio('localhost:9000') - self.assertRaises(TypeError, client.get_object, 'hello', 1234) + client = Minio(endpoint='localhost:9000') + with self.assertRaises(TypeError): + client.get_object(bucket_name='hello', object_name=1234) def test_object_is_not_empty_string(self): - client = Minio('localhost:9000') - self.assertRaises(ValueError, client.get_object, 'hello', ' \t \n ') + client = Minio(endpoint='localhost:9000') + with self.assertRaises(ValueError): + client.get_object(bucket_name='hello', object_name=' \t \n ') @mock.patch('urllib3.PoolManager') def test_get_object_throws_fail(self, mock_connection): @@ -49,5 +51,6 @@ def test_get_object_throws_fail(self, mock_connection): response_headers={"Content-Type": "application/xml"}, content=error_xml.encode()) ) - client = Minio('localhost:9000') - self.assertRaises(S3Error, client.get_object, 'hello', 'key') + client = Minio(endpoint='localhost:9000') + with self.assertRaises(S3Error): + client.get_object(bucket_name='hello', object_name='key') diff --git a/tests/unit/helpers.py b/tests/unit/helpers.py index 5fc54333..4c781c84 100644 --- a/tests/unit/helpers.py +++ b/tests/unit/helpers.py @@ -35,1741 +35,3 @@ def generate_error(code, message, request_id, host_id, '''.format(code, message, request_id, host_id, resource, bucket_name, object_name) - - -class BaseURLTests(TestCase): - def test_aws_new_baseurl_error(self): - cases = [ - # invalid Amazon AWS host error - "https://z3.amazonaws.com", - "https://1234567890.s3.amazonaws.com", - "https://1234567890.s3-accelerate.amazonaws.com", - "https://1234567890.abcdefgh.s3-control.amazonaws.com", - "https://s3fips.amazonaws.com", - "https://s3-fips.s3.amazonaws.com", - "https://s3-fips.s3accelerate.amazonaws.com", - "https://s3-fips.s3-accelerate.amazonaws.com", - "https://bucket.vpce.s3.us-east-1.vpce.amazonaws.com", - "https://bucket.bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", - "https://accesspoint.accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." 
- "vpce.amazonaws.com", - "https://accesspoint.vpce-1123.vpce-xyz.s3.amazonaws.com", - # use HTTPS scheme for host error - "http://s3-accesspoint.amazonaws.com", - # region missing in Amazon S3 China endpoint error - "https://s3.amazonaws.com.cn", - ] - for endpoint in cases: - self.assertRaises(ValueError, BaseURL, endpoint, None) - - def test_aws_new_baseurl(self): - Case = namedtuple("Case", ["args", "result"]) - cases = [ - Case( - ("https://s3.amazonaws.com", None), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://s3.amazonaws.com", "ap-south-1a"), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://s3.us-gov-east-1.amazonaws.com", None), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://s3.dualstack.amazonaws.com", None), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://s3.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - Case( - ("https://s3-accelerate.amazonaws.com", None), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://s3-accelerate.me-south-1.amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", None), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", - None), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-accelerate.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - 
Case( - ("https://s3-fips.amazonaws.com", None), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://s3-fips.amazonaws.com", "ap-south-1a"), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://s3-fips.us-gov-east-1.amazonaws.com", None), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://s3-fips.dualstack.amazonaws.com", None), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://s3-fips.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-fips.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - Case( - ("https://s3-external-1.amazonaws.com", None), - { - "s3_prefix": "s3-external-1.", - "domain_suffix": "amazonaws.com", - "region": "us-east-1", - "dualstack": False, - }, - ), - Case( - ("https://s3-us-gov-west-1.amazonaws.com", None), - { - "s3_prefix": "s3-us-gov-west-1.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-west-1", - "dualstack": False, - }, - ), - Case( - ("https://s3-fips-us-gov-west-1.amazonaws.com", None), - { - "s3_prefix": "s3-fips-us-gov-west-1.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-west-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - { - "s3_prefix": "bucket.vpce-1a2b3c4d-5e6f.s3.", - "domain_suffix": "vpce.amazonaws.com", - "region": "us-east-1", - "dualstack": False, - }, - ), - Case( - ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." 
- "vpce.amazonaws.com", None), - { - "s3_prefix": "accesspoint.vpce-1a2b3c4d-5e6f.s3.", - "domain_suffix": "vpce.amazonaws.com", - "region": "us-east-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://012345678901.s3-control.amazonaws.com", None), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://012345678901.s3-control.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - None), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - None), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - None), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "012345678901.s3-control.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", None), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - None), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - None), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." 
- "amazonaws.com", - None), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "012345678901.s3-control-fips.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - Case( - ("https://s3-accesspoint.amazonaws.com", None), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://s3-accesspoint.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - None), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - None), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - None), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-accesspoint.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - Case( - ("https://s3-accesspoint-fips.amazonaws.com", None), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": False, - }, - ), - Case( - ("https://s3-accesspoint-fips.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": False, - }, - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - None), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": False, - }, - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": False, - }, - ), - ### - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - None), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": None, - "dualstack": True, - }, - ), - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - "ap-south-1a"), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": "ap-south-1a", - "dualstack": True, - }, - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." 
- "amazonaws.com", - None), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": "us-gov-east-1", - "dualstack": True, - }, - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - { - "s3_prefix": "s3-accesspoint-fips.", - "domain_suffix": "amazonaws.com", - "region": "cn-northwest-1", - "dualstack": True, - }, - ), - ### - Case( - ("https://my-load-balancer-1234567890.us-west-2.elb." - "amazonaws.com", "us-west-2"), - None, - ), - ] - - for case in cases: - url = BaseURL(*case.args) - self.assertEqual(url._aws_info, case.result) - - def test_aws_list_buckets_build(self): - Case = namedtuple("Case", ["args", "result"]) - cases = [ - Case( - ("https://s3.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3.amazonaws.com", "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3.us-gov-east-1.amazonaws.com", None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3.dualstack.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accelerate.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", - "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", - None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-fips.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.amazonaws.com", "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-fips.us-gov-east-1.amazonaws.com", None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-fips.dualstack.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - 
"https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-external-1.amazonaws.com", None), - "https://s3-external-1.amazonaws.com/", - ), - Case( - ("https://s3-us-gov-west-1.amazonaws.com", None), - "https://s3-us-gov-west-1.amazonaws.com/", - ), - Case( - ("https://s3-fips-us-gov-west-1.amazonaws.com", None), - "https://s3-fips-us-gov-west-1.amazonaws.com/", - ), - ### - Case( - ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - "https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com/", - ), - Case( - ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - "https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control.amazonaws.com", None), - "https://012345678901.s3-control.us-east-1.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.amazonaws.com", - "ap-south-1a"), - "https://012345678901.s3-control.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - None), - "https://012345678901.s3-control.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - "https://012345678901.s3-control.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - None), - "https://012345678901.s3-control.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - "ap-south-1a"), - "https://012345678901.s3-control.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://012345678901.s3-control.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://012345678901.s3-control.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", None), - "https://012345678901.s3-control-fips.us-east-1.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", - "ap-south-1a"), - "https://012345678901.s3-control-fips.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - None), - "https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://012345678901.s3-control-fips.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - None), - "https://012345678901.s3-control-fips.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - "ap-south-1a"), - "https://012345678901.s3-control-fips.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://012345678901.s3-control-fips.cn-northwest-1." 
- "amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.amazonaws.com", - "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint-fips.amazonaws.com", None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.amazonaws.com", - "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - None), - "https://s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - "ap-south-1a"), - "https://s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://my-load-balancer-1234567890.us-west-2.elb." - "amazonaws.com", "us-west-2"), - "https://my-load-balancer-1234567890.us-west-2.elb." 
- "amazonaws.com/", - ), - ] - - for case in cases: - base_url = BaseURL(*case.args) - url = urlunsplit(base_url.build( - "GET", base_url.region or "us-east-1")) - self.assertEqual(str(url), case.result) - - def test_aws_bucket_build(self): - Case = namedtuple("Case", ["args", "result"]) - cases = [ - Case( - ("https://s3.amazonaws.com", None), - "https://my-bucket.s3.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), - "https://my-bucket.s3.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3.dualstack.amazonaws.com", None), - "https://my-bucket.s3.dualstack.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3.dualstack.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3.dualstack.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3.dualstack.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accelerate.amazonaws.com", None), - "https://my-bucket.s3-accelerate.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3-accelerate.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3-accelerate.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accelerate.amazonaws.com/", - ), - ### - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", None), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", - None), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", - ), - Case( - ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", - ), - ### - Case( - ("https://s3-fips.amazonaws.com", None), - "https://my-bucket.s3-fips.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3-fips.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-fips.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3-fips.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), - "https://my-bucket.s3-fips.cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-fips.dualstack.amazonaws.com", None), - "https://my-bucket.s3-fips.dualstack.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3-fips.dualstack.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3-fips.dualstack.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-fips.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-fips.dualstack.cn-northwest-1." 
- "amazonaws.com/", - ), - ### - Case( - ("https://s3-external-1.amazonaws.com", None), - "https://my-bucket.s3-external-1.amazonaws.com/", - ), - Case( - ("https://s3-us-gov-west-1.amazonaws.com", None), - "https://my-bucket.s3-us-gov-west-1.amazonaws.com/", - ), - Case( - ("https://s3-fips-us-gov-west-1.amazonaws.com", None), - "https://my-bucket.s3-fips-us-gov-west-1.amazonaws.com/", - ), - ### - Case( - ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - "https://my-bucket.bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com/", - ), - Case( - ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - "https://my-bucket.accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control.amazonaws.com", None), - "https://my-bucket.012345678901.s3-control.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control.dualstack.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control.dualstack." - "ap-south-1a.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control.dualstack." - "us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control.dualstack." - "cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", None), - "https://my-bucket.012345678901.s3-control-fips.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control-fips.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control-fips.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "us-east-1.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "ap-south-1a.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control-fips.dualstack." 
- "us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint.amazonaws.com", None), - "https://my-bucket.s3-accesspoint.us-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint.ap-south-1a.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - None), - "https://my-bucket.s3-accesspoint.us-gov-east-1.amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - None), - "https://my-bucket.s3-accesspoint.dualstack.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint.dualstack.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint.dualstack.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint-fips.amazonaws.com", None), - "https://my-bucket.s3-accesspoint-fips.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint-fips.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint-fips.cn-northwest-1." - "amazonaws.com/", - ), - ### - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - None), - "https://my-bucket.s3-accesspoint-fips.dualstack.us-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint-fips.dualstack.ap-south-1a." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com/", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint-fips.dualstack." - "cn-northwest-1.amazonaws.com/", - ), - ### - Case( - ("https://my-load-balancer-1234567890.us-west-2.elb." - "amazonaws.com", "us-west-2"), - "https://my-load-balancer-1234567890.us-west-2.elb." 
- "amazonaws.com/my-bucket", - ), - ] - - for case in cases: - base_url = BaseURL(*case.args) - url = urlunsplit(base_url.build( - "GET", base_url.region or "us-east-1", bucket_name="my-bucket")) - self.assertEqual(str(url), case.result) - - def test_aws_object_build(self): - Case = namedtuple("Case", ["args", "result"]) - cases = [ - Case( - ("https://s3.amazonaws.com", None), - "https://my-bucket.s3.us-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3.ap-south-1a.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3.us-gov-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), - "https://my-bucket.s3.cn-northwest-1.amazonaws.com/" - "path/to/my/object", - ), - ### - Case( - ("https://s3.dualstack.amazonaws.com", None), - "https://my-bucket.s3.dualstack.us-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3.dualstack.ap-south-1a.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3.dualstack.us-gov-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3.dualstack.cn-northwest-1.amazonaws.com/" - "path/to/my/object", - ), - ### - Case( - ("https://s3-accelerate.amazonaws.com", None), - "https://my-bucket.s3-accelerate.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3-accelerate.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3-accelerate.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accelerate.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accelerate.amazonaws.com/" - "path/to/my/object", - ), - ### - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", None), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accelerate.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", - None), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" - "path/to/my/object", - ), - ### - Case( - ("https://s3-fips.amazonaws.com", None), - "https://my-bucket.s3-fips.us-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-fips.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3-fips.ap-south-1a.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-fips.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3-fips.us-gov-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), - "https://my-bucket.s3-fips.cn-northwest-1.amazonaws.com/" - "path/to/my/object", - ), - ### - Case( - ("https://s3-fips.dualstack.amazonaws.com", None), - "https://my-bucket.s3-fips.dualstack.us-east-1.amazonaws.com/" - 
"path/to/my/object", - ), - Case( - ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), - "https://my-bucket.s3-fips.dualstack.ap-south-1a." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), - "https://my-bucket.s3-fips.dualstack.us-gov-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-fips.dualstack.me-south-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-fips.dualstack.cn-northwest-1." - "amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://s3-external-1.amazonaws.com", None), - "https://my-bucket.s3-external-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-us-gov-west-1.amazonaws.com", None), - "https://my-bucket.s3-us-gov-west-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-fips-us-gov-west-1.amazonaws.com", None), - "https://my-bucket.s3-fips-us-gov-west-1.amazonaws.com/" - "path/to/my/object", - ), - ### - Case( - ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - "https://my-bucket.bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com/path/to/my/object", - ), - Case( - ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com", None), - "https://my-bucket.accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." - "vpce.amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://012345678901.s3-control.amazonaws.com", None), - "https://my-bucket.012345678901.s3-control.us-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control.ap-south-1a." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control.us-gov-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control.cn-northwest-1." - "amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control.dualstack.us-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control.dualstack." - "ap-south-1a.amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control.dualstack." - "us-gov-east-1.amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control.dualstack." - "cn-northwest-1.amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", None), - "https://my-bucket.012345678901.s3-control-fips.us-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control-fips.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control-fips.ap-south-1a." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control-fips.us-gov-east-1." 
- "amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control-fips.cn-northwest-1." - "amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "us-east-1.amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "ap-south-1a.amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "us-gov-east-1.amazonaws.com/path/to/my/object", - ), - Case( - ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.012345678901.s3-control-fips.dualstack." - "cn-northwest-1.amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://s3-accesspoint.amazonaws.com", None), - "https://my-bucket.s3-accesspoint.us-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accesspoint.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint.ap-south-1a.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - None), - "https://my-bucket.s3-accesspoint.us-gov-east-1.amazonaws.com/" - "path/to/my/object", - ), - Case( - ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint.cn-northwest-1." - "amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - None), - "https://my-bucket.s3-accesspoint.dualstack.us-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint.dualstack.ap-south-1a." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint.dualstack.cn-northwest-1." - "amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://s3-accesspoint-fips.amazonaws.com", None), - "https://my-bucket.s3-accesspoint-fips.us-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint-fips.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint-fips.ap-south-1a." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint-fips.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint-fips.cn-northwest-1." - "amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - None), - "https://my-bucket.s3-accesspoint-fips.dualstack.us-east-1." 
- "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.amazonaws.com", - "ap-south-1a"), - "https://my-bucket.s3-accesspoint-fips.dualstack.ap-south-1a." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - None), - "https://my-bucket.s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com/path/to/my/object", - ), - Case( - ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." - "amazonaws.com", - "cn-northwest-1"), - "https://my-bucket.s3-accesspoint-fips.dualstack." - "cn-northwest-1.amazonaws.com/path/to/my/object", - ), - ### - Case( - ("https://my-load-balancer-1234567890.us-west-2.elb." - "amazonaws.com", "us-west-2"), - "https://my-load-balancer-1234567890.us-west-2.elb." - "amazonaws.com/my-bucket/path/to/my/object", - ), - ] - - for case in cases: - base_url = BaseURL(*case.args) - url = urlunsplit(base_url.build( - "GET", base_url.region or "us-east-1", - bucket_name="my-bucket", object_name="path/to/my/object")) - self.assertEqual(str(url), case.result) diff --git a/tests/unit/helpers_test.py b/tests/unit/helpers_test.py new file mode 100644 index 00000000..aa25febc --- /dev/null +++ b/tests/unit/helpers_test.py @@ -0,0 +1,1897 @@ +# -*- coding: utf-8 -*- +# MinIO Python Library for Amazon S3 Compatible Cloud Storage, +# (C) 2015 MinIO, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import namedtuple +from unittest import TestCase +from urllib.parse import urlunsplit + +from minio.helpers import BaseURL + + +class BaseURLTests(TestCase): + def test_aws_new_baseurl_error(self): + cases = [ + # invalid Amazon AWS host error + "https://z3.amazonaws.com", + "https://1234567890.s3.amazonaws.com", + "https://1234567890.s3-accelerate.amazonaws.com", + "https://1234567890.abcdefgh.s3-control.amazonaws.com", + "https://s3fips.amazonaws.com", + "https://s3-fips.s3.amazonaws.com", + "https://s3-fips.s3accelerate.amazonaws.com", + "https://s3-fips.s3-accelerate.amazonaws.com", + "https://bucket.vpce.s3.us-east-1.vpce.amazonaws.com", + "https://bucket.bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", + "https://accesspoint.accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." 
+ "vpce.amazonaws.com", + "https://accesspoint.vpce-1123.vpce-xyz.s3.amazonaws.com", + # use HTTPS scheme for host error + "http://s3-accesspoint.amazonaws.com", + # region missing in Amazon S3 China endpoint error + "https://s3.amazonaws.com.cn", + ] + for endpoint in cases: + self.assertRaises(ValueError, BaseURL, endpoint, None) + + def test_aws_new_baseurl(self): + Case = namedtuple("Case", ["args", "result"]) + cases = [ + Case( + ("https://s3.amazonaws.com", None), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://s3.amazonaws.com", "ap-south-1a"), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://s3.us-gov-east-1.amazonaws.com", None), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://s3.dualstack.amazonaws.com", None), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://s3.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + Case( + ("https://s3-accelerate.amazonaws.com", None), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://s3-accelerate.me-south-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", None), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", + None), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-accelerate.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + 
Case( + ("https://s3-fips.amazonaws.com", None), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://s3-fips.amazonaws.com", "ap-south-1a"), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://s3-fips.us-gov-east-1.amazonaws.com", None), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://s3-fips.dualstack.amazonaws.com", None), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://s3-fips.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-fips.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + Case( + ("https://s3-external-1.amazonaws.com", None), + { + "s3_prefix": "s3-external-1.", + "domain_suffix": "amazonaws.com", + "region": "us-east-1", + "dualstack": False, + }, + ), + Case( + ("https://s3-us-gov-west-1.amazonaws.com", None), + { + "s3_prefix": "s3-us-gov-west-1.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-west-1", + "dualstack": False, + }, + ), + Case( + ("https://s3-fips-us-gov-west-1.amazonaws.com", None), + { + "s3_prefix": "s3-fips-us-gov-west-1.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-west-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", None), + { + "s3_prefix": "bucket.vpce-1a2b3c4d-5e6f.s3.", + "domain_suffix": "vpce.amazonaws.com", + "region": "us-east-1", + "dualstack": False, + }, + ), + Case( + ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." 
+ "vpce.amazonaws.com", None), + { + "s3_prefix": "accesspoint.vpce-1a2b3c4d-5e6f.s3.", + "domain_suffix": "vpce.amazonaws.com", + "region": "us-east-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://012345678901.s3-control.amazonaws.com", None), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://012345678901.s3-control.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + None), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + None), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + None), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "012345678901.s3-control.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", None), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + None), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + None), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." 
+ "amazonaws.com", + None), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "012345678901.s3-control-fips.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + Case( + ("https://s3-accesspoint.amazonaws.com", None), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://s3-accesspoint.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + None), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + None), + { + "s3_prefix": "abcd-123456789012.s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "abcd-123456789012.s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + None), + { + "s3_prefix": "abcd-123456789012.s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "abcd-123456789012.s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + None), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + None), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." 
+ "amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-accesspoint.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + Case( + ("https://s3-accesspoint-fips.amazonaws.com", None), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": False, + }, + ), + Case( + ("https://s3-accesspoint-fips.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": False, + }, + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + None), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": False, + }, + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": False, + }, + ), + ### + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + None), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": None, + "dualstack": True, + }, + ), + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + "ap-south-1a"), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": "ap-south-1a", + "dualstack": True, + }, + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": "us-gov-east-1", + "dualstack": True, + }, + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + { + "s3_prefix": "s3-accesspoint-fips.", + "domain_suffix": "amazonaws.com", + "region": "cn-northwest-1", + "dualstack": True, + }, + ), + ### + Case( + ("https://my-load-balancer-1234567890.us-west-2.elb." 
+ "amazonaws.com", "us-west-2"), + None, + ), + ] + + for case in cases: + url = BaseURL(*case.args) + self.assertEqual(url._aws_info, case.result) + + def test_aws_list_buckets_build(self): + Case = namedtuple("Case", ["args", "result"]) + cases = [ + Case( + ("https://s3.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3.amazonaws.com", "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3.us-gov-east-1.amazonaws.com", None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3.dualstack.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accelerate.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", + "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", + None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-fips.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.amazonaws.com", "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-fips.us-gov-east-1.amazonaws.com", None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-fips.dualstack.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-external-1.amazonaws.com", None), + "https://s3-external-1.amazonaws.com/", + ), + Case( + ("https://s3-us-gov-west-1.amazonaws.com", None), + "https://s3-us-gov-west-1.amazonaws.com/", + ), + Case( + ("https://s3-fips-us-gov-west-1.amazonaws.com", None), + "https://s3-fips-us-gov-west-1.amazonaws.com/", + ), + ### + Case( + ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." 
+ "vpce.amazonaws.com", None), + "https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com/", + ), + Case( + ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", None), + "https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control.amazonaws.com", None), + "https://012345678901.s3-control.us-east-1.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.amazonaws.com", + "ap-south-1a"), + "https://012345678901.s3-control.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + None), + "https://012345678901.s3-control.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://012345678901.s3-control.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + None), + "https://012345678901.s3-control.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + "ap-south-1a"), + "https://012345678901.s3-control.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://012345678901.s3-control.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://012345678901.s3-control.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", None), + "https://012345678901.s3-control-fips.us-east-1.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", + "ap-south-1a"), + "https://012345678901.s3-control-fips.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + None), + "https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://012345678901.s3-control-fips.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + None), + "https://012345678901.s3-control-fips.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + "ap-south-1a"), + "https://012345678901.s3-control-fips.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://012345678901.s3-control-fips.cn-northwest-1." 
+ "amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.amazonaws.com", + "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + None), + "https://abcd-123456789012.s3-accesspoint.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + "ap-south-1a"), + "https://abcd-123456789012.s3-accesspoint.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + None), + "https://abcd-123456789012.s3-accesspoint.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://abcd-123456789012.s3-accesspoint.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint-fips.amazonaws.com", None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.amazonaws.com", + "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + None), + "https://s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + "ap-south-1a"), + "https://s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://my-load-balancer-1234567890.us-west-2.elb." + "amazonaws.com", "us-west-2"), + "https://my-load-balancer-1234567890.us-west-2.elb." 
+ "amazonaws.com/", + ), + ] + + for case in cases: + base_url = BaseURL(*case.args) + url = urlunsplit( + base_url.build( + method="GET", + region=base_url.region or "us-east-1", + ), + ) + self.assertEqual(str(url), case.result) + + def test_aws_bucket_build(self): + Case = namedtuple("Case", ["args", "result"]) + cases = [ + Case( + ("https://s3.amazonaws.com", None), + "https://my-bucket.s3.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), + "https://my-bucket.s3.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3.dualstack.amazonaws.com", None), + "https://my-bucket.s3.dualstack.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3.dualstack.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3.dualstack.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3.dualstack.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accelerate.amazonaws.com", None), + "https://my-bucket.s3-accelerate.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3-accelerate.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3-accelerate.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accelerate.amazonaws.com/", + ), + ### + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", None), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", + ), + Case( + ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/", + ), + ### + Case( + ("https://s3-fips.amazonaws.com", None), + "https://my-bucket.s3-fips.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3-fips.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-fips.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3-fips.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), + "https://my-bucket.s3-fips.cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-fips.dualstack.amazonaws.com", None), + "https://my-bucket.s3-fips.dualstack.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3-fips.dualstack.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3-fips.dualstack.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-fips.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-fips.dualstack.cn-northwest-1." 
+ "amazonaws.com/", + ), + ### + Case( + ("https://s3-external-1.amazonaws.com", None), + "https://my-bucket.s3-external-1.amazonaws.com/", + ), + Case( + ("https://s3-us-gov-west-1.amazonaws.com", None), + "https://my-bucket.s3-us-gov-west-1.amazonaws.com/", + ), + Case( + ("https://s3-fips-us-gov-west-1.amazonaws.com", None), + "https://my-bucket.s3-fips-us-gov-west-1.amazonaws.com/", + ), + ### + Case( + ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", None), + "https://my-bucket.bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com/", + ), + Case( + ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", None), + "https://my-bucket.accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control.amazonaws.com", None), + "https://my-bucket.012345678901.s3-control.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control.dualstack.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control.dualstack." + "ap-south-1a.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control.dualstack." + "us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control.dualstack." + "cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", None), + "https://my-bucket.012345678901.s3-control-fips.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control-fips.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control-fips.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "us-east-1.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "ap-south-1a.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control-fips.dualstack." 
+ "us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint.amazonaws.com", None), + "https://my-bucket.s3-accesspoint.us-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint.ap-south-1a.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.s3-accesspoint.us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + None), + "https://my-bucket.abcd-123456789012.s3-accesspoint.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.abcd-123456789012.s3-accesspoint." + "ap-south-1a.amazonaws.com/", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com/", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.abcd-123456789012.s3-accesspoint." + "cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + None), + "https://my-bucket.s3-accesspoint.dualstack.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint.dualstack.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint.dualstack.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint-fips.amazonaws.com", None), + "https://my-bucket.s3-accesspoint-fips.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint-fips.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint-fips.cn-northwest-1." + "amazonaws.com/", + ), + ### + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + None), + "https://my-bucket.s3-accesspoint-fips.dualstack.us-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint-fips.dualstack.ap-south-1a." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com/", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." 
+ "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint-fips.dualstack." + "cn-northwest-1.amazonaws.com/", + ), + ### + Case( + ("https://my-load-balancer-1234567890.us-west-2.elb." + "amazonaws.com", "us-west-2"), + "https://my-load-balancer-1234567890.us-west-2.elb." + "amazonaws.com/my-bucket", + ), + ] + + for case in cases: + base_url = BaseURL(*case.args) + url = urlunsplit( + base_url.build( + method="GET", + region=base_url.region or "us-east-1", + bucket_name="my-bucket", + ), + ) + self.assertEqual(str(url), case.result) + + def test_aws_object_build(self): + Case = namedtuple("Case", ["args", "result"]) + cases = [ + Case( + ("https://s3.amazonaws.com", None), + "https://my-bucket.s3.us-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3.ap-south-1a.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3.us-gov-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"), + "https://my-bucket.s3.cn-northwest-1.amazonaws.com/" + "path/to/my/object", + ), + ### + Case( + ("https://s3.dualstack.amazonaws.com", None), + "https://my-bucket.s3.dualstack.us-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3.dualstack.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3.dualstack.ap-south-1a.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3.dualstack.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3.dualstack.us-gov-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3.dualstack.cn-northwest-1.amazonaws.com/" + "path/to/my/object", + ), + ### + Case( + ("https://s3-accelerate.amazonaws.com", None), + "https://my-bucket.s3-accelerate.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accelerate.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3-accelerate.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accelerate.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3-accelerate.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accelerate.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accelerate.amazonaws.com/" + "path/to/my/object", + ), + ### + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", None), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accelerate.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accelerate.dualstack.us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accelerate.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accelerate.dualstack.amazonaws.com/" + "path/to/my/object", + ), + ### + Case( + ("https://s3-fips.amazonaws.com", None), + "https://my-bucket.s3-fips.us-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-fips.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3-fips.ap-south-1a.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-fips.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3-fips.us-gov-east-1.amazonaws.com/" + 
"path/to/my/object", + ), + Case( + ("https://s3-fips.me-south-1.amazonaws.com", "cn-northwest-1"), + "https://my-bucket.s3-fips.cn-northwest-1.amazonaws.com/" + "path/to/my/object", + ), + ### + Case( + ("https://s3-fips.dualstack.amazonaws.com", None), + "https://my-bucket.s3-fips.dualstack.us-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-fips.dualstack.amazonaws.com", "ap-south-1a"), + "https://my-bucket.s3-fips.dualstack.ap-south-1a." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-fips.dualstack.us-gov-east-1.amazonaws.com", None), + "https://my-bucket.s3-fips.dualstack.us-gov-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-fips.dualstack.me-south-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-fips.dualstack.cn-northwest-1." + "amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://s3-external-1.amazonaws.com", None), + "https://my-bucket.s3-external-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-us-gov-west-1.amazonaws.com", None), + "https://my-bucket.s3-us-gov-west-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-fips-us-gov-west-1.amazonaws.com", None), + "https://my-bucket.s3-fips-us-gov-west-1.amazonaws.com/" + "path/to/my/object", + ), + ### + Case( + ("https://bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", None), + "https://my-bucket.bucket.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com/path/to/my/object", + ), + Case( + ("https://accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com", None), + "https://my-bucket.accesspoint.vpce-1a2b3c4d-5e6f.s3.us-east-1." + "vpce.amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://012345678901.s3-control.amazonaws.com", None), + "https://my-bucket.012345678901.s3-control.us-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control.ap-south-1a." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control.us-gov-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control.cn-northwest-1." + "amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control.dualstack.us-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control.dualstack." + "ap-south-1a.amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control.dualstack." + "us-gov-east-1.amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control.dualstack." + "cn-northwest-1.amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", None), + "https://my-bucket.012345678901.s3-control-fips.us-east-1." 
+ "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control-fips.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control-fips.ap-south-1a." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control-fips.cn-northwest-1." + "amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "us-east-1.amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "ap-south-1a.amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "us-gov-east-1.amazonaws.com/path/to/my/object", + ), + Case( + ("https://012345678901.s3-control-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.012345678901.s3-control-fips.dualstack." + "cn-northwest-1.amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://s3-accesspoint.amazonaws.com", None), + "https://my-bucket.s3-accesspoint.us-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accesspoint.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint.ap-south-1a.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.s3-accesspoint.us-gov-east-1.amazonaws.com/" + "path/to/my/object", + ), + Case( + ("https://s3-accesspoint.us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint.cn-northwest-1." + "amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + None), + "https://my-bucket.s3-accesspoint.dualstack.us-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint.dualstack.ap-south-1a." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint.dualstack.cn-northwest-1." + "amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + None), + "https://my-bucket.abcd-123456789012.s3-accesspoint.us-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.abcd-123456789012.s3-accesspoint." + "ap-south-1a.amazonaws.com/path/to/my/object", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + None), + "https://my-bucket.abcd-123456789012.s3-accesspoint." 
+ "us-gov-east-1.amazonaws.com/path/to/my/object", + ), + Case( + ("https://abcd-123456789012.s3-accesspoint." + "us-gov-east-1.amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.abcd-123456789012.s3-accesspoint." + "cn-northwest-1.amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://s3-accesspoint-fips.amazonaws.com", None), + "https://my-bucket.s3-accesspoint-fips.us-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint-fips.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint-fips.ap-south-1a." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint-fips.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint-fips.cn-northwest-1." + "amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + None), + "https://my-bucket.s3-accesspoint-fips.dualstack.us-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.amazonaws.com", + "ap-south-1a"), + "https://my-bucket.s3-accesspoint-fips.dualstack.ap-south-1a." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + None), + "https://my-bucket.s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com/path/to/my/object", + ), + Case( + ("https://s3-accesspoint-fips.dualstack.us-gov-east-1." + "amazonaws.com", + "cn-northwest-1"), + "https://my-bucket.s3-accesspoint-fips.dualstack." + "cn-northwest-1.amazonaws.com/path/to/my/object", + ), + ### + Case( + ("https://my-load-balancer-1234567890.us-west-2.elb." + "amazonaws.com", "us-west-2"), + "https://my-load-balancer-1234567890.us-west-2.elb." + "amazonaws.com/my-bucket/path/to/my/object", + ), + ] + + for case in cases: + base_url = BaseURL(*case.args) + url = urlunsplit( + base_url.build( + method="GET", + region=base_url.region or "us-east-1", + bucket_name="my-bucket", + object_name="path/to/my/object", + ), + ) + self.assertEqual(str(url), case.result) diff --git a/tests/unit/list_buckets_test.py b/tests/unit/list_buckets_test.py index d2d7518a..61d60958 100644 --- a/tests/unit/list_buckets_test.py +++ b/tests/unit/list_buckets_test.py @@ -14,9 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import unittest.mock as mock
 from datetime import datetime, timezone
-from unittest import TestCase
+from unittest import TestCase, mock

 from minio import Minio
 from minio.api import _DEFAULT_USER_AGENT
@@ -34,16 +33,16 @@ def test_empty_list_buckets_works(self, mock_connection):
         mock_server = MockConnection()
         mock_connection.return_value = mock_server
         mock_server.mock_add_request(
-            MockResponse('GET', 'https://localhost:9000/',
-                         {'User-Agent': _DEFAULT_USER_AGENT},
-                         200, content=mock_data.encode())
+            MockResponse(
+                'GET',
+                'https://localhost:9000/?max-buckets=10000',
+                {'User-Agent': _DEFAULT_USER_AGENT},
+                200,
+                content=mock_data.encode(),
+            ),
         )
-        client = Minio('localhost:9000')
-        buckets = client.list_buckets()
-        count = 0
-        for bucket in buckets:
-            count += 1
-        self.assertEqual(0, count)
+        client = Minio(endpoint='localhost:9000')
+        self.assertEqual(0, len(list(client.list_buckets())))

     @mock.patch('urllib3.PoolManager')
     def test_list_buckets_works(self, mock_connection):
@@ -59,25 +58,24 @@ def test_list_buckets_works(self, mock_connection):
         mock_server = MockConnection()
         mock_connection.return_value = mock_server
         mock_server.mock_add_request(
-            MockResponse('GET', 'https://localhost:9000/',
-                         {'User-Agent': _DEFAULT_USER_AGENT},
-                         200, content=mock_data.encode())
+            MockResponse(
+                'GET',
+                'https://localhost:9000/?max-buckets=10000',
+                {'User-Agent': _DEFAULT_USER_AGENT},
+                200,
+                content=mock_data.encode(),
+            ),
         )
-        client = Minio('localhost:9000')
-        buckets = client.list_buckets()
-        buckets_list = []
-        count = 0
-        for bucket in buckets:
-            count += 1
-            buckets_list.append(bucket)
-        self.assertEqual(2, count)
-        self.assertEqual('hello', buckets_list[0].name)
+        client = Minio(endpoint='localhost:9000')
+        buckets = list(client.list_buckets())
+        self.assertEqual(2, len(buckets))
+        self.assertEqual('hello', buckets[0].name)
         self.assertEqual(
             datetime(2015, 6, 22, 23, 7, 43, 240000, timezone.utc),
-            buckets_list[0].creation_date,
+            buckets[0].creation_date,
         )
-        self.assertEqual('world', buckets_list[1].name)
+        self.assertEqual('world', buckets[1].name)
         self.assertEqual(
             datetime(2015, 6, 22, 23, 7, 56, 766000, timezone.utc),
-            buckets_list[1].creation_date,
+            buckets[1].creation_date,
         )
diff --git a/tests/unit/list_objects_test.py b/tests/unit/list_objects_test.py
index 7c1d0f03..b47de276 100644
--- a/tests/unit/list_objects_test.py
+++ b/tests/unit/list_objects_test.py
@@ -48,8 +48,8 @@ def test_empty_list_objects_works(self, mock_connection):
                 content=mock_data.encode(),
             ),
         )
-        client = Minio('localhost:9000')
-        object_iter = client.list_objects('bucket', recursive=True)
+        client = Minio(endpoint='localhost:9000')
+        object_iter = client.list_objects(bucket_name='bucket', recursive=True)
         objects = []
         for obj in object_iter:
             objects.append(obj)
@@ -92,8 +92,8 @@ def test_list_objects_works(self, mock_connection):
                 content=mock_data.encode(),
             ),
         )
-        client = Minio('localhost:9000')
-        objects_iter = client.list_objects('bucket')
+        client = Minio(endpoint='localhost:9000')
+        objects_iter = client.list_objects(bucket_name='bucket')
         objects = []
         for obj in objects_iter:
             objects.append(obj)
diff --git a/tests/unit/list_objects_v1_test.py b/tests/unit/list_objects_v1_test.py
index 019342fb..04ea3071 100644
--- a/tests/unit/list_objects_v1_test.py
+++ b/tests/unit/list_objects_v1_test.py
@@ -48,9 +48,9 @@ def test_empty_list_objects_works(self, mock_connection):
                 content=mock_data.encode(),
             ),
         )
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         bucket_iter = client.list_objects(
-            'bucket', recursive=True, use_api_v1=True,
+            bucket_name='bucket', recursive=True, use_api_v1=True,
         )
         buckets = []
         for bucket in bucket_iter:
@@ -103,8 +103,9 @@ def test_list_objects_works(self, mock_connection):
                 content=mock_data.encode(),
             ),
         )
-        client = Minio('localhost:9000')
-        bucket_iter = client.list_objects('bucket', use_api_v1=True)
+        client = Minio(endpoint='localhost:9000')
+        bucket_iter = client.list_objects(
+            bucket_name='bucket', use_api_v1=True)
         buckets = []
         for bucket in bucket_iter:
             # cause an xml exception and fail if we try retrieving again
@@ -202,9 +203,9 @@ def test_list_objects_works_well(self, mock_connection):
                 content=mock_data1.encode(),
             ),
         )
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         bucket_iter = client.list_objects(
-            'bucket', recursive=True, use_api_v1=True,
+            bucket_name='bucket', recursive=True, use_api_v1=True,
         )
         buckets = []
         for bucket in bucket_iter:
diff --git a/tests/unit/make_bucket_test.py b/tests/unit/make_bucket_test.py
index 5e8fb186..af2bc3f0 100644
--- a/tests/unit/make_bucket_test.py
+++ b/tests/unit/make_bucket_test.py
@@ -27,12 +27,14 @@ class MakeBucket(TestCase):

     def test_bucket_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(TypeError, client.make_bucket, 1234)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.make_bucket(bucket_name=1234)

     def test_bucket_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.make_bucket, ' \t \n ')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.make_bucket(bucket_name=' \t \n ')

     @mock.patch('urllib3.PoolManager')
     def test_make_bucket_works(self, mock_connection):
@@ -44,7 +46,8 @@ def test_make_bucket_works(self, mock_connection):
                          {'User-Agent': _DEFAULT_USER_AGENT}, 200)
         )
-        Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
+        client.make_bucket(bucket_name='hello')

     @mock.patch('urllib3.PoolManager')
     def test_make_bucket_throws_fail(self, mock_connection):
@@ -61,5 +64,6 @@ def test_make_bucket_throws_fail(self, mock_connection):
                          response_headers={"Content-Type": "application/xml"},
                          content=error_xml.encode())
         )
-        client = Minio('localhost:9000')
-        self.assertRaises(S3Error, client.make_bucket, 'hello')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(S3Error):
+            client.make_bucket(bucket_name='hello')
diff --git a/tests/unit/minio_test.py b/tests/unit/minio_test.py
index 1ad74ac5..21eb162a 100644
--- a/tests/unit/minio_test.py
+++ b/tests/unit/minio_test.py
@@ -145,26 +145,27 @@ def test_minio_requires_string(self):
         self.assertRaises(TypeError, Minio, 10)

     def test_minio_requires_hostname(self):
-        self.assertRaises(ValueError, Minio, 'http://')
+        with self.assertRaises(ValueError):
+            Minio(endpoint='http://')


 class UserAgentTests(TestCase):
     def test_default_user_agent(self):
-        client = Minio('localhost')
+        client = Minio(endpoint='localhost')
         self.assertEqual(client._user_agent, _DEFAULT_USER_AGENT)

     def test_set_app_info(self):
-        client = Minio('localhost')
+        client = Minio(endpoint='localhost')
         expected_user_agent = _DEFAULT_USER_AGENT + ' hello/' + minio_version
         client.set_app_info('hello', minio_version)
         self.assertEqual(client._user_agent, expected_user_agent)

     def test_set_app_info_requires_non_empty_name(self):
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         self.assertRaises(ValueError, client.set_app_info, '', minio_version)

     def test_set_app_info_requires_non_empty_version(self):
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         self.assertRaises(ValueError, client.set_app_info, 'hello', '')

@@ -175,7 +176,7 @@ def test_region_none(self):

     def test_region_us_west(self):
         region = BaseURL('https://s3-us-west-1.amazonaws.com', None).region
-        self.assertEqual(region, "")
+        self.assertIsNone(region)

     def test_region_with_dot(self):
         region = BaseURL('https://s3.us-west-1.amazonaws.com', None).region
@@ -189,7 +190,7 @@ def test_region_with_dualstack(self):

     def test_region_us_east(self):
         region = BaseURL('http://s3.amazonaws.com', None).region
-        self.assertEqual(region, "")
+        self.assertIsNone(region)

     def test_invalid_value(self):
         self.assertRaises(ValueError, BaseURL, None, None)
diff --git a/tests/unit/presigned_get_object_test.py b/tests/unit/presigned_get_object_test.py
index 1061aa8a..f2d604ab 100644
--- a/tests/unit/presigned_get_object_test.py
+++ b/tests/unit/presigned_get_object_test.py
@@ -19,37 +19,48 @@
 from unittest import TestCase

 from minio import Minio
+from minio.helpers import HTTPQueryDict


 class PresignedGetObjectTest(TestCase):
     def test_object_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            TypeError, client.presigned_get_object, 'hello', 1234)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.presigned_get_object(bucket_name='hello', object_name=1234)

     def test_object_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            ValueError, client.presigned_get_object, 'hello', ' \t \n ')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.presigned_get_object(
+                bucket_name='hello',
+                object_name=' \t \n ',
+            )

     def test_expiry_limit(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            ValueError,
-            client.presigned_get_object, 'hello', 'key',
-            expires=timedelta(days=8)
-        )
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.presigned_get_object(
+                bucket_name='hello',
+                object_name='key',
+                expires=timedelta(days=8),
+            )

     def test_can_include_response_headers(self):
-        client = Minio('localhost:9000', 'my_access_key', 'my_secret_key',
-                       secure=True)
+        client = Minio(
+            endpoint='localhost:9000',
+            access_key='my_access_key',
+            secret_key='my_secret_key',
+            secure=True,
+        )
         client._get_region = mock.Mock(return_value='us-east-1')
         r = client.presigned_get_object(
-            'mybucket', 'myfile.pdf',
-            response_headers={
+            bucket_name='mybucket',
+            object_name='myfile.pdf',
+            extra_query_params=HTTPQueryDict({
                 'Response-Content-Type': 'application/pdf',
                 'Response-Content-Disposition': 'inline; filename="test.pdf"'
-            })
+            }),
+        )
         self.assertIn('inline', r)
         self.assertIn('test.pdf', r)
         self.assertIn('application%2Fpdf', r)
diff --git a/tests/unit/presigned_put_object_test.py b/tests/unit/presigned_put_object_test.py
index a535ad07..8fcfca03 100644
--- a/tests/unit/presigned_put_object_test.py
+++ b/tests/unit/presigned_put_object_test.py
@@ -22,19 +22,23 @@ class PresignedPutObjectTest(TestCase):
     def test_object_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            TypeError, client.presigned_put_object, 'hello', 1234)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.presigned_put_object(bucket_name='hello', object_name=1234)

     def test_object_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            ValueError, client.presigned_put_object, 'hello', ' \t \n ')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.presigned_put_object(
+                bucket_name='hello',
+                object_name=' \t \n ',
+            )

     def test_expiry_limit(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            ValueError,
-            client.presigned_put_object, 'hello', 'key',
-            expires=timedelta(days=8)
-        )
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.presigned_put_object(
+                bucket_name='hello',
+                object_name='key',
+                expires=timedelta(days=8),
+            )
diff --git a/tests/unit/put_object_test.py b/tests/unit/put_object_test.py
index c59fb799..8e9fb2ae 100644
--- a/tests/unit/put_object_test.py
+++ b/tests/unit/put_object_test.py
@@ -21,29 +21,41 @@ class PutObjectTest(TestCase):
     def test_object_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            TypeError,
-            client.put_object, 'hello', 1234, 1, iter([1, 2, 3])
-        )
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.put_object(
+                bucket_name='hello',
+                object_name=1234,
+                data=iter([1, 2, 3]),
+                length=1,
+            )

     def test_object_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            ValueError,
-            client.put_object, 'hello', ' \t \n ', 1, iter([1, 2, 3])
-        )
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.put_object(
+                bucket_name='hello',
+                object_name=' \t \n ',
+                data=iter([1, 2, 3]),
+                length=1,
+            )

     def test_length_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            TypeError,
-            client.put_object, 'hello', 1234, '1', iter([1, 2, 3])
-        )
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.put_object(
+                bucket_name='hello',
+                object_name=1234,
+                data=iter([1, 2, 3]),
+                length='1',
+            )

     def test_length_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(
-            ValueError,
-            client.put_object, 'hello', ' \t \n ', -1, iter([1, 2, 3])
-        )
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.put_object(
+                bucket_name='hello',
+                object_name=' \t \n ',
+                data=iter([1, 2, 3]),
+                length=-1,
+            )
diff --git a/tests/unit/remove_bucket_test.py b/tests/unit/remove_bucket_test.py
index bbde0cf4..6025e4ad 100644
--- a/tests/unit/remove_bucket_test.py
+++ b/tests/unit/remove_bucket_test.py
@@ -25,16 +25,19 @@ class RemoveBucket(TestCase):
     def test_bucket_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(TypeError, client.remove_bucket, 1234)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.remove_bucket(bucket_name=1234)

     def test_bucket_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.remove_bucket, ' \t \n ')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.remove_bucket(bucket_name=' \t \n ')

     def test_remove_bucket_invalid_name(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.remove_bucket, 'AB*CD')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.remove_bucket(bucket_name='AB*CD')

     @mock.patch('urllib3.PoolManager')
     def test_remove_bucket_works(self, mock_connection):
@@ -45,5 +48,5 @@ def test_remove_bucket_works(self, mock_connection):
                          'https://localhost:9000/hello',
                          {'User-Agent': _DEFAULT_USER_AGENT}, 204)
         )
-        client = Minio('localhost:9000')
-        client.remove_bucket('hello')
+        client = Minio(endpoint='localhost:9000')
+        client.remove_bucket(bucket_name='hello')
diff --git a/tests/unit/remove_object_test.py b/tests/unit/remove_object_test.py
index 174a97a9..746b4ea1 100644
--- a/tests/unit/remove_object_test.py
+++ b/tests/unit/remove_object_test.py
@@ -25,17 +25,25 @@ class StatObject(TestCase):
     def test_object_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(TypeError, client.remove_object, 'hello', 1234)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.remove_object(bucket_name='hello', object_name=1234)

     def test_object_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.remove_object,
-                          'hello', ' \t \n ')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.remove_object(
+                bucket_name='hello',
+                object_name=' \t \n ',
+            )

     def test_remove_bucket_invalid_name(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.remove_object, 'AB*CD', 'world')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.remove_object(
+                bucket_name='AB*CD',
+                object_name='world',
+            )

     @mock.patch('urllib3.PoolManager')
     def test_remove_object_works(self, mock_connection):
@@ -46,5 +54,8 @@ def test_remove_object_works(self, mock_connection):
                          'https://localhost:9000/hello/world',
                          {'User-Agent': _DEFAULT_USER_AGENT}, 204)
         )
-        client = Minio('localhost:9000')
-        client.remove_object('hello', 'world')
+        client = Minio(endpoint='localhost:9000')
+        client.remove_object(
+            bucket_name='hello',
+            object_name='world',
+        )
diff --git a/tests/unit/remove_objects_test.py b/tests/unit/remove_objects_test.py
index 970cf031..8b32d287 100644
--- a/tests/unit/remove_objects_test.py
+++ b/tests/unit/remove_objects_test.py
@@ -34,13 +34,13 @@ def test_object_is_list(self, mock_connection):
             MockResponse('POST',
                          'https://localhost:9000/hello?delete=',
                          {'User-Agent': _DEFAULT_USER_AGENT,
-                          'Content-Md5': u'Te1kmIjQRNNz70DJjsrD8A=='}, 200,
+                          'Content-Md5': 'Te1kmIjQRNNz70DJjsrD8A=='}, 200,
                          content=b'')
         )
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         for err in client.remove_objects(
-                "hello",
-                [DeleteObject("Ab"), DeleteObject("c")],
+                bucket_name="hello",
+                delete_object_list=[DeleteObject("Ab"), DeleteObject("c")],
         ):
             print(err)

@@ -52,13 +52,13 @@ def test_object_is_tuple(self, mock_connection):
             MockResponse('POST',
                          'https://localhost:9000/hello?delete=',
                          {'User-Agent': _DEFAULT_USER_AGENT,
-                          'Content-Md5': u'Te1kmIjQRNNz70DJjsrD8A=='}, 200,
+                          'Content-Md5': 'Te1kmIjQRNNz70DJjsrD8A=='}, 200,
                          content=b'')
         )
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         for err in client.remove_objects(
-                "hello",
-                (DeleteObject("Ab"), DeleteObject("c")),
+                bucket_name="hello",
+                delete_object_list=(DeleteObject("Ab"), DeleteObject("c")),
         ):
             print(err)

@@ -70,10 +70,12 @@ def test_object_is_iterator(self, mock_connection):
             MockResponse('POST',
                          'https://localhost:9000/hello?delete=',
                          {'User-Agent': _DEFAULT_USER_AGENT,
-                          'Content-Md5': u'Te1kmIjQRNNz70DJjsrD8A=='}, 200,
+                          'Content-Md5': 'Te1kmIjQRNNz70DJjsrD8A=='}, 200,
                          content=b'')
         )
-        client = Minio('localhost:9000')
+        client = Minio(endpoint='localhost:9000')
         it = itertools.chain((DeleteObject("Ab"), DeleteObject("c")))
-        for err in client.remove_objects('hello', it):
+        result = client.remove_objects(
+            bucket_name='hello', delete_object_list=it)
+        for err in result:
             print(err)
diff --git a/tests/unit/sign_test.py b/tests/unit/sign_test.py
index 632ce23a..bc869fc7 100644
--- a/tests/unit/sign_test.py
+++ b/tests/unit/sign_test.py
@@ -20,6 +20,8 @@
 from unittest import TestCase
 from urllib.parse import urlsplit, urlunsplit

+from urllib3._collections import HTTPHeaderDict
+
 from minio import Minio
 from minio.credentials import Credentials
 from minio.helpers import queryencode, quote, sha256_hash
@@ -40,12 +42,16 @@ def test_simple_request(self):
                           empty_hash, 'x-amz-date:dateString', '',
                           ';'.join(expected_signed_headers), empty_hash]

-        headers_to_sign = {'x-amz-date': 'dateString',
-                           'x-amz-content-sha256': empty_hash}
+        headers_to_sign = HTTPHeaderDict(
+            {'x-amz-date': 'dateString', 'x-amz-content-sha256': empty_hash},
+        )
         expected_request = sha256_hash('\n'.join(expected_request_array))

         actual_request = _get_canonical_request_hash(
-            "PUT", url, headers_to_sign, empty_hash,
+            method="PUT",
+            url=url,
+            headers=headers_to_sign,
+            content_sha256=empty_hash,
         )
         self.assertEqual(expected_request, actual_request[0])

@@ -60,10 +66,14 @@ def test_request_with_query(self):

         expected_request = sha256_hash('\n'.join(expected_request_array))

-        headers_to_sign = {'x-amz-date': 'dateString',
-                           'x-amz-content-sha256': empty_hash}
+        headers_to_sign = HTTPHeaderDict(
+            {'x-amz-date': 'dateString', 'x-amz-content-sha256': empty_hash},
+        )
         actual_request = _get_canonical_request_hash(
-            "PUT", url, headers_to_sign, empty_hash,
+            method="PUT",
+            url=url,
+            headers=headers_to_sign,
+            content_sha256=empty_hash,
         )
         self.assertEqual(expected_request, actual_request[0])

@@ -133,15 +143,19 @@ def test_presigned_versioned_id(self):

 class SignV4Test(TestCase):
     def test_signv4(self):
-        client = Minio("localhost:9000", access_key="minio",
-                       secret_key="minio123", secure=False)
+        client = Minio(
+            endpoint="localhost:9000",
+            access_key="minio",
+            secret_key="minio123",
+            secure=False,
+        )
         creds = client._provider.retrieve()
-        headers = {
+        headers = HTTPHeaderDict({
             'Host': 'localhost:9000',
             'x-amz-content-sha256':
             'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
             'x-amz-date': '20150620T010203Z',
-        }
+        })
         url = client._base_url.build(
             method="PUT",
             region="us-east-1",
diff --git a/tests/unit/stat_object_test.py b/tests/unit/stat_object_test.py
index ebf32ade..0dd76c21 100644
--- a/tests/unit/stat_object_test.py
+++ b/tests/unit/stat_object_test.py
@@ -25,16 +25,19 @@ class StatObject(TestCase):
     def test_object_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(TypeError, client.stat_object, 'hello', 1234)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(TypeError):
+            client.stat_object(bucket_name='hello', object_name=1234)

     def test_object_is_not_empty_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.stat_object, 'hello', ' \t \n ')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.stat_object(bucket_name='hello', object_name=' \t \n ')

     def test_stat_object_invalid_name(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.stat_object, 'AB#CD', 'world')
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.stat_object(bucket_name='AB#CD', object_name='world')

     @mock.patch('urllib3.PoolManager')
     def test_stat_object_works(self, mock_connection):
@@ -52,5 +55,5 @@ def test_stat_object_works(self, mock_connection):
                          {'User-Agent': _DEFAULT_USER_AGENT}, 200,
                          response_headers=mock_headers)
         )
-        client = Minio('localhost:9000')
-        client.stat_object('hello', 'world')
+        client = Minio(endpoint='localhost:9000')
+        client.stat_object(bucket_name='hello', object_name='world')
diff --git a/tests/unit/trace_test.py b/tests/unit/trace_test.py
index 71b97c32..c67617a0 100644
--- a/tests/unit/trace_test.py
+++ b/tests/unit/trace_test.py
@@ -21,5 +21,6 @@ class TraceTest(TestCase):
     def test_bucket_is_string(self):
-        client = Minio('localhost:9000')
-        self.assertRaises(ValueError, client.trace_on, None)
+        client = Minio(endpoint='localhost:9000')
+        with self.assertRaises(ValueError):
+            client.trace_on(None)
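The URL test tables above exercise two things at once: how `BaseURL` classifies an AWS endpoint and how `build()` re-derives the final request URL from a method, a region, and an optional bucket and object. The sketch below condenses that round trip into a self-contained loop; it assumes `BaseURL` is importable from `minio.helpers`, as in these tests, and its two cases are lifted from the bucket-build table above rather than being exhaustive.

```py
from collections import namedtuple
from urllib.parse import urlunsplit

from minio.helpers import BaseURL

Case = namedtuple("Case", ["args", "result"])

cases = [
    # Plain endpoint with no region hint: build() fills in us-east-1.
    Case(
        ("https://s3.amazonaws.com", None),
        "https://my-bucket.s3.us-east-1.amazonaws.com/",
    ),
    # A region passed to BaseURL wins over the region in the hostname.
    Case(
        ("https://s3.me-south-1.amazonaws.com", "cn-northwest-1"),
        "https://my-bucket.s3.cn-northwest-1.amazonaws.com/",
    ),
]

for case in cases:
    base_url = BaseURL(*case.args)
    # build() returns split URL parts; urlunsplit() joins them back together.
    url = urlunsplit(
        base_url.build(
            method="GET",
            region=base_url.region or "us-east-1",
            bucket_name="my-bucket",
        ),
    )
    assert str(url) == case.result, url
```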
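The `presigned_get_object` test above also records an API shift: response-header overrides now travel through `extra_query_params` as an `HTTPQueryDict` (imported from `minio.helpers`, as in the test) rather than a dedicated `response_headers` argument. A minimal usage sketch under that assumption follows; the endpoint, credentials, bucket, and object are placeholders taken from the tests, and the region is pinned in the constructor so that presigning needs no region lookup.

```py
from datetime import timedelta

from minio import Minio
from minio.helpers import HTTPQueryDict

# Placeholder endpoint and credentials, as in the unit tests.
client = Minio(
    endpoint='localhost:9000',
    access_key='my_access_key',
    secret_key='my_secret_key',
    secure=True,
    region='us-east-1',  # pinned so presigning stays a local operation
)

# Ask S3 to override Content-Type and Content-Disposition when the
# presigned URL is eventually used.
url = client.presigned_get_object(
    bucket_name='mybucket',
    object_name='myfile.pdf',
    expires=timedelta(hours=1),
    extra_query_params=HTTPQueryDict({
        'Response-Content-Type': 'application/pdf',
        'Response-Content-Disposition': 'inline; filename="test.pdf"',
    }),
)
print(url)
```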
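Finally, the mocking recipe these files repeat is worth seeing on its own: patch `urllib3.PoolManager`, preload the exact request the client is expected to send together with the canned response it should receive, then make the real call. The sketch below mirrors the `remove_bucket` case above; it assumes the suite's local `minio_mocks` helpers (`MockConnection`, `MockResponse`) at the path shown and that they behave as they do in these tests.

```py
from unittest import TestCase, mock

from minio import Minio
from minio.api import _DEFAULT_USER_AGENT

# Local helper module shipped with the unit tests (assumed path).
from tests.unit.minio_mocks import MockConnection, MockResponse


class RemoveBucketRoundTrip(TestCase):
    @mock.patch('urllib3.PoolManager')
    def test_remove_bucket_sends_delete(self, mock_connection):
        # Stand in for the client's HTTP connection pool.
        mock_server = MockConnection()
        mock_connection.return_value = mock_server
        # Script the single expected exchange: a DELETE on the bucket URL
        # answered with 204 No Content.
        mock_server.mock_add_request(
            MockResponse(
                'DELETE',
                'https://localhost:9000/hello',
                {'User-Agent': _DEFAULT_USER_AGENT},
                204,
            ),
        )
        client = Minio(endpoint='localhost:9000')
        # A request that differs from the scripted one fails the test.
        client.remove_bucket(bucket_name='hello')
```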