diff --git a/.chloggen/watsonx-sem-convention.yaml b/.chloggen/watsonx-sem-convention.yaml
new file mode 100644
index 0000000000..c727f69d22
--- /dev/null
+++ b/.chloggen/watsonx-sem-convention.yaml
@@ -0,0 +1,22 @@
+# Use this changelog template to create an entry for release notes.
+#
+# If your change doesn't affect end users you should instead start
+# your pull request title with [chore] or use the "Skip Changelog" label.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the area of concern in the attributes-registry (e.g. http, cloud, db)
+component: gen-ai
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: "Add IBM WatsonX.AI semantic conventions"
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+# The values here must be integers.
+issues: [1650]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
diff --git a/docs/gen-ai/README.md b/docs/gen-ai/README.md
index 0a0b588660..5bb453a714 100644
--- a/docs/gen-ai/README.md
+++ b/docs/gen-ai/README.md
@@ -39,5 +39,6 @@ Technology specific semantic conventions are defined for the following GenAI sys
* [Azure AI Inference](./azure-ai-inference.md): Semantic Conventions for Azure AI Inference.
* [OpenAI](./openai.md): Semantic Conventions for OpenAI.
* [AWS Bedrock](./aws-bedrock.md): Semantic Conventions for AWS Bedrock.
+* [IBM WatsonX.AI](./ibm-watsonx-ai.md): Semantic Conventions for IBM WatsonX.AI.
[DocumentStatus]: https://opentelemetry.io/docs/specs/otel/document-status
diff --git a/docs/gen-ai/gen-ai-metrics.md b/docs/gen-ai/gen-ai-metrics.md
index 3819a5c9d5..44d41f60f5 100644
--- a/docs/gen-ai/gen-ai-metrics.md
+++ b/docs/gen-ai/gen-ai-metrics.md
@@ -15,6 +15,10 @@ linkTitle: Metrics
- [Metric: `gen_ai.server.request.duration`](#metric-gen_aiserverrequestduration)
- [Metric: `gen_ai.server.time_per_output_token`](#metric-gen_aiservertime_per_output_token)
- [Metric: `gen_ai.server.time_to_first_token`](#metric-gen_aiservertime_to_first_token)
+- [IBM WatsonX.AI specific metrics](#ibm-watsonxai-specific-metrics)
+ - [Metric: `ibm.watsonx.ai.completions.tokens`](#metric-ibmwatsonxaicompletionstokens)
+ - [Metric: `ibm.watsonx.ai.completions.responses`](#metric-ibmwatsonxaicompletionsresponses)
+ - [Metric: `ibm.watsonx.ai.completions.exceptions`](#metric-ibmwatsonxaicompletionsexceptions)
@@ -607,6 +611,203 @@ applicable `aws.bedrock.*` attributes and are not expected to include
+## IBM WatsonX.AI specific metrics
+
+The following metrics are specific to IBM WatsonX.AI and extend the standard GenAI metrics.
+
+### Metric: `ibm.watsonx.ai.completions.tokens`
+
+This metric is [recommended][MetricRecommended] to track the number of tokens processed by IBM WatsonX.AI completions.
+
+
+
+
+
+
+
+
+| Name | Instrument Type | Unit (UCUM) | Description | Stability | Entity Associations |
+| -------- | --------------- | ----------- | -------------- | --------- | ------ |
+| `ibm.watsonx.ai.completions.tokens` | Counter | `{token}` | Number of tokens processed by IBM WatsonX.AI completions. |  | |
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. [1] | `chat`; `generate_content`; `text_completion` | `Required` |  |
+| [`gen_ai.token.type`](/docs/registry/attributes/gen-ai.md) | string | The type of token being counted. | `input`; `output` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. | `gpt-4` | `Recommended` |  |
+
+**[1] `gen_ai.operation.name`:** If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value.
+
+---
+
+`gen_ai.operation.name` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `chat` | Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) |  |
+| `create_agent` | Create GenAI agent |  |
+| `embeddings` | Embeddings operation such as [OpenAI Create embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create) |  |
+| `execute_tool` | Execute a tool |  |
+| `generate_content` | Multimodal content generation operation such as [Gemini Generate Content](https://ai.google.dev/api/generate-content) |  |
+| `invoke_agent` | Invoke GenAI agent |  |
+| `text_completion` | Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions) |  |
+
+---
+
+`gen_ai.token.type` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `input` | Input tokens (prompt, input, etc.) |  |
+| `output` | Output tokens (completion, response, etc.) |  |
+
+
+
+
+
+
+| Name | Instrument Type | Unit (UCUM) | Description | Stability | Entity Associations |
+| -------- | --------------- | ----------- | -------------- | --------- | ------ |
+| `ibm.watsonx.ai.completions.tokens` | Counter | `{token}` | Number of tokens processed by IBM WatsonX.AI completions. |  | |
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. | `chat`; `text_completion` | `Required` |  |
+| [`gen_ai.token.type`](/docs/registry/attributes/gen-ai.md) | string | The type of token being counted. | `input`; `output` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. | `ibm/granite-13b-chat-v2` | `Recommended` |  |
+
+### Metric: `ibm.watsonx.ai.completions.responses`
+
+This metric is [recommended][MetricRecommended] to track the number of responses generated by IBM WatsonX.AI completions.
+
+
+
+
+
+
+
+
+| Name | Instrument Type | Unit (UCUM) | Description | Stability | Entity Associations |
+| -------- | --------------- | ----------- | -------------- | --------- | ------ |
+| `ibm.watsonx.ai.completions.responses` | Counter | `{response}` | Number of responses generated by IBM WatsonX.AI completions. |  | |
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. [1] | `chat`; `generate_content`; `text_completion` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. | `gpt-4` | `Recommended` |  |
+| [`gen_ai.response.finish_reasons`](/docs/registry/attributes/gen-ai.md) | string[] | Array of reasons the model stopped generating tokens, corresponding to each generation received. | `["stop"]`; `["stop", "length"]` | `Recommended` |  |
+
+**[1] `gen_ai.operation.name`:** If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value.
+
+---
+
+`gen_ai.operation.name` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `chat` | Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) |  |
+| `create_agent` | Create GenAI agent |  |
+| `embeddings` | Embeddings operation such as [OpenAI Create embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create) |  |
+| `execute_tool` | Execute a tool |  |
+| `generate_content` | Multimodal content generation operation such as [Gemini Generate Content](https://ai.google.dev/api/generate-content) |  |
+| `invoke_agent` | Invoke GenAI agent |  |
+| `text_completion` | Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions) |  |
+
+
+
+
+
+
+| Name | Instrument Type | Unit (UCUM) | Description | Stability | Entity Associations |
+| -------- | --------------- | ----------- | -------------- | --------- | ------ |
+| `ibm.watsonx.ai.completions.responses` | Counter | `{response}` | Number of responses generated by IBM WatsonX.AI completions. |  | |
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. | `chat`; `text_completion` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. | `ibm/granite-13b-chat-v2` | `Recommended` |  |
+| [`gen_ai.response.finish_reasons`](/docs/registry/attributes/gen-ai.md) | string[] | Array of reasons the model stopped generating tokens. | `["stop"]`; `["length"]` | `Recommended` |  |
+
+### Metric: `ibm.watsonx.ai.completions.exceptions`
+
+This metric is [recommended][MetricRecommended] to track the number of exceptions encountered during IBM WatsonX.AI completions.
+
+
+
+
+
+
+
+
+| Name | Instrument Type | Unit (UCUM) | Description | Stability | Entity Associations |
+| -------- | --------------- | ----------- | -------------- | --------- | ------ |
+| `ibm.watsonx.ai.completions.exceptions` | Counter | `{exception}` | Number of exceptions encountered during IBM WatsonX.AI completions. |  | |
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`error.type`](/docs/registry/attributes/error.md) | string | Describes a class of error the operation ended with. [1] | `timeout`; `java.net.UnknownHostException`; `server_certificate_invalid`; `500` | `Required` |  |
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. [2] | `chat`; `generate_content`; `text_completion` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. | `gpt-4` | `Recommended` |  |
+
+**[1] `error.type`:** The `error.type` SHOULD be predictable, and SHOULD have low cardinality.
+
+When `error.type` is set to a type (e.g., an exception type), its
+canonical class name identifying the type within the artifact SHOULD be used.
+
+Instrumentations SHOULD document the list of errors they report.
+
+The cardinality of `error.type` within one instrumentation library SHOULD be low.
+Telemetry consumers that aggregate data from multiple instrumentation libraries and applications
+should be prepared for `error.type` to have high cardinality at query time when no
+additional filters are applied.
+
+If the operation has completed successfully, instrumentations SHOULD NOT set `error.type`.
+
+If a specific domain defines its own set of error identifiers (such as HTTP or gRPC status codes),
+it's RECOMMENDED to:
+
+- Use a domain-specific attribute
+- Set `error.type` to capture all errors, regardless of whether they are defined within the domain-specific set or not.
+
+**[2] `gen_ai.operation.name`:** If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value.
+
+---
+
+`error.type` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `_OTHER` | A fallback error value to be used when the instrumentation doesn't define a custom value. |  |
+
+---
+
+`gen_ai.operation.name` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `chat` | Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) |  |
+| `create_agent` | Create GenAI agent |  |
+| `embeddings` | Embeddings operation such as [OpenAI Create embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create) |  |
+| `execute_tool` | Execute a tool |  |
+| `generate_content` | Multimodal content generation operation such as [Gemini Generate Content](https://ai.google.dev/api/generate-content) |  |
+| `invoke_agent` | Invoke GenAI agent |  |
+| `text_completion` | Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions) |  |
+
+
+
+
+
+
+| Name | Instrument Type | Unit (UCUM) | Description | Stability | Entity Associations |
+| -------- | --------------- | ----------- | -------------- | --------- | ------ |
+| `ibm.watsonx.ai.completions.exceptions` | Counter | `{exception}` | Number of exceptions encountered during IBM WatsonX.AI completions. |  | |
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. | `chat`; `text_completion` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. | `ibm/granite-13b-chat-v2` | `Recommended` |  |
+| [`error.type`](/docs/registry/attributes/error.md) | string | Describes a class of error the operation ended with. | `timeout`; `rate_limit_exceeded`; `invalid_request` | `Required` |  |
+
[DocumentStatus]: https://opentelemetry.io/docs/specs/otel/document-status
[MetricRequired]: /docs/general/metric-requirement-level.md#required
[MetricRecommended]: /docs/general/metric-requirement-level.md#recommended
diff --git a/docs/gen-ai/ibm-watsonx-ai.md b/docs/gen-ai/ibm-watsonx-ai.md
new file mode 100644
index 0000000000..c427747d38
--- /dev/null
+++ b/docs/gen-ai/ibm-watsonx-ai.md
@@ -0,0 +1,264 @@
+
+
+# Semantic conventions for IBM WatsonX.AI client operations
+
+**Status**: [Development][DocumentStatus]
+
+
+
+- [Spans](#spans)
+ - [Inference](#inference)
+- [Metrics](#metrics)
+ - [Metric: `gen_ai.client.token.usage`](#metric-gen_aiclienttokenusage)
+ - [Metric: `gen_ai.client.operation.duration`](#metric-gen_aiclientoperationduration)
+
+
+
+> [!Warning]
+>
+> Existing GenAI instrumentations that are using
+> [v1.36.0 of this document](https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/gen-ai/README.md)
+> (or prior):
+>
+> * SHOULD NOT change the version of the GenAI conventions that they emit by default.
+> Conventions include, but are not limited to, attributes, metric, span and event names,
+> span kind and unit of measure.
+> * SHOULD introduce an environment variable `OTEL_SEMCONV_STABILITY_OPT_IN`
+> as a comma-separated list of category-specific values. The list of values
+> includes:
+> * `gen_ai_latest_experimental` - emit the latest experimental version of
+> GenAI conventions (supported by the instrumentation) and do not emit the
+> old one (v1.36.0 or prior).
+> * The default behavior is to continue emitting whatever version of the GenAI
+> conventions the instrumentation was emitting (1.36.0 or prior).
+>
+> This transition plan will be updated to include stable version before the
+> GenAI conventions are marked as stable.
+
+The Semantic Conventions for [IBM WatsonX.AI](https://www.ibm.com/products/watsonx-ai) extend and override the [Gen AI Semantic Conventions](/docs/gen-ai/README.md).
+
+## Spans
+
+`gen_ai.provider.name` MUST be set to `"ibm.watsonx.ai"`.
+
+### Inference
+
+
+
+
+
+
+
+
+**Status:** 
+
+Semantic Conventions for [IBM WatsonX.AI](https://www.ibm.com/products/watsonx-ai) client spans extend and override the semantic conventions for [Gen AI Spans](gen-ai-spans.md).
+
+`gen_ai.provider.name` MUST be set to `"ibm.watsonx.ai"` and SHOULD be provided **at span creation time**.
+
+**Span name** SHOULD be `{gen_ai.operation.name} {gen_ai.request.model}`.
+
+**Span kind** SHOULD be `CLIENT`.
+
+**Span status** SHOULD follow the [Recording Errors](/docs/general/recording-errors.md) document.
+
+| Attribute | Type | Description | Examples | [Requirement Level](https://opentelemetry.io/docs/specs/semconv/general/attribute-requirement-level/) | Stability |
+|---|---|---|---|---|---|
+| [`gen_ai.operation.name`](/docs/registry/attributes/gen-ai.md) | string | The name of the operation being performed. [1] | `chat`; `generate_content`; `text_completion` | `Required` |  |
+| [`gen_ai.request.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the GenAI model a request is being made to. [2] | `gpt-4` | `Required` |  |
+| [`error.type`](/docs/registry/attributes/error.md) | string | Describes a class of error the operation ended with. [3] | `timeout`; `java.net.UnknownHostException`; `server_certificate_invalid`; `500` | `Conditionally Required` if the operation ended in an error |  |
+| [`gen_ai.conversation.id`](/docs/registry/attributes/gen-ai.md) | string | The unique identifier for a conversation (session, thread), used to store and correlate messages within this conversation. [4] | `conv_5j66UpCpwteGg4YSxUnt7lPY` | `Conditionally Required` when available |  |
+| [`gen_ai.output.type`](/docs/registry/attributes/gen-ai.md) | string | Represents the content type requested by the client. [5] | `text`; `json`; `image` | `Conditionally Required` [6] |  |
+| [`gen_ai.request.choice.count`](/docs/registry/attributes/gen-ai.md) | int | The target number of candidate completions to return. | `3` | `Conditionally Required` if available, in the request, and !=1 |  |
+| [`gen_ai.request.seed`](/docs/registry/attributes/gen-ai.md) | int | Requests with same seed value more likely to return same result. | `100` | `Conditionally Required` if applicable and if the request includes a seed |  |
+| [`ibm.watsonx.ai.request.project_id`](/docs/registry/attributes/ibm.md) | string | The project ID in IBM WatsonX AI. | `12345678-abcd-1234-efgh-1234567890ab` | `Conditionally Required` if the request includes a project_id |  |
+| [`ibm.watsonx.ai.request.space_id`](/docs/registry/attributes/ibm.md) | string | The space ID in IBM WatsonX AI. | `abcdef12-3456-7890-abcd-ef1234567890` | `Conditionally Required` if the request includes a space_id |  |
+| [`ibm.watsonx.ai.response.trace_id`](/docs/registry/attributes/ibm.md) | string | The trace ID returned by IBM WatsonX AI. | `wxt-12345678-abcd-1234-efgh-1234567890ab` | `Conditionally Required` [7] |  |
+| [`server.port`](/docs/registry/attributes/server.md) | int | GenAI server port. [8] | `80`; `8080`; `443` | `Conditionally Required` If `server.address` is set. |  |
+| [`gen_ai.request.frequency_penalty`](/docs/registry/attributes/gen-ai.md) | double | The frequency penalty setting for the GenAI request. | `0.1` | `Recommended` |  |
+| [`gen_ai.request.max_tokens`](/docs/registry/attributes/gen-ai.md) | int | The maximum number of tokens the model generates for a request. | `100` | `Recommended` |  |
+| [`gen_ai.request.presence_penalty`](/docs/registry/attributes/gen-ai.md) | double | The presence penalty setting for the GenAI request. | `0.1` | `Recommended` |  |
+| [`gen_ai.request.stop_sequences`](/docs/registry/attributes/gen-ai.md) | string[] | List of sequences that the model will use to stop generating further tokens. | `["forest", "lived"]` | `Recommended` |  |
+| [`gen_ai.request.temperature`](/docs/registry/attributes/gen-ai.md) | double | The temperature setting for the GenAI request. | `0.0` | `Recommended` |  |
+| [`gen_ai.request.top_p`](/docs/registry/attributes/gen-ai.md) | double | The top_p sampling setting for the GenAI request. | `1.0` | `Recommended` |  |
+| [`gen_ai.response.finish_reasons`](/docs/registry/attributes/gen-ai.md) | string[] | Array of reasons the model stopped generating tokens, corresponding to each generation received. | `["stop"]`; `["stop", "length"]` | `Recommended` |  |
+| [`gen_ai.response.id`](/docs/registry/attributes/gen-ai.md) | string | The unique identifier for the completion. | `chatcmpl-123` | `Recommended` |  |
+| [`gen_ai.response.model`](/docs/registry/attributes/gen-ai.md) | string | The name of the model that generated the response. [9] | `gpt-4-0613` | `Recommended` |  |
+| [`gen_ai.usage.input_tokens`](/docs/registry/attributes/gen-ai.md) | int | The number of tokens used in the GenAI input (prompt). | `100` | `Recommended` |  |
+| [`gen_ai.usage.output_tokens`](/docs/registry/attributes/gen-ai.md) | int | The number of tokens used in the GenAI response (completion). | `180` | `Recommended` |  |
+| [`ibm.watsonx.ai.decoding_method`](/docs/registry/attributes/ibm.md) | string | The decoding method used by WatsonX for generating responses. | `greedy`; `sample` | `Recommended` |  |
+| [`ibm.watsonx.ai.max_new_tokens`](/docs/registry/attributes/ibm.md) | int | The maximum number of new tokens to generate in the response. | `100` | `Recommended` |  |
+| [`ibm.watsonx.ai.min_new_tokens`](/docs/registry/attributes/ibm.md) | int | The minimum number of new tokens to generate in the response. | `10` | `Recommended` |  |
+| [`ibm.watsonx.ai.random_seed`](/docs/registry/attributes/ibm.md) | int | The random seed used by WatsonX for deterministic generation. | `42` | `Recommended` |  |
+| [`ibm.watsonx.ai.repetition_penalty`](/docs/registry/attributes/ibm.md) | double | The penalty applied to repeated tokens in the generated response. | `1.2` | `Recommended` |  |
+| [`ibm.watsonx.ai.request.version`](/docs/registry/attributes/ibm.md) | string | The version of the model being used. | `1.0`; `2.3.1` | `Recommended` |  |
+| [`server.address`](/docs/registry/attributes/server.md) | string | GenAI server address. [10] | `example.com`; `10.1.2.80`; `/tmp/my.sock` | `Recommended` |  |
+| [`gen_ai.input.messages`](/docs/registry/attributes/gen-ai.md) | any | The chat history provided to the model as an input. [11] | [
{
"role": "user",
"parts": [
{
"type": "text",
"content": "Weather in Paris?"
}
]
},
{
"role": "assistant",
"parts": [
{
"type": "tool_call",
"id": "call_VSPygqKTWdrhaFErNvMV18Yl",
"name": "get_weather",
"arguments": {
"location": "Paris"
}
}
]
},
{
"role": "tool",
"parts": [
{
"type": "tool_call_response",
"id": " call_VSPygqKTWdrhaFErNvMV18Yl",
"result": "rainy, 57°F"
}
]
}
] | `Opt-In` |  |
+| [`gen_ai.output.messages`](/docs/registry/attributes/gen-ai.md) | any | Messages returned by the model where each message represents a specific model response (choice, candidate). [12] | [
{
"role": "assistant",
"parts": [
{
"type": "text",
"content": "The weather in Paris is currently rainy with a temperature of 57°F."
}
],
"finish_reason": "stop"
}
] | `Opt-In` |  |
+| [`gen_ai.system_instructions`](/docs/registry/attributes/gen-ai.md) | any | The system message or instructions provided to the GenAI model separately from the chat history. [13] | [
{
"type": "text",
"content": "You are an Agent that greet users, always use greetings tool to respond"
}
]; [
{
"type": "text",
"content": "You are a language translator."
},
{
"type": "text",
"content": "Your mission is to translate text in English to French."
}
] | `Opt-In` |  |
+| [`gen_ai.tool.definitions`](/docs/registry/attributes/gen-ai.md) | any | The list of source system tool definitions available to the GenAI agent or model. [14] | [
{
"type": "function",
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": [
"celsius",
"fahrenheit"
]
}
},
"required": [
"location",
"unit"
]
}
}
] | `Opt-In` |  |
+
+**[1] `gen_ai.operation.name`:** If one of the predefined values applies, but specific system uses a different name it's RECOMMENDED to document it in the semantic conventions for specific GenAI system and use system-specific name in the instrumentation. If a different name is not documented, instrumentation libraries SHOULD use applicable predefined value.
+
+**[2] `gen_ai.request.model`:** The name of the GenAI model a request is being made to. If the model is supplied by a vendor, then the value must be the exact name of the model requested. If the model is a fine-tuned custom model, the value should have a more specific name than the base model that's been fine-tuned.
+
+**[3] `error.type`:** The `error.type` SHOULD match the error code returned by the Generative AI provider or the client library,
+the canonical name of exception that occurred, or another low-cardinality error identifier.
+Instrumentations SHOULD document the list of errors they report.
+
+**[4] `gen_ai.conversation.id`:** Instrumentations SHOULD populate conversation id when they have it readily available
+for a given operation, for example:
+
+- when client framework being instrumented manages conversation history
+(see [LlamaIndex chat store](https://docs.llamaindex.ai/en/stable/module_guides/storing/chat_stores/))
+
+- when instrumenting GenAI client libraries that maintain conversation on the backend side
+(see [AWS Bedrock agent sessions](https://docs.aws.amazon.com/bedrock/latest/userguide/agents-session-state.html),
+[OpenAI Assistant threads](https://platform.openai.com/docs/api-reference/threads))
+
+Application developers that manage conversation history MAY add conversation id to GenAI and other
+spans or logs using custom span or log record processors or hooks provided by instrumentation
+libraries.
+
+**[5] `gen_ai.output.type`:** This attribute SHOULD be used when the client requests output of a specific type. The model may return zero or more outputs of this type.
+This attribute specifies the output modality and not the actual output format. For example, if an image is requested, the actual output could be a URL pointing to an image file.
+Additional output format details may be recorded in the future in the `gen_ai.output.{type}.*` attributes.
+
+**[6] `gen_ai.output.type`:** when applicable and if the request includes an output format.
+
+**[7] `ibm.watsonx.ai.response.trace_id`:** if the response was received and includes a trace_id
+
+**[8] `server.port`:** When observed from the client side, and when communicating through an intermediary, `server.port` SHOULD represent the server port behind any intermediaries, for example proxies, if it's available.
+
+**[9] `gen_ai.response.model`:** If available. The name of the GenAI model that provided the response. If the model is supplied by a vendor, then the value must be the exact name of the model actually used. If the model is a fine-tuned custom model, the value should have a more specific name than the base model that's been fine-tuned.
+
+**[10] `server.address`:** When observed from the client side, and when communicating through an intermediary, `server.address` SHOULD represent the server address behind any intermediaries, for example proxies, if it's available.
+
+**[11] `gen_ai.input.messages`:** Instrumentations MUST follow [Input messages JSON schema](/docs/gen-ai/gen-ai-input-messages.json).
+When the attribute is recorded on events, it MUST be recorded in structured
+form. When recorded on spans, it MAY be recorded as a JSON string if structured
+format is not supported and SHOULD be recorded in structured form otherwise.
+
+Messages MUST be provided in the order they were sent to the model.
+Instrumentations MAY provide a way for users to filter or truncate
+input messages.
+
+> [!Warning]
+> This attribute is likely to contain sensitive information including user/PII data.
+
+See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes)
+section for more details.
+
+**[12] `gen_ai.output.messages`:** Instrumentations MUST follow [Output messages JSON schema](/docs/gen-ai/gen-ai-output-messages.json)
+
+Each message represents a single output choice/candidate generated by
+the model. Each message corresponds to exactly one generation
+(choice/candidate) and vice versa - one choice cannot be split across
+multiple messages or one message cannot contain parts from multiple choices.
+
+When the attribute is recorded on events, it MUST be recorded in structured
+form. When recorded on spans, it MAY be recorded as a JSON string if structured
+format is not supported and SHOULD be recorded in structured form otherwise.
+
+Instrumentations MAY provide a way for users to filter or truncate
+output messages.
+
+> [!Warning]
+> This attribute is likely to contain sensitive information including user/PII data.
+
+See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes)
+section for more details.
+
+**[13] `gen_ai.system_instructions`:** This attribute SHOULD be used when the corresponding provider or API
+allows to provide system instructions or messages separately from the
+chat history.
+
+Instructions that are part of the chat history SHOULD be recorded in
+`gen_ai.input.messages` attribute instead.
+
+Instrumentations MUST follow [System instructions JSON schema](/docs/gen-ai/gen-ai-system-instructions.json).
+
+When recorded on spans, it MAY be recorded as a JSON string if structured
+format is not supported and SHOULD be recorded in structured form otherwise.
+
+Instrumentations MAY provide a way for users to filter or truncate
+system instructions.
+
+> [!Warning]
+> This attribute may contain sensitive information.
+
+See [Recording content on attributes](/docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes)
+section for more details.
+
+**[14] `gen_ai.tool.definitions`:** The value of this attribute matches source system tool definition format.
+
+It's expected to be an array of objects where each object represents a tool definition. In case a serialized string is available
+to the instrumentation, the instrumentation SHOULD do the best effort to
+deserialize it to an array. When recorded on spans, it MAY be recorded as a JSON string if structured format is not supported and SHOULD be recorded in structured form otherwise.
+
+Since this attribute could be large, it's NOT RECOMMENDED to populate
+it by default. Instrumentations MAY provide a way to enable
+populating this attribute.
+
+---
+
+`error.type` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `_OTHER` | A fallback error value to be used when the instrumentation doesn't define a custom value. |  |
+
+---
+
+`gen_ai.operation.name` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `chat` | Chat completion operation such as [OpenAI Chat API](https://platform.openai.com/docs/api-reference/chat) |  |
+| `create_agent` | Create GenAI agent |  |
+| `embeddings` | Embeddings operation such as [OpenAI Create embeddings API](https://platform.openai.com/docs/api-reference/embeddings/create) |  |
+| `execute_tool` | Execute a tool |  |
+| `generate_content` | Multimodal content generation operation such as [Gemini Generate Content](https://ai.google.dev/api/generate-content) |  |
+| `invoke_agent` | Invoke GenAI agent |  |
+| `text_completion` | Text completions operation such as [OpenAI Completions API (Legacy)](https://platform.openai.com/docs/api-reference/completions) |  |
+
+---
+
+`gen_ai.output.type` has the following list of well-known values. If one of them applies, then the respective value MUST be used; otherwise, a custom value MAY be used.
+
+| Value | Description | Stability |
+|---|---|---|
+| `image` | Image |  |
+| `json` | JSON object with known or unknown schema |  |
+| `speech` | Speech |  |
+| `text` | Plain text |  |
+
+
+
+
+
+
+## Metrics
+
+IBM WatsonX.AI uses the standard [Generative AI metrics](gen-ai-metrics.md) defined in the OpenTelemetry semantic conventions.
+
+In addition, IBM WatsonX.AI specific metrics are defined in the
+[IBM WatsonX.AI specific metrics](gen-ai-metrics.md#ibm-watsonxai-specific-metrics)
+section of that document.
+
+## Common Models
+
+IBM WatsonX.AI provides several foundation models. Here are some common models:
+
+| Model Name | Description |
+| --- | --- |
+| `ibm/granite-13b-chat-v2` | IBM's Granite 13B chat model |
+| `ibm/granite-13b-instruct-v2` | IBM's Granite 13B instruct model |
+| `ibm/mpt-7b-instruct` | MPT 7B instruct model |
+| `ibm/flan-ul2` | Flan UL2 model |
+| `meta-llama/llama-2-70b-chat` | Meta's Llama 2 70B chat model |
+| `bigcode/starcoder` | StarCoder model for code generation |
+
+[DocumentStatus]: https://opentelemetry.io/docs/specs/otel/document-status
diff --git a/docs/registry/attributes/README.md b/docs/registry/attributes/README.md
index 1b355b7680..6492c670b5 100644
--- a/docs/registry/attributes/README.md
+++ b/docs/registry/attributes/README.md
@@ -72,6 +72,7 @@ Currently, the following namespaces exist:
- [Heroku](heroku.md)
- [Host](host.md)
- [HTTP](http.md)
+- [IBM](ibm.md)
- [iOS](ios.md)
- [JVM](jvm.md)
- [K8s](k8s.md)
diff --git a/docs/registry/attributes/ibm.md b/docs/registry/attributes/ibm.md
new file mode 100644
index 0000000000..ef2818d406
--- /dev/null
+++ b/docs/registry/attributes/ibm.md
@@ -0,0 +1,20 @@
+
+
+
+# IBM
+
+## WatsonX AI Attributes
+
+This group defines attributes for IBM WatsonX AI.
+
+| Attribute | Type | Description | Examples | Stability |
+|---|---|---|---|---|
+| `ibm.watsonx.ai.decoding_method` | string | The decoding method used by IBM WatsonX AI for generating responses. | `greedy`; `sample` |  |
+| `ibm.watsonx.ai.max_new_tokens` | int | The maximum number of new tokens to generate in the response. | `100` |  |
+| `ibm.watsonx.ai.min_new_tokens` | int | The minimum number of new tokens to generate in the response. | `10` |  |
+| `ibm.watsonx.ai.random_seed` | int | The random seed used by IBM WatsonX AI for deterministic generation. | `42` |  |
+| `ibm.watsonx.ai.repetition_penalty` | double | The penalty applied to repeated tokens in the generated response. | `1.2` |  |
+| `ibm.watsonx.ai.request.project_id` | string | The project ID in IBM WatsonX AI. | `12345678-abcd-1234-efgh-1234567890ab` |  |
+| `ibm.watsonx.ai.request.space_id` | string | The space ID in IBM WatsonX AI. | `abcdef12-3456-7890-abcd-ef1234567890` |  |
+| `ibm.watsonx.ai.request.version` | string | The version of the model being used. | `1.0`; `2.3.1` |  |
+| `ibm.watsonx.ai.response.trace_id` | string | The trace ID returned by IBM WatsonX AI. | `wxt-12345678-abcd-1234-efgh-1234567890ab` |  |
diff --git a/model/gen-ai/metrics.yaml b/model/gen-ai/metrics.yaml
index f378e6aa0f..a2a0e7bee9 100644
--- a/model/gen-ai/metrics.yaml
+++ b/model/gen-ai/metrics.yaml
@@ -94,6 +94,57 @@
     unit: "s"
     stability: development
     extends: metric_attributes.gen_ai
+  # Common attributes shared by all IBM WatsonX AI metrics (applied via `extends`).
+  - id: metric_attributes.ibm.watsonx.ai
+    type: attribute_group
+    brief: 'This group describes IBM WatsonX AI metrics attributes.'
+    attributes:
+      - ref: gen_ai.operation.name
+        requirement_level: required
+      - ref: gen_ai.request.model
+        requirement_level: recommended
+  - id: metric.ibm.watsonx.ai.completions.tokens
+    type: metric
+    metric_name: ibm.watsonx.ai.completions.tokens
+    annotations:
+      code_generation:
+        metric_value_type: int
+    brief: 'Number of tokens processed by IBM WatsonX AI completions.'
+    instrument: counter
+    unit: "{token}"
+    stability: development
+    extends: metric_attributes.ibm.watsonx.ai
+    attributes:
+      - ref: gen_ai.token.type
+        requirement_level: required
+  - id: metric.ibm.watsonx.ai.completions.responses
+    type: metric
+    metric_name: ibm.watsonx.ai.completions.responses
+    annotations:
+      code_generation:
+        metric_value_type: int
+    brief: 'Number of responses generated by IBM WatsonX AI completions.'
+    instrument: counter
+    unit: "{response}"
+    stability: development
+    extends: metric_attributes.ibm.watsonx.ai
+    attributes:
+      - ref: gen_ai.response.finish_reasons
+        requirement_level: recommended
+  - id: metric.ibm.watsonx.ai.completions.exceptions
+    type: metric
+    metric_name: ibm.watsonx.ai.completions.exceptions
+    annotations:
+      code_generation:
+        metric_value_type: int
+    brief: 'Number of exceptions encountered during IBM WatsonX AI completions.'
+    instrument: counter
+    unit: "{exception}"
+    stability: development
+    extends: metric_attributes.ibm.watsonx.ai
+    attributes:
+      - ref: error.type
+        requirement_level: required
   - id: metric.gen_ai.server.time_to_first_token
     type: metric
     metric_name: gen_ai.server.time_to_first_token
diff --git a/model/gen-ai/spans.yaml b/model/gen-ai/spans.yaml
index 60ac3785c5..5af7a3162f 100644
--- a/model/gen-ai/spans.yaml
+++ b/model/gen-ai/spans.yaml
@@ -335,3 +335,40 @@
       requirement_level: required
     - ref: aws.bedrock.knowledge_base.id
       requirement_level: recommended
+
+  - id: span.ibm.watsonx.ai.inference.client
+    extends: attributes.gen_ai.inference.client
+    stability: development
+    span_kind: client
+    type: span
+    brief: >
+      Semantic Conventions for [IBM WatsonX AI](https://www.ibm.com/products/watsonx-ai) client spans extend
+      and override the semantic conventions for [Gen AI Spans](gen-ai-spans.md).
+    note: |
+      `gen_ai.provider.name` MUST be set to `"ibm.watsonx.ai"` and SHOULD be provided **at span creation time**.
+
+      **Span name** SHOULD be `{gen_ai.operation.name} {gen_ai.request.model}`.
+    attributes:
+      - ref: gen_ai.request.model
+        requirement_level: required
+      - ref: ibm.watsonx.ai.request.project_id
+        requirement_level:
+          conditionally_required: if the request includes a project_id
+      - ref: ibm.watsonx.ai.request.space_id
+        requirement_level:
+          conditionally_required: if the request includes a space_id
+      - ref: ibm.watsonx.ai.response.trace_id
+        requirement_level:
+          conditionally_required: if the response was received and includes a trace_id
+      - ref: ibm.watsonx.ai.request.version
+        requirement_level: recommended
+      - ref: ibm.watsonx.ai.decoding_method
+        requirement_level: recommended
+      - ref: ibm.watsonx.ai.random_seed
+        requirement_level: recommended
+      - ref: ibm.watsonx.ai.max_new_tokens
+        requirement_level: recommended
+      - ref: ibm.watsonx.ai.min_new_tokens
+        requirement_level: recommended
+      - ref: ibm.watsonx.ai.repetition_penalty
+        requirement_level: recommended
diff --git a/model/ibm-watsonx-ai/registry.yaml b/model/ibm-watsonx-ai/registry.yaml
new file mode 100644
index 0000000000..a3e3d43ab0
--- /dev/null
+++ b/model/ibm-watsonx-ai/registry.yaml
@@ -0,0 +1,52 @@
+groups:
+  - id: registry.ibm.watsonx.ai
+    type: attribute_group
+    display_name: WatsonX AI Attributes
+    brief: >
+      This group defines attributes for IBM WatsonX AI.
+    attributes:
+      - id: ibm.watsonx.ai.request.project_id
+        stability: development
+        type: string
+        brief: The project ID in IBM WatsonX AI.
+        examples: ['12345678-abcd-1234-efgh-1234567890ab']
+      - id: ibm.watsonx.ai.request.space_id
+        stability: development
+        type: string
+        brief: The space ID in IBM WatsonX AI.
+        examples: ['abcdef12-3456-7890-abcd-ef1234567890']
+      - id: ibm.watsonx.ai.request.version
+        stability: development
+        type: string
+        brief: The version of the model being used.
+        examples: ['1.0', '2.3.1']
+      - id: ibm.watsonx.ai.response.trace_id
+        stability: development
+        type: string
+        brief: The trace ID returned by IBM WatsonX AI.
+        examples: ['wxt-12345678-abcd-1234-efgh-1234567890ab']
+      - id: ibm.watsonx.ai.decoding_method
+        stability: development
+        type: string
+        brief: The decoding method used by IBM WatsonX AI for generating responses.
+        examples: ['greedy', 'sample']
+      - id: ibm.watsonx.ai.random_seed
+        stability: development
+        type: int
+        brief: The random seed used by IBM WatsonX AI for deterministic generation.
+        examples: [42]
+      - id: ibm.watsonx.ai.max_new_tokens
+        stability: development
+        type: int
+        brief: The maximum number of new tokens to generate in the response.
+        examples: [100]
+      - id: ibm.watsonx.ai.min_new_tokens
+        stability: development
+        type: int
+        brief: The minimum number of new tokens to generate in the response.
+        examples: [10]
+      - id: ibm.watsonx.ai.repetition_penalty
+        stability: development
+        type: double
+        brief: The penalty applied to repeated tokens in the generated response.
+        examples: [1.2]