1 change: 1 addition & 0 deletions sqlserver/changelog.d/21304.fixed
@@ -0,0 +1 @@
Support `only_custom_queries` configuration option in sqlserver integration.
Contributor:
This is a new config option. We need to change the changelog type to `added`, not `fixed`.

Contributor Author:
It's technically not a new config option, since it already exists in the sqlserver conf.yaml and customers were reporting that it doesn't work as expected. But it's definitely new functionality if it works now, so I see how `added` makes more sense.

Contributor:
Ah, I see. Yeah, it looks like it was added to a bunch of configs in the past, but I don't see how it was ever actually wired up. I'm fine with `fixed` in that case.

Contributor Author:
Nope, it was never really wired up. That config was pulled in at the same place `custom_queries` was pulled in: not in the actual integration, but by the QueryManager. So it was being referenced, but it didn't do anything, since we don't use the QueryManager in the intended way.
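
To make that concrete, here is a minimal, hypothetical sketch of the pattern being described. The real logic lives in datadog_checks.base's QueryManager; the class and names below are illustrative only, not the actual source:

# Hypothetical sketch: `only_custom_queries` is consumed next to `custom_queries`
# inside the QueryManager, so a check that never routes its default metrics
# through the QueryManager (as sqlserver does) silently ignores the option.
class QueryManagerSketch:
    def __init__(self, instance, default_queries):
        only_custom = bool(instance.get('only_custom_queries', False))
        self.queries = [] if only_custom else list(default_queries)
        self.queries += instance.get('custom_queries', [])

# With the flag set, the default queries are dropped before they ever run:
mgr = QueryManagerSketch({'only_custom_queries': True}, ['default_query'])
assert mgr.queries == []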

19 changes: 19 additions & 0 deletions sqlserver/datadog_checks/sqlserver/config.py
@@ -40,6 +40,7 @@ def __init__(self, init_config, instance, log):

        self.proc: str = instance.get('stored_procedure')
        self.custom_metrics: list[dict] = init_config.get('custom_metrics', []) or []
        self.only_custom_queries: bool = is_affirmative(instance.get('only_custom_queries', False))
        self.ignore_missing_database = is_affirmative(instance.get("ignore_missing_database", False))
        if self.ignore_missing_database:
            self.log.warning(
@@ -132,6 +133,8 @@ def __init__(self, init_config, instance, log):
            additional_tags=["raw_query_statement:enabled"] if self.collect_raw_query_statement["enabled"] else [],
        )

        self._validate_only_custom_queries(instance)

    def _compile_valid_patterns(self, patterns: list[str]) -> re.Pattern:
        valid_patterns = []

@@ -271,3 +274,19 @@ def _build_database_metrics_configs(self, instance):
            if value is not None:
                config[key] = value
        return configurable_metrics

    def _validate_only_custom_queries(self, instance):
Contributor:
Let's wait for @sethsamuel to return from PTO on Monday before moving ahead with this one. We're in the process of reworking config handling and validation for the agent health surfacing work, and I want to make sure this will play nicely with the pattern we're working toward.

        # Warn about any metric-collecting options that are enabled
        if self.only_custom_queries:
            if self.dbm_enabled:
                self.log.warning(
                    "only_custom_queries is enabled with DBM. if you don't want to collect DBM metrics, set dbm: false"
                )

            if self.proc:
                self.log.warning(
                    "`stored_procedure` is deprecated. to run custom queries, add to the `custom_queries` configuration"
                )

            if instance.get('custom_queries', []) == []:
                self.log.warning("only_custom_queries is enabled but no custom queries are defined")
108 changes: 58 additions & 50 deletions sqlserver/datadog_checks/sqlserver/sqlserver.py
@@ -831,10 +831,12 @@ def check(self, _):
                )
            self._query_manager.compile_queries()
            self._send_database_instance_metadata()
+
            if self._config.proc:
                self.do_stored_procedure_check()
            else:
                self.collect_metrics()
+
            if self._config.autodiscovery and self._config.autodiscovery_db_service_check:
                self._check_database_conns()
            if self._config.dbm_enabled:
@@ -852,6 +854,7 @@ def check(self, _):
                        handler.run_job_loop(self.tag_manager.get_tags())
                    except Exception as e:
                        self.log.error("Error running XE session handler for %s: %s", handler.session_name, e)
+
        else:
            self.log.debug("Skipping check")
@@ -926,52 +929,56 @@ def log_missing_metric(self, metric_name, major_version, engine_version):
        else:
            self.log.warning("%s metrics are not supported on Azure engine version: %s", metric_name, engine_version)

-    def collect_metrics(self):
-        """Fetch the metrics from all the associated database tables."""
-        # queries for default integration metrics from the database
+    def load_basic_metrics(self, cursor):
+        # initiate autodiscovery or if the server was down at check __init__ key could be missing.
+        if self.autodiscover_databases(cursor) or not self.instance_metrics:
+            self._make_metric_list_to_collect(self._config.custom_metrics)
+
+        instance_results = {}
+        engine_edition = self.static_info_cache.get(STATIC_INFO_ENGINE_EDITION, "")
+        # Execute the `fetch_all` operations first to minimize the database calls
+        for cls, metric_names in self.instance_per_type_metrics.items():
+            if not metric_names:
+                instance_results[cls] = None, None
+            else:
+                try:
+                    db_names = [d.name for d in self.databases] or [
+                        self.instance.get("database", self.connection.DEFAULT_DATABASE)
+                    ]
+                    metric_cls = getattr(metrics, cls)
+                    with tracked_query(self, operation=metric_cls.OPERATION_NAME):
+                        rows, cols = metric_cls.fetch_all_values(
+                            cursor,
+                            list(metric_names),
+                            self.log,
+                            databases=db_names,
+                            engine_edition=engine_edition,
+                        )
+                except Exception as e:
+                    self.log.error("Error running `fetch_all` for metrics %s - skipping. Error: %s", cls, e)
+                    rows, cols = None, None
+
+                instance_results[cls] = rows, cols
-        with self.connection.open_managed_default_connection():
-            with self.connection.get_managed_cursor() as cursor:
-                # initiate autodiscovery or if the server was down at check __init__ key could be missing.
-                if self.autodiscover_databases(cursor) or not self.instance_metrics:
-                    self._make_metric_list_to_collect(self._config.custom_metrics)
-
-                instance_results = {}
-                engine_edition = self.static_info_cache.get(STATIC_INFO_ENGINE_EDITION, "")
-                # Execute the `fetch_all` operations first to minimize the database calls
-                for cls, metric_names in self.instance_per_type_metrics.items():
-                    if not metric_names:
-                        instance_results[cls] = None, None
-                    else:
-                        try:
-                            db_names = [d.name for d in self.databases] or [
-                                self.instance.get("database", self.connection.DEFAULT_DATABASE)
-                            ]
-                            metric_cls = getattr(metrics, cls)
-                            with tracked_query(self, operation=metric_cls.OPERATION_NAME):
-                                rows, cols = metric_cls.fetch_all_values(
-                                    cursor,
-                                    list(metric_names),
-                                    self.log,
-                                    databases=db_names,
-                                    engine_edition=engine_edition,
-                                )
-                        except Exception as e:
-                            self.log.error("Error running `fetch_all` for metrics %s - skipping. Error: %s", cls, e)
-                            rows, cols = None, None
-
-                        instance_results[cls] = rows, cols
+
+        for metric in self.instance_metrics:
+            key = metric.__class__.__name__
+            if key not in instance_results:
+                self.log.warning("No %s metrics found, skipping", str(key))
+            else:
+                rows, cols = instance_results[key]
+                if rows is not None:
+                    if key == "SqlIncrFractionMetric":
+                        metric.fetch_metric(rows, cols, self.sqlserver_incr_fraction_metric_previous_values)
+                    else:
+                        metric.fetch_metric(rows, cols)
-
-                for metric in self.instance_metrics:
-                    key = metric.__class__.__name__
-                    if key not in instance_results:
-                        self.log.warning("No %s metrics found, skipping", str(key))
-                    else:
-                        rows, cols = instance_results[key]
-                        if rows is not None:
-                            if key == "SqlIncrFractionMetric":
-                                metric.fetch_metric(rows, cols, self.sqlserver_incr_fraction_metric_previous_values)
-                            else:
-                                metric.fetch_metric(rows, cols)
+    def collect_metrics(self):
+        """Fetch the metrics from all the associated database tables."""
+        with self.connection.open_managed_default_connection():
+            if not self._config.only_custom_queries:
+                with self.connection.get_managed_cursor() as cursor:
+                    self.load_basic_metrics(cursor)

            # Neither pyodbc nor adodbapi are able to read results of a query if the number of rows affected
            # statement are returned as part of the result set, so we disable for the entire connection
@@ -980,14 +987,15 @@ def collect_metrics(self):
            with self.connection.get_managed_cursor() as cursor:
                cursor.execute("SET NOCOUNT ON")
            try:
-                # restore the current database after executing dynamic queries
-                # this is to ensure the current database context is not changed
-                with self.connection.restore_current_database_context():
-                    if self.database_metrics:
-                        for database_metric in self.database_metrics:
-                            database_metric.execute()
-
-                # reuse connection for any custom queries
+                if not self._config.only_custom_queries:
+                    # restore the current database after executing dynamic queries
+                    # this is to ensure the current database context is not changed
+                    with self.connection.restore_current_database_context():
+                        if self.database_metrics:
+                            for database_metric in self.database_metrics:
+                                database_metric.execute()
+
+                # reuse the connection for custom queries
                self._query_manager.execute()
            finally:
                with self.connection.get_managed_cursor() as cursor:
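
As a sanity check on the net behavior, here is a runnable simplification of the gating this diff introduces. The function below is a stand-in written for this review, not the real collect_metrics:

# Simplified stand-in for the new collect_metrics control flow.
def collect_metrics_flow(only_custom_queries, load_basic_metrics, run_database_metrics, run_custom_queries):
    if not only_custom_queries:
        load_basic_metrics()    # default instance metrics, skipped when gated
        run_database_metrics()  # per-database dynamic queries, also gated
    run_custom_queries()        # QueryManager custom queries always execute

calls = []
collect_metrics_flow(
    True,
    lambda: calls.append('basic'),
    lambda: calls.append('database'),
    lambda: calls.append('custom'),
)
assert calls == ['custom']  # with only_custom_queries, only custom queries run
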
30 changes: 30 additions & 0 deletions sqlserver/tests/test_integration.py
@@ -506,6 +506,36 @@ def test_custom_queries(aggregator, dd_run_check, instance_docker, custom_query,
        aggregator.assert_metric(metric_name, **kwargs)


@pytest.mark.integration
Contributor (@sangeetashivaji, Sep 11, 2025):
I'm guessing that when stored procedure checks are enabled and only_custom_queries is enabled, we do not collect stored procedure metrics? Should we test that?
Also, what happens when only_custom_queries is set and no custom_queries are given?

Contributor Author:
If stored procedure checks are enabled and only_custom_queries is enabled, we would collect stored procedure metrics. The idea is that if a user has a check instance with only_custom_queries enabled, they'd be removing the other collection parameters.

I think it would be good to validate these things in the config init though; I can add those situations as warning logs. What do you think?

Contributor Author:
Also, I will definitely add tests for that regardless.

Contributor:
> I think it would be good to validate these things in the config init though; I can add those situations as warning logs. What do you think?

Yeah, I think we should definitely do that. Thank you!

Contributor Author:
Made the config validation change! I added a test for log output given a certain config, rather than adding extra only_custom_queries gates around DBM metrics and sproc metrics. Since we're technically leaking only_custom_queries outside of the QueryManager, I figured it'd be best to gate with it only where there's no disabling config option (i.e., for base metrics only).

@pytest.mark.usefixtures('dd_environment')
def test_only_custom_queries(aggregator, dd_run_check, instance_docker):
    """Test that only_custom_queries=True skips regular metrics but executes custom queries."""
    instance = copy(instance_docker)
Contributor:
I'll clean this up in a follow-up since it's already done all over, but I just want to point out that this copy(instance_docker) is technically susceptible to mutation issues between tests, because we're doing a shallow copy of a dictionary whose deeper levels we regularly mutate within the tests.

Contributor Author:
Makes sense! I guess updating this to deepcopy() right now wouldn't make much of a difference until we're resetting the instance between tests?

Contributor:
Yeah, it's not going to make much of a difference at the moment, and it's already done elsewhere. Easier to clean it up and avoid future footguns in a follow-up!
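
For anyone following along, a quick standalone illustration of the shallow-copy footgun being discussed (example data invented, not taken from these tests):

from copy import copy, deepcopy

base = {'custom_queries': [{'query': 'SELECT 1'}]}

shallow = copy(base)
shallow['custom_queries'].append({'query': 'SELECT 2'})  # mutates base's inner list too
assert len(base['custom_queries']) == 2

deep = deepcopy(base)
deep['custom_queries'].append({'query': 'SELECT 3'})  # base is unaffected
assert len(base['custom_queries']) == 2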

    instance['only_custom_queries'] = True
    instance['custom_queries'] = [
        {
            'query': "SELECT 42 as custom_value",
            'columns': [{'name': 'custom_value', 'type': 'gauge'}],
            'tags': ['test:only_custom'],
        }
    ]
    instance['procedure_metrics'] = {'enabled': False}

    check = SQLServer(CHECK_NAME, {}, [instance])
    dd_run_check(check)

    # Verify that custom query metrics ARE collected
    instance_tags = check._config.tags + [
        "database_hostname:{}".format("stubbed.hostname"),
        "database_instance:{}".format("stubbed.hostname"),
        "ddagenthostname:{}".format("stubbed.hostname"),
        "dd.internal.resource:database_instance:{}".format("stubbed.hostname"),
        "sqlserver_servername:{}".format(check.static_info_cache.get(STATIC_INFO_SERVERNAME)),
    ]
    aggregator.assert_metric('sqlserver.custom_value', value=42, tags=instance_tags + ['test:only_custom'], count=1)
    aggregator.assert_all_metrics_covered()


@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_load_static_information(aggregator, dd_run_check, instance_docker):
135 changes: 135 additions & 0 deletions sqlserver/tests/test_unit.py
@@ -3,6 +3,7 @@
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import json
import logging
import os
import re
import time
@@ -925,3 +926,137 @@ def test_database_identifier(instance_docker, template, expected, tags):
    check._database_identifier = None

    assert check.database_identifier == expected


def test_only_custom_queries_validation_warnings(caplog):
    """Test that appropriate warning logs are emitted when only_custom_queries conflicts with other configurations."""
    from datadog_checks.sqlserver.config import SQLServerConfig

    caplog.clear()
    caplog.set_level(logging.WARNING)

    # Use a real logger that will be captured by caplog
    real_logger = logging.getLogger('test_sqlserver_config')

    # Test case 1: only_custom_queries with DBM enabled
    instance_with_dbm = {
        'host': 'localhost',
        'username': 'test',
        'password': 'test',
        'only_custom_queries': True,
        'dbm': True,
        'custom_queries': [
            {
                'query': "SELECT 1 as test_value",
                'columns': [{'name': 'test_value', 'type': 'gauge'}],
                'tags': ['test:dbm_warning'],
            }
        ],
    }

    config = SQLServerConfig({}, instance_with_dbm, real_logger)
    config._validate_only_custom_queries(instance_with_dbm)

    # Check for DBM warning
    dbm_warning_found = any("only_custom_queries is enabled with DBM" in record.message for record in caplog.records)
    assert dbm_warning_found, "Expected warning about only_custom_queries with DBM not found"

    # Test case 2: only_custom_queries with stored procedure
    caplog.clear()
    instance_with_proc = {
        'host': 'localhost',
        'username': 'test',
        'password': 'test',
        'only_custom_queries': True,
        'stored_procedure': 'pyStoredProc',
        'custom_queries': [
            {
                'query': "SELECT 1 as test_value",
                'columns': [{'name': 'test_value', 'type': 'gauge'}],
                'tags': ['test:proc_warning'],
            }
        ],
    }

    config = SQLServerConfig({}, instance_with_proc, real_logger)
    config._validate_only_custom_queries(instance_with_proc)

    # Check for stored procedure warning
    proc_warning_found = any("`stored_procedure` is deprecated" in record.message for record in caplog.records)
    assert proc_warning_found, "Expected warning about only_custom_queries with stored_procedure not found"

    # Test case 3: only_custom_queries with no custom queries defined
    caplog.clear()
    instance_no_queries = {
        'host': 'localhost',
        'username': 'test',
        'password': 'test',
        'only_custom_queries': True,
        'custom_queries': [],
    }

    config = SQLServerConfig({}, instance_no_queries, real_logger)
    config._validate_only_custom_queries(instance_no_queries)

    # Check for no custom queries warning
    no_queries_warning_found = any(
        "only_custom_queries is enabled but no custom queries are defined" in record.message
        for record in caplog.records
    )
    assert no_queries_warning_found, "Expected warning about only_custom_queries with no custom queries not found"

    # Test case 4: only_custom_queries with all conflicts (should emit all warnings)
    caplog.clear()
    instance_all_conflicts = {
        'host': 'localhost',
        'username': 'test',
        'password': 'test',
        'only_custom_queries': True,
        'dbm': True,
        'stored_procedure': 'pyStoredProc',
        'custom_queries': [],
    }

    config = SQLServerConfig({}, instance_all_conflicts, real_logger)
    config._validate_only_custom_queries(instance_all_conflicts)

    # Check that all three warnings are emitted
    dbm_warning_found = any("only_custom_queries is enabled with DBM" in record.message for record in caplog.records)
    proc_warning_found = any("`stored_procedure` is deprecated" in record.message for record in caplog.records)
    no_queries_warning_found = any(
        "only_custom_queries is enabled but no custom queries are defined" in record.message
        for record in caplog.records
    )

    assert dbm_warning_found, "Expected warning about only_custom_queries with DBM not found in all-conflicts test"
    assert proc_warning_found, (
        "Expected warning about only_custom_queries with stored_procedure not found in all-conflicts test"
    )
    assert no_queries_warning_found, (
        "Expected warning about only_custom_queries with no custom queries not found in all-conflicts test"
    )

    # Test case 5: only_custom_queries with no conflicts (should emit no warnings)
    caplog.clear()
    instance_no_conflicts = {
        'host': 'localhost',
        'username': 'test',
        'password': 'test',
        'only_custom_queries': True,
        'dbm': False,
        'custom_queries': [
            {
                'query': "SELECT 1 as test_value",
                'columns': [{'name': 'test_value', 'type': 'gauge'}],
                'tags': ['test:no_conflicts'],
            }
        ],
    }

    config = SQLServerConfig({}, instance_no_conflicts, real_logger)
    config._validate_only_custom_queries(instance_no_conflicts)

    # Check that no warnings are emitted
    warning_count = len([record for record in caplog.records if record.levelno >= logging.WARNING])
    warning_messages = [record.message for record in caplog.records if record.levelno >= logging.WARNING]
    assert warning_count == 0, f"Expected no warnings but found {warning_count} warnings: {warning_messages}"