3 changes: 2 additions & 1 deletion Makefile
@@ -24,4 +24,5 @@ build :
pipenv lock --requirements > requirements.txt
pipenv install --dev
pipenv run pipenv-setup sync
-pipenv check
+# see https://github.com/pypa/pipenv/issues/3860
+export PIPENV_PYUP_API_KEY="" && pipenv check
2 changes: 1 addition & 1 deletion Pipfile
@@ -17,7 +17,6 @@ numba = "*"

[dev-packages]
pytest = "*"
-pylint = "*"
pipenv-setup = "*"
pytest-cov = "*"
adaptive-alerting-detector-build = {editable = true,path = "."}
@@ -26,6 +25,7 @@ pytest-env = "*"
pipenv = "*"
flake8 = "*"
freezegun = "*"
+pylint = "*"

[requires]
python_version = "3.7"
535 changes: 256 additions & 279 deletions Pipfile.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion adaptive_alerting_detector_build/__init__.py
@@ -1 +1 @@
-__version__ = "0.5.5"
+__version__ = "0.5.6"
3 changes: 2 additions & 1 deletion adaptive_alerting_detector_build/datasources/_graphite.py
@@ -24,7 +24,8 @@ def query(self, tags, start="-168hours", end="now", interval=None, fn="sum"):
query = f"{query}|summarize('{interval}','{fn}')"
elif fn == "sum":
query = f"sumSeries({query})"
-params = {"target": query, "from": start, "until": end, "format": "json"}
+# maxDataPoints is set to 2147483647 so that we get raw data as often as possible
+params = {"target": query, "from": start, "until": end, "format": "json", "maxDataPoints": "2147483647"}
response = requests.get(self._render_url, params=params, headers=self._headers, timeout=60)
response.raise_for_status()
response_list = response.json()
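For context, a minimal sketch of the render call that the changed query() method now issues, assuming the requests library and a reachable Graphite render endpoint. The function name fetch_raw_series and the example URL and target are hypothetical, not taken from the repo; the oversized maxDataPoints value (the maximum signed 32-bit integer) asks Graphite for as many points as it will return, which in practice means raw, unconsolidated data.

import requests

def fetch_raw_series(render_url, target, timeout=60):
    """Sketch of the render request with the new maxDataPoints parameter."""
    params = {
        "target": target,
        "from": "-168hours",
        "until": "now",
        "format": "json",
        # Graphite consolidates points when a series exceeds maxDataPoints, so a
        # value of 2147483647 effectively disables consolidation.
        "maxDataPoints": "2147483647",
    }
    response = requests.get(render_url, params=params, timeout=timeout)
    response.raise_for_status()
    # Returns a list of {"target": ..., "datapoints": [[value, timestamp], ...]}
    return response.json()

# Example (hypothetical endpoint and tags, for illustration only):
# fetch_raw_series("http://graphite/render",
#                  "sumSeries(seriesByTag('app=my-web-app','what=elb_2xx'))")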
16 changes: 8 additions & 8 deletions requirements.txt
@@ -1,27 +1,27 @@
-i https://pypi.org/simple
attrs==19.3.0
-certifi==2019.11.28
+certifi==2020.4.5.1
chardet==3.0.4
cycler==0.10.0
dataclasses==0.6 ; python_version < '3.7'
docopt==0.6.2
future==0.18.2
idna==2.9
kiwisolver==1.2.0
-llvmlite==0.31.0
+llvmlite==0.32.1
matplotlib==3.2.1
-numba==0.48.0
-numpy==1.18.2
+numba==0.49.1
+numpy==1.18.4
pandas==0.24.2
patsy==0.5.1
-pyparsing==2.4.6
+pyparsing==2.4.7
python-dateutil==2.8.1
-pytz==2019.3
+pytz==2020.1
pyyaml==5.3.1
related==0.7.2
requests==2.23.0
scipy==1.4.1
-seaborn==0.10.0
+seaborn==0.10.1
six==1.14.0
statsmodels==0.11.1
-urllib3==1.25.8
+urllib3==1.25.9
16 changes: 8 additions & 8 deletions setup.py
@@ -14,31 +14,31 @@
packages=find_packages(exclude=["tests"]),
install_requires=[
"attrs==19.3.0",
-"certifi==2019.11.28",
+"certifi==2020.4.5.1",
"chardet==3.0.4",
"cycler==0.10.0",
"dataclasses==0.6; python_version < '3.7'",
"docopt==0.6.2",
"future==0.18.2",
"idna==2.9",
"kiwisolver==1.2.0",
-"llvmlite==0.31.0",
+"llvmlite==0.32.1",
"matplotlib==3.2.1",
-"numba==0.48.0",
-"numpy==1.18.2",
+"numba==0.49.1",
+"numpy==1.18.4",
"pandas==0.24.2",
"patsy==0.5.1",
-"pyparsing==2.4.6",
+"pyparsing==2.4.7",
"python-dateutil==2.8.1",
-"pytz==2019.3",
+"pytz==2020.1",
"pyyaml==5.3.1",
"related==0.7.2",
"requests==2.23.0",
"scipy==1.4.1",
-"seaborn==0.10.0",
+"seaborn==0.10.1",
"six==1.14.0",
"statsmodels==0.11.1",
-"urllib3==1.25.8",
+"urllib3==1.25.9",
],
classifiers=["Programming Language :: Python :: 3",],
entry_points={
10 changes: 5 additions & 5 deletions tests/test_cli.py
@@ -58,7 +58,7 @@ def test_cli_build_new_detectors(caplog):
status=200)
responses.add(
responses.GET,
-"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_2xx'))&from=-168hours&until=now&format=json",
+"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_2xx'))&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -71,7 +71,7 @@ def test_cli_build_new_detectors(caplog):
status=200)
responses.add(
responses.GET,
-"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_5xx'))&from=-168hours&until=now&format=json",
+"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_5xx'))&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -84,7 +84,7 @@ def test_cli_build_new_detectors(caplog):
status=200)
responses.add(
responses.GET,
-"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_success_rate'))&from=-168hours&until=now&format=json",
+"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_success_rate'))&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -97,7 +97,7 @@ def test_cli_build_new_detectors(caplog):
status=200)
responses.add(
responses.GET,
-"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=tp90'))&from=-168hours&until=now&format=json",
+"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=tp90'))&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -160,7 +160,7 @@ def test_cli_train_metric_detectors_sparse_data(caplog):
status=200)
responses.add(
responses.GET,
-"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=tp90'))&from=-168hours&until=now&format=json",
+"http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=tp90'))&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_SPARSE_DATA_MOCK_RESPONSE,
status=200,
)
8 changes: 4 additions & 4 deletions tests/test_datasources.py
@@ -9,7 +9,7 @@
def test_graphite_query():
responses.add(
responses.GET,
-"http://graphite/render?target=sumSeries(seriesByTag('role=my-web-app','what=elb_2xx'))&from=-168hours&until=now&format=json",
+"http://graphite/render?target=sumSeries(seriesByTag('role=my-web-app','what=elb_2xx'))&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -25,7 +25,7 @@ def test_graphite_query():
def test_graphite_query_with_function_tag():
responses.add(
responses.GET,
-"http://graphite/render?target=summarize(sumSeries(seriesByTag('role=my-web-app','what=elb_2xx')),'1min','sum',false)&from=-168hours&until=now&format=json",
+"http://graphite/render?target=summarize(sumSeries(seriesByTag('role=my-web-app','what=elb_2xx')),'1min','sum',false)&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -48,7 +48,7 @@ def test_graphite_query_with_function_tag():
def test_graphite_query_with_interval():
responses.add(
responses.GET,
-"http://graphite/render?target=seriesByTag('role=my-web-app','what=elb_2xx')|summarize('1min','sum')&from=-168hours&until=now&format=json",
+"http://graphite/render?target=seriesByTag('role=my-web-app','what=elb_2xx')|summarize('1min','sum')&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=GRAPHITE_MOCK_RESPONSE,
status=200,
)
@@ -67,7 +67,7 @@ def test_graphite_query_with_interval():
def test_graphite_query_with_empty_response():
responses.add(
responses.GET,
-"http://graphite/render?target=seriesByTag('role=my-web-app','what=elb_2xx')|summarize('1min','sum')&from=-168hours&until=now&format=json",
+"http://graphite/render?target=seriesByTag('role=my-web-app','what=elb_2xx')|summarize('1min','sum')&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
json=[],
status=200,
)
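The mocked URLs above were updated to include the new maxDataPoints parameter so they keep mirroring the exact request the client now sends; depending on the responses version, a registered URL that contains a query string may be matched against the full query of the outgoing request, in which case a mock without the parameter would no longer match. A minimal sketch of the updated mock pattern, assuming the requests and responses libraries; the function name demo_query_string_matching, the URL, and the payload are illustrative only.

import requests
import responses

@responses.activate
def demo_query_string_matching():
    # The registered URL includes the full query string, maxDataPoints included,
    # so it mirrors exactly what the Graphite client sends.
    responses.add(
        responses.GET,
        "http://graphite/render?target=sumSeries(seriesByTag('app=my-web-app','what=elb_2xx'))"
        "&from=-168hours&until=now&format=json&maxDataPoints=2147483647",
        json=[{"target": "elb_2xx", "datapoints": [[1.0, 1589155200]]}],
        status=200,
    )
    resp = requests.get(
        "http://graphite/render",
        params={
            "target": "sumSeries(seriesByTag('app=my-web-app','what=elb_2xx'))",
            "from": "-168hours",
            "until": "now",
            "format": "json",
            "maxDataPoints": "2147483647",
        },
    )
    assert resp.json()[0]["datapoints"] == [[1.0, 1589155200]]

demo_query_string_matching()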