diff --git a/.gitignore b/.gitignore index a3f1e738..b3aa6b4d 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,164 @@ result.txt test_results/ results/ logs/ -.DS_Store \ No newline at end of file +.DS_Store +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/README.md b/README.md index 6e371d8d..7db0f80c 100644 --- a/README.md +++ b/README.md @@ -1,68 +1,72 @@ # Are Transformers Effective for Time Series Forecasting? (AAAI 2023) -This repo is the official Pytorch implementation of LTSF-Linear: "[Are Transformers Effective for Time Series Forecasting?](https://arxiv.org/pdf/2205.13504.pdf)". - +This repo is the official PyTorch implementation of LTSF-Linear: "[Are Transformers Effective for Time Series Forecasting?](https://arxiv.org/pdf/2205.13504.pdf)". ## Updates + - [2022/11/23] Accepted to AAAI 2023 with three strong accepts! We also release a **[benchmark for long-term time series forecasting](LTSF-Benchmark.md)** for further research. - [2022/08/25] We update our [paper](https://arxiv.org/pdf/2205.13504.pdf) with comprehensive analyses on why existing LTSF-Transformers do not work well on the LTSF problem! - [2022/08/25] Besides DLinear, we're excited to add two Linear models to the paper and this repo. Now we have an LTSF-Linear family! - Linear: Just one linear layer. - DLinear: Decomposition Linear to handle data with trend and seasonality patterns. - - NLinear: A Normalized Linear to deal with train-test set distribution shifts. See section 'LTSF-Linear' for more details. + - NLinear: A Normalized Linear to deal with train-test set distribution shifts. See section 'LTSF-Linear' for more details. -- [2022/08/25] We update some scripts of LTSF-Linear. +- [2022/08/25] We update some scripts of LTSF-Linear. - Linear, NLinear, and DLinear use the same scripts. - Some results of DLinear are slightly different now. - - ## Features + - [x] Add a [benchmark](LTSF-Benchmark.md) for long-term time series forecasting. - [x] Support both [Univariate](https://github.com/cure-lab/DLinear/tree/main/scripts/EXP-LongForecasting/DLinear/univariate) and [Multivariate](https://github.com/cure-lab/DLinear/tree/main/scripts/EXP-LongForecasting/DLinear) long-term time series forecasting. - [x] Support visualization of weights. - [x] Support scripts on different [look-back window sizes](https://github.com/cure-lab/DLinear/tree/main/scripts/EXP-LookBackWindow). Besides LTSF-Linear, we provide five significant forecasting Transformers to re-implement the results in the paper. + - [x] [Transformer](https://arxiv.org/abs/1706.03762) (NeurIPS 2017) - [x] [Informer](https://arxiv.org/abs/2012.07436) (AAAI 2021 Best paper) - [x] [Autoformer](https://arxiv.org/abs/2106.13008) (NeurIPS 2021) - [x] [Pyraformer](https://openreview.net/pdf?id=0EXmFzUn5I) (ICLR 2022 Oral) - [x] [FEDformer](https://arxiv.org/abs/2201.12740) (ICML 2022) - ## Detailed Description + We provide all experiment script files in `./scripts`: | Files | Interpretation | -| ------------- | -------------------------------------------------------| +| ------------- | -------------------------------------------------------| | EXP-LongForecasting | Long-term Time Series Forecasting Task | -| EXP-LookBackWindow | Study the impact of different look-back window sizes | +| EXP-LookBackWindow | Study the impact of different look-back window sizes | | EXP-Embedding | Study the effects of different embedding strategies | - This code is simply built on the code base of Autoformer.
We appreciate the following GitHub repos a lot for their valuable code base or datasets: -The implementation of Autoformer, Informer, Transformer is from https://github.com/thuml/Autoformer +The implementation of Autoformer, Informer, and Transformer is from <https://github.com/thuml/Autoformer> -The implementation of FEDformer is from https://github.com/MAZiqing/FEDformer +The implementation of FEDformer is from <https://github.com/MAZiqing/FEDformer> -The implementation of Pyraformer is from https://github.com/alipay/Pyraformer +The implementation of Pyraformer is from <https://github.com/alipay/Pyraformer> ## LTSF-Linear + ### LTSF-Linear family + ![image](pics/Linear.png) -LTSF-Linear is a set of linear models. +LTSF-Linear is a set of linear models. + - Linear: It is just a one-layer linear model, but it outperforms Transformers. - NLinear: **To boost the performance of Linear when there is a distribution shift in the dataset**, NLinear first subtracts the last value of the sequence from the input. Then, the input goes through a linear layer, and the subtracted part is added back before making the final prediction. The subtraction and addition in NLinear are a simple normalization for the input sequence. -- DLinear: It is a combination of a Decomposition scheme used in Autoformer and FEDformer with linear layers. It first decomposes a raw data input into a trend component by a moving average kernel and a remainder (seasonal) component. Then, two one-layer linear layers are applied to each component and we sum up the two features to get the final prediction. By explicitly handling trend, **DLinear enhances the performance of a vanilla linear when there is a clear trend in the data.** +- DLinear: It is a combination of a decomposition scheme used in Autoformer and FEDformer with linear layers. It first decomposes a raw input into a trend component, extracted by a moving-average kernel, and a remainder (seasonal) component. Then, one linear layer is applied to each component, and the two outputs are summed to get the final prediction. By explicitly handling trend, **DLinear enhances the performance of a vanilla linear when there is a clear trend in the data.** A minimal sketch of NLinear and DLinear is given after the list below. Although LTSF-Linear is simple, it has some compelling characteristics: + - An O(1) maximum signal traversing path length: The shorter the path, the better the dependencies are captured, making LTSF-Linear capable of capturing both short-range and long-range temporal relations. - High-efficiency: As each branch has only one linear layer, it costs much less memory, has far fewer parameters, and has a faster inference speed than existing Transformers. - Interpretability: After training, we can visualize the weights to gain some insight into the predicted values. - Easy-to-use: LTSF-Linear can be used easily without tuning model hyper-parameters.
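+A minimal, self-contained PyTorch sketch of NLinear and DLinear as described above (the tensor layout `[batch, seq_len, channels]` follows the repo; the kernel size and shared per-channel weights are illustrative, and `models/NLinear.py` and `models/DLinear.py` are the reference implementations):
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class NLinear(nn.Module):
+    """Subtract the last value, apply one linear layer, add the value back."""
+
+    def __init__(self, seq_len, pred_len):
+        super().__init__()
+        self.linear = nn.Linear(seq_len, pred_len)
+
+    def forward(self, x):             # x: [batch, seq_len, channels]
+        last = x[:, -1:, :].detach()  # normalization reference point
+        y = self.linear((x - last).transpose(1, 2)).transpose(1, 2)
+        return y + last               # add the subtracted part back
+
+
+class DLinear(nn.Module):
+    """Decompose into moving-average trend + remainder, one linear layer each."""
+
+    def __init__(self, seq_len, pred_len, kernel_size=25):
+        super().__init__()
+        self.kernel_size = kernel_size
+        self.linear_trend = nn.Linear(seq_len, pred_len)
+        self.linear_seasonal = nn.Linear(seq_len, pred_len)
+
+    def forward(self, x):             # x: [batch, seq_len, channels]
+        pad = (self.kernel_size - 1) // 2
+        # Replicate the endpoints so the moving average keeps the sequence length.
+        front = x[:, :1, :].repeat(1, pad, 1)
+        end = x[:, -1:, :].repeat(1, pad, 1)
+        padded = torch.cat([front, x, end], dim=1).transpose(1, 2)
+        trend = F.avg_pool1d(padded, self.kernel_size, stride=1).transpose(1, 2)
+        seasonal = x - trend          # remainder component
+        y = self.linear_seasonal(seasonal.transpose(1, 2)) \
+            + self.linear_trend(trend.transpose(1, 2))
+        return y.transpose(1, 2)      # [batch, pred_len, channels]
+```
+
### Comparison with Transformers + Univariate Forecasting: ![image](pics/Uni-results.png) Multivariate Forecasting: @@ -70,14 +74,17 @@ Multivariate Forecasting: LTSF-Linear outperforms all transformer-based methods by a large margin. ### Efficiency + ![image](pics/efficiency.png) Comparison of method efficiency with Look-back window size 96 and Forecasting steps 720 on Electricity. MACs are the number of multiply-accumulate operations. We use DLinear for comparison, since it has the highest (double) cost within the LTSF-Linear family. The inference time is averaged over 5 runs. ## Getting Started + ### Environment Requirements First, please make sure you have installed Conda.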
Then, our environment can be installed by: -``` + +```shell conda create -n LTSF_Linear python=3.6.9 conda activate LTSF_Linear -pip install -r requirements.txt +pip install -e . @@ -87,34 +94,40 @@ pip install -r requirements.txt You can obtain all the nine benchmarks from [Google Drive](https://drive.google.com/drive/folders/1ZOYpTUa82_jCcxIdTmyr0LXQfvaM9vIy) provided in Autoformer. All the datasets are well pre-processed and can be used easily. -``` +```shell mkdir dataset ``` + **Please put them in the `./dataset` directory.** Alternatively, run `sh scripts/download_datasets.sh` to download and unpack them into `./dataset` automatically. ### Training Example -- In `scripts/ `, we provide the model implementation *Dlinear/Autoformer/Informer/Transformer* + +- In `scripts/`, we provide the model implementations *DLinear/Autoformer/Informer/Transformer* - In `FEDformer/scripts/`, we provide the *FEDformer* implementation - In `Pyraformer/scripts/`, we provide the *Pyraformer* implementation For example: To train **LTSF-Linear** on the **Exchange-Rate** dataset, you can use the script `scripts/EXP-LongForecasting/Linear/exchange_rate.sh`: -``` + +```shell sh scripts/EXP-LongForecasting/Linear/exchange_rate.sh ``` + It will train DLinear by default; the results will be shown in `logs/LongForecasting`. You can specify the model name (Linear, DLinear, or NLinear) in the script. -All scripts about using LTSF-Linear on long forecasting task is in `scripts/EXP-LongForecasting/Linear/`, you can run them in a similar way. The default look-back window in scripts is 336, LTSF-Linear generally achieves better results with longer look-back window as dicussed in the paper. +All scripts for running LTSF-Linear on the long forecasting task are in `scripts/EXP-LongForecasting/Linear/`; you can run them in a similar way. The default look-back window in these scripts is 336; as discussed in the paper, LTSF-Linear generally achieves better results with a longer look-back window. Scripts for the look-back window study and long forecasting with FEDformer and Pyraformer are in `FEDformer/scripts` and `Pyraformer/scripts`, respectively. To run them, first `cd FEDformer` or `cd Pyraformer`, then invoke the scripts with `sh` in a similar way. Logs will be stored in `logs/`. -Each experiment in `scripts/EXP-LongForecasting/Linear/` takes 5min-20min. For other Transformer scripts, since we put all related experiments in one script file, directly running them will take 8 hours-1 day. You can keep the experiments you interested in and comment out the others. +Each experiment in `scripts/EXP-LongForecasting/Linear/` takes 5-20 minutes. For the other Transformer scripts, since we put all related experiments in one script file, directly running them will take 8 hours to 1 day. You can keep the experiments you are interested in and comment out the others. ### Weights Visualization + As shown in our paper, the weights of LTSF-Linear can reveal some characteristics of the data, e.g., the periodicity. As an example, we provide the weight visualization of DLinear in `weight_plot.py`. To run the visualization, you need to input the model path (model_name) of DLinear (the model directory in `./checkpoint` by default). To obtain smooth and clear patterns, you can use the initialization we provided in the file of linear models.
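+As a rough sketch of the idea behind `weight_plot.py` (the checkpoint path and layer name below are hypothetical; inspect your checkpoint's `state_dict().keys()` for the real ones):
+
+```python
+import torch
+import matplotlib.pyplot as plt
+
+# Load a trained DLinear checkpoint (path is illustrative).
+state = torch.load('checkpoints/DLinear_example/checkpoint.pth', map_location='cpu')
+
+# Each linear branch stores a [pred_len, seq_len] weight matrix; plotted as an
+# image, periodic data shows up as regularly spaced stripes in the weights.
+weight = state['Linear_Seasonal.weight']  # hypothetical key; check state.keys()
+plt.imshow(weight.numpy(), interpolation='none', aspect='auto')
+plt.xlabel('input step (look-back window)')
+plt.ylabel('output step (forecast horizon)')
+plt.savefig('dlinear_seasonal_weight.png')
+```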
![image](pics/Visualization_DLinear.png) + ## Citing If you find this repository useful for your work, please consider citing it as follows: diff --git a/ltsf_linear/__init__.py b/ltsf_linear/__init__.py new file mode 100644 index 00000000..40692a7a --- /dev/null +++ b/ltsf_linear/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/data_provider/__init__.py b/ltsf_linear/data_provider/__init__.py similarity index 100% rename from data_provider/__init__.py rename to ltsf_linear/data_provider/__init__.py diff --git a/data_provider/data_factory.py b/ltsf_linear/data_provider/data_factory.py similarity index 91% rename from data_provider/data_factory.py rename to ltsf_linear/data_provider/data_factory.py index ac9bc944..98d90788 100644 --- a/data_provider/data_factory.py +++ b/ltsf_linear/data_provider/data_factory.py @@ -1,4 +1,4 @@ -from data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred +from ltsf_linear.data_provider.data_loader import Dataset_ETT_hour, Dataset_ETT_minute, Dataset_Custom, Dataset_Pred from torch.utils.data import DataLoader data_dict = { diff --git a/data_provider/data_loader.py b/ltsf_linear/data_provider/data_loader.py similarity index 99% rename from data_provider/data_loader.py rename to ltsf_linear/data_provider/data_loader.py index 0aa5344c..e2928705 100644 --- a/data_provider/data_loader.py +++ b/ltsf_linear/data_provider/data_loader.py @@ -1,11 +1,8 @@ import os -import numpy as np import pandas as pd -import os -import torch -from torch.utils.data import Dataset, DataLoader +from torch.utils.data import Dataset from sklearn.preprocessing import StandardScaler -from utils.timefeatures import time_features +from ltsf_linear.utils.timefeatures import time_features import warnings warnings.filterwarnings('ignore') diff --git a/exp/exp_basic.py b/ltsf_linear/exp/exp_basic.py similarity index 100% rename from exp/exp_basic.py rename to ltsf_linear/exp/exp_basic.py diff --git a/exp/exp_main.py b/ltsf_linear/exp/exp_main.py similarity index 97% rename from exp/exp_main.py rename to ltsf_linear/exp/exp_main.py index 850370b6..63e15e75 100644 --- a/exp/exp_main.py +++ b/ltsf_linear/exp/exp_main.py @@ -1,8 +1,8 @@ -from data_provider.data_factory import data_provider -from exp.exp_basic import Exp_Basic -from models import Informer, Autoformer, Transformer, DLinear, Linear, NLinear -from utils.tools import EarlyStopping, adjust_learning_rate, visual, test_params_flop -from utils.metrics import metric +from ltsf_linear.data_provider.data_factory import data_provider +from ltsf_linear.exp.exp_basic import Exp_Basic +from ltsf_linear.models import Informer, Autoformer, Transformer, DLinear, Linear, NLinear +from ltsf_linear.utils.tools import EarlyStopping, adjust_learning_rate, visual, test_params_flop +from ltsf_linear.utils.metrics import metric import numpy as np import pandas as pd @@ -14,7 +14,5 @@ import time import warnings -import matplotlib.pyplot as plt -import numpy as np warnings.filterwarnings('ignore') diff --git a/exp/exp_stat.py b/ltsf_linear/exp/exp_stat.py similarity index 91% rename from exp/exp_stat.py rename to ltsf_linear/exp/exp_stat.py index 80a37be6..112a10c4 100644 --- a/exp/exp_stat.py +++ b/ltsf_linear/exp/exp_stat.py @@ -1,17 +1,14 @@ -from data_provider.data_factory import data_provider
+from ltsf_linear.exp.exp_basic import Exp_Basic +from ltsf_linear.utils.tools import visual +from ltsf_linear.utils.metrics import metric import numpy as np import torch -import torch.nn as nn -from torch import optim + import os -import time import warnings -import matplotlib.pyplot as plt -from models.Stat_models import * +from ltsf_linear.models.Stat_models import * warnings.filterwarnings('ignore') @@ -96,4 +93,4 @@ def test(self, setting, test=0): np.save(folder_path + 'pred.npy', preds) np.save(folder_path + 'true.npy', trues) # np.save(folder_path + 'x.npy', inputx) - return \ No newline at end of file + return diff --git a/layers/AutoCorrelation.py b/ltsf_linear/layers/AutoCorrelation.py similarity index 98% rename from layers/AutoCorrelation.py rename to ltsf_linear/layers/AutoCorrelation.py index a6fb63c9..5ae7bdff 100644 --- a/layers/AutoCorrelation.py +++ b/ltsf_linear/layers/AutoCorrelation.py @@ -1,12 +1,6 @@ import torch import torch.nn as nn -import torch.nn.functional as F -import matplotlib.pyplot as plt -import numpy as np import math -from math import sqrt -import os - class AutoCorrelation(nn.Module): """ diff --git a/layers/Autoformer_EncDec.py b/ltsf_linear/layers/Autoformer_EncDec.py similarity index 100% rename from layers/Autoformer_EncDec.py rename to ltsf_linear/layers/Autoformer_EncDec.py diff --git a/layers/Embed.py b/ltsf_linear/layers/Embed.py similarity index 100% rename from layers/Embed.py rename to ltsf_linear/layers/Embed.py diff --git a/layers/SelfAttention_Family.py b/ltsf_linear/layers/SelfAttention_Family.py similarity index 97% rename from layers/SelfAttention_Family.py rename to ltsf_linear/layers/SelfAttention_Family.py index c8138e28..9f4f5c96 100644 --- a/layers/SelfAttention_Family.py +++ b/ltsf_linear/layers/SelfAttention_Family.py @@ -1,14 +1,9 @@ import torch import torch.nn as nn -import torch.nn.functional as F - -import matplotlib.pyplot as plt import numpy as np -import math from math import sqrt -from utils.masking import TriangularCausalMask, ProbMask -import os +from ltsf_linear.utils.masking import TriangularCausalMask, ProbMask class FullAttention(nn.Module): diff --git a/layers/Transformer_EncDec.py b/ltsf_linear/layers/Transformer_EncDec.py similarity index 100% rename from layers/Transformer_EncDec.py rename to ltsf_linear/layers/Transformer_EncDec.py diff --git a/models/Autoformer.py b/ltsf_linear/models/Autoformer.py similarity index 94% rename from models/Autoformer.py rename to ltsf_linear/models/Autoformer.py index 8e66d01a..aa346875 100644 --- a/models/Autoformer.py +++ b/ltsf_linear/models/Autoformer.py @@ -1,9 +1,9 @@ import torch import torch.nn as nn import torch.nn.functional as F -from layers.Embed import DataEmbedding, DataEmbedding_wo_pos,DataEmbedding_wo_pos_temp,DataEmbedding_wo_temp -from layers.AutoCorrelation import AutoCorrelation, AutoCorrelationLayer -from layers.Autoformer_EncDec import Encoder, Decoder, EncoderLayer, DecoderLayer, my_Layernorm, series_decomp +from ltsf_linear.layers.Embed import DataEmbedding, DataEmbedding_wo_pos,DataEmbedding_wo_pos_temp,DataEmbedding_wo_temp +from ltsf_linear.layers.AutoCorrelation import AutoCorrelation, AutoCorrelationLayer +from ltsf_linear.layers.Autoformer_EncDec import Encoder, Decoder, EncoderLayer, DecoderLayer, my_Layernorm, series_decomp import math import numpy as np diff --git a/models/DLinear.py b/ltsf_linear/models/DLinear.py similarity index 100% rename from models/DLinear.py rename to ltsf_linear/models/DLinear.py diff --git a/models/Informer.py b/ltsf_linear/models/Informer.py similarity index 92%
rename from models/Informer.py rename to ltsf_linear/models/Informer.py index ef0be31d..0f22c096 100644 --- a/models/Informer.py +++ b/ltsf_linear/models/Informer.py @@ -1,10 +1,10 @@ import torch import torch.nn as nn import torch.nn.functional as F -from utils.masking import TriangularCausalMask, ProbMask -from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer -from layers.SelfAttention_Family import FullAttention, ProbAttention, AttentionLayer -from layers.Embed import DataEmbedding,DataEmbedding_wo_pos,DataEmbedding_wo_temp,DataEmbedding_wo_pos_temp +from ltsf_linear.utils.masking import TriangularCausalMask, ProbMask +from ltsf_linear.layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer +from ltsf_linear.layers.SelfAttention_Family import FullAttention, ProbAttention, AttentionLayer +from ltsf_linear.layers.Embed import DataEmbedding,DataEmbedding_wo_pos,DataEmbedding_wo_temp,DataEmbedding_wo_pos_temp import numpy as np diff --git a/models/Linear.py b/ltsf_linear/models/Linear.py similarity index 100% rename from models/Linear.py rename to ltsf_linear/models/Linear.py diff --git a/models/NLinear.py b/ltsf_linear/models/NLinear.py similarity index 100% rename from models/NLinear.py rename to ltsf_linear/models/NLinear.py diff --git a/models/Stat_models.py b/ltsf_linear/models/Stat_models.py similarity index 100% rename from models/Stat_models.py rename to ltsf_linear/models/Stat_models.py diff --git a/models/Transformer.py b/ltsf_linear/models/Transformer.py similarity index 93% rename from models/Transformer.py rename to ltsf_linear/models/Transformer.py index c55824d0..785f8245 100644 --- a/models/Transformer.py +++ b/ltsf_linear/models/Transformer.py @@ -1,9 +1,9 @@ import torch import torch.nn as nn import torch.nn.functional as F -from layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer -from layers.SelfAttention_Family import FullAttention, AttentionLayer -from layers.Embed import DataEmbedding,DataEmbedding_wo_pos,DataEmbedding_wo_temp,DataEmbedding_wo_pos_temp +from ltsf_linear.layers.Transformer_EncDec import Decoder, DecoderLayer, Encoder, EncoderLayer, ConvLayer +from ltsf_linear.layers.SelfAttention_Family import FullAttention, AttentionLayer +from ltsf_linear.layers.Embed import DataEmbedding,DataEmbedding_wo_pos,DataEmbedding_wo_temp,DataEmbedding_wo_pos_temp import numpy as np diff --git a/utils/masking.py b/ltsf_linear/utils/masking.py similarity index 100% rename from utils/masking.py rename to ltsf_linear/utils/masking.py diff --git a/utils/metrics.py b/ltsf_linear/utils/metrics.py similarity index 100% rename from utils/metrics.py rename to ltsf_linear/utils/metrics.py diff --git a/utils/timefeatures.py b/ltsf_linear/utils/timefeatures.py similarity index 100% rename from utils/timefeatures.py rename to ltsf_linear/utils/timefeatures.py diff --git a/utils/tools.py b/ltsf_linear/utils/tools.py similarity index 100% rename from utils/tools.py rename to ltsf_linear/utils/tools.py diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..a76a9c97 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,27 @@ +[project] +name = "ltsf-linear" +authors = [ + {name = "Ailing Zeng", email = "alzeng@cse.cuhk.edu.hk"}, + {name = "Muxi Chen", email = "mxchen21@cse.cuhk.edu.hk"}, + {name = "Lei Zhang", email = "leizhang@idea.edu.cn"}, + {name = "Qiang Xu", email = "qxu@cse.cuhk.edu.hk"}, +] +requires-python = ">=3.6,<3.10" +dependencies = [ + "numpy",
"matplotlib", + "pandas", + "scikit-learn", + "torch==1.9.0", +] +dynamic = ["version"] + +[tool.setuptools] +packages = ["ltsf_linear"] + +[tool.setuptools.dynamic] +version = {attr = "ltsf_linear.__version__"} + +[project.optional-dependencies] +stats=["pmdarima"] +all = ["ltsf_linear[stats]"] diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 2bb29e12..00000000 --- a/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -numpy -matplotlib -pandas -scikit-learn -torch==1.9.0 \ No newline at end of file diff --git a/run_longExp.py b/run_longExp.py index d672f96f..212f1283 100644 --- a/run_longExp.py +++ b/run_longExp.py @@ -1,7 +1,6 @@ import argparse -import os import torch -from exp.exp_main import Exp_Main +from ltsf_linear.exp.exp_main import Exp_Main import random import numpy as np diff --git a/run_stat.py b/run_stat.py index 86fed311..8e9cb7a6 100644 --- a/run_stat.py +++ b/run_stat.py @@ -1,7 +1,6 @@ import argparse -import os import torch -from exp.exp_stat import Exp_Main +from ltsf_linear.exp.exp_stat import Exp_Main import random import numpy as np diff --git a/scripts/EXP-Embedding/Formers_Embedding.sh b/scripts/EXP-Embedding/Formers_Embedding.sh old mode 100644 new mode 100755 index d9c49931..4ad5bf81 --- a/scripts/EXP-Embedding/Formers_Embedding.sh +++ b/scripts/EXP-Embedding/Formers_Embedding.sh @@ -4,13 +4,9 @@ # 3: value embedding + positional embedding # 4: value embedding -if [ ! -d "./logs" ]; then - mkdir ./logs -fi - -if [ ! -d "./logs/Embedding" ]; then - mkdir ./logs/Embedding -fi +mkdir -p ./logs +mkdir -p ./logs/Embedding +./scripts/download_datasets.sh for embed_type in 1 2 3 4 do @@ -211,4 +207,4 @@ do --itr 1 --embed_type $embed_type >logs/Embedding/$embed_type'_'$model_name'_ili_'$pred_len.log done done -done \ No newline at end of file +done diff --git a/scripts/EXP-LongForecasting/Formers_Long.sh b/scripts/EXP-LongForecasting/Formers_Long.sh old mode 100644 new mode 100755 index de53c43f..43f58614 --- a/scripts/EXP-LongForecasting/Formers_Long.sh +++ b/scripts/EXP-LongForecasting/Formers_Long.sh @@ -6,7 +6,7 @@ fi if [ ! -d "./logs/LongForecasting" ]; then mkdir ./logs/LongForecasting fi - +./scripts/download_datasets.sh for model_name in Autoformer Informer Transformer do for pred_len in 96 192 336 720 diff --git a/scripts/EXP-LongForecasting/Linear-I.sh b/scripts/EXP-LongForecasting/Linear-I.sh old mode 100644 new mode 100755 index 925c8968..189341b0 --- a/scripts/EXP-LongForecasting/Linear-I.sh +++ b/scripts/EXP-LongForecasting/Linear-I.sh @@ -5,6 +5,7 @@ fi if [ ! -d "./logs/LongForecasting" ]; then mkdir ./logs/LongForecasting fi +./scripts/download_datasets.sh seq_len=336 model_name=NLinear for pred_len in 96 192 336 729 diff --git a/scripts/EXP-LongForecasting/Stat_Long.sh b/scripts/EXP-LongForecasting/Stat_Long.sh old mode 100644 new mode 100755 index ad488cb6..518aeff3 --- a/scripts/EXP-LongForecasting/Stat_Long.sh +++ b/scripts/EXP-LongForecasting/Stat_Long.sh @@ -8,6 +8,7 @@ fi if [ ! 
-d "./logs/LongForecasting" ]; then mkdir ./logs/LongForecasting fi +./scripts/download_datasets.sh # for model_name in Naive GBRT ARIMA SARIMA for model_name in Naive @@ -139,4 +140,4 @@ for model_name in Naive --des 'Exp' \ --itr 1 >logs/LongForecasting/$model_name'_ili_'$pred_len.log done -done \ No newline at end of file +done diff --git a/scripts/EXP-LookBackWindow/Formers_LookBackWindow.sh b/scripts/EXP-LookBackWindow/Formers_LookBackWindow.sh old mode 100644 new mode 100755 index f4ccd7ff..ca875791 --- a/scripts/EXP-LookBackWindow/Formers_LookBackWindow.sh +++ b/scripts/EXP-LookBackWindow/Formers_LookBackWindow.sh @@ -5,6 +5,7 @@ fi if [ ! -d "./logs/LookBackWindow" ]; then mkdir ./logs/LookBackWindow fi +./scripts/download_datasets.sh for model_name in Autoformer Informer Transformer do @@ -216,4 +217,4 @@ do --itr 1 >logs/LookBackWindow/$model_name'_ili'_$seq_len'_'$pred_len.log done done -done \ No newline at end of file +done diff --git a/scripts/EXP-LookBackWindow/Linear_DiffWindow.sh b/scripts/EXP-LookBackWindow/Linear_DiffWindow.sh old mode 100644 new mode 100755 index 5e2fd854..c3b45085 --- a/scripts/EXP-LookBackWindow/Linear_DiffWindow.sh +++ b/scripts/EXP-LookBackWindow/Linear_DiffWindow.sh @@ -1,5 +1,5 @@ model_name=DLinear - +./scripts/download_datasets.sh for pred_len in 24 720 do for seq_len in 48 72 96 120 144 168 192 336 504 672 720 @@ -145,4 +145,4 @@ do --des 'Exp' \ --itr 1 --batch_size 32 --learning_rate 0.05 >logs/LookBackWindow/$model_name'_'ili_$seq_len'_'$pred_len.log done -done \ No newline at end of file +done diff --git a/scripts/download_datasets.sh b/scripts/download_datasets.sh new file mode 100755 index 00000000..cd753450 --- /dev/null +++ b/scripts/download_datasets.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# Downlad the datasets from google drive if not already present +if [ ! -d ./dataset ]; then + pip freeze | grep -q "^gdown" || pip install gdown + gdown "1alE33S1GmP5wACMXaLu50rDIoVzBM4ik" + unzip -n -j all_six_datasets.zip -d dataset + rm dataset/.[!.]* + rm all_six_datasets.zip +fi diff --git a/weight_plot.py b/weight_plot.py index 2e23bb4e..d1614db7 100644 --- a/weight_plot.py +++ b/weight_plot.py @@ -1,5 +1,4 @@ import torch -import numpy as np import os import matplotlib.pyplot as plt