From 57d0ba9018409fb74116d44f07ac5c6f25c463ba Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 13 Aug 2025 10:08:07 -0700 Subject: [PATCH 01/83] added auto-publish workflow --- .github/auto-publish.yaml | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 .github/auto-publish.yaml diff --git a/.github/auto-publish.yaml b/.github/auto-publish.yaml new file mode 100644 index 0000000..2fbea5a --- /dev/null +++ b/.github/auto-publish.yaml @@ -0,0 +1,33 @@ +# Automatically builds and publishes Python package to PyPI when a GitHub release is published. +# Uses build library to create distribution files from pyproject.toml configuration, +# then uploads to PyPI using official PyPA GitHub Action with stored API token. +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: Upload Package to PyPI + +on: + release: + types: [published] + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install Building Dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade build + - name: Build package + run: | + python -m build + - name: pypi-publish + uses: pypa/gh-action-pypi-publish@v1.12.4 + with: + verbose: true + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }} \ No newline at end of file From 3d4664fb4637d166ffee8182136106b22a9ab2fd Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 13 Aug 2025 14:45:21 -0700 Subject: [PATCH 02/83] added pyproject.toml --- .gitignore | 1 + pyproject.toml | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 pyproject.toml diff --git a/.gitignore b/.gitignore index 45fd48d..de0bef9 100755 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ GuPPy/.DS_Store z-score_methods.tgn GuPPy/runFiberPhotometryAnalysis.ipynb .vscode/ +*.egg-info/ diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..438602f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,48 @@ +[build-system] +requires = ["setuptools>=64"] +build-backend = "setuptools.build_meta" + + +[project] +name = "guppy" +version = "0.1.0" +description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." 
+readme = "README.md" +authors = [ + { name = "Venus Sherathiya", email = "venus.sherathiya@northwestern.edu" }, + { name = "Michael Schaid" }, + { name = "Jillian Seiler" }, + { name = "Gabriela Lopez" }, + { name = "Talia Lerner" }, + { name = "Paul Adkisson" }, + { name = "Luiz Tauffer" }, +] + + +license = { file = "LICENSE" } +keywords = [ + "neuroscience", + "fiber-photometry", + "calcium-imaging", + "data-analysis", + "gui", + "visualization", + "signal-processing", +] +classifiers = [ + "Intended Audience :: Science/Research", + "Programming Language :: Python :: 3.12", + "Operating System :: POSIX :: Linux", + "Operating System :: Microsoft :: Windows", + "Operating System :: MacOS", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + +] +dependencies = [] # TODO: add dependencies + + +[project.urls] +"Homepage" = "https://github.com/LernerLab/GuPPy" + +[tool.setuptools.packages.find] +where = ["GuPPy"] \ No newline at end of file From f376da2d03bfcf7bb89ab09365b6ee329d0508f2 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 13 Aug 2025 16:20:43 -0700 Subject: [PATCH 03/83] added sparse_environment.yaml --- new_spec_file_mac.txt | 196 ++++++++++++++++++++++++++++++++++++++++ old_spec_file_mac.txt | 167 ++++++++++++++++++++++++++++++++++ sparse_environment.yaml | 23 +++++ 3 files changed, 386 insertions(+) create mode 100644 new_spec_file_mac.txt create mode 100644 old_spec_file_mac.txt create mode 100644 sparse_environment.yaml diff --git a/new_spec_file_mac.txt b/new_spec_file_mac.txt new file mode 100644 index 0000000..0f8dade --- /dev/null +++ b/new_spec_file_mac.txt @@ -0,0 +1,196 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: osx-64 +appnope=0.1.3=pyhd8ed1ab_0 +argon2-cffi=21.1.0=py36hfa26744_0 +async_generator=1.10=pyhd8ed1ab_1 +attrs=22.2.0=pyh71513ae_0 +backcall=0.2.0=pyh9f0ad1d_0 +backports=1.0=pyhd8ed1ab_4 +backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0 +bleach=6.1.0=pyhd8ed1ab_0 +blosc=1.21.5=hafa3907_1 +bokeh=2.3.3=py36h79c6626_0 +brotlipy=0.7.0=py36hfa26744_1001 +bzip2=1.0.8=hfdf4475_7 +ca-certificates=2025.8.3=hbd8a1cb_0 +cairo=1.16.0=he43a7df_1008 +cairocffi=1.2.0=pyhd8ed1ab_0 +cairosvg=2.5.2=pyhd8ed1ab_0 +certifi=2021.5.30=py36hecd8cb5_0 +cffi=1.14.6=py36h66709a0_0 +charset-normalizer=2.1.1=pyhd8ed1ab_0 +click=7.1.2=pyh9f0ad1d_0 +cloudpickle=2.2.1=pyhd8ed1ab_0 +colorama=0.4.5=pyhd8ed1ab_0 +colorcet=3.0.1=py_0 +conda=4.10.3=py36h79c6626_2 +conda-package-handling=1.7.3=py36hfa26744_0 +conda-tree=1.1.0=pyhd8ed1ab_2 +contextvars=2.4=py_0 +cryptography=35.0.0=py36ha6a00b0_0 +cssselect2=0.2.1=pyh9f0ad1d_1 +cycler=0.11.0=pyhd8ed1ab_0 +cytoolz=0.11.0=py36hfa26744_3 +dask=2021.3.0=pyhd8ed1ab_0 +dask-core=2021.3.0=pyhd8ed1ab_0 +datashader=0.11.0=py_0 +datashape=0.5.4=py_1 +decorator=5.1.1=pyhd8ed1ab_0 +defusedxml=0.7.1=pyhd8ed1ab_0 +distributed=2021.3.0=py36h79c6626_0 +entrypoints=0.4=pyhd8ed1ab_0 +firefox=78.0esr=h4a8c4bd_0 +fontconfig=2.13.94=h10f422b_0 +freetype=2.12.1=h60636b9_2 +fsspec=2023.1.0=pyhd8ed1ab_0 +geckodriver=0.26.0=h4a8c4bd_0 +gettext=0.25.1=he52a196_1 +gettext-tools=0.25.1=h3184127_1 +h5py=2.10.0=py36h3134771_0 +hdf5=1.10.4=nompi_h0cbb7df_1106 +heapdict=1.0.1=py_0 +holoviews=1.14.9=py_0 +hvplot=0.6.0=py_0 +icu=68.2=he49afe7_0 +idna=3.10=pyhd8ed1ab_0 +immutables=0.16=py36hfa26744_0 +importlib-metadata=4.8.1=py36h79c6626_0 +ipykernel=5.5.5=py36h495a4c6_0 +ipython=7.16.1=py36h9cf137f_2 +ipython_genutils=0.2.0=pyhd8ed1ab_1 +jedi=0.17.2=py36h79c6626_1 +jinja2=3.0.3=pyhd8ed1ab_0 
+jpeg=9e=hb7f2c08_3 +jsonschema=4.1.2=pyhd8ed1ab_0 +jupyter_client=7.1.2=pyhd8ed1ab_0 +jupyter_core=4.8.1=py36h79c6626_0 +jupyterlab_pygments=0.1.2=pyh9f0ad1d_0 +kiwisolver=1.3.1=py36hc61eee1_1 +lcms2=2.12=h577c468_0 +lerc=3.0=he49afe7_0 +libasprintf=0.25.1=h3184127_1 +libasprintf-devel=0.25.1=h3184127_1 +libblas=3.9.0=15_osx64_openblas +libcblas=3.9.0=15_osx64_openblas +libcxx=20.1.8=h3d58e20_1 +libdeflate=1.10=h0d85af4_0 +libffi=3.3=h046ec9c_2 +libgettextpo=0.25.1=h3184127_1 +libgettextpo-devel=0.25.1=h3184127_1 +libgfortran=3.0.1=0 +libglib=2.68.4=hd556434_0 +libiconv=1.18=h57a12c2_2 +libintl=0.25.1=h3184127_1 +libintl-devel=0.25.1=h3184127_1 +liblapack=3.9.0=15_osx64_openblas +libllvm8=8.0.1=h770b8ee_0 +liblzma=5.8.1=hd471939_2 +liblzma-devel=5.8.1=hd471939_2 +libopenblas=0.3.20=h9a5756b_0 +libpng=1.6.43=h92b6c6a_0 +libsodium=1.0.18=hbcb3906_1 +libsqlite=3.46.0=h1b8f9f3_0 +libtiff=4.3.0=hfca7e8f_4 +libwebp-base=1.6.0=hb807250_0 +libxml2=2.9.12=h93ec3fd_0 +libzlib=1.2.13=h87427d6_6 +llvmlite=0.31.0=py36hde82470_1 +locket=1.0.0=pyhd8ed1ab_0 +lz4-c=1.9.4=hf0c8a7f_0 +lzo=2.10=h4132b18_1002 +markdown=3.1.1=py36_0 +markupsafe=2.0.1=py36hfa26744_0 +matplotlib=3.3.4=py36h79c6626_0 +matplotlib-base=3.3.4=py36h4ea959b_0 +mistune=0.8.4=pyh1a96a4e_1006 +mock=5.1.0=pyhd8ed1ab_0 +msgpack-python=1.0.2=py36hc61eee1_1 +multipledispatch=0.6.0=pyhd8ed1ab_1 +nbclient=0.5.9=pyhd8ed1ab_0 +nbconvert=6.0.7=py36h79c6626_3 +nbformat=5.1.3=pyhd8ed1ab_0 +ncurses=6.5=h0622a9a_3 +nest-asyncio=1.6.0=pyhd8ed1ab_0 +networkx=2.5=py_0 +notebook=6.3.0=py36h79c6626_0 +numba=0.48.0=py36h4f17bb1_0 +numexpr=2.7.3=py36he43235d_0 +numpy=1.19.5=py36h08b5fde_2 +olefile=0.46=pyh9f0ad1d_1 +openjpeg=2.5.0=h69f46e4_0 +openssl=1.1.1w=hca72f7f_0 +packaging=21.3=pyhd8ed1ab_0 +pandas=1.1.5=py36h2be6da3_0 +pandoc=2.19.2=h694c41f_2 +pandocfilters=1.5.0=pyhd8ed1ab_0 +panel=0.12.1=py_0 +param=1.13.0=py_0 +parso=0.7.1=pyh9f0ad1d_0 +partd=1.2.0=pyhd8ed1ab_0 +pcre=8.45=he49afe7_0 +pexpect=4.8.0=pyh1a96a4e_2 +phantomjs=2.1.1=1 +pickleshare=0.7.5=py_1003 +pillow=8.3.2=py36h950f3bb_0 +pip=21.3.1=pyhd8ed1ab_0 +pixman=0.46.4=ha059160_1 +prometheus_client=0.17.1=pyhd8ed1ab_0 +prompt-toolkit=3.0.36=pyha770c72_0 +psutil=5.8.0=py36hfa26744_1 +ptyprocess=0.7.0=pyhd3deb0d_0 +pycosat=0.6.3=py36hfa26744_1006 +pycparser=2.21=pyhd8ed1ab_0 +pyct=0.4.8=py_0 +pyct-core=0.4.8=py_0 +pygments=2.14.0=pyhd8ed1ab_0 +pyobjc-core=7.3=py36hfa26744_2 +pyobjc-framework-cocoa=7.3=py36h79c6626_0 +pyopenssl=22.0.0=pyhd8ed1ab_1 +pyparsing=3.1.4=pyhd8ed1ab_0 +pyrsistent=0.17.3=py36hfa26744_2 +pysocks=1.7.1=py36h79c6626_3 +pytables=3.6.1=py36h5bccee9_0 +python=3.6.10=hf48f09d_2 +python-dateutil=2.8.2=pyhd8ed1ab_0 +python_abi=3.6=2_cp36m +pytz=2023.3.post1=pyhd8ed1ab_0 +pyviz_comms=3.0.0=py_0 +pyyaml=5.4.1=py36hfa26744_1 +pyzmq=22.3.0=py36h50cd92c_0 +readline=8.2=h7cca4af_2 +requests=2.28.1=pyhd8ed1ab_0 +ruamel_yaml=0.15.80=py36hfa26744_1004 +scipy=1.3.1=py36hab3da7d_2 +selenium=3.141.0=py36h1de35cc_0 +send2trash=1.8.2=pyhd1c38e8_0 +setuptools=58.0.4=py36h79c6626_2 +six=1.16.0=pyh6c4a22f_0 +snappy=1.2.2=h25c286d_0 +sortedcontainers=2.4.0=pyhd8ed1ab_0 +sqlite=3.46.0=h28673e1_0 +tblib=1.7.0=pyhd8ed1ab_0 +terminado=0.12.1=py36h79c6626_0 +testpath=0.6.0=pyhd8ed1ab_0 +tinycss2=1.4.0=pyhd8ed1ab_0 +tk=8.6.13=h1abcd95_1 +toolz=0.12.0=pyhd8ed1ab_0 +tornado=6.1=py36hfa26744_1 +tqdm=4.65.0=pyhd8ed1ab_0 +traitlets=4.3.3=pyhd8ed1ab_2 +typing-extensions=4.1.1=hd8ed1ab_0 +typing_extensions=4.1.1=pyha770c72_0 +urllib3=1.26.15=pyhd8ed1ab_0 +wcwidth=0.2.10=pyhd8ed1ab_0 
+webencodings=0.5.1=pyhd8ed1ab_2 +wheel=0.37.1=pyhd8ed1ab_0 +xarray=0.18.2=pyhd8ed1ab_0 +xz=5.8.1=h357f2ed_2 +xz-gpl-tools=5.8.1=h357f2ed_2 +xz-tools=5.8.1=hd471939_2 +yaml=0.2.5=h4132b18_3 +zeromq=4.3.5=h93d8f39_0 +zict=2.0.0=py_0 +zipp=3.6.0=pyhd8ed1ab_0 +zlib=1.2.13=h87427d6_6 +zstd=1.5.6=h915ae27_0 diff --git a/old_spec_file_mac.txt b/old_spec_file_mac.txt new file mode 100644 index 0000000..4b2e812 --- /dev/null +++ b/old_spec_file_mac.txt @@ -0,0 +1,167 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: osx-64 +appnope=0.1.0=py36hf537a9a_0 +attrs=19.3.0=py_0 +backcall=0.2.0=py_0 +blas=1.0=mkl +bleach=3.1.5=py_0 +blosc=1.19.0=hab81aa3_0 +bokeh=2.3.1=py36hecd8cb5_0 +brotlipy=0.7.0=py36haf1e3a3_1000 +bzip2=1.0.8=h1de35cc_0 +ca-certificates=2020.12.5=h033912b_0 +cairo=1.14.12=he6fea26_5 +cairocffi=1.2.0=pyhd8ed1ab_0 +cairosvg=2.5.2=pyhd8ed1ab_0 +certifi=2020.12.5=py36h79c6626_1 +cffi=1.14.0=py36hc512035_1 +chardet=3.0.4=py36_1003 +click=7.1.2=py_0 +cloudpickle=1.5.0=py_0 +colorcet=2.0.2=py_0 +contextvars=2.4=py_0 +cryptography=2.9.2=py36ha12b0ac_0 +cssselect2=0.2.1=pyh9f0ad1d_1 +cycler=0.10.0=py36hfc81398_0 +cytoolz=0.10.1=py36h1de35cc_0 +dask=2.20.0=py_0 +dask-core=2.20.0=py_0 +datashader=0.11.0=py_0 +datashape=0.5.4=py36_1 +decorator=4.4.2=py_0 +defusedxml=0.6.0=py_0 +distributed=2.20.0=py36_0 +entrypoints=0.3=py36_0 +firefox=78.0esr=h4a8c4bd_0 +fontconfig=2.13.1=h1027ab8_1000 +freetype=2.10.2=ha233b18_0 +fsspec=0.7.4=py_0 +geckodriver=0.26.0=h4a8c4bd_0 +gettext=0.19.8.1=h1f1d5ed_1 +glib=2.55.0=0 +h5py=2.10.0=py36h3134771_0 +hdf5=1.10.4=hfa1e0ec_0 +heapdict=1.0.1=py_0 +holoviews=1.14.2=py_0 +hvplot=0.6.0=py_0 +icu=58.2=h0a44026_1000 +idna=2.10=py_0 +immutables=0.14=py36haf1e3a3_0 +importlib-metadata=1.7.0=py36_0 +importlib_metadata=1.7.0=0 +intel-openmp=2019.4=233 +ipykernel=5.3.3=py36h5ca1d4c_0 +ipython=7.16.1=py36h5ca1d4c_0 +ipython_genutils=0.2.0=py36_0 +jedi=0.17.1=py36_0 +jinja2=2.11.2=py_0 +jpeg=9b=he5867d9_2 +jsonschema=3.2.0=py36_0 +jupyter_client=6.1.6=py_0 +jupyter_core=4.6.3=py36_0 +kiwisolver=1.2.0=py36h04f5b5a_0 +lcms2=2.11=h92f6f08_0 +libcxx=10.0.0=1 +libedit=3.1.20191231=h1de35cc_1 +libffi=3.3=hb1e8313_2 +libgfortran=3.0.1=h93005f0_2 +libiconv=1.15=h0b31af3_1006 +libpng=1.6.37=ha441bb4_0 +libsodium=1.0.18=h1de35cc_0 +libtiff=4.1.0=hcb84e12_1 +libxml2=2.9.9=hd80cff7_2 +llvmlite=0.31.0=py36h1341992_0 +locket=0.2.0=py36hca03003_1 +lz4-c=1.9.2=hb1e8313_1 +lzo=2.10=h1de35cc_2 +markdown=3.1.1=py36_0 +markupsafe=1.1.1=py36h1de35cc_0 +matplotlib=3.2.2=0 +matplotlib-base=3.2.2=py36h5670ca0_0 +mistune=0.8.4=py36h1de35cc_0 +mkl=2019.4=233 +mkl-service=2.3.0=py36hfbe908c_0 +mkl_fft=1.1.0=py36hc64f4ea_0 +mkl_random=1.1.1=py36h959d312_0 +mock=4.0.2=py_0 +msgpack-python=1.0.0=py36h04f5b5a_1 +multipledispatch=0.6.0=py36_0 +nbconvert=5.6.1=py36_0 +nbformat=5.0.7=py_0 +ncurses=6.2=h0a44026_1 +notebook=6.0.3=py36_0 +numba=0.48.0=py36h6c726b0_0 +numexpr=2.7.1=py36hce01a72_0 +numpy=1.18.5=py36h55a193a_0 +numpy-base=1.18.5=py36h3304bdc_0 +olefile=0.46=py36_0 +openssl=1.1.1k=h0d85af4_0 +packaging=20.4=py_0 +pandas=1.0.5=py36h959d312_0 +pandoc=2.10=0 +pandocfilters=1.4.2=py36_1 +panel=0.11.0=py_0 +param=1.10.1=py_0 +parso=0.7.0=py_0 +partd=1.1.0=py_0 +pcre=8.44=h4a8c4bd_0 +pexpect=4.8.0=py36_0 +phantomjs=2.1.1=1 +pickleshare=0.7.5=py36_0 +pillow=7.2.0=py36ha54b6ba_0 +pip=20.1.1=py36_1 +pixman=0.34.0=h1de35cc_1003 +prometheus_client=0.8.0=py_0 +prompt-toolkit=3.0.5=py_0 +psutil=5.7.0=py36h1de35cc_0 +ptyprocess=0.6.0=py36_0 
+pycparser=2.20=py_2 +pyct=0.4.6=py_0 +pyct-core=0.4.6=py_0 +pygments=2.6.1=py_0 +pyopenssl=19.1.0=py_1 +pyparsing=2.4.7=py_0 +pyrsistent=0.16.0=py36h1de35cc_0 +pysocks=1.7.1=py36_0 +pytables=3.6.1=py36h5bccee9_0 +python=3.6.10=hf48f09d_2 +python-dateutil=2.8.1=py_0 +python_abi=3.6=1_cp36m +pytz=2020.1=py_0 +pyviz_comms=0.7.6=py_0 +pyyaml=5.3.1=py36haf1e3a3_1 +pyzmq=19.0.1=py36hb1e8313_1 +readline=8.0=h1de35cc_0 +requests=2.24.0=py_0 +scipy=1.5.0=py36h912ce22_0 +selenium=3.141.0=py36h1de35cc_0 +send2trash=1.5.0=py36_0 +setuptools=49.2.0=py36_0 +six=1.15.0=py_0 +snappy=1.1.8=hb1e8313_0 +sortedcontainers=2.2.2=py_0 +sqlite=3.32.3=hffcf06c_0 +tbb=2020.0=h04f5b5a_0 +tblib=1.6.0=py_0 +terminado=0.8.3=py36_0 +testpath=0.4.4=py_0 +tinycss2=1.1.0=pyhd8ed1ab_0 +tk=8.6.10=hb0a8c7a_0 +toolz=0.10.0=py_0 +tornado=6.0.4=py36h1de35cc_1 +tqdm=4.47.0=py_0 +traitlets=4.3.3=py36_0 +typing_extensions=3.7.4.2=py_0 +urllib3=1.25.9=py_0 +wcwidth=0.2.5=py_0 +webencodings=0.5.1=py36_1 +wheel=0.34.2=py36_0 +xarray=0.16.0=py_0 +xz=5.2.5=h1de35cc_0 +yaml=0.2.5=haf1e3a3_0 +zeromq=4.3.2=hb1e8313_2 +zict=2.0.0=py_0 +zipp=3.1.0=py_0 +zlib=1.2.11=h1de35cc_3 +zstd=1.4.5=h41d2c2f_0 diff --git a/sparse_environment.yaml b/sparse_environment.yaml new file mode 100644 index 0000000..2d2972d --- /dev/null +++ b/sparse_environment.yaml @@ -0,0 +1,23 @@ +name: new_guppy_env +channels: + - pyviz + - conda-forge + - anaconda + - defaults +dependencies: + - python=3.6.10=hf48f09d_2 + # high-level (leaf) dependencies + - hvplot=0.6.0=py_0 + - geckodriver=0.26.0=h4a8c4bd_0 + - datashader=0.11.0=py_0 + - phantomjs=2.1.1=1 + - firefox=78.0esr=h4a8c4bd_0 + - h5py=2.10.0=py36h3134771_0 + - selenium=3.141.0=py36h1de35cc_0 + - pytables=3.6.1=py36h5bccee9_0 + - cairosvg=2.5.2=pyhd8ed1ab_0 + # fixed dependencies + - markdown=3.1.1=py36_0 # from __future__ import annotations + # Debugging dependencies + - conda-tree +prefix: /opt/anaconda3/envs/new_guppy_env From f51bc39d2782ac8a950dec85d6ab88784e206a77 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 14 Aug 2025 09:18:37 -0700 Subject: [PATCH 04/83] pinned openssl and certifi --- sparse_environment.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sparse_environment.yaml b/sparse_environment.yaml index 2d2972d..2db0248 100644 --- a/sparse_environment.yaml +++ b/sparse_environment.yaml @@ -18,6 +18,8 @@ dependencies: - cairosvg=2.5.2=pyhd8ed1ab_0 # fixed dependencies - markdown=3.1.1=py36_0 # from __future__ import annotations + - openssl=1.1.1w=hca72f7f_0 # Needed otherwise guppy crashes on step 2 + - certifi=2021.5.30=py36hecd8cb5_0 # Needed otherwise guppy crashes on step 2 # Debugging dependencies - conda-tree prefix: /opt/anaconda3/envs/new_guppy_env From 9cc85fab642d67c7461eea7060c03fba80a4c831 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 15 Aug 2025 17:33:14 -0700 Subject: [PATCH 05/83] updated syntax for pn.Card in savingInputParameters --- GuPPy/savingInputParameters.ipynb | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/GuPPy/savingInputParameters.ipynb b/GuPPy/savingInputParameters.ipynb index a888ff1..83b0869 100755 --- a/GuPPy/savingInputParameters.ipynb +++ b/GuPPy/savingInputParameters.ipynb @@ -519,9 +519,10 @@ "widget = pn.Column(mark_down_1, files_1, pn.Row(individual_analysis_wd_2, psth_baseline_param))\n", "\n", "#file_selector = pn.WidgetBox(files_1)\n", - "individual = pn.Card(widget, title='Individual Analysis', background='WhiteSmoke', width=850)\n", - "group = pn.Card(group_analysis_wd_1, title='Group Analysis', 
background='WhiteSmoke', width=850)\n", - "visualize = pn.Card(visualization_wd, title='Visualization Parameters', background='WhiteSmoke', width=850)\n", + "styles = dict(background='WhiteSmoke')\n", + "individual = pn.Card(widget, title='Individual Analysis', styles=styles, width=850)\n", + "group = pn.Card(group_analysis_wd_1, title='Group Analysis', styles=styles, width=850)\n", + "visualize = pn.Card(visualization_wd, title='Visualization Parameters', styles=styles, width=850)\n", "\n", "#template.main.append(file_selector)\n", "template.main.append(individual)\n", From 2e9741287bfb4089dd762150a941b08549817e62 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 15 Aug 2025 17:33:35 -0700 Subject: [PATCH 06/83] established initial python 3.12 environment --- spec_file_mac_312.txt | 108 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 spec_file_mac_312.txt diff --git a/spec_file_mac_312.txt b/spec_file_mac_312.txt new file mode 100644 index 0000000..eecd7d2 --- /dev/null +++ b/spec_file_mac_312.txt @@ -0,0 +1,108 @@ +# This file may be used to create an environment using: +# $ conda create --name --file +# platform: osx-64 +attrs=25.3.0=pypi_0 +beautifulsoup4=4.13.4=pypi_0 +bleach=6.2.0=pypi_0 +bokeh=3.7.3=pypi_0 +bzip2=1.0.8=h6c40b1e_6 +ca-certificates=2025.7.15=hecd8cb5_0 +cairocffi=1.7.1=pypi_0 +cairosvg=2.8.2=pypi_0 +certifi=2025.8.3=pypi_0 +cffi=1.17.1=pypi_0 +charset-normalizer=3.4.3=pypi_0 +colorcet=3.1.0=pypi_0 +contourpy=1.3.3=pypi_0 +cssselect2=0.8.0=pypi_0 +cycler=0.12.1=pypi_0 +datashader=0.18.2=pypi_0 +defusedxml=0.7.1=pypi_0 +expat=2.7.1=h6d0c2b6_0 +fastjsonschema=2.21.2=pypi_0 +fonttools=4.59.1=pypi_0 +h11=0.16.0=pypi_0 +h5py=3.14.0=pypi_0 +holoviews=1.21.0=pypi_0 +hvplot=0.12.0=pypi_0 +idna=3.10=pypi_0 +jinja2=3.1.6=pypi_0 +jsonschema=4.25.0=pypi_0 +jsonschema-specifications=2025.4.1=pypi_0 +jupyter-client=8.6.3=pypi_0 +jupyter-core=5.8.1=pypi_0 +jupyterlab-pygments=0.3.0=pypi_0 +kiwisolver=1.4.9=pypi_0 +libcxx=19.1.7=haebbb44_3 +libffi=3.4.4=hecd8cb5_1 +linkify-it-py=2.0.3=pypi_0 +llvmlite=0.44.0=pypi_0 +markdown=3.8.2=pypi_0 +markdown-it-py=4.0.0=pypi_0 +markupsafe=3.0.2=pypi_0 +matplotlib=3.10.5=pypi_0 +mdit-py-plugins=0.5.0=pypi_0 +mdurl=0.1.2=pypi_0 +mistune=3.1.3=pypi_0 +multipledispatch=1.0.0=pypi_0 +narwhals=2.1.2=pypi_0 +nbclient=0.10.2=pypi_0 +nbconvert=7.16.6=pypi_0 +nbformat=5.10.4=pypi_0 +ncurses=6.5=h923df54_0 +numba=0.61.2=pypi_0 +numpy=2.2.6=pypi_0 +openssl=3.0.17=hee2dfae_0 +outcome=1.3.0.post0=pypi_0 +packaging=25.0=pypi_0 +pandas=2.3.1=pypi_0 +pandocfilters=1.5.1=pypi_0 +panel=1.7.5=pypi_0 +param=2.2.1=pypi_0 +phantomjs=1.4.1=pypi_0 +pillow=11.3.0=pypi_0 +pip=25.1=pyhc872135_2 +platformdirs=4.3.8=pypi_0 +pycparser=2.22=pypi_0 +pyct=0.5.0=pypi_0 +pygments=2.19.2=pypi_0 +pyparsing=3.2.3=pypi_0 +pysocks=1.7.1=pypi_0 +python=3.12.11=he8d2d4c_0 +python-dateutil=2.9.0.post0=pypi_0 +pytz=2025.2=pypi_0 +pyviz-comms=3.0.6=pypi_0 +pyyaml=6.0.2=pypi_0 +pyzmq=27.0.1=pypi_0 +readline=8.3=h49f2429_0 +referencing=0.36.2=pypi_0 +requests=2.32.4=pypi_0 +rpds-py=0.27.0=pypi_0 +scipy=1.16.1=pypi_0 +selenium=4.35.0=pypi_0 +setuptools=78.1.1=py312hecd8cb5_0 +six=1.17.0=pypi_0 +sniffio=1.3.1=pypi_0 +sortedcontainers=2.4.0=pypi_0 +soupsieve=2.7=pypi_0 +sqlite=3.50.2=hc8b0dd6_1 +tinycss2=1.4.0=pypi_0 +tk=8.6.15=h3a5a201_0 +toolz=1.0.0=pypi_0 +tornado=6.5.2=pypi_0 +tqdm=4.67.1=pypi_0 +traitlets=5.14.3=pypi_0 +trio=0.30.0=pypi_0 +trio-websocket=0.12.2=pypi_0 +typing-extensions=4.14.1=pypi_0 +tzdata=2025.2=pypi_0 
+uc-micro-py=1.0.3=pypi_0 +urllib3=2.5.0=pypi_0 +webencodings=0.5.1=pypi_0 +websocket-client=1.8.0=pypi_0 +wheel=0.45.1=py312hecd8cb5_0 +wsproto=1.2.0=pypi_0 +xarray=2025.8.0=pypi_0 +xyzservices=2025.4.0=pypi_0 +xz=5.6.4=h46256e1_1 +zlib=1.2.13=h4b97444_1 From 622327a4f1a8d2dd936093b31d3edba6a6a6a7a0 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 18 Aug 2025 15:06:07 -0700 Subject: [PATCH 07/83] Fixed homepage by swapping the panel.widgets.DataFrame with panel.widgets.Tabulator --- GuPPy/savingInputParameters.ipynb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/GuPPy/savingInputParameters.ipynb b/GuPPy/savingInputParameters.ipynb index 83b0869..1b50a12 100755 --- a/GuPPy/savingInputParameters.ipynb +++ b/GuPPy/savingInputParameters.ipynb @@ -291,13 +291,12 @@ " 'Peak End time': [0, 3, 10, np.nan, np.nan, \n", " np.nan, np.nan, np.nan, np.nan, np.nan]})\n", "\n", - "df_widget = pn.widgets.DataFrame(start_end_point_df, name='DataFrame', \n", - " auto_edit=True, show_index=False, row_height=20, width=450)\n", + "df_widget = pn.widgets.Tabulator(start_end_point_df, name='DataFrame', show_index=False, row_height=20, width=450)\n", "\n", "\n", "peak_param_wd = pn.WidgetBox(\"### Peak and AUC Parameters\", \n", " peak_explain, df_widget,\n", - " height=400) \n", + " height=400)\n", "\n", "\n", "\n", From dafb0c314730cae6ac57030c3f2a45d66c529d2e Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 18 Aug 2025 15:24:21 -0700 Subject: [PATCH 08/83] implement venus' changes in savesStoresList.py --- GuPPy/saveStoresList.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/GuPPy/saveStoresList.py b/GuPPy/saveStoresList.py index 364a63c..288353d 100755 --- a/GuPPy/saveStoresList.py +++ b/GuPPy/saveStoresList.py @@ -22,7 +22,7 @@ from tkinter import ttk, StringVar, messagebox #hv.extension() -pn.extension() +pn.extension('ace') def scanPortsAndFind(start_port=5000, end_port=5200, host='127.0.0.1'): while True: @@ -229,7 +229,7 @@ def plot(plot_select): # creating GUI template - template = pn.template.MaterialTemplate(title='Storenames GUI - {}'.format(os.path.basename(filepath), mark_down)) + template = pn.template.BootstrapTemplate(title='Storenames GUI - {}'.format(os.path.basename(filepath), mark_down)) @@ -241,7 +241,7 @@ def plot(plot_select): #literal_input_2 = pn.widgets.LiteralInput(name='Names for Storenames (list)', type=list) repeat_storenames = pn.widgets.Checkbox(name='Storenames to repeat', value=False) - repeat_storename_wd = pn.WidgetBox('', background='white', width=600) + repeat_storename_wd = pn.WidgetBox('', width=600) def callback(target, event): if event.new==True: target.objects = [multi_choice, literal_input_1] @@ -267,7 +267,7 @@ def callback(target, event): overwrite_button = pn.widgets.MenuButton(name='over-write storeslist file or create a new one? 
', items=['over_write_file', 'create_new_file'], button_type='default', split=True, align='end') - literal_input_2 = pn.widgets.Ace(value="""{}""", sizing_mode='stretch_both', theme='tomorrow', language='json', height=250) + literal_input_2 = pn.widgets.JSONEditor(value="""{}""", height=250) alert = pn.pane.Alert('#### No alerts !!', alert_type='danger', height=80) @@ -332,10 +332,10 @@ def fetchValues(): names_for_storenames.append(comboBoxValues[i]) d = dict() - print(text.value) + print("Fetch values: ", text.value) d["storenames"] = text.value d["names_for_storenames"] = names_for_storenames - literal_input_2.value = str(json.dumps(d)) + literal_input_2.value = d #str(json.dumps(d)) # on clicking 'Select Storenames' button, following function is executed def update_values(event): @@ -358,6 +358,9 @@ def update_values(event): for w in change_widgets: w.value = storenames + print('set values to text box') + print(storenames) + storenames_cache = dict() if os.path.exists(os.path.join(Path.home(), '.storesList.json')): with open(os.path.join(Path.home(), '.storesList.json')) as f: @@ -432,8 +435,8 @@ def comboBoxSelected(event): note = ttk.Label(root, text="Note : Click on Show button after appropriate selections and close the window.").grid(row=(len(storenames)*2)+2, column=2) button = ttk.Button(root, text='Show', command=fetchValues).grid(row=(len(storenames)*2)+4, column=2) - root.lift() - root.after(500, lambda: root.lift()) + # root.lift() + # root.after(500, lambda: root.lift()) root.mainloop() @@ -442,7 +445,8 @@ def comboBoxSelected(event): def save_button(event=None): global storenames - d = json.loads(literal_input_2.value) + d = literal_input_2.value #json.loads(literal_input_2.value) + print(d) arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) if np.where(arr2=="")[0].size>0: From cbf9692326c6e3a7228a4ad19bdef0f23637d834 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 19 Aug 2025 11:59:29 -0700 Subject: [PATCH 09/83] Revert "implement venus' changes in savesStoresList.py" This reverts commit dafb0c314730cae6ac57030c3f2a45d66c529d2e. --- GuPPy/saveStoresList.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/GuPPy/saveStoresList.py b/GuPPy/saveStoresList.py index 288353d..364a63c 100755 --- a/GuPPy/saveStoresList.py +++ b/GuPPy/saveStoresList.py @@ -22,7 +22,7 @@ from tkinter import ttk, StringVar, messagebox #hv.extension() -pn.extension('ace') +pn.extension() def scanPortsAndFind(start_port=5000, end_port=5200, host='127.0.0.1'): while True: @@ -229,7 +229,7 @@ def plot(plot_select): # creating GUI template - template = pn.template.BootstrapTemplate(title='Storenames GUI - {}'.format(os.path.basename(filepath), mark_down)) + template = pn.template.MaterialTemplate(title='Storenames GUI - {}'.format(os.path.basename(filepath), mark_down)) @@ -241,7 +241,7 @@ def plot(plot_select): #literal_input_2 = pn.widgets.LiteralInput(name='Names for Storenames (list)', type=list) repeat_storenames = pn.widgets.Checkbox(name='Storenames to repeat', value=False) - repeat_storename_wd = pn.WidgetBox('', width=600) + repeat_storename_wd = pn.WidgetBox('', background='white', width=600) def callback(target, event): if event.new==True: target.objects = [multi_choice, literal_input_1] @@ -267,7 +267,7 @@ def callback(target, event): overwrite_button = pn.widgets.MenuButton(name='over-write storeslist file or create a new one? 
', items=['over_write_file', 'create_new_file'], button_type='default', split=True, align='end') - literal_input_2 = pn.widgets.JSONEditor(value="""{}""", height=250) + literal_input_2 = pn.widgets.Ace(value="""{}""", sizing_mode='stretch_both', theme='tomorrow', language='json', height=250) alert = pn.pane.Alert('#### No alerts !!', alert_type='danger', height=80) @@ -332,10 +332,10 @@ def fetchValues(): names_for_storenames.append(comboBoxValues[i]) d = dict() - print("Fetch values: ", text.value) + print(text.value) d["storenames"] = text.value d["names_for_storenames"] = names_for_storenames - literal_input_2.value = d #str(json.dumps(d)) + literal_input_2.value = str(json.dumps(d)) # on clicking 'Select Storenames' button, following function is executed def update_values(event): @@ -358,9 +358,6 @@ def update_values(event): for w in change_widgets: w.value = storenames - print('set values to text box') - print(storenames) - storenames_cache = dict() if os.path.exists(os.path.join(Path.home(), '.storesList.json')): with open(os.path.join(Path.home(), '.storesList.json')) as f: @@ -435,8 +432,8 @@ def comboBoxSelected(event): note = ttk.Label(root, text="Note : Click on Show button after appropriate selections and close the window.").grid(row=(len(storenames)*2)+2, column=2) button = ttk.Button(root, text='Show', command=fetchValues).grid(row=(len(storenames)*2)+4, column=2) - # root.lift() - # root.after(500, lambda: root.lift()) + root.lift() + root.after(500, lambda: root.lift()) root.mainloop() @@ -445,8 +442,7 @@ def comboBoxSelected(event): def save_button(event=None): global storenames - d = literal_input_2.value #json.loads(literal_input_2.value) - print(d) + d = json.loads(literal_input_2.value) arr1, arr2 = np.asarray(d["storenames"]), np.asarray(d["names_for_storenames"]) if np.where(arr2=="")[0].size>0: From 3421a46e68f64bc1805fb5a70f14d81bbad1fb2c Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:18:34 -0700 Subject: [PATCH 10/83] added dependencies to pyproject.toml --- pyproject.toml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 438602f..575c213 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,7 +38,18 @@ classifiers = [ "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", ] -dependencies = [] # TODO: add dependencies +dependencies = [ + "brotlicffi", + "CairoSVG", + "datashader", + "h5py", + "hvplot", + "matplotlib", + "nbconvert", + "phantomjs", + "PySocks", + "selenium", +] [project.urls] From df45f33af35925e3668fa9b01190e7fa3b998ad0 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:33:36 -0700 Subject: [PATCH 11/83] Updated auto-publish to use TestPyPI --- .github/auto-publish.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/auto-publish.yaml b/.github/auto-publish.yaml index 2fbea5a..a733e70 100644 --- a/.github/auto-publish.yaml +++ b/.github/auto-publish.yaml @@ -3,7 +3,7 @@ # then uploads to PyPI using official PyPA GitHub Action with stored API token. 
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries -name: Upload Package to PyPI +name: Upload Package to TestPyPI on: release: @@ -28,6 +28,7 @@ jobs: - name: pypi-publish uses: pypa/gh-action-pypi-publish@v1.12.4 with: + repository_url: https://test.pypi.org/legacy/ verbose: true user: __token__ - password: ${{ secrets.PYPI_API_TOKEN }} \ No newline at end of file + password: ${{ secrets.TEST_PYPI_API_TOKEN }} \ No newline at end of file From 54241e75822bfc2baa4070aa8d27235354dd1bd6 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:33:54 -0700 Subject: [PATCH 12/83] removed old lock files --- new_spec_file_mac.txt | 196 ---------------------------------------- old_spec_file_mac.txt | 167 ---------------------------------- sparse_environment.yaml | 25 ----- spec_file_mac_312.txt | 108 ---------------------- 4 files changed, 496 deletions(-) delete mode 100644 new_spec_file_mac.txt delete mode 100644 old_spec_file_mac.txt delete mode 100644 sparse_environment.yaml delete mode 100644 spec_file_mac_312.txt diff --git a/new_spec_file_mac.txt b/new_spec_file_mac.txt deleted file mode 100644 index 0f8dade..0000000 --- a/new_spec_file_mac.txt +++ /dev/null @@ -1,196 +0,0 @@ -# This file may be used to create an environment using: -# $ conda create --name --file -# platform: osx-64 -appnope=0.1.3=pyhd8ed1ab_0 -argon2-cffi=21.1.0=py36hfa26744_0 -async_generator=1.10=pyhd8ed1ab_1 -attrs=22.2.0=pyh71513ae_0 -backcall=0.2.0=pyh9f0ad1d_0 -backports=1.0=pyhd8ed1ab_4 -backports.functools_lru_cache=2.0.0=pyhd8ed1ab_0 -bleach=6.1.0=pyhd8ed1ab_0 -blosc=1.21.5=hafa3907_1 -bokeh=2.3.3=py36h79c6626_0 -brotlipy=0.7.0=py36hfa26744_1001 -bzip2=1.0.8=hfdf4475_7 -ca-certificates=2025.8.3=hbd8a1cb_0 -cairo=1.16.0=he43a7df_1008 -cairocffi=1.2.0=pyhd8ed1ab_0 -cairosvg=2.5.2=pyhd8ed1ab_0 -certifi=2021.5.30=py36hecd8cb5_0 -cffi=1.14.6=py36h66709a0_0 -charset-normalizer=2.1.1=pyhd8ed1ab_0 -click=7.1.2=pyh9f0ad1d_0 -cloudpickle=2.2.1=pyhd8ed1ab_0 -colorama=0.4.5=pyhd8ed1ab_0 -colorcet=3.0.1=py_0 -conda=4.10.3=py36h79c6626_2 -conda-package-handling=1.7.3=py36hfa26744_0 -conda-tree=1.1.0=pyhd8ed1ab_2 -contextvars=2.4=py_0 -cryptography=35.0.0=py36ha6a00b0_0 -cssselect2=0.2.1=pyh9f0ad1d_1 -cycler=0.11.0=pyhd8ed1ab_0 -cytoolz=0.11.0=py36hfa26744_3 -dask=2021.3.0=pyhd8ed1ab_0 -dask-core=2021.3.0=pyhd8ed1ab_0 -datashader=0.11.0=py_0 -datashape=0.5.4=py_1 -decorator=5.1.1=pyhd8ed1ab_0 -defusedxml=0.7.1=pyhd8ed1ab_0 -distributed=2021.3.0=py36h79c6626_0 -entrypoints=0.4=pyhd8ed1ab_0 -firefox=78.0esr=h4a8c4bd_0 -fontconfig=2.13.94=h10f422b_0 -freetype=2.12.1=h60636b9_2 -fsspec=2023.1.0=pyhd8ed1ab_0 -geckodriver=0.26.0=h4a8c4bd_0 -gettext=0.25.1=he52a196_1 -gettext-tools=0.25.1=h3184127_1 -h5py=2.10.0=py36h3134771_0 -hdf5=1.10.4=nompi_h0cbb7df_1106 -heapdict=1.0.1=py_0 -holoviews=1.14.9=py_0 -hvplot=0.6.0=py_0 -icu=68.2=he49afe7_0 -idna=3.10=pyhd8ed1ab_0 -immutables=0.16=py36hfa26744_0 -importlib-metadata=4.8.1=py36h79c6626_0 -ipykernel=5.5.5=py36h495a4c6_0 -ipython=7.16.1=py36h9cf137f_2 -ipython_genutils=0.2.0=pyhd8ed1ab_1 -jedi=0.17.2=py36h79c6626_1 -jinja2=3.0.3=pyhd8ed1ab_0 -jpeg=9e=hb7f2c08_3 -jsonschema=4.1.2=pyhd8ed1ab_0 -jupyter_client=7.1.2=pyhd8ed1ab_0 -jupyter_core=4.8.1=py36h79c6626_0 -jupyterlab_pygments=0.1.2=pyh9f0ad1d_0 -kiwisolver=1.3.1=py36hc61eee1_1 -lcms2=2.12=h577c468_0 -lerc=3.0=he49afe7_0 -libasprintf=0.25.1=h3184127_1 -libasprintf-devel=0.25.1=h3184127_1 
-libblas=3.9.0=15_osx64_openblas -libcblas=3.9.0=15_osx64_openblas -libcxx=20.1.8=h3d58e20_1 -libdeflate=1.10=h0d85af4_0 -libffi=3.3=h046ec9c_2 -libgettextpo=0.25.1=h3184127_1 -libgettextpo-devel=0.25.1=h3184127_1 -libgfortran=3.0.1=0 -libglib=2.68.4=hd556434_0 -libiconv=1.18=h57a12c2_2 -libintl=0.25.1=h3184127_1 -libintl-devel=0.25.1=h3184127_1 -liblapack=3.9.0=15_osx64_openblas -libllvm8=8.0.1=h770b8ee_0 -liblzma=5.8.1=hd471939_2 -liblzma-devel=5.8.1=hd471939_2 -libopenblas=0.3.20=h9a5756b_0 -libpng=1.6.43=h92b6c6a_0 -libsodium=1.0.18=hbcb3906_1 -libsqlite=3.46.0=h1b8f9f3_0 -libtiff=4.3.0=hfca7e8f_4 -libwebp-base=1.6.0=hb807250_0 -libxml2=2.9.12=h93ec3fd_0 -libzlib=1.2.13=h87427d6_6 -llvmlite=0.31.0=py36hde82470_1 -locket=1.0.0=pyhd8ed1ab_0 -lz4-c=1.9.4=hf0c8a7f_0 -lzo=2.10=h4132b18_1002 -markdown=3.1.1=py36_0 -markupsafe=2.0.1=py36hfa26744_0 -matplotlib=3.3.4=py36h79c6626_0 -matplotlib-base=3.3.4=py36h4ea959b_0 -mistune=0.8.4=pyh1a96a4e_1006 -mock=5.1.0=pyhd8ed1ab_0 -msgpack-python=1.0.2=py36hc61eee1_1 -multipledispatch=0.6.0=pyhd8ed1ab_1 -nbclient=0.5.9=pyhd8ed1ab_0 -nbconvert=6.0.7=py36h79c6626_3 -nbformat=5.1.3=pyhd8ed1ab_0 -ncurses=6.5=h0622a9a_3 -nest-asyncio=1.6.0=pyhd8ed1ab_0 -networkx=2.5=py_0 -notebook=6.3.0=py36h79c6626_0 -numba=0.48.0=py36h4f17bb1_0 -numexpr=2.7.3=py36he43235d_0 -numpy=1.19.5=py36h08b5fde_2 -olefile=0.46=pyh9f0ad1d_1 -openjpeg=2.5.0=h69f46e4_0 -openssl=1.1.1w=hca72f7f_0 -packaging=21.3=pyhd8ed1ab_0 -pandas=1.1.5=py36h2be6da3_0 -pandoc=2.19.2=h694c41f_2 -pandocfilters=1.5.0=pyhd8ed1ab_0 -panel=0.12.1=py_0 -param=1.13.0=py_0 -parso=0.7.1=pyh9f0ad1d_0 -partd=1.2.0=pyhd8ed1ab_0 -pcre=8.45=he49afe7_0 -pexpect=4.8.0=pyh1a96a4e_2 -phantomjs=2.1.1=1 -pickleshare=0.7.5=py_1003 -pillow=8.3.2=py36h950f3bb_0 -pip=21.3.1=pyhd8ed1ab_0 -pixman=0.46.4=ha059160_1 -prometheus_client=0.17.1=pyhd8ed1ab_0 -prompt-toolkit=3.0.36=pyha770c72_0 -psutil=5.8.0=py36hfa26744_1 -ptyprocess=0.7.0=pyhd3deb0d_0 -pycosat=0.6.3=py36hfa26744_1006 -pycparser=2.21=pyhd8ed1ab_0 -pyct=0.4.8=py_0 -pyct-core=0.4.8=py_0 -pygments=2.14.0=pyhd8ed1ab_0 -pyobjc-core=7.3=py36hfa26744_2 -pyobjc-framework-cocoa=7.3=py36h79c6626_0 -pyopenssl=22.0.0=pyhd8ed1ab_1 -pyparsing=3.1.4=pyhd8ed1ab_0 -pyrsistent=0.17.3=py36hfa26744_2 -pysocks=1.7.1=py36h79c6626_3 -pytables=3.6.1=py36h5bccee9_0 -python=3.6.10=hf48f09d_2 -python-dateutil=2.8.2=pyhd8ed1ab_0 -python_abi=3.6=2_cp36m -pytz=2023.3.post1=pyhd8ed1ab_0 -pyviz_comms=3.0.0=py_0 -pyyaml=5.4.1=py36hfa26744_1 -pyzmq=22.3.0=py36h50cd92c_0 -readline=8.2=h7cca4af_2 -requests=2.28.1=pyhd8ed1ab_0 -ruamel_yaml=0.15.80=py36hfa26744_1004 -scipy=1.3.1=py36hab3da7d_2 -selenium=3.141.0=py36h1de35cc_0 -send2trash=1.8.2=pyhd1c38e8_0 -setuptools=58.0.4=py36h79c6626_2 -six=1.16.0=pyh6c4a22f_0 -snappy=1.2.2=h25c286d_0 -sortedcontainers=2.4.0=pyhd8ed1ab_0 -sqlite=3.46.0=h28673e1_0 -tblib=1.7.0=pyhd8ed1ab_0 -terminado=0.12.1=py36h79c6626_0 -testpath=0.6.0=pyhd8ed1ab_0 -tinycss2=1.4.0=pyhd8ed1ab_0 -tk=8.6.13=h1abcd95_1 -toolz=0.12.0=pyhd8ed1ab_0 -tornado=6.1=py36hfa26744_1 -tqdm=4.65.0=pyhd8ed1ab_0 -traitlets=4.3.3=pyhd8ed1ab_2 -typing-extensions=4.1.1=hd8ed1ab_0 -typing_extensions=4.1.1=pyha770c72_0 -urllib3=1.26.15=pyhd8ed1ab_0 -wcwidth=0.2.10=pyhd8ed1ab_0 -webencodings=0.5.1=pyhd8ed1ab_2 -wheel=0.37.1=pyhd8ed1ab_0 -xarray=0.18.2=pyhd8ed1ab_0 -xz=5.8.1=h357f2ed_2 -xz-gpl-tools=5.8.1=h357f2ed_2 -xz-tools=5.8.1=hd471939_2 -yaml=0.2.5=h4132b18_3 -zeromq=4.3.5=h93d8f39_0 -zict=2.0.0=py_0 -zipp=3.6.0=pyhd8ed1ab_0 -zlib=1.2.13=h87427d6_6 -zstd=1.5.6=h915ae27_0 diff --git 
a/old_spec_file_mac.txt b/old_spec_file_mac.txt deleted file mode 100644 index 4b2e812..0000000 --- a/old_spec_file_mac.txt +++ /dev/null @@ -1,167 +0,0 @@ -# This file may be used to create an environment using: -# $ conda create --name --file -# platform: osx-64 -appnope=0.1.0=py36hf537a9a_0 -attrs=19.3.0=py_0 -backcall=0.2.0=py_0 -blas=1.0=mkl -bleach=3.1.5=py_0 -blosc=1.19.0=hab81aa3_0 -bokeh=2.3.1=py36hecd8cb5_0 -brotlipy=0.7.0=py36haf1e3a3_1000 -bzip2=1.0.8=h1de35cc_0 -ca-certificates=2020.12.5=h033912b_0 -cairo=1.14.12=he6fea26_5 -cairocffi=1.2.0=pyhd8ed1ab_0 -cairosvg=2.5.2=pyhd8ed1ab_0 -certifi=2020.12.5=py36h79c6626_1 -cffi=1.14.0=py36hc512035_1 -chardet=3.0.4=py36_1003 -click=7.1.2=py_0 -cloudpickle=1.5.0=py_0 -colorcet=2.0.2=py_0 -contextvars=2.4=py_0 -cryptography=2.9.2=py36ha12b0ac_0 -cssselect2=0.2.1=pyh9f0ad1d_1 -cycler=0.10.0=py36hfc81398_0 -cytoolz=0.10.1=py36h1de35cc_0 -dask=2.20.0=py_0 -dask-core=2.20.0=py_0 -datashader=0.11.0=py_0 -datashape=0.5.4=py36_1 -decorator=4.4.2=py_0 -defusedxml=0.6.0=py_0 -distributed=2.20.0=py36_0 -entrypoints=0.3=py36_0 -firefox=78.0esr=h4a8c4bd_0 -fontconfig=2.13.1=h1027ab8_1000 -freetype=2.10.2=ha233b18_0 -fsspec=0.7.4=py_0 -geckodriver=0.26.0=h4a8c4bd_0 -gettext=0.19.8.1=h1f1d5ed_1 -glib=2.55.0=0 -h5py=2.10.0=py36h3134771_0 -hdf5=1.10.4=hfa1e0ec_0 -heapdict=1.0.1=py_0 -holoviews=1.14.2=py_0 -hvplot=0.6.0=py_0 -icu=58.2=h0a44026_1000 -idna=2.10=py_0 -immutables=0.14=py36haf1e3a3_0 -importlib-metadata=1.7.0=py36_0 -importlib_metadata=1.7.0=0 -intel-openmp=2019.4=233 -ipykernel=5.3.3=py36h5ca1d4c_0 -ipython=7.16.1=py36h5ca1d4c_0 -ipython_genutils=0.2.0=py36_0 -jedi=0.17.1=py36_0 -jinja2=2.11.2=py_0 -jpeg=9b=he5867d9_2 -jsonschema=3.2.0=py36_0 -jupyter_client=6.1.6=py_0 -jupyter_core=4.6.3=py36_0 -kiwisolver=1.2.0=py36h04f5b5a_0 -lcms2=2.11=h92f6f08_0 -libcxx=10.0.0=1 -libedit=3.1.20191231=h1de35cc_1 -libffi=3.3=hb1e8313_2 -libgfortran=3.0.1=h93005f0_2 -libiconv=1.15=h0b31af3_1006 -libpng=1.6.37=ha441bb4_0 -libsodium=1.0.18=h1de35cc_0 -libtiff=4.1.0=hcb84e12_1 -libxml2=2.9.9=hd80cff7_2 -llvmlite=0.31.0=py36h1341992_0 -locket=0.2.0=py36hca03003_1 -lz4-c=1.9.2=hb1e8313_1 -lzo=2.10=h1de35cc_2 -markdown=3.1.1=py36_0 -markupsafe=1.1.1=py36h1de35cc_0 -matplotlib=3.2.2=0 -matplotlib-base=3.2.2=py36h5670ca0_0 -mistune=0.8.4=py36h1de35cc_0 -mkl=2019.4=233 -mkl-service=2.3.0=py36hfbe908c_0 -mkl_fft=1.1.0=py36hc64f4ea_0 -mkl_random=1.1.1=py36h959d312_0 -mock=4.0.2=py_0 -msgpack-python=1.0.0=py36h04f5b5a_1 -multipledispatch=0.6.0=py36_0 -nbconvert=5.6.1=py36_0 -nbformat=5.0.7=py_0 -ncurses=6.2=h0a44026_1 -notebook=6.0.3=py36_0 -numba=0.48.0=py36h6c726b0_0 -numexpr=2.7.1=py36hce01a72_0 -numpy=1.18.5=py36h55a193a_0 -numpy-base=1.18.5=py36h3304bdc_0 -olefile=0.46=py36_0 -openssl=1.1.1k=h0d85af4_0 -packaging=20.4=py_0 -pandas=1.0.5=py36h959d312_0 -pandoc=2.10=0 -pandocfilters=1.4.2=py36_1 -panel=0.11.0=py_0 -param=1.10.1=py_0 -parso=0.7.0=py_0 -partd=1.1.0=py_0 -pcre=8.44=h4a8c4bd_0 -pexpect=4.8.0=py36_0 -phantomjs=2.1.1=1 -pickleshare=0.7.5=py36_0 -pillow=7.2.0=py36ha54b6ba_0 -pip=20.1.1=py36_1 -pixman=0.34.0=h1de35cc_1003 -prometheus_client=0.8.0=py_0 -prompt-toolkit=3.0.5=py_0 -psutil=5.7.0=py36h1de35cc_0 -ptyprocess=0.6.0=py36_0 -pycparser=2.20=py_2 -pyct=0.4.6=py_0 -pyct-core=0.4.6=py_0 -pygments=2.6.1=py_0 -pyopenssl=19.1.0=py_1 -pyparsing=2.4.7=py_0 -pyrsistent=0.16.0=py36h1de35cc_0 -pysocks=1.7.1=py36_0 -pytables=3.6.1=py36h5bccee9_0 -python=3.6.10=hf48f09d_2 -python-dateutil=2.8.1=py_0 -python_abi=3.6=1_cp36m -pytz=2020.1=py_0 
-pyviz_comms=0.7.6=py_0 -pyyaml=5.3.1=py36haf1e3a3_1 -pyzmq=19.0.1=py36hb1e8313_1 -readline=8.0=h1de35cc_0 -requests=2.24.0=py_0 -scipy=1.5.0=py36h912ce22_0 -selenium=3.141.0=py36h1de35cc_0 -send2trash=1.5.0=py36_0 -setuptools=49.2.0=py36_0 -six=1.15.0=py_0 -snappy=1.1.8=hb1e8313_0 -sortedcontainers=2.2.2=py_0 -sqlite=3.32.3=hffcf06c_0 -tbb=2020.0=h04f5b5a_0 -tblib=1.6.0=py_0 -terminado=0.8.3=py36_0 -testpath=0.4.4=py_0 -tinycss2=1.1.0=pyhd8ed1ab_0 -tk=8.6.10=hb0a8c7a_0 -toolz=0.10.0=py_0 -tornado=6.0.4=py36h1de35cc_1 -tqdm=4.47.0=py_0 -traitlets=4.3.3=py36_0 -typing_extensions=3.7.4.2=py_0 -urllib3=1.25.9=py_0 -wcwidth=0.2.5=py_0 -webencodings=0.5.1=py36_1 -wheel=0.34.2=py36_0 -xarray=0.16.0=py_0 -xz=5.2.5=h1de35cc_0 -yaml=0.2.5=haf1e3a3_0 -zeromq=4.3.2=hb1e8313_2 -zict=2.0.0=py_0 -zipp=3.1.0=py_0 -zlib=1.2.11=h1de35cc_3 -zstd=1.4.5=h41d2c2f_0 diff --git a/sparse_environment.yaml b/sparse_environment.yaml deleted file mode 100644 index 2db0248..0000000 --- a/sparse_environment.yaml +++ /dev/null @@ -1,25 +0,0 @@ -name: new_guppy_env -channels: - - pyviz - - conda-forge - - anaconda - - defaults -dependencies: - - python=3.6.10=hf48f09d_2 - # high-level (leaf) dependencies - - hvplot=0.6.0=py_0 - - geckodriver=0.26.0=h4a8c4bd_0 - - datashader=0.11.0=py_0 - - phantomjs=2.1.1=1 - - firefox=78.0esr=h4a8c4bd_0 - - h5py=2.10.0=py36h3134771_0 - - selenium=3.141.0=py36h1de35cc_0 - - pytables=3.6.1=py36h5bccee9_0 - - cairosvg=2.5.2=pyhd8ed1ab_0 - # fixed dependencies - - markdown=3.1.1=py36_0 # from __future__ import annotations - - openssl=1.1.1w=hca72f7f_0 # Needed otherwise guppy crashes on step 2 - - certifi=2021.5.30=py36hecd8cb5_0 # Needed otherwise guppy crashes on step 2 - # Debugging dependencies - - conda-tree -prefix: /opt/anaconda3/envs/new_guppy_env diff --git a/spec_file_mac_312.txt b/spec_file_mac_312.txt deleted file mode 100644 index eecd7d2..0000000 --- a/spec_file_mac_312.txt +++ /dev/null @@ -1,108 +0,0 @@ -# This file may be used to create an environment using: -# $ conda create --name --file -# platform: osx-64 -attrs=25.3.0=pypi_0 -beautifulsoup4=4.13.4=pypi_0 -bleach=6.2.0=pypi_0 -bokeh=3.7.3=pypi_0 -bzip2=1.0.8=h6c40b1e_6 -ca-certificates=2025.7.15=hecd8cb5_0 -cairocffi=1.7.1=pypi_0 -cairosvg=2.8.2=pypi_0 -certifi=2025.8.3=pypi_0 -cffi=1.17.1=pypi_0 -charset-normalizer=3.4.3=pypi_0 -colorcet=3.1.0=pypi_0 -contourpy=1.3.3=pypi_0 -cssselect2=0.8.0=pypi_0 -cycler=0.12.1=pypi_0 -datashader=0.18.2=pypi_0 -defusedxml=0.7.1=pypi_0 -expat=2.7.1=h6d0c2b6_0 -fastjsonschema=2.21.2=pypi_0 -fonttools=4.59.1=pypi_0 -h11=0.16.0=pypi_0 -h5py=3.14.0=pypi_0 -holoviews=1.21.0=pypi_0 -hvplot=0.12.0=pypi_0 -idna=3.10=pypi_0 -jinja2=3.1.6=pypi_0 -jsonschema=4.25.0=pypi_0 -jsonschema-specifications=2025.4.1=pypi_0 -jupyter-client=8.6.3=pypi_0 -jupyter-core=5.8.1=pypi_0 -jupyterlab-pygments=0.3.0=pypi_0 -kiwisolver=1.4.9=pypi_0 -libcxx=19.1.7=haebbb44_3 -libffi=3.4.4=hecd8cb5_1 -linkify-it-py=2.0.3=pypi_0 -llvmlite=0.44.0=pypi_0 -markdown=3.8.2=pypi_0 -markdown-it-py=4.0.0=pypi_0 -markupsafe=3.0.2=pypi_0 -matplotlib=3.10.5=pypi_0 -mdit-py-plugins=0.5.0=pypi_0 -mdurl=0.1.2=pypi_0 -mistune=3.1.3=pypi_0 -multipledispatch=1.0.0=pypi_0 -narwhals=2.1.2=pypi_0 -nbclient=0.10.2=pypi_0 -nbconvert=7.16.6=pypi_0 -nbformat=5.10.4=pypi_0 -ncurses=6.5=h923df54_0 -numba=0.61.2=pypi_0 -numpy=2.2.6=pypi_0 -openssl=3.0.17=hee2dfae_0 -outcome=1.3.0.post0=pypi_0 -packaging=25.0=pypi_0 -pandas=2.3.1=pypi_0 -pandocfilters=1.5.1=pypi_0 -panel=1.7.5=pypi_0 -param=2.2.1=pypi_0 -phantomjs=1.4.1=pypi_0 
-pillow=11.3.0=pypi_0 -pip=25.1=pyhc872135_2 -platformdirs=4.3.8=pypi_0 -pycparser=2.22=pypi_0 -pyct=0.5.0=pypi_0 -pygments=2.19.2=pypi_0 -pyparsing=3.2.3=pypi_0 -pysocks=1.7.1=pypi_0 -python=3.12.11=he8d2d4c_0 -python-dateutil=2.9.0.post0=pypi_0 -pytz=2025.2=pypi_0 -pyviz-comms=3.0.6=pypi_0 -pyyaml=6.0.2=pypi_0 -pyzmq=27.0.1=pypi_0 -readline=8.3=h49f2429_0 -referencing=0.36.2=pypi_0 -requests=2.32.4=pypi_0 -rpds-py=0.27.0=pypi_0 -scipy=1.16.1=pypi_0 -selenium=4.35.0=pypi_0 -setuptools=78.1.1=py312hecd8cb5_0 -six=1.17.0=pypi_0 -sniffio=1.3.1=pypi_0 -sortedcontainers=2.4.0=pypi_0 -soupsieve=2.7=pypi_0 -sqlite=3.50.2=hc8b0dd6_1 -tinycss2=1.4.0=pypi_0 -tk=8.6.15=h3a5a201_0 -toolz=1.0.0=pypi_0 -tornado=6.5.2=pypi_0 -tqdm=4.67.1=pypi_0 -traitlets=5.14.3=pypi_0 -trio=0.30.0=pypi_0 -trio-websocket=0.12.2=pypi_0 -typing-extensions=4.14.1=pypi_0 -tzdata=2025.2=pypi_0 -uc-micro-py=1.0.3=pypi_0 -urllib3=2.5.0=pypi_0 -webencodings=0.5.1=pypi_0 -websocket-client=1.8.0=pypi_0 -wheel=0.45.1=py312hecd8cb5_0 -wsproto=1.2.0=pypi_0 -xarray=2025.8.0=pypi_0 -xyzservices=2025.4.0=pypi_0 -xz=5.6.4=h46256e1_1 -zlib=1.2.13=h4b97444_1 From 6a6f03d29c49aff11de76a03031225f9819e16ee Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:46:29 -0700 Subject: [PATCH 13/83] updated version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 575c213..5a103ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "0.1.0" +version = "2.0.0-alpha1" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." readme = "README.md" authors = [ From 764480a02e27112ca49d550a39ca6d818425bd1c Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:52:31 -0700 Subject: [PATCH 14/83] added workflow_dispatch --- .github/auto-publish.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/auto-publish.yaml b/.github/auto-publish.yaml index a733e70..42e5f01 100644 --- a/.github/auto-publish.yaml +++ b/.github/auto-publish.yaml @@ -6,6 +6,7 @@ name: Upload Package to TestPyPI on: + workflow_dispatch: release: types: [published] From e68180abfd3a31dd727225a80f1bc56dbab19a45 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:56:30 -0700 Subject: [PATCH 15/83] moved auto-publish to the correct location --- .github/auto-publish.yaml | 35 ----------------------------------- 1 file changed, 35 deletions(-) delete mode 100644 .github/auto-publish.yaml diff --git a/.github/auto-publish.yaml b/.github/auto-publish.yaml deleted file mode 100644 index 42e5f01..0000000 --- a/.github/auto-publish.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Automatically builds and publishes Python package to PyPI when a GitHub release is published. -# Uses build library to create distribution files from pyproject.toml configuration, -# then uploads to PyPI using official PyPA GitHub Action with stored API token. 
-# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries - -name: Upload Package to TestPyPI - -on: - workflow_dispatch: - release: - types: [published] - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v5 - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.12" - - name: Install Building Dependencies - run: | - python -m pip install --upgrade pip - python -m pip install --upgrade build - - name: Build package - run: | - python -m build - - name: pypi-publish - uses: pypa/gh-action-pypi-publish@v1.12.4 - with: - repository_url: https://test.pypi.org/legacy/ - verbose: true - user: __token__ - password: ${{ secrets.TEST_PYPI_API_TOKEN }} \ No newline at end of file From 06b0f23d15cc46bd44f25c6748ec067ff05a3e3d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:56:43 -0700 Subject: [PATCH 16/83] moved auto-publish to the correct location --- .github/workflows/auto-publish.yaml | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 .github/workflows/auto-publish.yaml diff --git a/.github/workflows/auto-publish.yaml b/.github/workflows/auto-publish.yaml new file mode 100644 index 0000000..42e5f01 --- /dev/null +++ b/.github/workflows/auto-publish.yaml @@ -0,0 +1,35 @@ +# Automatically builds and publishes Python package to PyPI when a GitHub release is published. +# Uses build library to create distribution files from pyproject.toml configuration, +# then uploads to PyPI using official PyPA GitHub Action with stored API token. +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: Upload Package to TestPyPI + +on: + workflow_dispatch: + release: + types: [published] + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v5 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.12" + - name: Install Building Dependencies + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade build + - name: Build package + run: | + python -m build + - name: pypi-publish + uses: pypa/gh-action-pypi-publish@v1.12.4 + with: + repository_url: https://test.pypi.org/legacy/ + verbose: true + user: __token__ + password: ${{ secrets.TEST_PYPI_API_TOKEN }} \ No newline at end of file From c1735d02ef06e7d5ecba2e139bd40d4f2bcc16d5 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 09:58:29 -0700 Subject: [PATCH 17/83] .yml --- .github/workflows/{auto-publish.yaml => auto-publish.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{auto-publish.yaml => auto-publish.yml} (100%) diff --git a/.github/workflows/auto-publish.yaml b/.github/workflows/auto-publish.yml similarity index 100% rename from .github/workflows/auto-publish.yaml rename to .github/workflows/auto-publish.yml From dc1bbcb52297f3fb094ac815df77516ede9548d7 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 10:59:27 -0700 Subject: [PATCH 18/83] fixed workflow warning --- .github/workflows/auto-publish.yml | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/auto-publish.yml b/.github/workflows/auto-publish.yml index 42e5f01..972ca03 100644 --- a/.github/workflows/auto-publish.yml +++ b/.github/workflows/auto-publish.yml @@ -29,7 +29,7 @@ 
jobs: - name: pypi-publish uses: pypa/gh-action-pypi-publish@v1.12.4 with: - repository_url: https://test.pypi.org/legacy/ + repository-url: https://test.pypi.org/legacy/ verbose: true user: __token__ password: ${{ secrets.TEST_PYPI_API_TOKEN }} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 5a103ec..fa702d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "2.0.0-alpha1" +version = "2.0.0-alpha2" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." readme = "README.md" authors = [ From 01494738ff53c036aabf6029dcbf1b23d4cdbbdf Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 11:06:17 -0700 Subject: [PATCH 19/83] switched to modern attestations approach --- .github/workflows/auto-publish.yml | 3 +-- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/auto-publish.yml b/.github/workflows/auto-publish.yml index 972ca03..50a6daa 100644 --- a/.github/workflows/auto-publish.yml +++ b/.github/workflows/auto-publish.yml @@ -31,5 +31,4 @@ jobs: with: repository-url: https://test.pypi.org/legacy/ verbose: true - user: __token__ - password: ${{ secrets.TEST_PYPI_API_TOKEN }} \ No newline at end of file + attestations: true \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index fa702d6..d49a6d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "2.0.0-alpha2" +version = "2.0.0-alpha3" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." readme = "README.md" authors = [ From a4373686719bad7355b329109800481eadda2e77 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 11:10:14 -0700 Subject: [PATCH 20/83] switched to modern attestations approach --- .github/workflows/auto-publish.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/auto-publish.yml b/.github/workflows/auto-publish.yml index 50a6daa..bb5af51 100644 --- a/.github/workflows/auto-publish.yml +++ b/.github/workflows/auto-publish.yml @@ -13,6 +13,8 @@ on: jobs: deploy: runs-on: ubuntu-latest + permissions: + id-token: write # IMPORTANT: this permission is mandatory for trusted publishing steps: - uses: actions/checkout@v5 - name: Set up Python From c8f16d9f7c1d61ad0c7c179090f79a83254745ab Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 11:15:15 -0700 Subject: [PATCH 21/83] updated comment header --- .github/workflows/auto-publish.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/auto-publish.yml b/.github/workflows/auto-publish.yml index bb5af51..ac4d922 100644 --- a/.github/workflows/auto-publish.yml +++ b/.github/workflows/auto-publish.yml @@ -1,6 +1,8 @@ -# Automatically builds and publishes Python package to PyPI when a GitHub release is published. +# Automatically builds and publishes Python package to TestPyPI when a GitHub release is published. # Uses build library to create distribution files from pyproject.toml configuration, -# then uploads to PyPI using official PyPA GitHub Action with stored API token. +# then uploads to TestPyPI using official PyPA GitHub Action with Trusted Publishing. 
+# Trusted Publishing eliminates the need for API tokens by using cryptographic attestations +# to verify the package was built by this specific GitHub workflow. # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries name: Upload Package to TestPyPI From 2d56292a1beb9a53c112219800cce93b1276d519 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 20 Aug 2025 11:15:47 -0700 Subject: [PATCH 22/83] updated version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d49a6d8..62b6c5f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "2.0.0-alpha3" +version = "2.0.0-alpha4" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." readme = "README.md" authors = [ From 3fc2de35fc5088574f29c9d4fc43e5962f257e93 Mon Sep 17 00:00:00 2001 From: Paul Adkisson Date: Thu, 21 Aug 2025 11:56:27 -0400 Subject: [PATCH 23/83] Fix bugs exposed by pip-installable environment (#145) --- GuPPy/saveStoresList.py | 237 +++++++++++++++++++++++----------------- pyproject.toml | 1 + 2 files changed, 135 insertions(+), 103 deletions(-) diff --git a/GuPPy/saveStoresList.py b/GuPPy/saveStoresList.py index 364a63c..cab877a 100755 --- a/GuPPy/saveStoresList.py +++ b/GuPPy/saveStoresList.py @@ -22,7 +22,7 @@ from tkinter import ttk, StringVar, messagebox #hv.extension() -pn.extension() +pn.extension('ace') def scanPortsAndFind(start_port=5000, end_port=5200, host='127.0.0.1'): while True: @@ -229,7 +229,7 @@ def plot(plot_select): # creating GUI template - template = pn.template.MaterialTemplate(title='Storenames GUI - {}'.format(os.path.basename(filepath), mark_down)) + template = pn.template.BootstrapTemplate(title='Storenames GUI - {}'.format(os.path.basename(filepath), mark_down)) @@ -241,7 +241,7 @@ def plot(plot_select): #literal_input_2 = pn.widgets.LiteralInput(name='Names for Storenames (list)', type=list) repeat_storenames = pn.widgets.Checkbox(name='Storenames to repeat', value=False) - repeat_storename_wd = pn.WidgetBox('', background='white', width=600) + repeat_storename_wd = pn.WidgetBox('', width=600) def callback(target, event): if event.new==True: target.objects = [multi_choice, literal_input_1] @@ -253,9 +253,9 @@ def callback(target, event): update_options = pn.widgets.Button(name='Select Storenames') save = pn.widgets.Button(name='Save') - text = pn.widgets.LiteralInput(value=[], name='Selected Store Names', type=list) + text = pn.widgets.LiteralInput(value=[], name='Selected Store Names', type=list, width=500) - path = pn.widgets.TextInput(name='Location to Stores List file', width=500, sizing_mode="stretch_width") + path = pn.widgets.TextInput(name='Location to Stores List file', width=500) mark_down_for_overwrite = pn.pane.Markdown(""" Select option from below if user wants to over-write a file or create a new file. **Creating a new file will make a new ouput folder and will get saved at that location.** @@ -267,7 +267,7 @@ def callback(target, event): overwrite_button = pn.widgets.MenuButton(name='over-write storeslist file or create a new one? 
', items=['over_write_file', 'create_new_file'], button_type='default', split=True, align='end') - literal_input_2 = pn.widgets.Ace(value="""{}""", sizing_mode='stretch_both', theme='tomorrow', language='json', height=250) + literal_input_2 = pn.widgets.CodeEditor(value="""{}""", sizing_mode='stretch_both', theme='tomorrow', language='json', height=250) alert = pn.pane.Alert('#### No alerts !!', alert_type='danger', height=80) @@ -283,6 +283,8 @@ def callback(target, event): storenames = [] + storename_dropdowns = {} + storename_textboxes = {} if len(allnames)==0: alert.object = '####Alert !! \n No storenames found. There are not any TDT files or csv files to look for storenames.' @@ -296,48 +298,73 @@ def overwrite_button_actions(event): select_location.options = [show_dir(filepath)] #select_location.value = select_location.options[0] - def fetchValues(): + def fetchValues(event): + global storenames alert.object = '#### No alerts !!' + + if not storename_dropdowns or not len(storenames) > 0: + alert.object = '####Alert !! \n No storenames selected.' + return + storenames_cache = dict() if os.path.exists(os.path.join(Path.home(), '.storesList.json')): with open(os.path.join(Path.home(), '.storesList.json')) as f: storenames_cache = json.load(f) - comboBox_keys = list(hold_comboBoxValues.keys()) - textBox_keys = list(hold_textBoxValues.keys()) - + comboBoxValues, textBoxValues = [], [] - for i in range(len(comboBox_keys)): - comboBoxValues.append(hold_comboBoxValues[comboBox_keys[i]].get()) + dropdown_keys = list(storename_dropdowns.keys()) + textbox_keys = list(storename_textboxes.keys()) if storename_textboxes else [] - for i in range(len(textBox_keys)): - textBoxValues.append(hold_textBoxValues[textBox_keys[i]].get()) - if len(textBoxValues[i].split())>1: - alert.object = '####Alert !! \n Whitespace is not allowed in the text box entry.' - if textBoxValues[i]==None and comboBoxValues[i] not in storenames_cache: - print(textBoxValues[i], comboBoxValues[i]) - alert.object = '####Alert !! \n One of the text box entry is empty.' - - if len(comboBoxValues)!=len(textBoxValues): + # Get dropdown values + for key in dropdown_keys: + comboBoxValues.append(storename_dropdowns[key].value) + + # Get textbox values (matching with dropdown keys) + for key in dropdown_keys: + if key in storename_textboxes: + textbox_value = storename_textboxes[key].value or "" + textBoxValues.append(textbox_value) + + # Validation: Check for whitespace + if len(textbox_value.split()) > 1: + alert.object = '####Alert !! \n Whitespace is not allowed in the text box entry.' + return + + # Validation: Check for empty required fields + dropdown_value = storename_dropdowns[key].value + if not textbox_value and dropdown_value not in storenames_cache and dropdown_value in ['control', 'signal', 'event TTLs']: + alert.object = '####Alert !! \n One of the text box entry is empty.' + return + else: + # For cached values, use the dropdown value directly + textBoxValues.append(storename_dropdowns[key].value) + + if len(comboBoxValues) != len(textBoxValues): alert.object = '####Alert !! \n Number of entries in combo box and text box should be same.' + return names_for_storenames = [] for i in range(len(comboBoxValues)): - if comboBoxValues[i]=='control' or comboBoxValues[i]=="signal": + if comboBoxValues[i] == 'control' or comboBoxValues[i] == "signal": if '_' in textBoxValues[i]: - messagebox.showwarning("Warning", "Please do not use underscore in region name") + alert.object = '####Alert !! 
\n Please do not use underscore in region name.' + return names_for_storenames.append("{}_{}".format(comboBoxValues[i], textBoxValues[i])) - elif comboBoxValues[i]=='event TTLs': + elif comboBoxValues[i] == 'event TTLs': names_for_storenames.append(textBoxValues[i]) else: names_for_storenames.append(comboBoxValues[i]) d = dict() - print(text.value) d["storenames"] = text.value d["names_for_storenames"] = names_for_storenames - literal_input_2.value = str(json.dumps(d)) + literal_input_2.value = str(json.dumps(d, indent=2)) + + # Panel-based storename configuration (replaces Tkinter dialog) + storename_config_widgets = pn.Column(visible=False) + show_config_button = pn.widgets.Button(name='Show Selected Configuration', button_type='primary') - # on clicking 'Select Storenames' button, following function is executed + # on clicking 'Select Storenames' button, following function is executed def update_values(event): global storenames, vars_list arr = [] @@ -363,78 +390,83 @@ def update_values(event): with open(os.path.join(Path.home(), '.storesList.json')) as f: storenames_cache = json.load(f) - - def comboBoxSelected(event): - row, col = event.widget.grid_info()['row'], event.widget.grid_info()['column'] - if event.widget.get()=="control": - label = ttk.Label(root, - text="Type appropriate region name in the text box below :").grid(row=row, column=col+1) - elif event.widget.get()=="signal": - label = ttk.Label(root, - text="Type appropriate region name in the text box below :").grid(row=row, column=col+1) - elif event.widget.get()=="event TTLs": - label = ttk.Label(root, - text="Type event name for the TTLs in the text box below :").grid(row=row, column=col+1) - else: - pass + # Create Panel widgets for storename configuration + config_widgets = [] + storename_dropdowns.clear() + storename_textboxes.clear() - global hold_comboBoxValues, hold_textBoxValues - root = tk.Tk() - root.title('Select options for storenames and give appropriate names (if asked)') - root.geometry('1200x1000') - hold_comboBoxValues = dict() - hold_textBoxValues = dict() - - for i in range(len(storenames)): - if storenames[i] in storenames_cache: - T = ttk.Label(root, text="Select appropriate option for {} : ".format(storenames[i])).grid(row=i+1, column=1) - if storenames[i] in hold_comboBoxValues and storenames[i] in hold_textBoxValues: - hold_comboBoxValues[storenames[i]+'_'+str(i)] = StringVar() - hold_textBoxValues[storenames[i]+'_'+str(i)] = StringVar() - myCombo = ttk.Combobox(root, - textvariable=hold_comboBoxValues[storenames[i]+'_'+str(i)], - value=storenames_cache[storenames[i]], - width=20) - else: - hold_comboBoxValues[storenames[i]] = StringVar() - hold_textBoxValues[storenames[i]] = StringVar() - myCombo = ttk.Combobox(root, - textvariable=hold_comboBoxValues[storenames[i]], - value=storenames_cache[storenames[i]], - width=20) - myCombo.grid(row=i+1, column=2) - myCombo.current(0) - myCombo.bind("<>", comboBoxSelected) - else: - T = ttk.Label(root, text="Select appropriate option for {} : ".format(storenames[i])).grid(row=i+1, column=1) - if storenames[i] in hold_comboBoxValues and storenames[i] in hold_textBoxValues: - hold_comboBoxValues[storenames[i]+'_'+str(i)] = StringVar() - hold_textBoxValues[storenames[i]+'_'+str(i)] = StringVar() - myCombo = ttk.Combobox(root, - textvariable=hold_comboBoxValues[storenames[i]+'_'+str(i)], - value=['', 'control', 'signal', 'event TTLs'], - width=12) - textBox = tk.Entry(root, - textvariable=hold_textBoxValues[storenames[i]+'_'+str(i)]) + if len(storenames) > 0: + 
config_widgets.append(pn.pane.Markdown("## Configure Storenames\nSelect appropriate options for each storename and provide names as needed:")) + + for i, storename in enumerate(storenames): + # Create a row for each storename + row_widgets = [] + + # Label + label = pn.pane.Markdown(f"**{storename}:**") + row_widgets.append(label) + + # Dropdown options + if storename in storenames_cache: + options = storenames_cache[storename] + default_value = options[0] if options else '' else: - hold_comboBoxValues[storenames[i]] = StringVar() - hold_textBoxValues[storenames[i]] = StringVar() - myCombo = ttk.Combobox(root, - textvariable=hold_comboBoxValues[storenames[i]], - value=['', 'control', 'signal', 'event TTLs'], - width=12) - textBox = tk.Entry(root, - textvariable=hold_textBoxValues[storenames[i]]) - myCombo.grid(row=i+1, column=2) - textBox.grid(row=i+1, column=4) - myCombo.current(0) - myCombo.bind("<>", comboBoxSelected) - - note = ttk.Label(root, text="Note : Click on Show button after appropriate selections and close the window.").grid(row=(len(storenames)*2)+2, column=2) - button = ttk.Button(root, text='Show', command=fetchValues).grid(row=(len(storenames)*2)+4, column=2) - root.lift() - root.after(500, lambda: root.lift()) - root.mainloop() + options = ['', 'control', 'signal', 'event TTLs'] + default_value = '' + + # Create unique key for widget + widget_key = f"{storename}_{i}" if f"{storename}_{i}" not in storename_dropdowns else f"{storename}_{i}_{len(storename_dropdowns)}" + + dropdown = pn.widgets.Select( + name='Type', + value=default_value, + options=options, + width=150 + ) + storename_dropdowns[widget_key] = dropdown + row_widgets.append(dropdown) + + # Text input (only show if not cached or if control/signal/event TTLs selected) + if storename not in storenames_cache or default_value in ['control', 'signal', 'event TTLs']: + textbox = pn.widgets.TextInput( + name='Name', + value='', + placeholder='Enter region/event name', + width=200 + ) + storename_textboxes[widget_key] = textbox + row_widgets.append(textbox) + + # Add helper text based on selection + def create_help_function(dropdown_widget, help_pane_container): + @pn.depends(dropdown_widget.param.value, watch=True) + def update_help(dropdown_value): + if dropdown_value == 'control': + help_pane_container[0] = pn.pane.Markdown("*Type appropriate region name*", styles={'color': 'gray', 'font-size': '12px'}) + elif dropdown_value == 'signal': + help_pane_container[0] = pn.pane.Markdown("*Type appropriate region name*", styles={'color': 'gray', 'font-size': '12px'}) + elif dropdown_value == 'event TTLs': + help_pane_container[0] = pn.pane.Markdown("*Type event name for the TTLs*", styles={'color': 'gray', 'font-size': '12px'}) + else: + help_pane_container[0] = pn.pane.Markdown("", styles={'color': 'gray', 'font-size': '12px'}) + return update_help + + help_container = [pn.pane.Markdown("")] + help_function = create_help_function(dropdown, help_container) + help_function(dropdown.value) # Initialize + row_widgets.append(help_container[0]) + + # Add the row to config widgets + config_widgets.append(pn.Row(*row_widgets, margin=(5, 0))) + + # Add show button + config_widgets.append(pn.Spacer(height=20)) + config_widgets.append(show_config_button) + config_widgets.append(pn.pane.Markdown("*Click 'Show Selected Configuration' to apply your selections.*", styles={'font-size': '12px', 'color': 'gray'})) + + # Update the configuration panel + storename_config_widgets.objects = config_widgets + storename_config_widgets.visible = 
len(storenames) > 0 @@ -500,7 +532,9 @@ def save_button(event=None): insertLog('Storeslist : \n'+str(arr), logging.INFO) + # Connect button callbacks update_options.on_click(update_values) + show_config_button.on_click(fetchValues) save.on_click(save_button) overwrite_button.on_click(overwrite_button_actions) @@ -511,6 +545,7 @@ def save_button(event=None): widget_1 = pn.Column('# '+os.path.basename(filepath), mark_down, mark_down_np, plot_select, plot) widget_2 = pn.Column(repeat_storenames, repeat_storename_wd, pn.Spacer(height=20), cross_selector, update_options, + storename_config_widgets, pn.Spacer(height=10), text, literal_input_2, alert, mark_down_for_overwrite, overwrite_button, select_location, save, path) template.main.append(pn.Row(widget_1, widget_2)) @@ -519,6 +554,7 @@ def save_button(event=None): widget_1 = pn.Column('# '+os.path.basename(filepath), mark_down) widget_2 = pn.Column(repeat_storenames, repeat_storename_wd, pn.Spacer(height=20), cross_selector, update_options, + storename_config_widgets, pn.Spacer(height=10), text, literal_input_2, alert, mark_down_for_overwrite, overwrite_button, select_location, save, path) template.main.append(pn.Row(widget_1, widget_2)) @@ -800,7 +836,7 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch): # used assigned flags to process the files and read the data if flag=='event_or_data_np': arr = list(df.iloc[:,1]) - check_float = [True for i in arr if type(i)==np.float] + check_float = [True for i in arr if isinstance(i, float)] if len(arr)==len(check_float) and columns_isstr == False: flag = 'data_np' elif columns_isstr == True and ('value' in np.char.lower(np.array(cols))): @@ -967,8 +1003,3 @@ def execute(inputParameters): except Exception as e: insertLog(str(e), logging.ERROR) raise e - - - - - diff --git a/pyproject.toml b/pyproject.toml index 62b6c5f..a10f9fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -49,6 +49,7 @@ dependencies = [ "phantomjs", "PySocks", "selenium", + "tables", ] From fe4d3dd6fe845a635056eed2bf92bdef4f7b2911 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 09:06:15 -0700 Subject: [PATCH 24/83] alpha5 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a10f9fc..d278612 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "2.0.0-alpha4" +version = "2.0.0-alpha5" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." 
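A side note on the version strings bumped throughout this series: "2.0.0-alpha5" is a legal but non-canonical PEP 440 pre-release, so setuptools normalizes it to "2.0.0a5" when building. The TestPyPI release and the built artifacts therefore carry the normalized form, and pip only resolves such a pre-release when it is requested explicitly, for example with pip install --index-url https://test.pypi.org/simple/ guppy==2.0.0a5 or with the --pre flag.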
readme = "README.md" authors = [ From 316c63ab3908ce464249dc95b0e03d5d51c9e77d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 09:43:00 -0700 Subject: [PATCH 25/83] reorganized into src/guppy pattern --- GuPPy/images/.DS_Store | Bin 6148 -> 0 bytes pyproject.toml | 4 ++-- src/guppy/__init__.py | 0 {GuPPy => src/guppy}/combineDataFn.py | 0 {GuPPy => src/guppy}/computeCorr.py | 0 {GuPPy => src/guppy}/computePsth.py | 0 {GuPPy => src/guppy}/findTransientsFreqAndAmp.py | 0 {GuPPy => src/guppy}/images/Input_Parameters.png | Bin .../images/Input_Parameters_GroupAnalysis.png | Bin .../images/Input_Parameters_Individual.png | Bin {GuPPy => src/guppy}/images/Storenames_GUI.png | Bin .../guppy}/images/Visualization_Heatmap.png | Bin .../guppy}/images/Visualization_PSTH.png | Bin .../images/artifacts_removal_replace_nan.png | Bin .../guppy}/images/control_signal_artifacts.png | Bin .../images/control_signal_artifacts_removal.png | Bin .../images/control_signal_chunk_selection.png | Bin .../guppy}/images/repeat_storenames.png | Bin .../guppy}/images/transients_detection.png | Bin {GuPPy => src/guppy}/preprocess.py | 0 {GuPPy => src/guppy}/readTevTsq.py | 0 .../guppy}/runFiberPhotometryAnalysis.ipynb | 0 {GuPPy => src/guppy}/saveStoresList.py | 0 {GuPPy => src/guppy}/savingInputParameters.ipynb | 0 {GuPPy => src/guppy}/visualizePlot.py | 0 25 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 GuPPy/images/.DS_Store create mode 100644 src/guppy/__init__.py rename {GuPPy => src/guppy}/combineDataFn.py (100%) rename {GuPPy => src/guppy}/computeCorr.py (100%) rename {GuPPy => src/guppy}/computePsth.py (100%) rename {GuPPy => src/guppy}/findTransientsFreqAndAmp.py (100%) rename {GuPPy => src/guppy}/images/Input_Parameters.png (100%) rename {GuPPy => src/guppy}/images/Input_Parameters_GroupAnalysis.png (100%) rename {GuPPy => src/guppy}/images/Input_Parameters_Individual.png (100%) rename {GuPPy => src/guppy}/images/Storenames_GUI.png (100%) rename {GuPPy => src/guppy}/images/Visualization_Heatmap.png (100%) rename {GuPPy => src/guppy}/images/Visualization_PSTH.png (100%) rename {GuPPy => src/guppy}/images/artifacts_removal_replace_nan.png (100%) rename {GuPPy => src/guppy}/images/control_signal_artifacts.png (100%) rename {GuPPy => src/guppy}/images/control_signal_artifacts_removal.png (100%) rename {GuPPy => src/guppy}/images/control_signal_chunk_selection.png (100%) rename {GuPPy => src/guppy}/images/repeat_storenames.png (100%) rename {GuPPy => src/guppy}/images/transients_detection.png (100%) rename {GuPPy => src/guppy}/preprocess.py (100%) rename {GuPPy => src/guppy}/readTevTsq.py (100%) rename {GuPPy => src/guppy}/runFiberPhotometryAnalysis.ipynb (100%) rename {GuPPy => src/guppy}/saveStoresList.py (100%) rename {GuPPy => src/guppy}/savingInputParameters.ipynb (100%) rename {GuPPy => src/guppy}/visualizePlot.py (100%) diff --git a/GuPPy/images/.DS_Store b/GuPPy/images/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Thu, 21 Aug 2025 11:51:34 -0700 Subject: [PATCH 26/83] added main entry point --- pyproject.toml | 3 + src/guppy/computePsth.py | 8 +- src/guppy/findTransientsFreqAndAmp.py | 2 +- src/guppy/main.py | 13 + src/guppy/preprocess.py | 2 +- src/guppy/readTevTsq.py | 8 +- 
src/guppy/savingInputParameters.ipynb | 575 -------------------------- src/guppy/savingInputParameters.py | 526 +++++++++++++++++++++++ src/guppy/visualizePlot.py | 2 +- 9 files changed, 555 insertions(+), 584 deletions(-) create mode 100644 src/guppy/main.py delete mode 100755 src/guppy/savingInputParameters.ipynb create mode 100644 src/guppy/savingInputParameters.py diff --git a/pyproject.toml b/pyproject.toml index 3c389a4..5207165 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,9 @@ dependencies = [ "tables", ] +[project.scripts] +guppy = "guppy.main:main" + [project.urls] "Homepage" = "https://github.com/LernerLab/GuPPy" diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index 3585738..f091c4f 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -15,10 +15,10 @@ import multiprocessing as mp from scipy import signal as ss from collections import OrderedDict -from preprocess import get_all_stores_for_combining_data -from computeCorr import computeCrossCorrelation -from computeCorr import getCorrCombinations -from computeCorr import make_dir +from .preprocess import get_all_stores_for_combining_data +from .computeCorr import computeCrossCorrelation +from .computeCorr import getCorrCombinations +from .computeCorr import make_dir def takeOnlyDirs(paths): removePaths = [] diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index 7cbab39..c063b61 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -11,7 +11,7 @@ from scipy.signal import argrelextrema import matplotlib.pyplot as plt from itertools import repeat -from preprocess import get_all_stores_for_combining_data +from .preprocess import get_all_stores_for_combining_data def takeOnlyDirs(paths): removePaths = [] diff --git a/src/guppy/main.py b/src/guppy/main.py new file mode 100644 index 0000000..038114c --- /dev/null +++ b/src/guppy/main.py @@ -0,0 +1,13 @@ +""" +Main entry point for GuPPy (Guided Photometry Analysis in Python) +""" +import panel as pn +from .savingInputParameters import savingInputParameters + +def main(): + """Main entry point for GuPPy""" + template = savingInputParameters() + pn.serve(template, show=True) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 916d114..9c73569 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -15,7 +15,7 @@ from scipy.optimize import curve_fit import matplotlib.pyplot as plt from matplotlib.widgets import MultiCursor -from combineDataFn import processTimestampsForCombiningData +from .combineDataFn import processTimestampsForCombiningData plt.switch_backend('TKAgg') def takeOnlyDirs(paths): diff --git a/src/guppy/readTevTsq.py b/src/guppy/readTevTsq.py index e5defe2..409ddca 100755 --- a/src/guppy/readTevTsq.py +++ b/src/guppy/readTevTsq.py @@ -550,10 +550,10 @@ def readRawData(inputParameters): insertLog('Raw data fetched and saved.', logging.INFO) insertLog("#" * 400, logging.INFO) -if __name__ == "__main__": +def main(input_parameters): print('run') try: - readRawData(json.loads(sys.argv[1])) + readRawData(input_parameters) insertLog('#'*400, logging.INFO) except Exception as e: with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: @@ -561,3 +561,7 @@ def readRawData(inputParameters): insertLog(f"An error occurred: {e}", logging.ERROR) raise e +if __name__ == "__main__": + input_parameters = json.loads(sys.argv[1]) + 
main(input_parameters=input_parameters) + diff --git a/src/guppy/savingInputParameters.ipynb b/src/guppy/savingInputParameters.ipynb deleted file mode 100755 index 1b50a12..0000000 --- a/src/guppy/savingInputParameters.ipynb +++ /dev/null @@ -1,575 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import sys\n", - "import time\n", - "import subprocess\n", - "import json\n", - "import panel as pn \n", - "import numpy as np\n", - "import pandas as pd\n", - "import logging\n", - "import tkinter as tk\n", - "from tkinter import ttk\n", - "from tkinter import filedialog\n", - "from threading import Thread\n", - "from preprocess import extractTsAndSignal\n", - "from visualizePlot import visualizeResults\n", - "from saveStoresList import execute\n", - "pn.extension()\n", - "\n", - "log_file = os.path.join('.','..','guppy.log')\n", - "if os.path.exists(log_file):\n", - " os.remove(log_file)\n", - "else:\n", - " pass\n", - "\n", - "# Create the main window\n", - "folder_selection = tk.Tk()\n", - "folder_selection.title(\"Select the folder path where your data is located\")\n", - "folder_selection.geometry(\"700x200\")\n", - "def select_folder():\n", - " global folder_path\n", - " folder_path = filedialog.askdirectory(title=\"Select the folder path where your data is located\")\n", - " if folder_path:\n", - " print(f\"Folder path set to {folder_path}\")\n", - " folder_selection.destroy()\n", - " else:\n", - " folder_path = os.path.expanduser('~')\n", - " print(f\"Folder path set to {folder_path}\")\n", - "\n", - "select_button = ttk.Button(folder_selection, text=\"Select a Folder\", command=select_folder)\n", - "select_button.pack(pady=5)\n", - "folder_selection.mainloop()\n", - "\n", - "current_dir = os.getcwd()\n", - "\n", - "def insertLog(text, level):\n", - " file = os.path.join('.','..','guppy.log')\n", - " format = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n", - " infoLog = logging.FileHandler(file)\n", - " infoLog.setFormatter(format)\n", - " logger = logging.getLogger(file)\n", - " logger.setLevel(level)\n", - " \n", - " if not logger.handlers:\n", - " logger.addHandler(infoLog)\n", - " if level == logging.DEBUG:\n", - " logger.debug(text)\n", - " if level == logging.INFO:\n", - " logger.info(text)\n", - " if level == logging.ERROR:\n", - " logger.exception(text)\n", - " if level == logging.WARNING:\n", - " logger.warning(text)\n", - " \n", - " infoLog.close()\n", - " logger.removeHandler(infoLog)\n", - "\n", - "def make_dir(filepath):\n", - " op = os.path.join(filepath, 'inputParameters')\n", - " if not os.path.exists(op):\n", - " os.mkdir(op)\n", - " return op\n", - "\n", - "def readRawData():\n", - " inputParameters = getInputParameters()\n", - " subprocess.call([\"python\", os.path.join(current_dir,\"GuPPy\",\"readTevTsq.py\"), json.dumps(inputParameters)])\n", - "\n", - "def extractTs():\n", - " inputParameters = getInputParameters()\n", - " subprocess.call([\"python\", os.path.join(current_dir,\"GuPPy\",\"preprocess.py\"), json.dumps(inputParameters)])\n", - "\n", - "def psthComputation():\n", - " inputParameters = getInputParameters()\n", - " inputParameters['curr_dir'] = current_dir\n", - " subprocess.call([\"python\", os.path.join(current_dir,\"GuPPy\",\"computePsth.py\"), json.dumps(inputParameters)])\n", - "\n", - "\n", - "def readPBIncrementValues(progressBar):\n", - " print(\"Read progress bar increment values function started...\")\n", - " file_path = 
os.path.join(os.path.expanduser('~'), 'pbSteps.txt')\n", - " if os.path.exists(file_path):\n", - " os.remove(file_path)\n", - " increment, maximum = 0, 100\n", - " progressBar.value = increment\n", - " progressBar.bar_color = 'success'\n", - " while True:\n", - " try:\n", - " with open(file_path, 'r') as file:\n", - " content = file.readlines()\n", - " if len(content)==0:\n", - " pass\n", - " else:\n", - " maximum = int(content[0])\n", - " increment = int(content[-1])\n", - " \n", - " if increment==-1:\n", - " progressBar.bar_color = 'danger'\n", - " os.remove(file_path)\n", - " break\n", - " progressBar.max = maximum\n", - " progressBar.value = increment\n", - " time.sleep(0.001) \n", - " except FileNotFoundError:\n", - " time.sleep(0.001)\n", - " except PermissionError:\n", - " time.sleep(0.001) \n", - " except Exception as e:\n", - " # Handle other exceptions that may occur\n", - " print(f\"An error occurred while reading the file: {e}\")\n", - " break\n", - " if increment==maximum:\n", - " os.remove(file_path)\n", - " break\n", - "\n", - " print(\"Read progress bar increment values stopped.\")\n", - " \n", - "# progress bars = PB\n", - "read_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=200, sizing_mode=\"stretch_width\")\n", - "extract_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=200, sizing_mode=\"stretch_width\")\n", - "psth_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=200, sizing_mode=\"stretch_width\")\n", - "\n", - "\n", - "template = pn.template.MaterialTemplate(title='Input Parameters GUI')\n", - "\n", - "mark_down_1 = pn.pane.Markdown(\"\"\"**Select folders for the analysis from the file selector below**\"\"\", width=600)\n", - "\n", - "files_1 = pn.widgets.FileSelector(folder_path, name='folderNames', height=300, width=800)\n", - "\n", - "\n", - "explain_time_artifacts = pn.pane.Markdown(\"\"\"\n", - " - ***Number of cores :*** Number of cores used for analysis. Try to \n", - " keep it less than the number of cores in your machine. \n", - " - ***Combine Data? :*** Make this parameter ``` True ``` if user wants to combine \n", - " the data, especially when there is two different \n", - " data files for the same recording session.
\n", - " - ***Isosbestic Control Channel? :*** Make this parameter ``` False ``` if user\n", - " does not want to use isosbestic control channel in the analysis.
\n", - " - ***Eliminate first few seconds :*** It is the parameter to cut out first x seconds\n", - " from the data. Default is 1 seconds.
\n", - " - ***Window for Moving Average filter :*** The filtering of signals\n", - " is done using moving average filter. Default window used for moving \n", - " average filter is 100 datapoints. Change it based on the requirement.
\n", - " - ***Moving Window (transients detection) :*** Transients in the z-score \n", - " and/or \\u0394F/F are detected using this moving window. \n", - " Default is 15 seconds. Change it based on the requirement.
\n", - " - ***High Amplitude filtering threshold (HAFT) (transients detection) :*** High amplitude\n", - " events greater than x times the MAD above the median are filtered out. Here, x is \n", - " high amplitude filtering threshold. Default is 2.\n", - " - ***Transients detection threshold (TD Thresh):*** Peaks with local maxima greater than x times\n", - " the MAD above the median of the trace (after filtering high amplitude events) are detected\n", - " as transients. Here, x is transients detection threshold. Default is 3.\n", - " - ***Number of channels (Neurophotometrics only) :*** Number of\n", - " channels used while recording, when data files has no column names mentioning \"Flags\" \n", - " or \"LedState\".\n", - " - ***removeArtifacts? :*** Make this parameter ``` True``` if there are \n", - " artifacts and user wants to remove the artifacts.\n", - " - ***removeArtifacts method :*** Selecting ```concatenate``` will remove bad \n", - " chunks and concatenate the selected good chunks together.\n", - " Selecting ```replace with NaN``` will replace bad chunks with NaN\n", - " values.\n", - " \"\"\")\n", - "\n", - "timeForLightsTurnOn = pn.widgets.LiteralInput(name='Eliminate first few seconds (int)', value=1, type=int, width=250)\n", - "\n", - "isosbestic_control = pn.widgets.Select(name='Isosbestic Control Channel? (bool)', value=True, options=[True, False], width=250)\n", - "\n", - "numberOfCores = pn.widgets.LiteralInput(name='# of cores (int)', value=2, type=int, width=100)\n", - "\n", - "combine_data = pn.widgets.Select(name='Combine Data? (bool)', value=False, options=[True, False], width=125)\n", - "\n", - "computePsth = pn.widgets.Select(name='z_score and/or \\u0394F/F? (psth)', options=['z_score', 'dff', 'Both'], width=250)\n", - "\n", - "transients = pn.widgets.Select(name='z_score and/or \\u0394F/F? (transients)', options=['z_score', 'dff', 'Both'], width=250)\n", - "\n", - "plot_zScore_dff = pn.widgets.Select(name='z-score plot and/or \\u0394F/F plot?', options=['z_score', 'dff', 'Both', 'None'], value='None', width=250)\n", - "\n", - "moving_wd = pn.widgets.LiteralInput(name='Moving Window for transients detection (s) (int)', value=15, type=int, width=250)\n", - "\n", - "highAmpFilt = pn.widgets.LiteralInput(name='HAFT (int)', value=2, type=int, width=120)\n", - "\n", - "transientsThresh = pn.widgets.LiteralInput(name='TD Thresh (int)', value=3, type=int, width=120)\n", - "\n", - "moving_avg_filter = pn.widgets.LiteralInput(name='Window for Moving Average filter (int)', value=100, type=int, width=250)\n", - "\n", - "removeArtifacts = pn.widgets.Select(name='removeArtifacts? 
(bool)', value=False, options=[True, False], width=125)\n", - "\n", - "artifactsRemovalMethod = pn.widgets.Select(name='removeArtifacts method', \n", - " value='concatenate', \n", - " options=['concatenate', 'replace with NaN'],\n", - " width=100)\n", - "\n", - "no_channels_np = pn.widgets.LiteralInput(name='Number of channels (Neurophotometrics only)',\n", - " value=2, type=int, width=250)\n", - "\n", - "z_score_computation = pn.widgets.Select(name='z-score computation Method', \n", - " options=['standard z-score', 'baseline z-score', 'modified z-score'], \n", - " value='standard z-score', width=200)\n", - "baseline_wd_strt = pn.widgets.LiteralInput(name='Baseline Window Start Time (s) (int)', value=0, type=int, width=200)\n", - "baseline_wd_end = pn.widgets.LiteralInput(name='Baseline Window End Time (s) (int)', value=0, type=int, width=200)\n", - "\n", - "explain_z_score = pn.pane.Markdown(\"\"\"\n", - " ***Note :***
\n", - " - Details about z-score computation methods are explained in Github wiki.
\n", - " - The details will make user understand what computation method to use for \n", - " their data.
\n", - " - Baseline Window Parameters should be kept 0 unless you are using baseline
\n", - " z-score computation method. The parameters are in seconds.\n", - " \"\"\", width=500)\n", - "\n", - "explain_nsec = pn.pane.Markdown(\"\"\"\n", - " - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval\n", - " is set so that if the time difference between two timestamps is less than this defined time\n", - " interval, it will be deleted for the calculation of PSTH.\n", - " - ***Compute Cross-correlation :*** Make this parameter ```True```, when user wants\n", - " to compute cross-correlation between PSTHs of two different signals or signals \n", - " recorded from different brain regions.\n", - " \"\"\", width=500)\n", - "\n", - "nSecPrev = pn.widgets.LiteralInput(name='Seconds before 0 (int)', value=-10, type=int, width=120)\n", - "\n", - "nSecPost = pn.widgets.LiteralInput(name='Seconds after 0 (int)', value=20, type=int, width=120)\n", - "\n", - "computeCorr = pn.widgets.Select(name='Compute Cross-correlation (bool)', \n", - " options=[True, False], \n", - " value=False, width=160)\n", - "\n", - "timeInterval = pn.widgets.LiteralInput(name='Time Interval (s)', value=2, type=int, width=120)\n", - "\n", - "use_time_or_trials = pn.widgets.Select(name='Bin PSTH trials (str)', \n", - " options = ['Time (min)', '# of trials'],\n", - " value='Time (min)', width=120)\n", - "\n", - "bin_psth_trials = pn.widgets.LiteralInput(name='Time(min) / # of trials \\n for binning? (int)', value=0, type=int, width=160)\n", - "\n", - "explain_baseline = pn.pane.Markdown(\"\"\"\n", - " ***Note :***
\n", - " - If user does not want to do baseline correction, \n", - " put both parameters 0.
\n", - " - If the first event timestamp is less than the length of baseline\n", - " window, it will be rejected in the PSTH computation step.
\n", - " - Baseline parameters must be within the PSTH parameters \n", - " set in the PSTH parameters section.\n", - " \"\"\", width=500)\n", - "\n", - "baselineCorrectionStart = pn.widgets.LiteralInput(name='Baseline Correction Start time(int)', value=-5, type=int, width=200)\n", - "\n", - "baselineCorrectionEnd = pn.widgets.LiteralInput(name='Baseline Correction End time(int)', value=0, type=int, width=200)\n", - "\n", - "zscore_param_wd = pn.WidgetBox(\"### Z-score Parameters\", explain_z_score,\n", - " z_score_computation,\n", - " pn.Row(baseline_wd_strt, baseline_wd_end),\n", - " width=500, height=350)\n", - "\n", - "psth_param_wd = pn.WidgetBox(\"### PSTH Parameters\", explain_nsec, \n", - " pn.Row(nSecPrev, nSecPost, computeCorr), \n", - " pn.Row(timeInterval, use_time_or_trials, bin_psth_trials), \n", - " width=500, height=350)\n", - "\n", - "baseline_param_wd = pn.WidgetBox(\"### Baseline Parameters\", explain_baseline, \n", - " pn.Row(baselineCorrectionStart, baselineCorrectionEnd), \n", - " width=500, height=300)\n", - "\n", - "peak_explain = pn.pane.Markdown(\"\"\"\n", - " ***Note :***
\n", - " - Peak and area are computed between the window set below.
\n", - " - Peak and AUC parameters must be within the PSTH parameters set in the PSTH parameters section.
\n", - " - Please make sure when user changes the parameters in the table below, click on any other cell after \n", - " changing a value in a particular cell.\n", - " \"\"\", width=500)\n", - "\n", - "\n", - "start_end_point_df = pd.DataFrame({'Peak Start time': [-5, 0, 5, np.nan, np.nan, \n", - " np.nan, np.nan, np.nan, np.nan, np.nan], \n", - " 'Peak End time': [0, 3, 10, np.nan, np.nan, \n", - " np.nan, np.nan, np.nan, np.nan, np.nan]})\n", - "\n", - "df_widget = pn.widgets.Tabulator(start_end_point_df, name='DataFrame', show_index=False, row_height=20, width=450)\n", - "\n", - "\n", - "peak_param_wd = pn.WidgetBox(\"### Peak and AUC Parameters\", \n", - " peak_explain, df_widget,\n", - " height=400)\n", - "\n", - "\n", - "\n", - "mark_down_2 = pn.pane.Markdown(\"\"\"**Select folders for the average analysis from the file selector below**\"\"\", width=600)\n", - "\n", - "files_2 = pn.widgets.FileSelector(folder_path, name='folderNamesForAvg', height=300, width=800)\n", - "\n", - "averageForGroup = pn.widgets.Select(name='Average Group? (bool)', value=False, options=[True, False], width=400)\n", - "\n", - "visualizeAverageResults = pn.widgets.Select(name='Visualize Average Results? (bool)', \n", - " value=False, options=[True, False], width=400)\n", - "\n", - "visualize_zscore_or_dff = pn.widgets.Select(name='z-score or \\u0394F/F? (for visualization)', options=['z_score', 'dff'], width=400)\n", - "\n", - "individual_analysis_wd_2 = pn.Column(\n", - " explain_time_artifacts, pn.Row(numberOfCores, combine_data), \n", - " isosbestic_control, timeForLightsTurnOn,\n", - " moving_avg_filter, computePsth, transients, plot_zScore_dff, \n", - " moving_wd, pn.Row(highAmpFilt, transientsThresh),\n", - " no_channels_np, pn.Row(removeArtifacts, artifactsRemovalMethod)\n", - " )\n", - "\n", - "group_analysis_wd_1 = pn.Column(mark_down_2, files_2, averageForGroup, width=800)\n", - "\n", - "visualization_wd = pn.Row(visualize_zscore_or_dff, visualizeAverageResults, width=800)\n", - "\n", - "\n", - "def getInputParameters():\n", - " abspath = getAbsPath()\n", - " inputParameters = {\n", - " \"abspath\": abspath[0],\n", - " \"folderNames\": files_1.value,\n", - " \"numberOfCores\": numberOfCores.value,\n", - " \"combine_data\": combine_data.value,\n", - " \"isosbestic_control\": isosbestic_control.value,\n", - " \"timeForLightsTurnOn\": timeForLightsTurnOn.value,\n", - " \"filter_window\": moving_avg_filter.value,\n", - " \"removeArtifacts\": removeArtifacts.value,\n", - " \"artifactsRemovalMethod\": artifactsRemovalMethod.value,\n", - " \"noChannels\": no_channels_np.value,\n", - " \"zscore_method\": z_score_computation.value,\n", - " \"baselineWindowStart\": baseline_wd_strt.value,\n", - " \"baselineWindowEnd\": baseline_wd_end.value,\n", - " \"nSecPrev\": nSecPrev.value,\n", - " \"nSecPost\": nSecPost.value,\n", - " \"computeCorr\": computeCorr.value,\n", - " \"timeInterval\": timeInterval.value,\n", - " \"bin_psth_trials\": bin_psth_trials.value,\n", - " \"use_time_or_trials\": use_time_or_trials.value,\n", - " \"baselineCorrectionStart\": baselineCorrectionStart.value,\n", - " \"baselineCorrectionEnd\": baselineCorrectionEnd.value,\n", - " \"peak_startPoint\": list(df_widget.value['Peak Start time']), #startPoint.value,\n", - " \"peak_endPoint\": list(df_widget.value['Peak End time']), #endPoint.value,\n", - " \"selectForComputePsth\": computePsth.value,\n", - " \"selectForTransientsComputation\": transients.value,\n", - " \"moving_window\": moving_wd.value,\n", - " \"highAmpFilt\": 
highAmpFilt.value,\n", - " \"transientsThresh\": transientsThresh.value,\n", - " \"plot_zScore_dff\": plot_zScore_dff.value,\n", - " \"visualize_zscore_or_dff\": visualize_zscore_or_dff.value,\n", - " \"folderNamesForAvg\": files_2.value,\n", - " \"averageForGroup\": averageForGroup.value,\n", - " \"visualizeAverageResults\": visualizeAverageResults.value\n", - " }\n", - " return inputParameters\n", - "\n", - "def checkSameLocation(arr, abspath):\n", - " #abspath = []\n", - " for i in range(len(arr)):\n", - " abspath.append(os.path.dirname(arr[i]))\n", - " abspath = np.asarray(abspath)\n", - " abspath = np.unique(abspath)\n", - " if len(abspath)>1:\n", - " insertLog('All the folders selected should be at the same location', \n", - " logging.ERROR)\n", - " raise Exception('All the folders selected should be at the same location')\n", - " \n", - " return abspath\n", - "\n", - "def getAbsPath():\n", - " arr_1, arr_2 = files_1.value, files_2.value \n", - " if len(arr_1)==0 and len(arr_2)==0:\n", - " insertLog('No folder is selected for analysis',\n", - " logging.ERROR)\n", - " raise Exception('No folder is selected for analysis')\n", - " \n", - " abspath = []\n", - " if len(arr_1)>0:\n", - " abspath = checkSameLocation(arr_1, abspath)\n", - " else:\n", - " abspath = checkSameLocation(arr_2, abspath)\n", - " \n", - " abspath = np.unique(abspath)\n", - " if len(abspath)>1:\n", - " insertLog('All the folders selected should be at the same location',\n", - " logging.ERROR)\n", - " raise Exception('All the folders selected should be at the same location')\n", - " return abspath\n", - "\n", - "def onclickProcess(event=None):\n", - " \n", - " insertLog('Saving Input Parameters file.',\n", - " logging.DEBUG)\n", - " abspath = getAbsPath()\n", - " analysisParameters = {\n", - " \"combine_data\": combine_data.value,\n", - " \"isosbestic_control\": isosbestic_control.value,\n", - " \"timeForLightsTurnOn\": timeForLightsTurnOn.value,\n", - " \"filter_window\": moving_avg_filter.value,\n", - " \"removeArtifacts\": removeArtifacts.value,\n", - " \"noChannels\": no_channels_np.value,\n", - " \"zscore_method\": z_score_computation.value,\n", - " \"baselineWindowStart\": baseline_wd_strt.value,\n", - " \"baselineWindowEnd\": baseline_wd_end.value,\n", - " \"nSecPrev\": nSecPrev.value,\n", - " \"nSecPost\": nSecPost.value,\n", - " \"timeInterval\": timeInterval.value,\n", - " \"bin_psth_trials\": bin_psth_trials.value,\n", - " \"use_time_or_trials\": use_time_or_trials.value,\n", - " \"baselineCorrectionStart\": baselineCorrectionStart.value,\n", - " \"baselineCorrectionEnd\": baselineCorrectionEnd.value,\n", - " \"peak_startPoint\": list(df_widget.value['Peak Start time']), #startPoint.value,\n", - " \"peak_endPoint\": list(df_widget.value['Peak End time']), #endPoint.value,\n", - " \"selectForComputePsth\": computePsth.value,\n", - " \"selectForTransientsComputation\": transients.value,\n", - " \"moving_window\": moving_wd.value,\n", - " \"highAmpFilt\": highAmpFilt.value,\n", - " \"transientsThresh\": transientsThresh.value \n", - " }\n", - " for folder in files_1.value:\n", - " with open(os.path.join(folder, 'GuPPyParamtersUsed.json'), 'w') as f:\n", - " json.dump(analysisParameters, f, indent=4)\n", - " insertLog(f\"Input Parameters file saved at {folder}\",\n", - " logging.INFO)\n", - " \n", - " insertLog('#'*400, logging.INFO)\n", - " \n", - " #path.value = (os.path.join(op, 'inputParameters.json')).replace('\\\\', '/')\n", - " print('Input Parameters File Saved.')\n", - "\n", - "def 
onclickStoresList(event=None):\n", - " inputParameters = getInputParameters()\n", - " execute(inputParameters)\n", - "\n", - "def onclickVisualization(event=None):\n", - " inputParameters = getInputParameters()\n", - " visualizeResults(inputParameters)\n", - "\n", - "def onclickreaddata(event=None):\n", - " thread = Thread(target=readRawData)\n", - " thread.start()\n", - " readPBIncrementValues(read_progress)\n", - " thread.join()\n", - "\n", - "def onclickextractts(event=None):\n", - " thread = Thread(target=extractTs)\n", - " thread.start()\n", - " readPBIncrementValues(extract_progress)\n", - " thread.join()\n", - " \n", - "def onclickpsth(event=None):\n", - " thread = Thread(target=psthComputation)\n", - " thread.start()\n", - " readPBIncrementValues(psth_progress)\n", - " thread.join()\n", - " \n", - "\n", - " \n", - "mark_down_ip = pn.pane.Markdown(\"\"\"**Step 1 : Save Input Parameters**\"\"\", width=500)\n", - "mark_down_ip_note = pn.pane.Markdown(\"\"\"***Note : ***
\n", - " - Save Input Parameters will save input parameters used for the analysis\n", - " in all the folders you selected for the analysis (useful for future\n", - " reference). All analysis steps will run without saving input parameters.\n", - " \"\"\", width=500, sizing_mode=\"stretch_width\")\n", - "save_button = pn.widgets.Button(name='Save to file...', button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "mark_down_storenames = pn.pane.Markdown(\"\"\"**Step 2 : Open Storenames GUI
and save storenames**\"\"\", width=500)\n", - "open_storesList = pn.widgets.Button(name='Open Storenames GUI', button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "mark_down_read = pn.pane.Markdown(\"\"\"**Step 3 : Read Raw Data**\"\"\", width=500)\n", - "read_rawData = pn.widgets.Button(name='Read Raw Data', button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "mark_down_extract = pn.pane.Markdown(\"\"\"**Step 4 : Extract timestamps
and its correction**\"\"\", width=500)\n", - "extract_ts = pn.widgets.Button(name=\"Extract timestamps and it's correction\", button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "mark_down_psth = pn.pane.Markdown(\"\"\"**Step 5 : PSTH Computation**\"\"\", width=500)\n", - "psth_computation = pn.widgets.Button(name=\"PSTH Computation\", button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "mark_down_visualization = pn.pane.Markdown(\"\"\"**Step 6 : Visualization**\"\"\", width=500)\n", - "open_visualization = pn.widgets.Button(name='Open Visualization GUI', button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "open_terminal = pn.widgets.Button(name='Open Terminal', button_type='primary', width=500, sizing_mode=\"stretch_width\", align='end')\n", - "\n", - "\n", - "save_button.on_click(onclickProcess)\n", - "open_storesList.on_click(onclickStoresList)\n", - "read_rawData.on_click(onclickreaddata)\n", - "extract_ts.on_click(onclickextractts)\n", - "psth_computation.on_click(onclickpsth)\n", - "open_visualization.on_click(onclickVisualization)\n", - "\n", - "\n", - "template.sidebar.append(mark_down_ip)\n", - "template.sidebar.append(mark_down_ip_note)\n", - "template.sidebar.append(save_button)\n", - "#template.sidebar.append(path)\n", - "template.sidebar.append(mark_down_storenames)\n", - "template.sidebar.append(open_storesList)\n", - "template.sidebar.append(mark_down_read)\n", - "template.sidebar.append(read_rawData)\n", - "template.sidebar.append(read_progress)\n", - "template.sidebar.append(mark_down_extract)\n", - "template.sidebar.append(extract_ts)\n", - "template.sidebar.append(extract_progress)\n", - "template.sidebar.append(mark_down_psth)\n", - "template.sidebar.append(psth_computation)\n", - "template.sidebar.append(psth_progress)\n", - "template.sidebar.append(mark_down_visualization)\n", - "template.sidebar.append(open_visualization)\n", - "#template.sidebar.append(open_terminal)\n", - "\n", - "\n", - "psth_baseline_param = pn.Column(zscore_param_wd, psth_param_wd, baseline_param_wd, peak_param_wd)\n", - "\n", - "widget = pn.Column(mark_down_1, files_1, pn.Row(individual_analysis_wd_2, psth_baseline_param))\n", - "\n", - "#file_selector = pn.WidgetBox(files_1)\n", - "styles = dict(background='WhiteSmoke')\n", - "individual = pn.Card(widget, title='Individual Analysis', styles=styles, width=850)\n", - "group = pn.Card(group_analysis_wd_1, title='Group Analysis', styles=styles, width=850)\n", - "visualize = pn.Card(visualization_wd, title='Visualization Parameters', styles=styles, width=850)\n", - "\n", - "#template.main.append(file_selector)\n", - "template.main.append(individual)\n", - "template.main.append(group)\n", - "template.main.append(visualize)\n", - "\n", - "template.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.6 (default, Dec 7 2023, 05:42:47) \n[Clang 15.0.0 (clang-1500.1.0.2.5)]" - }, - "vscode": { - "interpreter": { - "hash": 
"31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6" - } - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py new file mode 100644 index 0000000..17aa10f --- /dev/null +++ b/src/guppy/savingInputParameters.py @@ -0,0 +1,526 @@ +import os +import sys +import time +import subprocess +import json +import panel as pn +import numpy as np +import pandas as pd +import logging +import tkinter as tk +from tkinter import ttk +from tkinter import filedialog +from threading import Thread +from .preprocess import extractTsAndSignal +from .visualizePlot import visualizeResults +from .saveStoresList import execute + +def savingInputParameters(): + pn.extension() + + log_file = os.path.join('.','..','guppy.log') + if os.path.exists(log_file): + os.remove(log_file) + else: + pass + + # Create the main window + folder_selection = tk.Tk() + folder_selection.title("Select the folder path where your data is located") + folder_selection.geometry("700x200") + def select_folder(): + global folder_path + folder_path = filedialog.askdirectory(title="Select the folder path where your data is located") + if folder_path: + print(f"Folder path set to {folder_path}") + folder_selection.destroy() + else: + folder_path = os.path.expanduser('~') + print(f"Folder path set to {folder_path}") + + select_button = ttk.Button(folder_selection, text="Select a Folder", command=select_folder) + select_button.pack(pady=5) + folder_selection.mainloop() + + current_dir = os.getcwd() + + def insertLog(text, level): + file = os.path.join('.','..','guppy.log') + format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + infoLog = logging.FileHandler(file) + infoLog.setFormatter(format) + logger = logging.getLogger(file) + logger.setLevel(level) + + if not logger.handlers: + logger.addHandler(infoLog) + if level == logging.DEBUG: + logger.debug(text) + if level == logging.INFO: + logger.info(text) + if level == logging.ERROR: + logger.exception(text) + if level == logging.WARNING: + logger.warning(text) + + infoLog.close() + logger.removeHandler(infoLog) + + def make_dir(filepath): + op = os.path.join(filepath, 'inputParameters') + if not os.path.exists(op): + os.mkdir(op) + return op + + def readRawData(): + inputParameters = getInputParameters() + from .readTevTsq import main as read_tev_tsq_main + read_tev_tsq_main(input_parameters=inputParameters) + + def extractTs(): + inputParameters = getInputParameters() + subprocess.call(["python", os.path.join(current_dir,"GuPPy","preprocess.py"), json.dumps(inputParameters)]) + + def psthComputation(): + inputParameters = getInputParameters() + inputParameters['curr_dir'] = current_dir + subprocess.call(["python", os.path.join(current_dir,"GuPPy","computePsth.py"), json.dumps(inputParameters)]) + + + def readPBIncrementValues(progressBar): + print("Read progress bar increment values function started...") + file_path = os.path.join(os.path.expanduser('~'), 'pbSteps.txt') + if os.path.exists(file_path): + os.remove(file_path) + increment, maximum = 0, 100 + progressBar.value = increment + progressBar.bar_color = 'success' + while True: + try: + with open(file_path, 'r') as file: + content = file.readlines() + if len(content)==0: + pass + else: + maximum = int(content[0]) + increment = int(content[-1]) + + if increment==-1: + progressBar.bar_color = 'danger' + os.remove(file_path) + break + progressBar.max = maximum + progressBar.value = increment + time.sleep(0.001) + except 
FileNotFoundError: + time.sleep(0.001) + except PermissionError: + time.sleep(0.001) + except Exception as e: + # Handle other exceptions that may occur + print(f"An error occurred while reading the file: {e}") + break + if increment==maximum: + os.remove(file_path) + break + + print("Read progress bar increment values stopped.") + + # progress bars = PB + read_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=200, sizing_mode="stretch_width") + extract_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=200, sizing_mode="stretch_width") + psth_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=200, sizing_mode="stretch_width") + + + template = pn.template.MaterialTemplate(title='Input Parameters GUI') + + mark_down_1 = pn.pane.Markdown("""**Select folders for the analysis from the file selector below**""", width=600) + + files_1 = pn.widgets.FileSelector(folder_path, name='folderNames', height=300, width=800) + + + explain_time_artifacts = pn.pane.Markdown(""" + - ***Number of cores :*** Number of cores used for analysis. Try to + keep it less than the number of cores in your machine. + - ***Combine Data? :*** Make this parameter ``` True ``` if user wants to combine + the data, especially when there is two different + data files for the same recording session.
+ - ***Isosbestic Control Channel? :*** Make this parameter ``` False ``` if user + does not want to use isosbestic control channel in the analysis.
+ - ***Eliminate first few seconds :*** It is the parameter to cut out first x seconds + from the data. Default is 1 seconds.
+ - ***Window for Moving Average filter :*** The filtering of signals + is done using moving average filter. Default window used for moving + average filter is 100 datapoints. Change it based on the requirement.
+ - ***Moving Window (transients detection) :*** Transients in the z-score + and/or \u0394F/F are detected using this moving window. + Default is 15 seconds. Change it based on the requirement.
+ - ***High Amplitude filtering threshold (HAFT) (transients detection) :*** High amplitude + events greater than x times the MAD above the median are filtered out. Here, x is + high amplitude filtering threshold. Default is 2. + - ***Transients detection threshold (TD Thresh):*** Peaks with local maxima greater than x times + the MAD above the median of the trace (after filtering high amplitude events) are detected + as transients. Here, x is transients detection threshold. Default is 3. + - ***Number of channels (Neurophotometrics only) :*** Number of + channels used while recording, when data files has no column names mentioning "Flags" + or "LedState". + - ***removeArtifacts? :*** Make this parameter ``` True``` if there are + artifacts and user wants to remove the artifacts. + - ***removeArtifacts method :*** Selecting ```concatenate``` will remove bad + chunks and concatenate the selected good chunks together. + Selecting ```replace with NaN``` will replace bad chunks with NaN + values. + """) + + timeForLightsTurnOn = pn.widgets.LiteralInput(name='Eliminate first few seconds (int)', value=1, type=int, width=250) + + isosbestic_control = pn.widgets.Select(name='Isosbestic Control Channel? (bool)', value=True, options=[True, False], width=250) + + numberOfCores = pn.widgets.LiteralInput(name='# of cores (int)', value=2, type=int, width=100) + + combine_data = pn.widgets.Select(name='Combine Data? (bool)', value=False, options=[True, False], width=125) + + computePsth = pn.widgets.Select(name='z_score and/or \u0394F/F? (psth)', options=['z_score', 'dff', 'Both'], width=250) + + transients = pn.widgets.Select(name='z_score and/or \u0394F/F? (transients)', options=['z_score', 'dff', 'Both'], width=250) + + plot_zScore_dff = pn.widgets.Select(name='z-score plot and/or \u0394F/F plot?', options=['z_score', 'dff', 'Both', 'None'], value='None', width=250) + + moving_wd = pn.widgets.LiteralInput(name='Moving Window for transients detection (s) (int)', value=15, type=int, width=250) + + highAmpFilt = pn.widgets.LiteralInput(name='HAFT (int)', value=2, type=int, width=120) + + transientsThresh = pn.widgets.LiteralInput(name='TD Thresh (int)', value=3, type=int, width=120) + + moving_avg_filter = pn.widgets.LiteralInput(name='Window for Moving Average filter (int)', value=100, type=int, width=250) + + removeArtifacts = pn.widgets.Select(name='removeArtifacts? (bool)', value=False, options=[True, False], width=125) + + artifactsRemovalMethod = pn.widgets.Select(name='removeArtifacts method', + value='concatenate', + options=['concatenate', 'replace with NaN'], + width=100) + + no_channels_np = pn.widgets.LiteralInput(name='Number of channels (Neurophotometrics only)', + value=2, type=int, width=250) + + z_score_computation = pn.widgets.Select(name='z-score computation Method', + options=['standard z-score', 'baseline z-score', 'modified z-score'], + value='standard z-score', width=200) + baseline_wd_strt = pn.widgets.LiteralInput(name='Baseline Window Start Time (s) (int)', value=0, type=int, width=200) + baseline_wd_end = pn.widgets.LiteralInput(name='Baseline Window End Time (s) (int)', value=0, type=int, width=200) + + explain_z_score = pn.pane.Markdown(""" + ***Note :***
+ - Details about z-score computation methods are explained in Github wiki.
+ - The details will make user understand what computation method to use for + their data.
+ - Baseline Window Parameters should be kept 0 unless you are using baseline
+ z-score computation method. The parameters are in seconds. + """, width=500) + + explain_nsec = pn.pane.Markdown(""" + - ***Time Interval :*** To omit bursts of event timestamps, user defined time interval + is set so that if the time difference between two timestamps is less than this defined time + interval, it will be deleted for the calculation of PSTH. + - ***Compute Cross-correlation :*** Make this parameter ```True```, when user wants + to compute cross-correlation between PSTHs of two different signals or signals + recorded from different brain regions. + """, width=500) + + nSecPrev = pn.widgets.LiteralInput(name='Seconds before 0 (int)', value=-10, type=int, width=120) + + nSecPost = pn.widgets.LiteralInput(name='Seconds after 0 (int)', value=20, type=int, width=120) + + computeCorr = pn.widgets.Select(name='Compute Cross-correlation (bool)', + options=[True, False], + value=False, width=160) + + timeInterval = pn.widgets.LiteralInput(name='Time Interval (s)', value=2, type=int, width=120) + + use_time_or_trials = pn.widgets.Select(name='Bin PSTH trials (str)', + options = ['Time (min)', '# of trials'], + value='Time (min)', width=120) + + bin_psth_trials = pn.widgets.LiteralInput(name='Time(min) / # of trials \n for binning? (int)', value=0, type=int, width=160) + + explain_baseline = pn.pane.Markdown(""" + ***Note :***
+ - If user does not want to do baseline correction, + put both parameters 0.
+    - If the first event timestamp is less than the length of the baseline +    window, it will be rejected in the PSTH computation step.<br>
+ - Baseline parameters must be within the PSTH parameters + set in the PSTH parameters section. + """, width=500) + + baselineCorrectionStart = pn.widgets.LiteralInput(name='Baseline Correction Start time(int)', value=-5, type=int, width=200) + + baselineCorrectionEnd = pn.widgets.LiteralInput(name='Baseline Correction End time(int)', value=0, type=int, width=200) + + zscore_param_wd = pn.WidgetBox("### Z-score Parameters", explain_z_score, + z_score_computation, + pn.Row(baseline_wd_strt, baseline_wd_end), + width=500, height=350) + + psth_param_wd = pn.WidgetBox("### PSTH Parameters", explain_nsec, + pn.Row(nSecPrev, nSecPost, computeCorr), + pn.Row(timeInterval, use_time_or_trials, bin_psth_trials), + width=500, height=350) + + baseline_param_wd = pn.WidgetBox("### Baseline Parameters", explain_baseline, + pn.Row(baselineCorrectionStart, baselineCorrectionEnd), + width=500, height=300) + + peak_explain = pn.pane.Markdown(""" + ***Note :***
+    - Peak and area are computed within the window set below.<br>
+ - Peak and AUC parameters must be within the PSTH parameters set in the PSTH parameters section.
+ - Please make sure when user changes the parameters in the table below, click on any other cell after + changing a value in a particular cell. + """, width=500) + + + start_end_point_df = pd.DataFrame({'Peak Start time': [-5, 0, 5, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan], + 'Peak End time': [0, 3, 10, np.nan, np.nan, + np.nan, np.nan, np.nan, np.nan, np.nan]}) + + df_widget = pn.widgets.Tabulator(start_end_point_df, name='DataFrame', show_index=False, row_height=20, width=450) + + + peak_param_wd = pn.WidgetBox("### Peak and AUC Parameters", + peak_explain, df_widget, + height=400) + + + + mark_down_2 = pn.pane.Markdown("""**Select folders for the average analysis from the file selector below**""", width=600) + + files_2 = pn.widgets.FileSelector(folder_path, name='folderNamesForAvg', height=300, width=800) + + averageForGroup = pn.widgets.Select(name='Average Group? (bool)', value=False, options=[True, False], width=400) + + visualizeAverageResults = pn.widgets.Select(name='Visualize Average Results? (bool)', + value=False, options=[True, False], width=400) + + visualize_zscore_or_dff = pn.widgets.Select(name='z-score or \u0394F/F? (for visualization)', options=['z_score', 'dff'], width=400) + + individual_analysis_wd_2 = pn.Column( + explain_time_artifacts, pn.Row(numberOfCores, combine_data), + isosbestic_control, timeForLightsTurnOn, + moving_avg_filter, computePsth, transients, plot_zScore_dff, + moving_wd, pn.Row(highAmpFilt, transientsThresh), + no_channels_np, pn.Row(removeArtifacts, artifactsRemovalMethod) + ) + + group_analysis_wd_1 = pn.Column(mark_down_2, files_2, averageForGroup, width=800) + + visualization_wd = pn.Row(visualize_zscore_or_dff, visualizeAverageResults, width=800) + + + def getInputParameters(): + abspath = getAbsPath() + inputParameters = { + "abspath": abspath[0], + "folderNames": files_1.value, + "numberOfCores": numberOfCores.value, + "combine_data": combine_data.value, + "isosbestic_control": isosbestic_control.value, + "timeForLightsTurnOn": timeForLightsTurnOn.value, + "filter_window": moving_avg_filter.value, + "removeArtifacts": removeArtifacts.value, + "artifactsRemovalMethod": artifactsRemovalMethod.value, + "noChannels": no_channels_np.value, + "zscore_method": z_score_computation.value, + "baselineWindowStart": baseline_wd_strt.value, + "baselineWindowEnd": baseline_wd_end.value, + "nSecPrev": nSecPrev.value, + "nSecPost": nSecPost.value, + "computeCorr": computeCorr.value, + "timeInterval": timeInterval.value, + "bin_psth_trials": bin_psth_trials.value, + "use_time_or_trials": use_time_or_trials.value, + "baselineCorrectionStart": baselineCorrectionStart.value, + "baselineCorrectionEnd": baselineCorrectionEnd.value, + "peak_startPoint": list(df_widget.value['Peak Start time']), #startPoint.value, + "peak_endPoint": list(df_widget.value['Peak End time']), #endPoint.value, + "selectForComputePsth": computePsth.value, + "selectForTransientsComputation": transients.value, + "moving_window": moving_wd.value, + "highAmpFilt": highAmpFilt.value, + "transientsThresh": transientsThresh.value, + "plot_zScore_dff": plot_zScore_dff.value, + "visualize_zscore_or_dff": visualize_zscore_or_dff.value, + "folderNamesForAvg": files_2.value, + "averageForGroup": averageForGroup.value, + "visualizeAverageResults": visualizeAverageResults.value + } + return inputParameters + + def checkSameLocation(arr, abspath): + #abspath = [] + for i in range(len(arr)): + abspath.append(os.path.dirname(arr[i])) + abspath = np.asarray(abspath) + abspath = 
np.unique(abspath) + if len(abspath)>1: + insertLog('All the folders selected should be at the same location', + logging.ERROR) + raise Exception('All the folders selected should be at the same location') + + return abspath + + def getAbsPath(): + arr_1, arr_2 = files_1.value, files_2.value + if len(arr_1)==0 and len(arr_2)==0: + insertLog('No folder is selected for analysis', + logging.ERROR) + raise Exception('No folder is selected for analysis') + + abspath = [] + if len(arr_1)>0: + abspath = checkSameLocation(arr_1, abspath) + else: + abspath = checkSameLocation(arr_2, abspath) + + abspath = np.unique(abspath) + if len(abspath)>1: + insertLog('All the folders selected should be at the same location', + logging.ERROR) + raise Exception('All the folders selected should be at the same location') + return abspath + + def onclickProcess(event=None): + + insertLog('Saving Input Parameters file.', + logging.DEBUG) + abspath = getAbsPath() + analysisParameters = { + "combine_data": combine_data.value, + "isosbestic_control": isosbestic_control.value, + "timeForLightsTurnOn": timeForLightsTurnOn.value, + "filter_window": moving_avg_filter.value, + "removeArtifacts": removeArtifacts.value, + "noChannels": no_channels_np.value, + "zscore_method": z_score_computation.value, + "baselineWindowStart": baseline_wd_strt.value, + "baselineWindowEnd": baseline_wd_end.value, + "nSecPrev": nSecPrev.value, + "nSecPost": nSecPost.value, + "timeInterval": timeInterval.value, + "bin_psth_trials": bin_psth_trials.value, + "use_time_or_trials": use_time_or_trials.value, + "baselineCorrectionStart": baselineCorrectionStart.value, + "baselineCorrectionEnd": baselineCorrectionEnd.value, + "peak_startPoint": list(df_widget.value['Peak Start time']), #startPoint.value, + "peak_endPoint": list(df_widget.value['Peak End time']), #endPoint.value, + "selectForComputePsth": computePsth.value, + "selectForTransientsComputation": transients.value, + "moving_window": moving_wd.value, + "highAmpFilt": highAmpFilt.value, + "transientsThresh": transientsThresh.value + } + for folder in files_1.value: + with open(os.path.join(folder, 'GuPPyParamtersUsed.json'), 'w') as f: + json.dump(analysisParameters, f, indent=4) + insertLog(f"Input Parameters file saved at {folder}", + logging.INFO) + + insertLog('#'*400, logging.INFO) + + #path.value = (os.path.join(op, 'inputParameters.json')).replace('\\', '/') + print('Input Parameters File Saved.') + + def onclickStoresList(event=None): + inputParameters = getInputParameters() + execute(inputParameters) + + def onclickVisualization(event=None): + inputParameters = getInputParameters() + visualizeResults(inputParameters) + + def onclickreaddata(event=None): + thread = Thread(target=readRawData) + thread.start() + readPBIncrementValues(read_progress) + thread.join() + + def onclickextractts(event=None): + thread = Thread(target=extractTs) + thread.start() + readPBIncrementValues(extract_progress) + thread.join() + + def onclickpsth(event=None): + thread = Thread(target=psthComputation) + thread.start() + readPBIncrementValues(psth_progress) + thread.join() + + + + mark_down_ip = pn.pane.Markdown("""**Step 1 : Save Input Parameters**""", width=500) + mark_down_ip_note = pn.pane.Markdown("""***Note : ***
+    - Save Input Parameters will save the input parameters used for the analysis +    in all the folders you selected (useful for future +    reference). All analysis steps will still run even if you do not save the input parameters. +    """, width=500, sizing_mode="stretch_width") +    save_button = pn.widgets.Button(name='Save to file...', button_type='primary', width=500, sizing_mode="stretch_width", align='end') +    mark_down_storenames = pn.pane.Markdown("""**Step 2 : Open Storenames GUI<br>
and save storenames**""", width=500) + open_storesList = pn.widgets.Button(name='Open Storenames GUI', button_type='primary', width=500, sizing_mode="stretch_width", align='end') + mark_down_read = pn.pane.Markdown("""**Step 3 : Read Raw Data**""", width=500) + read_rawData = pn.widgets.Button(name='Read Raw Data', button_type='primary', width=500, sizing_mode="stretch_width", align='end') + mark_down_extract = pn.pane.Markdown("""**Step 4 : Extract timestamps
and its correction**""", width=500) + extract_ts = pn.widgets.Button(name="Extract timestamps and it's correction", button_type='primary', width=500, sizing_mode="stretch_width", align='end') + mark_down_psth = pn.pane.Markdown("""**Step 5 : PSTH Computation**""", width=500) + psth_computation = pn.widgets.Button(name="PSTH Computation", button_type='primary', width=500, sizing_mode="stretch_width", align='end') + mark_down_visualization = pn.pane.Markdown("""**Step 6 : Visualization**""", width=500) + open_visualization = pn.widgets.Button(name='Open Visualization GUI', button_type='primary', width=500, sizing_mode="stretch_width", align='end') + open_terminal = pn.widgets.Button(name='Open Terminal', button_type='primary', width=500, sizing_mode="stretch_width", align='end') + + + save_button.on_click(onclickProcess) + open_storesList.on_click(onclickStoresList) + read_rawData.on_click(onclickreaddata) + extract_ts.on_click(onclickextractts) + psth_computation.on_click(onclickpsth) + open_visualization.on_click(onclickVisualization) + + + template.sidebar.append(mark_down_ip) + template.sidebar.append(mark_down_ip_note) + template.sidebar.append(save_button) + #template.sidebar.append(path) + template.sidebar.append(mark_down_storenames) + template.sidebar.append(open_storesList) + template.sidebar.append(mark_down_read) + template.sidebar.append(read_rawData) + template.sidebar.append(read_progress) + template.sidebar.append(mark_down_extract) + template.sidebar.append(extract_ts) + template.sidebar.append(extract_progress) + template.sidebar.append(mark_down_psth) + template.sidebar.append(psth_computation) + template.sidebar.append(psth_progress) + template.sidebar.append(mark_down_visualization) + template.sidebar.append(open_visualization) + #template.sidebar.append(open_terminal) + + + psth_baseline_param = pn.Column(zscore_param_wd, psth_param_wd, baseline_param_wd, peak_param_wd) + + widget = pn.Column(mark_down_1, files_1, pn.Row(individual_analysis_wd_2, psth_baseline_param)) + + #file_selector = pn.WidgetBox(files_1) + styles = dict(background='WhiteSmoke') + individual = pn.Card(widget, title='Individual Analysis', styles=styles, width=850) + group = pn.Card(group_analysis_wd_1, title='Group Analysis', styles=styles, width=850) + visualize = pn.Card(visualization_wd, title='Visualization Parameters', styles=styles, width=850) + + #template.main.append(file_selector) + template.main.append(individual) + template.main.append(group) + template.main.append(visualize) + + return template \ No newline at end of file diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index f8287a7..31d8e20 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -15,7 +15,7 @@ from holoviews.operation.datashader import datashade import datashader as ds import matplotlib.pyplot as plt -from preprocess import get_all_stores_for_combining_data +from .preprocess import get_all_stores_for_combining_data import panel as pn pn.extension() From ed7d14170256889dd8e4d1e389edf480a6c2f181 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 12:02:35 -0700 Subject: [PATCH 27/83] added main entry point pt 2 --- src/guppy/computePsth.py | 8 +++++--- src/guppy/preprocess.py | 11 +++++------ src/guppy/savingInputParameters.py | 8 +++++--- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index f091c4f..10bd03d 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -772,9 +772,9 @@ def 
psthForEachStorename(inputParameters): print("PSTH, Area and Peak are computed for all events.") return inputParameters -if __name__ == "__main__": +def main(input_parameters): try: - inputParameters = psthForEachStorename(json.loads(sys.argv[1])) + inputParameters = psthForEachStorename(input_parameters) subprocess.call(["python", os.path.join(inputParameters["curr_dir"],"GuPPy","findTransientsFreqAndAmp.py"), json.dumps(inputParameters)]) @@ -785,4 +785,6 @@ def psthForEachStorename(inputParameters): insertLog(str(e), logging.ERROR) raise e - +if __name__ == "__main__": + input_parameters = json.loads(sys.argv[1]) + main(input_parameters=input_parameters) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 9c73569..5a7d715 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -1224,17 +1224,16 @@ def extractTsAndSignal(inputParameters): -if __name__ == "__main__": +def main(input_parameters): try: - extractTsAndSignal(json.loads(sys.argv[1])) + extractTsAndSignal(input_parameters) insertLog('#'*400, logging.INFO) except Exception as e: with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(str(-1)+"\n") insertLog(str(e), logging.ERROR) raise e - - - - \ No newline at end of file +if __name__ == "__main__": + input_parameters = json.loads(sys.argv[1]) + main(input_parameters=input_parameters) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 17aa10f..575fed9 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -79,12 +79,14 @@ def readRawData(): def extractTs(): inputParameters = getInputParameters() - subprocess.call(["python", os.path.join(current_dir,"GuPPy","preprocess.py"), json.dumps(inputParameters)]) + from .preprocess import main as preprocess_main + preprocess_main(inputParameters) def psthComputation(): inputParameters = getInputParameters() inputParameters['curr_dir'] = current_dir - subprocess.call(["python", os.path.join(current_dir,"GuPPy","computePsth.py"), json.dumps(inputParameters)]) + from .computePsth import main as computePsth_main + computePsth_main(inputParameters) def readPBIncrementValues(progressBar): @@ -523,4 +525,4 @@ def onclickpsth(event=None): template.main.append(group) template.main.append(visualize) - return template \ No newline at end of file + return template From c3d0ac12686b3db7f9a32128a3d356e11c46cc8d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 12:14:58 -0700 Subject: [PATCH 28/83] reverted back to subprocess calls bc matplotlib needs to be in the main thread for interactive plots --- src/guppy/savingInputParameters.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 575fed9..61c0ced 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -11,7 +11,6 @@ from tkinter import ttk from tkinter import filedialog from threading import Thread -from .preprocess import extractTsAndSignal from .visualizePlot import visualizeResults from .saveStoresList import execute @@ -79,14 +78,12 @@ def readRawData(): def extractTs(): inputParameters = getInputParameters() - from .preprocess import main as preprocess_main - preprocess_main(inputParameters) + subprocess.call([sys.executable, "-m", "guppy.preprocess", json.dumps(inputParameters)]) def psthComputation(): inputParameters = getInputParameters() inputParameters['curr_dir'] = current_dir - from .computePsth 
import main as computePsth_main - computePsth_main(inputParameters) + subprocess.call([sys.executable, "-m", "guppy.computePsth", json.dumps(inputParameters)]) def readPBIncrementValues(progressBar): From 32ad20eb84be538cf84c0be97f2ec53ac842b207 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 12:18:07 -0700 Subject: [PATCH 29/83] fixed subprocess.call syntax to work with guppy entry point --- src/guppy/computePsth.py | 4 +--- src/guppy/savingInputParameters.py | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index 10bd03d..5d0eceb 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -775,9 +775,7 @@ def psthForEachStorename(inputParameters): def main(input_parameters): try: inputParameters = psthForEachStorename(input_parameters) - subprocess.call(["python", - os.path.join(inputParameters["curr_dir"],"GuPPy","findTransientsFreqAndAmp.py"), - json.dumps(inputParameters)]) + subprocess.call([sys.executable, "-m", "guppy.findTransientsFreqAndAmp", json.dumps(inputParameters)]) insertLog('#'*400, logging.INFO) except Exception as e: with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 61c0ced..62d7595 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -73,8 +73,7 @@ def make_dir(filepath): def readRawData(): inputParameters = getInputParameters() - from .readTevTsq import main as read_tev_tsq_main - read_tev_tsq_main(input_parameters=inputParameters) + subprocess.call([sys.executable, "-m", "guppy.readTevTsq", json.dumps(inputParameters)]) def extractTs(): inputParameters = getInputParameters() From c04067203211c40559b6730c46ac0a6774724141 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 13:44:06 -0700 Subject: [PATCH 30/83] removed readh5 clutter --- readh5.ipynb | 57 ---------------------------------------------------- readh5.m | 21 ------------------- readh5.py | 11 ---------- readhdf5.m | 17 ---------------- readhdf5.py | 21 ------------------- 5 files changed, 127 deletions(-) delete mode 100755 readh5.ipynb delete mode 100755 readh5.m delete mode 100755 readh5.py delete mode 100755 readhdf5.m delete mode 100755 readhdf5.py diff --git a/readh5.ipynb b/readh5.ipynb deleted file mode 100755 index 0da3dfb..0000000 --- a/readh5.ipynb +++ /dev/null @@ -1,57 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# filepath where your file is located \n", - "filepath = '/Users/VENUS/Downloads/JIllian/Abby/average/peak_AUC_UnrewardedPort_DLS_z_score_DLS.h5'\n", - "\n", - "# read file and make a dataframe\n", - "df = pd.read_hdf(filepath, key='df', mode='r')\n", - "\n", - "# print dataframe\n", - "df" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "fiberphotometry", - "language": "python", - "name": "fiberphotometry" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.10" - } - }, - 
"nbformat": 4, - "nbformat_minor": 4 -} diff --git a/readh5.m b/readh5.m deleted file mode 100755 index b11f304..0000000 --- a/readh5.m +++ /dev/null @@ -1,21 +0,0 @@ -% filepath to the file which needs to be read -filepath = '/Users/VENUS/Downloads/JIllian/habitEarly/average/peak_AUC_rwdNP_DMS_z_score_DMS.h5'; -% access h5 file column names -info_data = h5read(filepath, '/df/axis0'); -% access each column data in h5 file -data = h5read(filepath, '/df/block0_values'); - -% access h5 file row names -info_filename = h5read(filepath, '/df/axis1'); - -% construct a struct with three keys : filename, freq, amplitude -S.filename = info_filename; - -% for reading freq and amplitude from z-score -S.freq = data(1,:)'; -S.amplitude = data(2,:)'; - -% construct table from the above struct -T = struct2table(S); - -% To access each column in table use "T.{column_name}" \ No newline at end of file diff --git a/readh5.py b/readh5.py deleted file mode 100755 index 2f6632b..0000000 --- a/readh5.py +++ /dev/null @@ -1,11 +0,0 @@ -import pandas as pd - - -# filepath where your file is located -filepath = '/Users/VENUS/Downloads/JIllian/Abby/average/peak_AUC_UnrewardedPort_DLS_z_score_DLS.h5' - -# read file and make a dataframe -df = pd.read_hdf(filepath, key='df', mode='r') - -# print dataframe -print(df) \ No newline at end of file diff --git a/readhdf5.m b/readhdf5.m deleted file mode 100755 index e508b8d..0000000 --- a/readhdf5.m +++ /dev/null @@ -1,17 +0,0 @@ -% filepath to the control and signal data which needs to be read -filepath_data = '/Users/VENUS/Downloads/controlA.hdf5'; - -% To read control and signal data, use the following syntax -data = h5read(filepath_data, '/data'); - -% filepath to control and signal data timestamps which needs to be read -filepath_timestamps = '/Users/VENUS/Downloads/timeCorrection_A.hdf5'; - -% To read control and signal data timestamps, use the following syntax -timestamps = h5read(filepath_timestamps, '/timestampNew'); - -% filepath to event timestamps which needs to be read -filepath_event = '/Users/VENUS/Downloads/rwdNP_A.hdf5'; - -% To read event timestamps files, use the following syntax -ts = h5read(filepath_event, '/ts'); diff --git a/readhdf5.py b/readhdf5.py deleted file mode 100755 index 2318ce8..0000000 --- a/readhdf5.py +++ /dev/null @@ -1,21 +0,0 @@ -import os -import numpy as np -import h5py - - -# path to z-score file -fp = "/Users/VENUS/Downloads/FP_Data/habitEarly/Photo_63_207-181030-103332/Photo_63_207-181030-103332_output_1/z_score_DMS.hdf5" - -with h5py.File(fp, 'r') as f: - - # print keys in hdf5 files - keys = list(f.keys()) - print(list(f.keys())) - - # create a data dictionary - data = dict() - - # loop through each key and save the data corresponding to a key in a dictionary - for key in keys: - data[key] = np.array(f[key]) - From ad3dd98c89d67b5314224990309f5932394b0d29 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 14:30:32 -0700 Subject: [PATCH 31/83] saving log file to home directory to avoid permissions errors --- src/guppy/computeCorr.py | 4 ++-- src/guppy/computePsth.py | 3 ++- src/guppy/findTransientsFreqAndAmp.py | 5 ++--- src/guppy/preprocess.py | 3 ++- src/guppy/readTevTsq.py | 4 ++-- src/guppy/saveStoresList.py | 2 +- src/guppy/savingInputParameters.py | 7 ++++--- src/guppy/visualizePlot.py | 5 +++-- 8 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/guppy/computeCorr.py b/src/guppy/computeCorr.py index b869c38..404e9df 100644 --- a/src/guppy/computeCorr.py +++ b/src/guppy/computeCorr.py @@ -7,9 
+7,10 @@ import numpy as np import pandas as pd from scipy import signal +from pathlib import Path def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) @@ -192,4 +193,3 @@ def computeCrossCorrelation(filepath, event, inputParameters): create_Df(make_dir(filepath), 'corr_'+event, type[j]+'_'+corr_info[i-1]+'_'+corr_info[i], cross_corr, cols) insertLog(f"Cross-correlation for event {event} computed.", logging.INFO) print("Cross-correlation for event {} computed.".format(event)) - diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index 5d0eceb..b6a9548 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -15,6 +15,7 @@ import multiprocessing as mp from scipy import signal as ss from collections import OrderedDict +from pathlib import Path from .preprocess import get_all_stores_for_combining_data from .computeCorr import computeCrossCorrelation from .computeCorr import getCorrCombinations @@ -28,7 +29,7 @@ def takeOnlyDirs(paths): return list(set(paths)-set(removePaths)) def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index c063b61..96caaaf 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -11,6 +11,7 @@ from scipy.signal import argrelextrema import matplotlib.pyplot as plt from itertools import repeat +from pathlib import Path from .preprocess import get_all_stores_for_combining_data def takeOnlyDirs(paths): @@ -21,7 +22,7 @@ def takeOnlyDirs(paths): return list(set(paths)-set(removePaths)) def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) @@ -382,5 +383,3 @@ def executeFindFreqAndAmp(inputParameters): file.write(str(-1)+"\n") insertLog(str(e), logging.ERROR) raise e - - diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 5a7d715..3f472b6 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -15,6 +15,7 @@ from scipy.optimize import curve_fit import matplotlib.pyplot as plt from matplotlib.widgets import MultiCursor +from pathlib import Path from .combineDataFn import processTimestampsForCombiningData plt.switch_backend('TKAgg') @@ -26,7 +27,7 @@ def takeOnlyDirs(paths): return list(set(paths)-set(removePaths)) def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) diff --git a/src/guppy/readTevTsq.py b/src/guppy/readTevTsq.py index 409ddca..e81d03d 100755 --- a/src/guppy/readTevTsq.py +++ b/src/guppy/readTevTsq.py @@ -12,6 +12,7 @@ import pandas as pd from numpy import int32, uint32, uint8, uint16, float64, int64, int32, float32 import multiprocessing as mp +from pathlib import Path def takeOnlyDirs(paths): removePaths = [] @@ -21,7 +22,7 @@ def takeOnlyDirs(paths): return 
list(set(paths)-set(removePaths)) def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) @@ -564,4 +565,3 @@ def main(input_parameters): if __name__ == "__main__": input_parameters = json.loads(sys.argv[1]) main(input_parameters=input_parameters) - diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index cab877a..3a01bbb 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -45,7 +45,7 @@ def takeOnlyDirs(paths): return list(set(paths)-set(removePaths)) def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 62d7595..6c0ce45 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -11,13 +11,14 @@ from tkinter import ttk from tkinter import filedialog from threading import Thread +from pathlib import Path from .visualizePlot import visualizeResults from .saveStoresList import execute def savingInputParameters(): pn.extension() - log_file = os.path.join('.','..','guppy.log') + log_file = os.path.join(Path.home(), 'guppy.log') if os.path.exists(log_file): os.remove(log_file) else: @@ -44,7 +45,7 @@ def select_folder(): current_dir = os.getcwd() def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) @@ -61,7 +62,7 @@ def insertLog(text, level): logger.exception(text) if level == logging.WARNING: logger.warning(text) - + infoLog.close() logger.removeHandler(infoLog) diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 31d8e20..af15cc8 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -15,8 +15,9 @@ from holoviews.operation.datashader import datashade import datashader as ds import matplotlib.pyplot as plt +from pathlib import Path from .preprocess import get_all_stores_for_combining_data -import panel as pn +import panel as pn pn.extension() def scanPortsAndFind(start_port=5000, end_port=5200, host='127.0.0.1'): @@ -40,7 +41,7 @@ def takeOnlyDirs(paths): return list(set(paths)-set(removePaths)) def insertLog(text, level): - file = os.path.join('.','..','guppy.log') + file = os.path.join(Path.home(), 'guppy.log') format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') infoLog = logging.FileHandler(file) infoLog.setFormatter(format) From c6525cc61e08cb8d3e203e066e0cd0038320754e Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 21 Aug 2025 14:31:04 -0700 Subject: [PATCH 32/83] update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 5207165..da7743b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "2.0.0-alpha6" +version = "2.0.0-alpha7" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." 
readme = "README.md" authors = [ From c30b8ec76573a70c49c16759deac5d73f46a33f2 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 26 Aug 2025 11:16:36 -0700 Subject: [PATCH 33/83] update pyproject.toml to use direct dependencies --- pyproject.toml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index da7743b..133d10a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,16 +39,16 @@ classifiers = [ ] dependencies = [ - "brotlicffi", - "CairoSVG", + "bokeh", "datashader", "h5py", - "hvplot", + "holoviews", "matplotlib", - "nbconvert", - "phantomjs", - "PySocks", - "selenium", + "numpy", + "pandas", + "panel", + "param", + "scipy", "tables", ] @@ -60,4 +60,4 @@ guppy = "guppy.main:main" "Homepage" = "https://github.com/LernerLab/GuPPy" [tool.setuptools.packages.find] -where = ["src"] \ No newline at end of file +where = ["src"] From 38943b31872cbc15ac7434a6ffe26a44fb71a634 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 26 Aug 2025 14:29:33 -0700 Subject: [PATCH 34/83] added explicit support for python 3.10-3.13 --- pyproject.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 133d10a..aa0a3c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "guppy" -version = "2.0.0-alpha7" +version = "2.0.0-alpha8" description = "Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool." readme = "README.md" authors = [ @@ -31,13 +31,17 @@ keywords = [ ] classifiers = [ "Intended Audience :: Science/Research", + "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.10", "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS", "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", ] +requires-python = ">=3.10" dependencies = [ "bokeh", "datashader", From 81aa0c0ac757720c662b91336c435cd761dfed6d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 26 Aug 2025 17:34:49 -0700 Subject: [PATCH 35/83] removed old spec files --- spec_file_linux.txt | 282 ---------------------------------------- spec_file_mac.txt | 168 ------------------------ spec_file_windows10.txt | 162 ----------------------- 3 files changed, 612 deletions(-) delete mode 100755 spec_file_linux.txt delete mode 100644 spec_file_mac.txt delete mode 100755 spec_file_windows10.txt diff --git a/spec_file_linux.txt b/spec_file_linux.txt deleted file mode 100755 index e65aa0c..0000000 --- a/spec_file_linux.txt +++ /dev/null @@ -1,282 +0,0 @@ -# This file may be used to create an environment using: -# $ conda create --name --file -# platform: linux-64 -@EXPLICIT -https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2022.9.24-ha878542_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/font-ttf-dejavu-sans-mono-2.37-hab24e00_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/font-ttf-inconsolata-3.000-h77eed37_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/font-ttf-source-code-pro-2.038-h77eed37_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/font-ttf-ubuntu-0.83-hab24e00_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.39-hc81fddc_0.tar.bz2 
-https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-12.2.0-h337968e_19.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-12.2.0-h46fd767_19.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nomkl-1.0-h5ca1d4c_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/tzdata-2022f-h191b570_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/fonts-conda-forge-1-0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-12.2.0-h69a702a_19.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/fonts-conda-ecosystem-1-0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-2_kmp_llvm.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-12.2.0-h65d4601_19.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/alsa-lib-1.2.8-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/attr-2.5.1-h166bdaf_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-h7f98852_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.18.1-h7f98852_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/expat-2.5.0-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/fftw-3.3.10-nompi_hf0379b8_105.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/firefox-100.0.2-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/geckodriver-0.30.0-h3146498_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/gettext-0.21.1-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/icu-70.1-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/jpeg-9e-h166bdaf_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.1-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/lame-3.100-h166bdaf_1003.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/lerc-4.0.0-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.0.9-h166bdaf_8.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libdb-6.2.32-h9c3ff4c_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.14-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-h516909a_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.17-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.0-h7f98852_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libogg-1.3.4-h7f98852_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libopenblas-0.3.21-pthreads_h78a6416_3.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libopus-1.3.1-h7f98852_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libsodium-1.0.18-h36c2ea0_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libtool-2.4.6-h9c3ff4c_1008.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libudev1-252-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.32.1-h7f98852_1000.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.2.4-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.13-h166bdaf_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.9.3-h9c3ff4c_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/lzo-2.10-h516909a_1000.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/mpg123-1.30.2-h27087fc_1.tar.bz2 
-https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.3-h27087fc_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/nspr-4.32-h9c3ff4c_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/openssl-1.1.1s-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pthread-stubs-0.4-h36c2ea0_1001.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/snappy-1.1.9-hbd366e4_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xorg-libxau-1.0.9-h7f98852_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xorg-libxdmcp-1.1.3-h7f98852_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xz-5.2.6-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/yaml-0.2.5-h7f98852_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-16_linux64_openblas.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.0.9-h166bdaf_8.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.0.9-h166bdaf_8.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libcap-2.66-ha37c62d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20191231-he28a2e2_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libevent-2.1.10-h9b69904_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libflac-1.4.2-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libllvm11-11.1.0-he0ac6c6_5.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.47.0-hdcd2b5c_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.38-h753d276_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libsqlite-3.40.0-h753d276_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.10.0-haa6b8db_3.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libvorbis-1.3.7-h9c3ff4c_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.13-h7f98852_1004.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.10.3-h7463322_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/llvm-openmp-15.0.5-he0ac6c6_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/mysql-common-8.0.31-haf5c9bc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/openblas-0.3.21-pthreads_h320a7e8_3.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pandoc-2.19.2-h32600fe_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pcre2-10.40-hc3806b6_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/readline-8.1.2-h0f457ee_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.12-h27826a3_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/zeromq-4.3.4-h9c3ff4c_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/zlib-1.2.13-h166bdaf_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.2-h6239696_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/blosc-1.21.1-h83bc5f7_3.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.0.9-h166bdaf_8.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/freetype-2.12.1-hca18f0e_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/krb5-1.19.3-h3790be6_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-16_linux64_openblas.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libglib-2.74.1-h606061b_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-16_linux64_openblas.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libllvm15-15.0.5-h63197d8_0.tar.bz2 
-https://conda.anaconda.org/conda-forge/linux-64/libsndfile-1.1.0-h27087fc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.4.0-h55922b4_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libxkbcommon-1.0.3-he3ba5ed_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/mysql-libs-8.0.31-h28c427c_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/python-3.10.6-h582c2e5_0_cpython.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.40.0-h4ff8645_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xcb-util-0.4.0-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xcb-util-keysyms-0.4.0-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xcb-util-renderutil-0.3.9-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xcb-util-wm-0.4.1-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/async_generator-1.10-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/attrs-22.1.0-pyh71513ae_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/backcall-0.2.0-pyh9f0ad1d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/backports-1.0-py_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/brotli-1.0.9-h166bdaf_8.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/cached_property-1.5.2-pyha770c72_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/certifi-2022.9.24-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-2.1.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/cloudpickle-2.2.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.6-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/cycler-0.11.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/dbus-1.13.6-h5008d03_3.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/decorator-5.1.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/defusedxml-0.7.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/entrypoints-0.4-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/executing-1.2.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/flit-core-3.8.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/fontconfig-2.14.1-hc2a2eb6_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/fsspec-2022.11.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/glib-tools-2.74.1-h6239696_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/heapdict-1.0.1-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/idna-3.4-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/ipython_genutils-0.2.0-py_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/jack-1.9.21-he978b8e_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/lcms2-2.14-h6ed2654_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libclang13-15.0.5-default_h3a83d3e_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libcups-2.3.3-h3e49a29_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libcurl-7.86.0-h7bff187_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/liblapacke-3.9.0-16_linux64_openblas.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libpq-14.5-hd77ab85_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/locket-1.0.0-pyhd8ed1ab_0.tar.bz2 
-https://conda.anaconda.org/conda-forge/noarch/mistune-2.0.4-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyh9f0ad1d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nest-asyncio-1.5.6-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/nss-3.78-h2350873_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/openjpeg-2.5.0-h7d73246_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pandocfilters-1.5.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/param-1.13.0-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/parso-0.8.3-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pickleshare-0.7.5-py_1003.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pkgutil-resolve-name-1.3.10-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/platformdirs-2.5.2-pyhd8ed1ab_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/ply-3.11-py_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/prometheus_client-0.15.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/ptyprocess-0.7.0-pyhd3deb0d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pure_eval-0.2.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pycparser-2.21-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.0.9-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/python-fastjsonschema-2.16.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.10-2_cp310.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pytz-2022.6-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/send2trash-1.8.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/setuptools-65.5.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/six-1.16.0-pyh6c4a22f_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/sniffio-1.3.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/sortedcontainers-2.4.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/soupsieve-2.3.2.post1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/tblib-1.7.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/toml-0.10.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/toolz-0.12.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/traitlets-5.5.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/typing_extensions-4.4.0-pyha770c72_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/webencodings-0.5.1-py_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/websocket-client-1.4.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/wheel-0.38.4-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/xcb-util-image-0.4.0-h166bdaf_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/zipp-3.10.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/anyio-3.6.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/asttokens-2.1.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/backports.functools_lru_cache-1.6.4-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/beautifulsoup4-4.11.1-pyha770c72_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/blas-devel-3.9.0-16_linux64_openblas.tar.bz2 
-https://conda.anaconda.org/conda-forge/noarch/cached-property-1.5.2-hd8ed1ab_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/cffi-1.15.1-py310h255011f_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/click-8.1.3-py310hff52083_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/cytoolz-0.12.0-py310h5764c6d_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/debugpy-1.6.3-py310hd8f1fbe_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/glib-2.74.1-h6239696_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/h11-0.14.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/hdf5-1.12.1-nompi_h2386368_104.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/importlib-metadata-5.0.0-pyha770c72_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/importlib_resources-5.10.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/jedi-0.18.1-pyhd8ed1ab_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/jupyter_core-5.0.0-py310hff52083_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.4.4-py310hbf28c38_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/libclang-15.0.5-default_h2e3cab8_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/llvmlite-0.39.1-py310h58363a5_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/lz4-4.0.2-py310h5d5e884_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.1.1-py310h5764c6d_2.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/matplotlib-inline-0.1.6-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/mock-4.0.3-pyhd8ed1ab_4.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/msgpack-python-1.0.4-py310hbf28c38_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/multipledispatch-0.6.0-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/numpy-1.23.4-py310h53a5b5f_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/outcome-1.2.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/packaging-21.3-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/partd-1.3.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pexpect-4.8.0-pyh1a96a4e_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pillow-9.2.0-py310h454ad03_3.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pip-22.3.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/psutil-5.9.4-py310h5764c6d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pulseaudio-14.0-h0d2025b_11.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/pyct-core-0.5.0-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pygments-2.13.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pyrsistent-0.19.2-py310h5764c6d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pysocks-1.7.1-py310hff52083_5.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.8.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/pyviz_comms-2.3.2-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0-py310h5764c6d_5.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pyzmq-24.0.1-py310h330234f_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.2.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/tornado-6.1-py310h5764c6d_3.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/tqdm-4.64.1-pyhd8ed1ab_0.tar.bz2 
-https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-15.0.0-py310h5764c6d_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/zict-2.2.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/argon2-cffi-bindings-21.2.0-py310h5764c6d_3.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/blas-2.116-openblas.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/bleach-5.0.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/brotlipy-0.7.0-py310h5764c6d_1005.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/contourpy-1.0.6-py310hbf28c38_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/cryptography-38.0.3-py310h597c629_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/dask-core-2022.11.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/datashape-0.5.4-py_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.38.0-py310h5764c6d_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/gstreamer-1.21.1-hd4edc92_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/h5py-3.7.0-nompi_py310h06dffec_100.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/jinja2-3.1.2-pyhd8ed1ab_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/jsonschema-4.17.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/jupyter_client-7.3.4-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/jupyterlab_pygments-0.2.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/markdown-3.4.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/numba-0.56.3-py310ha5257ce_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/numexpr-2.8.3-py310hf05e7a9_101.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pandas-1.5.1-py310h769672d_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/sip-6.7.4-py310hd8f1fbe_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/stack_data-0.6.1-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/terminado-0.15.0-py310hff52083_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/trio-0.21.0-py310hff52083_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/wcwidth-0.2.5-pyh9f0ad1d_2.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/wsproto-1.2.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/argon2-cffi-21.3.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/bokeh-2.4.3-pyhd8ed1ab_3.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/gst-plugins-base-1.21.1-h3e40eee_1.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.6.2-py310h8d5ebf3_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nbformat-5.7.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/prompt-toolkit-3.0.32-pyha770c72_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pyopenssl-22.1.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pyqt5-sip-12.11.0-py310hd8f1fbe_2.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pytables-3.6.1-py310hf5df6ce_5.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/trio-websocket-0.9.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/xarray-2022.11.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/ipython-8.4.0-py310hff52083_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nbclient-0.7.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/qt-main-5.15.6-hd477bba_1.tar.bz2 
-https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.11-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/distributed-2022.11.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/ipykernel-6.14.0-py310hfdc917e_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nbconvert-core-7.2.5-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/pyqt-5.15.7-py310h29803b5_2.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/requests-2.28.1-pyhd8ed1ab_1.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/selenium-4.6.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/dask-2022.11.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/jupyter_server-1.23.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/linux-64/matplotlib-3.6.2-py310hff52083_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nbconvert-pandoc-7.2.5-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/pooch-1.6.0-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/pyct-0.5.0-py_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/colorcet-3.0.1-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nbconvert-7.2.5-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/notebook-shim-0.2.2-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/panel-0.13.1-py_0.tar.bz2 -https://conda.anaconda.org/anaconda/linux-64/scipy-1.10.0-py310heeff2f4_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/datashader-0.13.0-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/nbclassic-0.4.8-pyhd8ed1ab_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/notebook-6.5.2-pyha770c72_1.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/holoviews-1.15.0-py_0.tar.bz2 diff --git a/spec_file_mac.txt b/spec_file_mac.txt deleted file mode 100644 index fd86a36..0000000 --- a/spec_file_mac.txt +++ /dev/null @@ -1,168 +0,0 @@ -# This file may be used to create an environment using: -# $ conda create --name --file -# platform: osx-64 -@EXPLICIT -https://conda.anaconda.org/conda-forge/osx-64/phantomjs-2.1.1-1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/blas-1.0-mkl.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/bzip2-1.0.8-h1de35cc_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/ca-certificates-2020.12.5-h033912b_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/gettext-0.19.8.1-h1f1d5ed_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/intel-openmp-2019.4-233.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/jpeg-9b-he5867d9_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libcxx-10.0.0-1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libgfortran-3.0.1-h93005f0_2.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/libiconv-1.15-h0b31af3_1006.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libsodium-1.0.18-h1de35cc_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/lzo-2.10-h1de35cc_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pandoc-2.10-0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/pixman-0.34.0-h1de35cc_1003.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/xz-5.2.5-h1de35cc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/yaml-0.2.5-haf1e3a3_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/zlib-1.2.11-h1de35cc_3.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/firefox-78.0esr-h4a8c4bd_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/geckodriver-0.26.0-h4a8c4bd_0.tar.bz2 
-https://conda.anaconda.org/anaconda/osx-64/hdf5-1.10.4-hfa1e0ec_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/icu-58.2-h0a44026_1000.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libffi-3.3-hb1e8313_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libpng-1.6.37-ha441bb4_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/lz4-c-1.9.2-hb1e8313_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/mkl-2019.4-233.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/ncurses-6.2-h0a44026_1.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/openssl-1.1.1k-h0d85af4_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/pcre-8.44-h4a8c4bd_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/snappy-1.1.8-hb1e8313_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/tbb-2020.0-h04f5b5a_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/tk-8.6.10-hb0a8c7a_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/zeromq-4.3.2-hb1e8313_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/freetype-2.10.2-ha233b18_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/glib-2.55.0-0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libedit-3.1.20191231-h1de35cc_1.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/libxml2-2.9.9-hd80cff7_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/readline-8.0-h1de35cc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/zstd-1.4.5-h41d2c2f_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/blosc-1.19.0-hab81aa3_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/fontconfig-2.13.1-h1027ab8_1000.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/libtiff-4.1.0-hcb84e12_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/sqlite-3.32.3-hffcf06c_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/cairo-1.14.12-he6fea26_5.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/lcms2-2.11-h92f6f08_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/python-3.6.10-hf48f09d_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/appnope-0.1.0-py36hf537a9a_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/attrs-19.3.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/chardet-3.0.4-py36_1003.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/click-7.1.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-1.5.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/decorator-4.4.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.6.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/entrypoints-0.3-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/fsspec-0.7.4-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/heapdict-1.0.1-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/idna-2.10-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/immutables-0.14-py36haf1e3a3_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/ipython_genutils-0.2.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/kiwisolver-1.2.0-py36h04f5b5a_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/llvmlite-0.31.0-py36h1341992_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/locket-0.2.0-py36hca03003_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/markupsafe-1.1.1-py36h1de35cc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/mistune-0.8.4-py36h1de35cc_0.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/mock-4.0.2-py_0.tar.bz2 
-https://repo.anaconda.com/pkgs/main/osx-64/msgpack-python-1.0.0-py36h04f5b5a_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/olefile-0.46-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pandocfilters-1.4.2-py36_1.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/param-1.10.1-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/parso-0.7.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pickleshare-0.7.5-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.8.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/psutil-5.7.0-py36h1de35cc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/ptyprocess-0.6.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.20-py_2.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pyparsing-2.4.7-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pysocks-1.7.1-py36_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/python_abi-3.6-1_cp36m.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/pytz-2020.1-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pyyaml-5.3.1-py36haf1e3a3_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pyzmq-19.0.1-py36hb1e8313_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/send2trash-1.5.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/six-1.15.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/sortedcontainers-2.2.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/tblib-1.6.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/testpath-0.4.4-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/toolz-0.10.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/tornado-6.0.4-py36h1de35cc_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.47.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-3.7.4.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/webencodings-0.5.1-py36_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/zipp-3.1.0-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/osx-64/certifi-2020.12.5-py36h79c6626_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/cffi-1.14.0-py36hc512035_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/contextvars-2.4-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/cycler-0.10.0-py36hfc81398_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/cytoolz-0.10.1-py36h1de35cc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/dask-core-2.20.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/importlib-metadata-1.7.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/jedi-0.17.1-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/mkl-service-2.3.0-py36hfbe908c_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/multipledispatch-0.6.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/packaging-20.4-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/partd-1.1.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pexpect-4.8.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pillow-7.2.0-py36ha54b6ba_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/pyct-core-0.4.6-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pyrsistent-0.16.0-py36h1de35cc_0.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/python-dateutil-2.8.1-py_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/pyviz_comms-0.7.6-py_0.tar.bz2 
-https://repo.anaconda.com/pkgs/main/osx-64/terminado-0.8.3-py36_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/tinycss2-1.1.0-pyhd8ed1ab_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/traitlets-4.3.3-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/zict-2.0.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/brotlipy-0.7.0-py36haf1e3a3_1000.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/cairocffi-1.2.0-pyhd8ed1ab_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/cryptography-2.9.2-py36ha12b0ac_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/cssselect2-0.2.1-pyh9f0ad1d_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/importlib_metadata-1.7.0-0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/jupyter_core-4.6.3-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/numpy-base-1.18.5-py36h3304bdc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/setuptools-49.2.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/bleach-3.1.5-py_0.tar.bz2 -https://conda.anaconda.org/conda-forge/noarch/cairosvg-2.5.2-pyhd8ed1ab_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/distributed-2.20.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/jinja2-2.11.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/jsonschema-3.2.0-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/jupyter_client-6.1.6-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/markdown-3.1.1-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pygments-2.6.1-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pyopenssl-19.1.0-py_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/wheel-0.34.2-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/nbformat-5.0.7-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/pip-20.1.1-py36_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.5-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/urllib3-1.25.9-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/ipython-7.16.1-py36h5ca1d4c_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/nbconvert-5.6.1-py36_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/requests-2.24.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/selenium-3.141.0-py36h1de35cc_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/ipykernel-5.3.3-py36h5ca1d4c_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/pyct-0.4.6-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/colorcet-2.0.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/notebook-6.0.3-py36_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/h5py-2.10.0-py36h3134771_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/numexpr-2.7.1-py36hce01a72_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/pandas-1.0.5-py36h959d312_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/pytables-3.6.1-py36h5bccee9_0.tar.bz2 -https://conda.anaconda.org/anaconda/osx-64/scipy-1.5.0-py36h912ce22_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/xarray-0.16.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/bokeh-2.3.1-py36hecd8cb5_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/panel-0.11.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/dask-2.20.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/datashape-0.5.4-py36_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/matplotlib-3.2.2-0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/matplotlib-base-3.2.2-py36h5670ca0_0.tar.bz2 
-https://repo.anaconda.com/pkgs/main/osx-64/mkl_fft-1.1.0-py36hc64f4ea_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/mkl_random-1.1.1-py36h959d312_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/numpy-1.18.5-py36h55a193a_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/holoviews-1.14.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/osx-64/numba-0.48.0-py36h6c726b0_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/datashader-0.11.0-py_0.tar.bz2 -https://conda.anaconda.org/pyviz/noarch/hvplot-0.6.0-py_0.tar.bz2 diff --git a/spec_file_windows10.txt b/spec_file_windows10.txt deleted file mode 100755 index 80434f3..0000000 --- a/spec_file_windows10.txt +++ /dev/null @@ -1,162 +0,0 @@ -# This file may be used to create an environment using: -# $ conda create --name --file -# platform: win-64 -@EXPLICIT -https://repo.anaconda.com/pkgs/main/win-64/blas-1.0-mkl.conda -https://conda.anaconda.org/anaconda/win-64/ca-certificates-2020.10.14-0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/icc_rt-2019.0.0-h0cc432a_1.conda -https://repo.anaconda.com/pkgs/main/win-64/intel-openmp-2021.2.0-haa95532_616.conda -https://repo.anaconda.com/pkgs/msys2/win-64/msys2-conda-epoch-20160418-1.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/pandoc-2.12-haa95532_0.conda -https://repo.anaconda.com/pkgs/main/win-64/vs2015_runtime-14.27.29016-h5e58377_2.conda -https://repo.anaconda.com/pkgs/main/win-64/winpty-0.4.3-4.conda -https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gmp-6.1.0-2.tar.bz2 -https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-libwinpthread-git-5.0.0.4634.697f757-2.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/mkl-2020.2-256.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/vc-14.2-h21ff451_1.conda -https://conda.anaconda.org/anaconda/win-64/bzip2-1.0.8-he774522_0.tar.bz2 -https://conda.anaconda.org/conda-forge/win-64/firefox-88.0-h0e60522_0.tar.bz2 -https://conda.anaconda.org/conda-forge/win-64/geckodriver-0.29.0-hdb13177_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/icu-58.2-ha925a31_3.conda -https://repo.anaconda.com/pkgs/main/win-64/jpeg-9b-hb83a4c4_2.conda -https://repo.anaconda.com/pkgs/main/win-64/libsodium-1.0.18-h62dcd97_0.conda -https://repo.anaconda.com/pkgs/main/win-64/lz4-c-1.9.3-h2bbff1b_0.conda -https://conda.anaconda.org/anaconda/win-64/lzo-2.10-vc14h0a64fa6_1.tar.bz2 -https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gcc-libs-core-5.3.0-7.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/openssl-1.1.1h-he774522_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/sqlite-3.35.4-h2bbff1b_0.conda -https://repo.anaconda.com/pkgs/main/win-64/tk-8.6.10-he774522_0.conda -https://repo.anaconda.com/pkgs/main/win-64/xz-5.2.5-h62dcd97_0.conda -https://repo.anaconda.com/pkgs/main/win-64/yaml-0.2.5-he774522_0.conda -https://repo.anaconda.com/pkgs/main/win-64/zlib-1.2.11-h62dcd97_4.conda -https://repo.anaconda.com/pkgs/main/win-64/hdf5-1.10.4-h7ebc959_0.conda -https://repo.anaconda.com/pkgs/main/win-64/libpng-1.6.37-h2a8f88b_0.conda -https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gcc-libgfortran-5.3.0-6.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/python-3.8.5-h5fd99cc_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/zeromq-4.3.3-ha925a31_3.conda -https://conda.anaconda.org/anaconda/win-64/zstd-1.4.5-h04227a9_0.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/async_generator-1.10-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/attrs-20.3.0-pyhd3eb1b0_0.conda 
-https://repo.anaconda.com/pkgs/main/noarch/backcall-0.2.0-pyhd3eb1b0_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/blosc-1.20.1-h7bd577a_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/certifi-2020.6.20-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/chardet-3.0.4-py38_1003.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/click-7.1.2-pyhd3eb1b0_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/cloudpickle-1.6.0-py_0.conda -https://repo.anaconda.com/pkgs/main/noarch/colorama-0.4.4-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/decorator-5.0.6-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/defusedxml-0.7.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/entrypoints-0.3-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/freetype-2.10.4-hd328e21_0.conda -https://repo.anaconda.com/pkgs/main/noarch/fsspec-0.9.0-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/heapdict-1.0.1-py_0.conda -https://repo.anaconda.com/pkgs/main/noarch/idna-2.10-pyhd3eb1b0_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/ipython_genutils-0.2.0-pyhd3eb1b0_1.conda -https://conda.anaconda.org/anaconda/win-64/kiwisolver-1.2.0-py38h74a9793_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/libtiff-4.1.0-h56a325e_1.conda -https://conda.anaconda.org/anaconda/win-64/llvmlite-0.34.0-py38h1a82afc_4.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/locket-0.2.0-py38_1.tar.bz2 -https://repo.anaconda.com/pkgs/msys2/win-64/m2w64-gcc-libs-5.3.0-7.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/markupsafe-1.1.1-py38he774522_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/mistune-0.8.4-py38he774522_1000.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/mock-4.0.2-py_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/msgpack-python-1.0.0-py38h74a9793_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/nest-asyncio-1.5.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/noarch/olefile-0.46-py_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pandocfilters-1.4.2-py38_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/param-1.10.1-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/parso-0.8.2-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/pickleshare-0.7.5-pyhd3eb1b0_1003.conda -https://repo.anaconda.com/pkgs/main/noarch/prometheus_client-0.10.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/psutil-5.7.2-py38he774522_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pycparser-2.20-py_2.conda -https://repo.anaconda.com/pkgs/main/noarch/pyparsing-2.4.7-pyhd3eb1b0_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pyreadline-2.1-py38_1.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pyrsistent-0.17.3-py38he774522_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pytz-2021.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/pywin32-227-py38he774522_1.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pyyaml-5.3.1-py38he774522_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/pyzmq-20.0.0-py38hd77b12b_1.conda -https://repo.anaconda.com/pkgs/main/win-64/qt-5.9.7-vc14h73c81de_0.conda -https://repo.anaconda.com/pkgs/main/noarch/send2trash-1.5.0-pyhd3eb1b0_1.conda -https://conda.anaconda.org/anaconda/win-64/sip-4.19.24-py38ha925a31_0.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/six-1.15.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/sortedcontainers-2.3.0-pyhd3eb1b0_0.conda 
-https://repo.anaconda.com/pkgs/main/noarch/tblib-1.7.0-py_0.conda -https://repo.anaconda.com/pkgs/main/noarch/testpath-0.4.4-pyhd3eb1b0_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/toolz-0.11.1-pyhd3eb1b0_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/tornado-6.0.4-py38he774522_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/tqdm-4.59.0-pyhd3eb1b0_1.conda -https://repo.anaconda.com/pkgs/main/noarch/typing_extensions-3.7.4.3-pyha847dfd_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/wcwidth-0.2.5-py_0.conda -https://conda.anaconda.org/anaconda/win-64/webencodings-0.5.1-py38_1.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/wheel-0.36.2-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/win_inet_pton-1.1.0-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/wincertstore-0.2-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/zipp-3.4.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/cffi-1.14.3-py38h7a1dbc1_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/cycler-0.10.0-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/cytoolz-0.11.0-py38he774522_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/dask-core-2021.3.0-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/win-64/importlib-metadata-3.10.0-py38haa95532_0.conda -https://repo.anaconda.com/pkgs/main/win-64/jedi-0.18.0-py38haa95532_1.conda -https://repo.anaconda.com/pkgs/main/win-64/mkl-service-2.3.0-py38h196d8e1_0.conda -https://conda.anaconda.org/anaconda/win-64/multipledispatch-0.6.0-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/packaging-20.9-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/partd-1.2.0-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/pillow-8.0.0-py38hca74424_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/prompt-toolkit-3.0.17-pyh06a4308_0.conda -https://conda.anaconda.org/anaconda/win-64/pyqt-5.9.2-py38ha925a31_4.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pysocks-1.7.1-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/python-dateutil-2.8.1-pyhd3eb1b0_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pyviz_comms-2.0.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/pywinpty-0.5.7-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/setuptools-50.3.0-py38h9490d1a_1.tar.bz2 -https://conda.anaconda.org/anaconda/noarch/traitlets-5.0.5-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/zict-2.0.0-pyhd3eb1b0_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/bleach-3.3.0-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/brotlipy-0.7.0-py38he774522_1000.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/cryptography-3.1.1-py38h7a1dbc1_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/distributed-2021.4.0-py38haa95532_0.conda -https://repo.anaconda.com/pkgs/main/noarch/importlib_metadata-3.10.0-hd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/jinja2-2.11.3-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/jupyter_core-4.6.3-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/markdown-3.3.2-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/numpy-base-1.19.2-py38ha3acd2a_0.conda -https://conda.anaconda.org/anaconda/win-64/pip-20.2.4-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/pygments-2.8.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/terminado-0.9.1-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/ipython-7.18.1-py38h5ca1d4c_0.tar.bz2 
-https://repo.anaconda.com/pkgs/main/noarch/jsonschema-3.2.0-py_2.conda -https://repo.anaconda.com/pkgs/main/noarch/jupyter_client-6.1.12-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/jupyterlab_pygments-0.1.2-py_0.conda -https://repo.anaconda.com/pkgs/main/noarch/pyopenssl-20.0.1-pyhd3eb1b0_1.conda -https://conda.anaconda.org/anaconda/win-64/ipykernel-5.3.4-py38h5ca1d4c_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/nbformat-5.1.3-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/urllib3-1.26.4-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/nbclient-0.5.3-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/noarch/requests-2.25.1-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/selenium-3.141.0-py38he774522_1000.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/nbconvert-6.0.7-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pyct-0.4.8-py38_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/colorcet-2.0.6-pyhd3eb1b0_0.conda -https://conda.anaconda.org/anaconda/win-64/notebook-6.0.3-py38_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/datashape-0.5.4-py38_1.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/h5py-2.10.0-py38h5e291fa_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/matplotlib-3.3.1-0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/matplotlib-base-3.3.1-py38hba9282a_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/mkl_random-1.1.1-py38h47e9c7a_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/numba-0.51.2-py38hf9181ef_1.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pandas-1.1.3-py38ha925a31_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/xarray-0.17.0-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/win-64/bokeh-2.3.1-py38haa95532_0.conda -https://conda.anaconda.org/pyviz/noarch/panel-0.11.0-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/noarch/dask-2021.3.0-pyhd3eb1b0_0.conda -https://repo.anaconda.com/pkgs/main/win-64/mkl_fft-1.3.0-py38h46781fe_0.conda -https://repo.anaconda.com/pkgs/main/win-64/numpy-1.19.2-py38hadc3359_0.conda -https://conda.anaconda.org/pyviz/noarch/holoviews-1.14.2-py_0.tar.bz2 -https://repo.anaconda.com/pkgs/main/win-64/numexpr-2.7.3-py38hcbcaa1e_0.conda -https://repo.anaconda.com/pkgs/main/win-64/scipy-1.6.2-py38h14eb087_0.conda -https://repo.anaconda.com/pkgs/main/noarch/datashader-0.12.1-pyhd3eb1b0_1.conda -https://conda.anaconda.org/pyviz/noarch/hvplot-0.7.1-py_0.tar.bz2 -https://conda.anaconda.org/anaconda/win-64/pytables-3.6.1-py38ha5be198_0.tar.bz2 From e6430c7d1a3af0679b6a1da24c72910a7c47af40 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 27 Aug 2025 08:31:25 -0700 Subject: [PATCH 36/83] Added installation instructions to the readme --- README.md | 77 ++++++++++++++++++++++++++----------------------------- 1 file changed, 37 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 2783b2e..b564104 100644 --- a/README.md +++ b/README.md @@ -2,63 +2,59 @@ # GuPPy Guided Photometry Analysis in Python, a free and open-source fiber photometry data analysis tool. -## Installation Instructions +## Installation GuPPy can be run on Windows, Mac or Linux. -**Follow the instructions below to install GuPPy :**
-- Current Users : Download new code updates by following steps 1.a to 1.c, then visit the Github Wiki page to get started on your analysis -- New Users : Follow all the installation steps and then visit the Github Wiki page to get started on your analysis +### Installation via PyPI -1. Download the Guppy code
- a. Press the green button labeled “Code” on the top right corner and that will initiate a pull down menu.
- - b. Click on Download ZIP. *(Ensure that you save this ZIP locally, not in any external cloud storage such as iCloud, OneDrive, Box, etc. We suggest saving it in your User folder on the C drive)*
- - c. Once downloaded, open the ZIP file and you should have a folder named “GuPPy-main”. Place this GuPPy-main folder wherever is most convenient (avoiding cloud storage).
- - d. Inside the GuPPy-main folder there is a subfolder named “GuPPy”. Take note of the GuPPy subfolder location or path. It will be important for future steps in the GuPPy workflow
- - Mac: Right click folder → Click Get Info → Text next to “Where:”
- ~ Ex: /Users/LernerLab/Desktop/GuPPy-main
- - Windows/Linux: Right click folder → Properties → Text next to “Location:”
+To install the latest stable release of GuPPy through PyPI, simply run the following command in your terminal or command prompt:
-2. Anaconda is a distribution of the Python and R programming languages for scientific computing. Install [Anaconda](https://www.anaconda.com/products/individual#macos). Install Anaconda based on your operating system (Mac, Windows or Linux) by following the prompts when you run the downloaded installation file.
+```bash
+pip install guppy
+```
-3. Once installed, open an Anaconda Prompt window (for windows) or Terminal window (for Mac or Linux). You can search for "anaconda prompt" or "terminal" on your computer to open this window.
+We recommend that you install the package inside a [virtual environment](https://docs.python.org/3/tutorial/venv.html).
+A simple way of doing this is to use a [conda environment](https://docs.conda.io/projects/conda/en/latest/user-guide/concepts/environments.html) from the `conda` package manager ([installation instructions](https://docs.conda.io/en/latest/miniconda.html)).
+Detailed instructions on how to use conda environments can be found in their [documentation](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html).
-4. Find the location where GuPPy folder is located (from Step 1d) and execute the following command on the Anaconda Prompt or terminal window:
+### Installation from GitHub
+To install the latest development version of GuPPy from GitHub, you can clone the repository and install the package manually.
+This has the advantage of allowing you to access the latest features and bug fixes that may not yet be available in the stable release.
+To install GuPPy from GitHub you will need to use `git` ([installation instructions](https://github.com/git-guides/install-git)).
+From a terminal or command prompt, execute the following commands:
+
+1. Clone the repository:
+```bash
+git clone https://github.com/LernerLab/GuPPy.git
```
-cd path_to_GuPPy_folder
```
- - Ex: cd /Users/LernerLab/Desktop/GuPPy-main
-5. Next, execute the following commands, in this specific order, on Anaconda Prompt or terminal window:
- - Note : filename in the first command should be replaced by spec_file_windows10.txt or spec_file_mac.txt or spec_file_linux.txt (based on your OS)
- - Some of these commands will initiate various transactions. Wait until they are all done before executing the next line
- - If the Anaconda Prompt or Terminal window asks: Proceed ([y]/n)? Respond with y
-``` -conda create --name guppy --file filename -conda activate guppy -``` -6. Lastly, execute the following command to open the GuPPy User Interface: + +2. Navigate into the cloned directory: +```bash +cd GuPPy ``` -panel serve --show GuPPy/savingInputParameters.ipynb + +3. Install the package using pip: +```bash +pip install -e . ``` - GuPPy is now officially downloaded and ready to use!
-- The full instructions along with detailed descriptions of each step to run the GuPPy tool is on [Github Wiki Page](https://github.com/LernerLab/GuPPy/wiki). +Note: +This method installs the repository in [editable mode](https://pip.pypa.io/en/stable/cli/pip_install/#editable-installs). -## Uninstalling or removing instructions +## Usage -1. Open an Anaconda Prompt window (for windows) or Terminal window (for Mac or Linux). +In a terminal or command prompt, you can start using GuPPy by running the following command: -2. Execute the following command on Anaconda Prompt or terminal window:
-``` -conda remove --name guppy --all +```bash +guppy ``` -3. To reinstall, follow steps 1 (Download GuPPy code) and 4 to 6 from the Installation Instructions. +This will launch the GuPPy user interface, where you can begin analyzing your fiber photometry data. + +## Wiki +- The full instructions along with detailed descriptions of each step to run the GuPPy tool is on [Github Wiki Page](https://github.com/LernerLab/GuPPy/wiki). ## Tutorial Videos @@ -91,5 +87,6 @@ conda remove --name guppy --all - Jillian Seiler - [Gabriela Lopez](https://github.com/glopez924) - [Talia Lerner](https://github.com/talialerner) +- [Paul Adkisson](https://github.com/pauladkisson) From 71aa80ac921361998c6d9d33db40cc9be3c4804b Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 27 Aug 2025 08:50:13 -0700 Subject: [PATCH 37/83] Added changelog --- CHANGELOG.md | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..ce2cf81 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,61 @@ +# v2.0.0 (Upcoming) + +## Features + +- Modernized python packaging and distribution: [PR #129](https://github.com/LernerLab/GuPPy/pull/129) + +## Fixes + +## Deprecations and Removals + +## Improvements + +# GuPPy-v1.3.0 (August 12th, 2025) + +- Added support for NPM TTL files with multiple format versions +- Added support for multiple NPM files and CSV TTL files simultaneously +- Added binning by trials feature for data organization +- Extended peak AUC analysis with additional window options +- Enhanced cross-correlation module with artifact removal options +- Optional filtering - can disable signal filtering when needed +- Improved storenames GUI for better user experience +- Automatic saving of input parameters for group analysis +- Enhanced visualization GUI with improved Y-axis limits +- Fixed Windows and macOS compatibility issues +- Improved Doric file format support +- Added directory checking for output folders +- Fixed various bugs in group analysis and PSTH computation +- Resolved port number errors and improved error handling + +# GuPPy-v1.2.0 (November 11th, 2021) + +- Support for Doric system file (.csv and .doric) +- storenames GUI changed, designed it in a way which is less error prone +- Saving of input parameters is not required for doing the analysis +- Visualization GUI changed +- user-defined for number of cores used +- added cross-correalation computation +- two user-defined parameters for transients detection +- artifacts removal can be done with two different methods +- compute negative peaks along with positive peaks in a user-defined window + +# GuPPy-v1.1.4 (October 28th, 2021) + +- Support for Neurophotometrics data +- Option for binning of PSTH trials +- Option to carry out analysis without using isosbestic control channel +- Plot to see control fitted channel to signal channel +- Selection and deletion of chunks with specific keys in artifacts removal +- Option to change moving average filter window +- Option to compute variations of z-score based on different computation method. 
+- Faster computation speed for PSTH computation step + +# GuPPy-v1.1.2 (August 4th, 2021) + +- Minor Bug Fixes +- multiple windows for peak and AUC computation +- bug fix for searching a file name irrespective of lower-case of upper-case + +# GuPPy-v1.1.1 (July 6th, 2021) + +It is the GuPPy's first release for people to use and give us feedbacks on it \ No newline at end of file From 37ab6fb9688a0b1287cbf6726bf5f6651e014c02 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 27 Aug 2025 09:24:48 -0700 Subject: [PATCH 38/83] Added manifest --- MANIFEST.in | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..603ac05 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,2 @@ +include *.md +recursive-include src *.ipynb \ No newline at end of file From ee9c99cf5d23b85b5cb5a4bf5717c9739873288f Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 27 Aug 2025 11:30:08 -0700 Subject: [PATCH 39/83] updated changelog to explicitly mention breaking changes --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ce2cf81..82cdfe9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,11 +3,16 @@ ## Features - Modernized python packaging and distribution: [PR #129](https://github.com/LernerLab/GuPPy/pull/129) +- Added support for Python 3.10-3.13: [PR #129](https://github.com/LernerLab/GuPPy/pull/129) ## Fixes ## Deprecations and Removals +- Dropped support for Python 3.6: [PR #129](https://github.com/LernerLab/GuPPy/pull/129) +- Restructured directory layout for improved organization: [PR #129](https://github.com/LernerLab/GuPPy/pull/129) +- Converted savingInputParameters.ipynb to saving_input_parameters.py: [PR #129](https://github.com/LernerLab/GuPPy/pull/129) + ## Improvements # GuPPy-v1.3.0 (August 12th, 2025) From 057ee6dbd1148f31c3f4666ae264423256130c40 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 10:47:18 -0700 Subject: [PATCH 40/83] added headless mode for step 1 and appropriate test --- .gitignore | 1 + src/guppy/savingInputParameters.py | 49 ++++++++++----- src/guppy/testing/__init__.py | 5 ++ src/guppy/testing/api.py | 63 +++++++++++++++++++ tests/conftest.py | 8 +++ tests/test_step1.py | 98 ++++++++++++++++++++++++++++++ 6 files changed, 208 insertions(+), 16 deletions(-) create mode 100644 src/guppy/testing/__init__.py create mode 100644 src/guppy/testing/api.py create mode 100644 tests/conftest.py create mode 100644 tests/test_step1.py diff --git a/.gitignore b/.gitignore index de0bef9..f2c8949 100755 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ z-score_methods.tgn GuPPy/runFiberPhotometryAnalysis.ipynb .vscode/ *.egg-info/ +.clinerules/ \ No newline at end of file diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 6c0ce45..69516fe 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -24,23 +24,31 @@ def savingInputParameters(): else: pass - # Create the main window - folder_selection = tk.Tk() - folder_selection.title("Select the folder path where your data is located") - folder_selection.geometry("700x200") - def select_folder(): + # Determine base folder path (headless-friendly via env var) + base_dir_env = os.environ.get('GUPPY_BASE_DIR') + is_headless = base_dir_env and os.path.isdir(base_dir_env) + if is_headless: global folder_path - folder_path = filedialog.askdirectory(title="Select the folder path where your data is located") - if folder_path: 
- print(f"Folder path set to {folder_path}") - folder_selection.destroy() - else: - folder_path = os.path.expanduser('~') - print(f"Folder path set to {folder_path}") - - select_button = ttk.Button(folder_selection, text="Select a Folder", command=select_folder) - select_button.pack(pady=5) - folder_selection.mainloop() + folder_path = base_dir_env + print(f"Folder path set to {folder_path} (from GUPPY_BASE_DIR)") + else: + # Create the main window + folder_selection = tk.Tk() + folder_selection.title("Select the folder path where your data is located") + folder_selection.geometry("700x200") + def select_folder(): + global folder_path + folder_path = filedialog.askdirectory(title="Select the folder path where your data is located") + if folder_path: + print(f"Folder path set to {folder_path}") + folder_selection.destroy() + else: + folder_path = os.path.expanduser('~') + print(f"Folder path set to {folder_path}") + + select_button = ttk.Button(folder_selection, text="Select a Folder", command=select_folder) + select_button.pack(pady=5) + folder_selection.mainloop() current_dir = os.getcwd() @@ -522,4 +530,13 @@ def onclickpsth(event=None): template.main.append(group) template.main.append(visualize) + # Expose minimal hooks and widgets to enable programmatic testing + template._hooks = { + "onclickProcess": onclickProcess, + "getInputParameters": getInputParameters, + } + template._widgets = { + "files_1": files_1, + } + return template diff --git a/src/guppy/testing/__init__.py b/src/guppy/testing/__init__.py new file mode 100644 index 0000000..0e8306d --- /dev/null +++ b/src/guppy/testing/__init__.py @@ -0,0 +1,5 @@ +from .api import step1 + +__all__ = [ + "step1", +] diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py new file mode 100644 index 0000000..8fe0ace --- /dev/null +++ b/src/guppy/testing/api.py @@ -0,0 +1,63 @@ +""" +Python API for GuPPy pipeline steps. + +Step 1: Save Input Parameters +- Writes GuPPyParamtersUsed.json into each selected data folder. +- Mirrors the Panel UI's Step 1 behavior without invoking any UI by default. + +This module is intentionally minimal and non-invasive. +""" + +from __future__ import annotations + +import json +import os +from typing import Iterable, List + +from guppy.savingInputParameters import savingInputParameters + + + + + + +def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: + """ + Run pipeline Step 1 (Save Input Parameters) via the Panel logic. + + This calls the exact ``onclickProcess`` function defined in + ``savingInputParameters()``, in headless mode. The ``GUPPY_BASE_DIR`` + environment variable is used to bypass the Tk folder selection dialog. + The function programmatically sets the FileSelector value to + ``selected_folders`` and triggers the underlying callback that writes + ``GuPPyParamtersUsed.json`` into each selected folder. + + Parameters + ---------- + base_dir : str + Root directory used to initialize the FileSelector. All ``selected_folders`` + must reside under this path. + selected_folders : Iterable[str] + Absolute paths to the session directories to analyze. All must share the + same parent directory. + + Raises + ------ + RuntimeError + If the ``savingInputParameters`` template does not expose the required + testing hooks (``_hooks['onclickProcess']`` and ``_widgets['files_1']``). 
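+
+    Examples
+    --------
+    A minimal headless sketch; the paths below are hypothetical placeholders,
+    and both session folders are assumed to sit directly under ``base_dir``::
+
+        from guppy.testing import step1
+
+        step1(
+            base_dir="/path/to/data_root",
+            selected_folders=[
+                "/path/to/data_root/session1",
+                "/path/to/data_root/session2",
+            ],
+        )
+
+    Afterwards each selected folder should contain a ``GuPPyParamtersUsed.json``
+    file holding the default input parameters.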
+ """ + os.environ["GUPPY_BASE_DIR"] = base_dir + + # Build the template headlessly + template = savingInputParameters() + + # Sanity checks: ensure hooks/widgets exposed + if not hasattr(template, "_hooks") or "onclickProcess" not in template._hooks: + raise RuntimeError("savingInputParameters did not expose 'onclickProcess' hook") + if not hasattr(template, "_widgets") or "files_1" not in template._widgets: + raise RuntimeError("savingInputParameters did not expose 'files_1' widget") + + # Select folders and trigger actual step-1 logic + template._widgets["files_1"].value = list(selected_folders) + template._hooks["onclickProcess"]() diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..294c354 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,8 @@ +import os +import sys + +# Ensure the 'src' directory is on sys.path for tests without installation +PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +SRC_PATH = os.path.join(PROJECT_ROOT, "src") +if SRC_PATH not in sys.path: + sys.path.insert(0, SRC_PATH) diff --git a/tests/test_step1.py b/tests/test_step1.py new file mode 100644 index 0000000..a428531 --- /dev/null +++ b/tests/test_step1.py @@ -0,0 +1,98 @@ +import json +import os + +import numpy as np +import pytest + +from guppy.testing.api import step1 + + +@pytest.fixture(scope="function") +def default_parameters(): + return { + "combine_data": False, + "isosbestic_control": True, + "timeForLightsTurnOn": 1, + "filter_window": 100, + "removeArtifacts": False, + "noChannels": 2, + "zscore_method": "standard z-score", + "baselineWindowStart": 0, + "baselineWindowEnd": 0, + "nSecPrev": -10, + "nSecPost": 20, + "timeInterval": 2, + "bin_psth_trials": 0, + "use_time_or_trials": "Time (min)", + "baselineCorrectionStart": -5, + "baselineCorrectionEnd": 0, + "peak_startPoint": [ + -5.0, + 0.0, + 5.0, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan + ], + "peak_endPoint": [ + 0.0, + 3.0, + 10.0, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan, + np.nan + ], + "selectForComputePsth": "z_score", + "selectForTransientsComputation": "z_score", + "moving_window": 15, + "highAmpFilt": 2, + "transientsThresh": 3 + } + + +def test_step1(tmp_path, default_parameters): + # Arrange: base directory with two sessions under the same parent + session_names = ["session1", "session2"] + base_name = "data_root" + base_dir = tmp_path / base_name + base_dir.mkdir(parents=True, exist_ok=True) + sessions = [] + for name in session_names: + path = base_dir / name + path.mkdir(parents=True, exist_ok=True) + sessions.append(str(path)) + base_dir = str(base_dir) + + # Act: call actual Panel onclickProcess via the API helper (headless) + step1(base_dir=base_dir, selected_folders=sessions) + + # Assert: JSON written for each session with key defaults + for s in sessions: + out_fp = os.path.join(s, "GuPPyParamtersUsed.json") + assert os.path.exists(out_fp), f"Missing file: {out_fp}" + with open(out_fp, "r") as f: + data = json.load(f) + + # Check that JSON data matches default parameters + for key, expected_value in default_parameters.items(): + if isinstance(expected_value, np.ndarray): + np.testing.assert_array_equal(data[key], expected_value) + elif isinstance(expected_value, list) and any(isinstance(x, float) and np.isnan(x) for x in expected_value): + # Handle lists with NaN values + actual = data[key] + assert len(actual) == len(expected_value) + for i, (a, e) in enumerate(zip(actual, expected_value)): + if np.isnan(e): + 
assert np.isnan(a) or a is None, f"Mismatch at index {i}: expected NaN, got {a}" + else: + assert a == e, f"Mismatch at index {i}: expected {e}, got {a}" + else: + assert data[key] == expected_value, f"Mismatch for {key}: expected {expected_value}, got {data[key]}" From be54ce45d275e70b3ccdb7273ceae18dc06fd8cf Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 12:39:14 -0700 Subject: [PATCH 41/83] added headless mode for step 2 and appropriate test --- src/guppy/saveStoresList.py | 10 +++++ src/guppy/testing/api.py | 85 +++++++++++++++++++++++++++++++++++++ tests/test_step2.py | 49 +++++++++++++++++++++ 3 files changed, 144 insertions(+) create mode 100644 tests/test_step2.py diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 3a01bbb..6a21ffd 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -133,6 +133,16 @@ def saveStorenames(inputParameters, data, event_name, flag, filepath): # getting input parameters inputParameters = inputParameters + # Headless path: if storenames_map provided, write storesList.csv without building the Panel UI + storenames_map = inputParameters.get("storenames_map") + if isinstance(storenames_map, dict) and len(storenames_map) > 0: + op = make_dir(filepath) + arr = np.asarray([list(storenames_map.keys()), list(storenames_map.values())], dtype=str) + np.savetxt(os.path.join(op, 'storesList.csv'), arr, delimiter=",", fmt='%s') + insertLog(f"Storeslist file saved at {op}", logging.INFO) + insertLog('Storeslist : \n'+str(arr), logging.INFO) + return + # reading storenames from the data fetched using 'readtsq' function if isinstance(data, pd.DataFrame): data['name'] = np.asarray(data['name'], dtype=str) diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 8fe0ace..0050e2a 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -12,9 +12,11 @@ import json import os +import numpy as np from typing import Iterable, List from guppy.savingInputParameters import savingInputParameters +from guppy.saveStoresList import execute @@ -61,3 +63,86 @@ def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: # Select folders and trigger actual step-1 logic template._widgets["files_1"].value = list(selected_folders) template._hooks["onclickProcess"]() + + +def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dict[str, str]) -> None: + """ + Run pipeline Step 2 (Save Storenames) via the actual Panel-backed logic. + + This builds the Step 2 template headlessly (using ``GUPPY_BASE_DIR`` to bypass + the folder dialog), sets the FileSelector to ``selected_folders``, retrieves + the full input parameters via ``getInputParameters()``, injects the provided + ``storenames_map``, and calls ``execute(inputParameters)`` from + ``guppy.saveStoresList``. The execute() function is minimally augmented to + support a headless branch when ``storenames_map`` is present, while leaving + Panel behavior unchanged. + + Parameters + ---------- + base_dir : str + Root directory used to initialize the FileSelector. All ``selected_folders`` + must reside directly under this path. + selected_folders : Iterable[str] + Absolute paths to the session directories to process. + storenames_map : dict[str, str] + Mapping from raw storenames (e.g., "Dv1A") to semantic names + (e.g., "control_DMS"). Insertion order is preserved. + + Raises + ------ + ValueError + If validation fails (e.g., empty mapping, invalid directories, or parent + mismatch). 
+ RuntimeError + If the template does not expose the required testing hooks/widgets. + """ + # Validate base_dir + if not isinstance(base_dir, str) or not base_dir: + raise ValueError("base_dir must be a non-empty string") + base_dir = os.path.abspath(base_dir) + if not os.path.isdir(base_dir): + raise ValueError(f"base_dir does not exist or is not a directory: {base_dir}") + + # Validate selected_folders + sessions = list(selected_folders or []) + if not sessions: + raise ValueError("selected_folders must be a non-empty iterable of session directories") + abs_sessions = [os.path.abspath(s) for s in sessions] + for s in abs_sessions: + if not os.path.isdir(s): + raise ValueError(f"Session path does not exist or is not a directory: {s}") + parent = os.path.dirname(s) + if parent != base_dir: + raise ValueError( + f"All selected_folders must share the same parent equal to base_dir. " + f"Got parent {parent!r} for session {s!r}, expected {base_dir!r}" + ) + + # Validate storenames_map + if not isinstance(storenames_map, dict) or not storenames_map: + raise ValueError("storenames_map must be a non-empty dict[str, str]") + for k, v in storenames_map.items(): + if not isinstance(k, str) or not k.strip(): + raise ValueError(f"Invalid storename key: {k!r}") + if not isinstance(v, str) or not v.strip(): + raise ValueError(f"Invalid semantic name for key {k!r}: {v!r}") + + # Headless build: set base_dir and construct the template + os.environ["GUPPY_BASE_DIR"] = base_dir + template = savingInputParameters() + + # Ensure hooks/widgets exposed + if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: + raise RuntimeError("savingInputParameters did not expose 'getInputParameters' hook") + if not hasattr(template, "_widgets") or "files_1" not in template._widgets: + raise RuntimeError("savingInputParameters did not expose 'files_1' widget") + + # Select folders and fetch input parameters + template._widgets["files_1"].value = abs_sessions + input_params = template._hooks["getInputParameters"]() + + # Inject storenames mapping for headless execution + input_params["storenames_map"] = dict(storenames_map) + + # Call the underlying Step 2 executor (now headless-aware) + execute(input_params) diff --git a/tests/test_step2.py b/tests/test_step2.py new file mode 100644 index 0000000..cdc1b1e --- /dev/null +++ b/tests/test_step2.py @@ -0,0 +1,49 @@ +import csv +import os + +import pytest + +from guppy.testing.api import step2 + + +@pytest.fixture(scope="function") +def storenames_map(): + return { + "Dv1A": "control_DMS", + "Dv2A": "signal_DMS", + "PrtR": "RewardedPort", + } + + +def test_step2_writes_storeslist(tmp_path, storenames_map): + # Arrange: create base_dir with two session folders + base_name = "data_root" + base_dir = tmp_path / base_name + base_dir.mkdir(parents=True, exist_ok=True) + + session_names = ["session1", "session2"] + sessions = [] + for name in session_names: + p = base_dir / name + p.mkdir(parents=True, exist_ok=True) + sessions.append(str(p)) + + # Act: write storesList.csv headlessly + step2(base_dir=str(base_dir), selected_folders=sessions, storenames_map=storenames_map) + + # Assert: each session has {session_basename}_output_1/storesList.csv with 2xN structure + for s in sessions: + basename = os.path.basename(s) + out_dir = os.path.join(s, f"{basename}_output_1") + out_fp = os.path.join(out_dir, "storesList.csv") + + assert os.path.isdir(out_dir), f"Missing output directory: {out_dir}" + assert os.path.exists(out_fp), f"Missing storesList.csv: 
{out_fp}" + + with open(out_fp, newline="") as f: + reader = csv.reader(f) + rows = list(reader) + + assert len(rows) == 2, f"Expected 2 rows (storenames, names_for_storenames), got {len(rows)}" + assert rows[0] == list(storenames_map.keys()), "Row 0 (storenames) mismatch" + assert rows[1] == list(storenames_map.values()), "Row 1 (names_for_storenames) mismatch" From 6be04a9cfcf6fb19003b4ebb2ece5a5f110f9d6b Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 12:43:12 -0700 Subject: [PATCH 42/83] renamed test_step2 --- tests/test_step2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_step2.py b/tests/test_step2.py index cdc1b1e..61195f2 100644 --- a/tests/test_step2.py +++ b/tests/test_step2.py @@ -15,7 +15,7 @@ def storenames_map(): } -def test_step2_writes_storeslist(tmp_path, storenames_map): +def test_step2(tmp_path, storenames_map): # Arrange: create base_dir with two session folders base_name = "data_root" base_dir = tmp_path / base_name From 5a1fedb6a69d3700b9b68d2b22a9d417a232df70 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 13:13:57 -0700 Subject: [PATCH 43/83] added headless mode for step 3 and appropriate test --- src/guppy/testing/__init__.py | 4 +- src/guppy/testing/api.py | 66 ++++++++++++++++++++++++++ tests/test_step3.py | 87 +++++++++++++++++++++++++++++++++++ 3 files changed, 156 insertions(+), 1 deletion(-) create mode 100644 tests/test_step3.py diff --git a/src/guppy/testing/__init__.py b/src/guppy/testing/__init__.py index 0e8306d..401828f 100644 --- a/src/guppy/testing/__init__.py +++ b/src/guppy/testing/__init__.py @@ -1,5 +1,7 @@ -from .api import step1 +from .api import step1, step2, step3 __all__ = [ "step1", + "step2", + "step3", ] diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 0050e2a..48f5af2 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -17,6 +17,7 @@ from guppy.savingInputParameters import savingInputParameters from guppy.saveStoresList import execute +from guppy.readTevTsq import readRawData @@ -146,3 +147,68 @@ def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dic # Call the underlying Step 2 executor (now headless-aware) execute(input_params) + + +def step3(*, base_dir: str, selected_folders: Iterable[str]) -> None: + """ + Run pipeline Step 3 (Read Raw Data) via the actual Panel-backed logic, headlessly. + + This builds the template headlessly (using ``GUPPY_BASE_DIR`` to bypass + the folder dialog), sets the FileSelector to ``selected_folders``, retrieves + the full input parameters via ``getInputParameters()``, and calls the + underlying worker ``guppy.readTevTsq.readRawData(input_params)`` that the + UI normally launches via subprocess. No GUI is spawned. + + Parameters + ---------- + base_dir : str + Root directory used to initialize the FileSelector. All ``selected_folders`` + must reside directly under this path. + selected_folders : Iterable[str] + Absolute paths to the session directories to process. + + Raises + ------ + ValueError + If validation fails (e.g., empty iterable, invalid directories, or parent mismatch). + RuntimeError + If the template does not expose the required testing hooks/widgets. 
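+
+    Examples
+    --------
+    A minimal headless sketch with hypothetical paths, mirroring the fixture
+    used in ``tests/test_step3.py``; Step 2 is assumed to have already written
+    ``storesList.csv`` for each session before Step 3 runs::
+
+        from guppy.testing import step2, step3
+
+        base_dir = "/path/to/data_root"
+        sessions = [f"{base_dir}/SampleData_csv"]
+        step2(
+            base_dir=base_dir,
+            selected_folders=sessions,
+            storenames_map={
+                "Sample_Control_Channel": "control_region",
+                "Sample_Signal_Channel": "signal_region",
+                "Sample_TTL": "ttl",
+            },
+        )
+        step3(base_dir=base_dir, selected_folders=sessions)
+
+    After this call, each storename listed in ``storesList.csv`` should have a
+    corresponding HDF5 file in the session's output directory.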
+ """ + # Validate base_dir + if not isinstance(base_dir, str) or not base_dir: + raise ValueError("base_dir must be a non-empty string") + base_dir = os.path.abspath(base_dir) + if not os.path.isdir(base_dir): + raise ValueError(f"base_dir does not exist or is not a directory: {base_dir}") + + # Validate selected_folders + sessions = list(selected_folders or []) + if not sessions: + raise ValueError("selected_folders must be a non-empty iterable of session directories") + abs_sessions = [os.path.abspath(s) for s in sessions] + for s in abs_sessions: + if not os.path.isdir(s): + raise ValueError(f"Session path does not exist or is not a directory: {s}") + parent = os.path.dirname(s) + if parent != base_dir: + raise ValueError( + f"All selected_folders must share the same parent equal to base_dir. " + f"Got parent {parent!r} for session {s!r}, expected {base_dir!r}" + ) + + # Headless build: set base_dir and construct the template + os.environ["GUPPY_BASE_DIR"] = base_dir + template = savingInputParameters() + + # Ensure hooks/widgets exposed + if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: + raise RuntimeError("savingInputParameters did not expose 'getInputParameters' hook") + if not hasattr(template, "_widgets") or "files_1" not in template._widgets: + raise RuntimeError("savingInputParameters did not expose 'files_1' widget") + + # Select folders and fetch input parameters + template._widgets["files_1"].value = abs_sessions + input_params = template._hooks["getInputParameters"]() + + # Call the underlying Step 3 worker directly (no subprocess) + readRawData(input_params) diff --git a/tests/test_step3.py b/tests/test_step3.py new file mode 100644 index 0000000..93737b4 --- /dev/null +++ b/tests/test_step3.py @@ -0,0 +1,87 @@ +import os +import csv +import glob +import shutil + +import h5py +import pytest + +from guppy.testing.api import step2, step3 +from guppy.saveStoresList import import_np_doric_csv + + +def test_step3(tmp_path): + """ + Full integration test for Step 3 (Read Raw Data) using real CSV sample data, + isolated to a temporary workspace to avoid mutating shared sample data. + + Behavior: + - Copies the SampleData_csv session under GDriveSampleData into tmp_path. + - Cleans any copied artifacts (*_output_* dirs, GuPPyParamtersUsed.json). + - Derives a minimal storenames_map from the copied session and runs Step 2 + to create storesList.csv in the temp copy. + - Runs Step 3 headlessly and verifies per-storename HDF5 outputs exist in + the temp copy (never touching the original sample path). 
+ """ + src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_session = os.path.join(src_base_dir, "SampleData_csv") + + if not os.path.isdir(src_session): + pytest.skip(f"Sample data not available at expected path: {src_session}") + + # Stage a clean copy of the session into a temporary workspace + tmp_base = tmp_path / "data_root" + tmp_base.mkdir(parents=True, exist_ok=True) + session_copy = tmp_base / "SampleData_csv" + shutil.copytree(src_session, session_copy) + + # Remove any copied artifacts in the temp session + for d in glob.glob(os.path.join(session_copy, "*_output_*")): + try: + shutil.rmtree(d) + except FileNotFoundError: + pass + params_fp = session_copy / "GuPPyParamtersUsed.json" + if params_fp.exists(): + params_fp.unlink() + + # Derive the list of raw storenames from the copied session's files + events, _flags = import_np_doric_csv(str(session_copy), isosbestic_control=True, num_ch=2) + if not events: + pytest.skip("Could not derive storenames from copied sample data; events list empty") + storenames_map = {e: e for e in events} + + # Step 2: create storesList.csv in the temp copy + step2(base_dir=str(tmp_base), selected_folders=[str(session_copy)], storenames_map=storenames_map) + + # Step 3: read raw data in the temp copy + step3(base_dir=str(tmp_base), selected_folders=[str(session_copy)]) + + # Validate outputs exist in the temp copy + basename = os.path.basename(session_copy) + output_dirs = sorted(glob.glob(os.path.join(session_copy, f"{basename}_output_*"))) + assert output_dirs, f"No output directories found in {session_copy}" + out_dir = None + for d in output_dirs: + if os.path.exists(os.path.join(d, "storesList.csv")): + out_dir = d + break + assert out_dir is not None, f"No storesList.csv found in any output directory under {session_copy}" + stores_fp = os.path.join(out_dir, "storesList.csv") + + # Assert: storesList.csv structure is 2xN + with open(stores_fp, newline="") as f: + reader = csv.reader(f) + rows = list(reader) + assert len(rows) == 2, "storesList.csv should be 2 rows (storenames, names_for_storenames)" + storenames = rows[0] + assert storenames, "Expected at least one storename in storesList.csv" + + # For each storename, ensure an HDF5 was produced; minimally check timestamps dataset exists. 
+ for storename in storenames: + safe = storename.replace("\\", "_").replace("/", "_") + h5_path = os.path.join(out_dir, f"{safe}.hdf5") + assert os.path.exists(h5_path), f"Missing HDF5 for storename {storename!r} at {h5_path}" + + with h5py.File(h5_path, "r") as f: + assert "timestamps" in f, "Expected 'timestamps' dataset in HDF5" From b9c2fdf263df6b44724e53cea5578e839b829cae Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 15:10:10 -0700 Subject: [PATCH 44/83] fixed storenames_map --- tests/test_step3.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tests/test_step3.py b/tests/test_step3.py index 93737b4..b27c768 100644 --- a/tests/test_step3.py +++ b/tests/test_step3.py @@ -7,10 +7,17 @@ import pytest from guppy.testing.api import step2, step3 -from guppy.saveStoresList import import_np_doric_csv -def test_step3(tmp_path): +@pytest.fixture(scope="function") +def storenames_map(): + return { + "Sample_Control_Channel": "control_region", + "Sample_Signal_Channel": "signal_region", + "Sample_TTL": "ttl", + } + +def test_step3(tmp_path, storenames_map): """ Full integration test for Step 3 (Read Raw Data) using real CSV sample data, isolated to a temporary workspace to avoid mutating shared sample data. @@ -45,12 +52,6 @@ def test_step3(tmp_path): if params_fp.exists(): params_fp.unlink() - # Derive the list of raw storenames from the copied session's files - events, _flags = import_np_doric_csv(str(session_copy), isosbestic_control=True, num_ch=2) - if not events: - pytest.skip("Could not derive storenames from copied sample data; events list empty") - storenames_map = {e: e for e in events} - # Step 2: create storesList.csv in the temp copy step2(base_dir=str(tmp_base), selected_folders=[str(session_copy)], storenames_map=storenames_map) From 30ec4a2d4269365017157d46f3d49ccc8cbb8033 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 15:41:06 -0700 Subject: [PATCH 45/83] added headless mode for step 4 and appropriate test --- src/guppy/testing/__init__.py | 3 +- src/guppy/testing/api.py | 66 ++++++++++++++++++++++ tests/test_step4.py | 100 ++++++++++++++++++++++++++++++++++ 3 files changed, 168 insertions(+), 1 deletion(-) create mode 100644 tests/test_step4.py diff --git a/src/guppy/testing/__init__.py b/src/guppy/testing/__init__.py index 401828f..17d2b2a 100644 --- a/src/guppy/testing/__init__.py +++ b/src/guppy/testing/__init__.py @@ -1,7 +1,8 @@ -from .api import step1, step2, step3 +from .api import step1, step2, step3, step4 __all__ = [ "step1", "step2", "step3", + "step4", ] diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 48f5af2..2e0618f 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -18,6 +18,7 @@ from guppy.savingInputParameters import savingInputParameters from guppy.saveStoresList import execute from guppy.readTevTsq import readRawData +from guppy.preprocess import extractTsAndSignal @@ -212,3 +213,68 @@ def step3(*, base_dir: str, selected_folders: Iterable[str]) -> None: # Call the underlying Step 3 worker directly (no subprocess) readRawData(input_params) + + +def step4(*, base_dir: str, selected_folders: Iterable[str]) -> None: + """ + Run pipeline Step 4 (Extract timestamps and signal) via the Panel-backed logic, headlessly. 
+ + This builds the template headlessly (using ``GUPPY_BASE_DIR`` to bypass + the folder dialog), sets the FileSelector to ``selected_folders``, retrieves + the full input parameters via ``getInputParameters()``, and calls the + underlying worker ``guppy.preprocess.extractTsAndSignal(input_params)`` that the + UI normally launches via subprocess. No GUI is spawned. + + Parameters + ---------- + base_dir : str + Root directory used to initialize the FileSelector. All ``selected_folders`` + must reside directly under this path. + selected_folders : Iterable[str] + Absolute paths to the session directories to process. + + Raises + ------ + ValueError + If validation fails (e.g., empty iterable, invalid directories, or parent mismatch). + RuntimeError + If the template does not expose the required testing hooks/widgets. + """ + # Validate base_dir + if not isinstance(base_dir, str) or not base_dir: + raise ValueError("base_dir must be a non-empty string") + base_dir = os.path.abspath(base_dir) + if not os.path.isdir(base_dir): + raise ValueError(f"base_dir does not exist or is not a directory: {base_dir}") + + # Validate selected_folders + sessions = list(selected_folders or []) + if not sessions: + raise ValueError("selected_folders must be a non-empty iterable of session directories") + abs_sessions = [os.path.abspath(s) for s in sessions] + for s in abs_sessions: + if not os.path.isdir(s): + raise ValueError(f"Session path does not exist or is not a directory: {s}") + parent = os.path.dirname(s) + if parent != base_dir: + raise ValueError( + f"All selected_folders must share the same parent equal to base_dir. " + f"Got parent {parent!r} for session {s!r}, expected {base_dir!r}" + ) + + # Headless build: set base_dir and construct the template + os.environ["GUPPY_BASE_DIR"] = base_dir + template = savingInputParameters() + + # Ensure hooks/widgets exposed + if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: + raise RuntimeError("savingInputParameters did not expose 'getInputParameters' hook") + if not hasattr(template, "_widgets") or "files_1" not in template._widgets: + raise RuntimeError("savingInputParameters did not expose 'files_1' widget") + + # Select folders and fetch input parameters + template._widgets["files_1"].value = abs_sessions + input_params = template._hooks["getInputParameters"]() + + # Call the underlying Step 4 worker directly (no subprocess) + extractTsAndSignal(input_params) diff --git a/tests/test_step4.py b/tests/test_step4.py new file mode 100644 index 0000000..7aba868 --- /dev/null +++ b/tests/test_step4.py @@ -0,0 +1,100 @@ +import os +import glob +import shutil + +import h5py +import pytest + +from guppy.testing.api import step2, step3, step4 + +@pytest.fixture(scope="function") +def region(): + return "region" + +@pytest.fixture(scope="function") +def ttl_display_name(): + return "ttl" + +@pytest.fixture(scope="function") +def storenames_map(region, ttl_display_name): + return { + "Sample_Control_Channel": f"control_{region}", + "Sample_Signal_Channel": f"signal_{region}", + "Sample_TTL": f"{ttl_display_name}", + } + + +@pytest.mark.filterwarnings("ignore::UserWarning") +def test_step4(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): + """ + Full integration test for Step 4 (Extract timestamps and signal) using real CSV sample data, + isolated to a temporary workspace to avoid mutating shared sample data. 
+ + Pipeline executed on a temp copy: + - Step 2: create storesList.csv (derived from sample data if not present) + - Step 3: read raw data (per-storename HDF5 files) + - Step 4: extract timestamps/signal, compute z-score/dFF, time corrections, etc. + + Notes: + - matplotlib plotting in preprocess uses a GUI backend; to avoid blocking, we stub plt.show(). + - Assertions confirm creation of key HDF5 outputs expected from Step 4. + """ + # Use the CSV sample session + src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_session = os.path.join(src_base_dir, "SampleData_csv") + if not os.path.isdir(src_session): + pytest.skip(f"Sample data not available at expected path: {src_session}") + + # Stub matplotlib.pyplot.show to avoid GUI blocking + import matplotlib.pyplot as plt # noqa: F401 + monkeypatch.setattr("matplotlib.pyplot.show", lambda *args, **kwargs: None) + + # Stage a clean copy of the session into a temporary workspace + tmp_base = tmp_path / "data_root" + tmp_base.mkdir(parents=True, exist_ok=True) + session_copy = tmp_base / "SampleData_csv" + shutil.copytree(src_session, session_copy) + + # Remove any copied artifacts in the temp session + for d in glob.glob(os.path.join(session_copy, "*_output_*")): + try: + shutil.rmtree(d) + except FileNotFoundError: + pass + params_fp = session_copy / "GuPPyParamtersUsed.json" + if params_fp.exists(): + params_fp.unlink() + + # Step 2: create storesList.csv in the temp copy + step2(base_dir=str(tmp_base), selected_folders=[str(session_copy)], storenames_map=storenames_map) + + # Step 3: read raw data in the temp copy + step3(base_dir=str(tmp_base), selected_folders=[str(session_copy)]) + + # Step 4: extract timestamps and signal in the temp copy + step4(base_dir=str(tmp_base), selected_folders=[str(session_copy)]) + + # Validate outputs exist in the temp copy + basename = os.path.basename(session_copy) + output_dirs = sorted(glob.glob(os.path.join(session_copy, f"{basename}_output_*"))) + assert output_dirs, f"No output directories found in {session_copy}" + out_dir = None + for d in output_dirs: + if os.path.exists(os.path.join(d, "storesList.csv")): + out_dir = d + break + assert out_dir is not None, f"No storesList.csv found in any output directory under {session_copy}" + stores_fp = os.path.join(out_dir, "storesList.csv") + assert os.path.exists(stores_fp), "Missing storesList.csv after Step 2/3/4" + + # Ensure timeCorrection_.hdf5 exists with 'timestampNew' + timecorr = os.path.join(out_dir, f"timeCorrection_{region}.hdf5") + assert os.path.exists(timecorr), f"Missing {timecorr}" + with h5py.File(timecorr, "r") as f: + assert "timestampNew" in f, f"Expected 'timestampNew' dataset in {timecorr}" + + # If TTLs exist, check their per-region 'ts' outputs + ttl_fp = os.path.join(out_dir, f"{ttl_display_name}_{region}.hdf5") + assert os.path.exists(ttl_fp), f"Missing TTL-aligned file {ttl_fp}" + with h5py.File(ttl_fp, "r") as f: + assert "ts" in f, f"Expected 'ts' dataset in {ttl_fp}" From 04c564229f9938bbe32c1b4138fc908b9a373273 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 28 Aug 2025 16:59:21 -0700 Subject: [PATCH 46/83] added headless mode for step 5 and appropriate test --- src/guppy/testing/api.py | 70 ++++++++++++++++++++++ tests/test_step5.py | 122 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 192 insertions(+) create mode 100644 tests/test_step5.py diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 2e0618f..6ffe01f 100644 --- a/src/guppy/testing/api.py +++ 
b/src/guppy/testing/api.py @@ -19,6 +19,8 @@ from guppy.saveStoresList import execute from guppy.readTevTsq import readRawData from guppy.preprocess import extractTsAndSignal +from guppy.computePsth import psthForEachStorename +from guppy.findTransientsFreqAndAmp import executeFindFreqAndAmp @@ -278,3 +280,71 @@ def step4(*, base_dir: str, selected_folders: Iterable[str]) -> None: # Call the underlying Step 4 worker directly (no subprocess) extractTsAndSignal(input_params) + + +def step5(*, base_dir: str, selected_folders: Iterable[str]) -> None: + """ + Run pipeline Step 5 (PSTH Computation) via the Panel-backed logic, headlessly. + + This builds the template headlessly (using ``GUPPY_BASE_DIR`` to bypass + the folder dialog), sets the FileSelector to ``selected_folders``, retrieves + the full input parameters via ``getInputParameters()``, and calls the + underlying worker ``guppy.computePsth.psthForEachStorename(input_params)`` that the + UI normally launches via subprocess. No GUI is spawned. + + Parameters + ---------- + base_dir : str + Root directory used to initialize the FileSelector. All ``selected_folders`` + must reside directly under this path. + selected_folders : Iterable[str] + Absolute paths to the session directories to process. + + Raises + ------ + ValueError + If validation fails (e.g., empty iterable, invalid directories, or parent mismatch). + RuntimeError + If the template does not expose the required testing hooks/widgets. + """ + # Validate base_dir + if not isinstance(base_dir, str) or not base_dir: + raise ValueError("base_dir must be a non-empty string") + base_dir = os.path.abspath(base_dir) + if not os.path.isdir(base_dir): + raise ValueError(f"base_dir does not exist or is not a directory: {base_dir}") + + # Validate selected_folders + sessions = list(selected_folders or []) + if not sessions: + raise ValueError("selected_folders must be a non-empty iterable of session directories") + abs_sessions = [os.path.abspath(s) for s in sessions] + for s in abs_sessions: + if not os.path.isdir(s): + raise ValueError(f"Session path does not exist or is not a directory: {s}") + parent = os.path.dirname(s) + if parent != base_dir: + raise ValueError( + f"All selected_folders must share the same parent equal to base_dir. 
" + f"Got parent {parent!r} for session {s!r}, expected {base_dir!r}" + ) + + # Headless build: set base_dir and construct the template + os.environ["GUPPY_BASE_DIR"] = base_dir + template = savingInputParameters() + + # Ensure hooks/widgets exposed + if not hasattr(template, "_hooks") or "getInputParameters" not in template._hooks: + raise RuntimeError("savingInputParameters did not expose 'getInputParameters' hook") + if not hasattr(template, "_widgets") or "files_1" not in template._widgets: + raise RuntimeError("savingInputParameters did not expose 'files_1' widget") + + # Select folders and fetch input parameters + template._widgets["files_1"].value = abs_sessions + input_params = template._hooks["getInputParameters"]() + + # Call the underlying Step 5 worker directly (no subprocess) + psthForEachStorename(input_params) + + # Also compute frequency/amplitude and transients occurrences (normally triggered by CLI main) + executeFindFreqAndAmp(input_params) diff --git a/tests/test_step5.py b/tests/test_step5.py new file mode 100644 index 0000000..8eb9a6d --- /dev/null +++ b/tests/test_step5.py @@ -0,0 +1,122 @@ +import os +import glob +import shutil + +import pytest +import pandas as pd + +from guppy.testing.api import step2, step3, step4, step5 + + +@pytest.fixture(scope="function") +def region(): + return "region" + + +@pytest.fixture(scope="function") +def ttl_display_name(): + return "ttl" + + +@pytest.fixture(scope="function") +def storenames_map(region, ttl_display_name): + return { + "Sample_Control_Channel": f"control_{region}", + "Sample_Signal_Channel": f"signal_{region}", + "Sample_TTL": f"{ttl_display_name}", + } + + +@pytest.mark.filterwarnings("ignore::UserWarning") +def test_step5(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): + """ + Full integration test for Step 5 (PSTH Computation) using real CSV sample data, + isolated to a temporary workspace to avoid mutating shared sample data. + + Pipeline executed on a temp copy: + - Step 2: save storenames (storesList.csv) + - Step 3: read raw data (per-storename HDF5 outputs) + - Step 4: extract timestamps/signal, z-score/dFF, time corrections + - Step 5: compute PSTH and peak/AUC outputs + + Notes: + - matplotlib plotting in earlier steps may use a GUI backend; stub plt.show() to avoid blocking. + - Assertions confirm creation and basic readability of PSTH-related outputs from Step 5. + - Defaults are used for input parameters; PSTH computation defaults to z_score. 
+ """ + # Use the CSV sample session + src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_session = os.path.join(src_base_dir, "SampleData_csv") + if not os.path.isdir(src_session): + pytest.skip(f"Sample data not available at expected path: {src_session}") + + # Stub matplotlib.pyplot.show to avoid GUI blocking (used in earlier steps) + import matplotlib.pyplot as plt # noqa: F401 + monkeypatch.setattr("matplotlib.pyplot.show", lambda *args, **kwargs: None) + + # Stage a clean copy of the session into a temporary workspace + tmp_base = tmp_path / "data_root" + tmp_base.mkdir(parents=True, exist_ok=True) + session_copy = tmp_base / "SampleData_csv" + shutil.copytree(src_session, session_copy) + + # Remove any copied artifacts in the temp session + for d in glob.glob(os.path.join(session_copy, "*_output_*")): + try: + shutil.rmtree(d) + except FileNotFoundError: + pass + params_fp = session_copy / "GuPPyParamtersUsed.json" + if params_fp.exists(): + params_fp.unlink() + + # Step 2: create storesList.csv in the temp copy with explicit naming + step2(base_dir=str(tmp_base), selected_folders=[str(session_copy)], storenames_map=storenames_map) + + # Step 3: read raw data in the temp copy + step3(base_dir=str(tmp_base), selected_folders=[str(session_copy)]) + + # Step 4: extract timestamps and signal in the temp copy + step4(base_dir=str(tmp_base), selected_folders=[str(session_copy)]) + + # Step 5: compute PSTH in the temp copy (headless) + step5(base_dir=str(tmp_base), selected_folders=[str(session_copy)]) + + # Locate output directory + basename = os.path.basename(session_copy) + output_dirs = sorted(glob.glob(os.path.join(session_copy, f"{basename}_output_*"))) + assert output_dirs, f"No output directories found in {session_copy}" + out_dir = None + for d in output_dirs: + if os.path.exists(os.path.join(d, "storesList.csv")): + out_dir = d + break + assert out_dir is not None, f"No storesList.csv found in any output directory under {session_copy}" + stores_fp = os.path.join(out_dir, "storesList.csv") + assert os.path.exists(stores_fp), "Missing storesList.csv after Steps 2–5" + + # Expected PSTH outputs (defaults compute z_score PSTH) + psth_h5 = os.path.join(out_dir, f"{ttl_display_name}_{region}_z_score_{region}.h5") + psth_baseline_uncorr_h5 = os.path.join(out_dir, f"{ttl_display_name}_{region}_baselineUncorrected_z_score_{region}.h5") + peak_auc_h5 = os.path.join(out_dir, f"peak_AUC_{ttl_display_name}_{region}_z_score_{region}.h5") + peak_auc_csv = os.path.join(out_dir, f"peak_AUC_{ttl_display_name}_{region}_z_score_{region}.csv") + + # Assert file creation + assert os.path.exists(psth_h5), f"Missing PSTH HDF5: {psth_h5}" + assert os.path.exists(psth_baseline_uncorr_h5), f"Missing baseline-uncorrected PSTH HDF5: {psth_baseline_uncorr_h5}" + assert os.path.exists(peak_auc_h5), f"Missing PSTH Peak/AUC HDF5: {peak_auc_h5}" + assert os.path.exists(peak_auc_csv), f"Missing PSTH Peak/AUC CSV: {peak_auc_csv}" + + # Basic readability checks: PSTH HDF5 contains a DataFrame with expected columns + df = pd.read_hdf(psth_h5, key="df") + assert "timestamps" in df.columns, f"'timestamps' column missing in {psth_h5}" + # The DataFrame should include a 'mean' column per create_Df implementation + assert "mean" in df.columns, f"'mean' column missing in {psth_h5}" + + # Additional artifacts from transients frequency/amplitude computation (Step 5 side-effect) + freq_amp_h5 = os.path.join(out_dir, f"freqAndAmp_z_score_{region}.h5") + freq_amp_csv = 
os.path.join(out_dir, f"freqAndAmp_z_score_{region}.csv") + trans_occ_csv = os.path.join(out_dir, f"transientsOccurrences_z_score_{region}.csv") + assert os.path.exists(freq_amp_h5), f"Missing freq/amp HDF5: {freq_amp_h5}" + assert os.path.exists(freq_amp_csv), f"Missing freq/amp CSV: {freq_amp_csv}" + assert os.path.exists(trans_occ_csv), f"Missing transients occurrences CSV: {trans_occ_csv}" From 838d91151950672ba6980ca94f8859c8af452665 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 15:47:09 -0700 Subject: [PATCH 47/83] added the rest of the data modalities (except npm) for step 3 --- tests/test_step3.py | 59 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 8 deletions(-) diff --git a/tests/test_step3.py b/tests/test_step3.py index b27c768..7e66b53 100644 --- a/tests/test_step3.py +++ b/tests/test_step3.py @@ -17,7 +17,50 @@ def storenames_map(): "Sample_TTL": "ttl", } -def test_step3(tmp_path, storenames_map): +@pytest.mark.parametrize( + "session_subdir, storenames_map", + [ + ( + "SampleData_csv", + { + "Sample_Control_Channel": "control_region", + "Sample_Signal_Channel": "signal_region", + "Sample_TTL": "ttl", + }, + ), + ( + "SampleData_Doric", + { + "AIn-1 - Dem (ref)": "control_region", + "AIn-1 - Dem (da)": "signal_region", + "DI/O-1": "ttl", + }, + ), + ( + "SampleData_Clean/Photo_63_207-181030-103332", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + ), + ( + "SampleData_with_artifacts/Photo_048_392-200728-121222", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + ), + ], + ids=[ + "csv_generic", + "doric_csv", + "tdt_clean", + "tdt_with_artifacts", + ], +) +def test_step3(tmp_path, storenames_map, session_subdir): """ Full integration test for Step 3 (Read Raw Data) using real CSV sample data, isolated to a temporary workspace to avoid mutating shared sample data. @@ -31,7 +74,7 @@ def test_step3(tmp_path, storenames_map): the temp copy (never touching the original sample path). 
""" src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" - src_session = os.path.join(src_base_dir, "SampleData_csv") + src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): pytest.skip(f"Sample data not available at expected path: {src_session}") @@ -39,15 +82,15 @@ def test_step3(tmp_path, storenames_map): # Stage a clean copy of the session into a temporary workspace tmp_base = tmp_path / "data_root" tmp_base.mkdir(parents=True, exist_ok=True) - session_copy = tmp_base / "SampleData_csv" + dest_name = os.path.basename(src_session) + session_copy = tmp_base / dest_name shutil.copytree(src_session, session_copy) # Remove any copied artifacts in the temp session - for d in glob.glob(os.path.join(session_copy, "*_output_*")): - try: - shutil.rmtree(d) - except FileNotFoundError: - pass + # Use a specific glob that uniquely matches this session's output directory(ies) + for d in glob.glob(os.path.join(session_copy, f"{dest_name}_output_*")): + assert os.path.isdir(d), f"Expected output directory for cleanup, got non-directory: {d}" + shutil.rmtree(d) params_fp = session_copy / "GuPPyParamtersUsed.json" if params_fp.exists(): params_fp.unlink() From 1d24eca6f94c2a93662d5cc829335dc3611bdf5c Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 15:55:29 -0700 Subject: [PATCH 48/83] added the rest of the data modalities (except npm) for step 2 --- tests/test_step2.py | 127 ++++++++++++++++++++++++++++++++------------ 1 file changed, 94 insertions(+), 33 deletions(-) diff --git a/tests/test_step2.py b/tests/test_step2.py index 61195f2..51b153b 100644 --- a/tests/test_step2.py +++ b/tests/test_step2.py @@ -1,49 +1,110 @@ import csv import os +import glob +import shutil import pytest from guppy.testing.api import step2 -@pytest.fixture(scope="function") -def storenames_map(): - return { - "Dv1A": "control_DMS", - "Dv2A": "signal_DMS", - "PrtR": "RewardedPort", - } +@pytest.mark.parametrize( + "session_subdir, storenames_map", + [ + ( + "SampleData_csv", + { + "Sample_Control_Channel": "control_region", + "Sample_Signal_Channel": "signal_region", + "Sample_TTL": "ttl", + }, + ), + ( + "SampleData_Doric", + { + "AIn-1 - Dem (ref)": "control_region", + "AIn-1 - Dem (da)": "signal_region", + "DI/O-1": "ttl", + }, + ), + ( + "SampleData_Clean/Photo_63_207-181030-103332", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + ), + ( + "SampleData_with_artifacts/Photo_048_392-200728-121222", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + ), + ], + ids=[ + "csv_generic", + "doric_csv", + "tdt_clean", + "tdt_with_artifacts", + ], +) +def test_step2(tmp_path, session_subdir, storenames_map): + """ + Step 2 integration test (Save Storenames) using real sample data, isolated to a temporary workspace. 
+ For each dataset: + - Copies the session into a temp workspace + - Cleans any copied *_output_* artifacts (using a specific glob to avoid non-dirs) + - Calls step2 headlessly with an explicit, deterministic storenames_map + - Asserts storesList.csv exists and exactly matches the provided mapping (2xN) + """ + # Source sample data + src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_session = os.path.join(src_base_dir, session_subdir) + if not os.path.isdir(src_session): + pytest.skip(f"Sample data not available at expected path: {src_session}") + # Stage a clean copy of the session into a temporary workspace + tmp_base = tmp_path / "data_root" + tmp_base.mkdir(parents=True, exist_ok=True) + dest_name = os.path.basename(src_session) + session_copy = tmp_base / dest_name + shutil.copytree(src_session, session_copy) -def test_step2(tmp_path, storenames_map): - # Arrange: create base_dir with two session folders - base_name = "data_root" - base_dir = tmp_path / base_name - base_dir.mkdir(parents=True, exist_ok=True) + # Remove any copied artifacts in the temp session; match only this session's output directory(ies) + for d in glob.glob(os.path.join(session_copy, f"{dest_name}_output_*")): + assert os.path.isdir(d), f"Expected output directory for cleanup, got non-directory: {d}" + shutil.rmtree(d) - session_names = ["session1", "session2"] - sessions = [] - for name in session_names: - p = base_dir / name - p.mkdir(parents=True, exist_ok=True) - sessions.append(str(p)) + # Remove any copied GuPPyParamtersUsed.json to ensure a fresh run + params_fp = session_copy / "GuPPyParamtersUsed.json" + if params_fp.exists(): + params_fp.unlink() - # Act: write storesList.csv headlessly - step2(base_dir=str(base_dir), selected_folders=sessions, storenames_map=storenames_map) + # Run Step 2 headlessly using the explicit mapping + step2(base_dir=str(tmp_base), selected_folders=[str(session_copy)], storenames_map=storenames_map) - # Assert: each session has {session_basename}_output_1/storesList.csv with 2xN structure - for s in sessions: - basename = os.path.basename(s) - out_dir = os.path.join(s, f"{basename}_output_1") - out_fp = os.path.join(out_dir, "storesList.csv") + # Validate storesList.csv exists and matches the mapping exactly (order-preserved) + basename = os.path.basename(session_copy) + output_dirs = sorted(glob.glob(os.path.join(session_copy, f"{basename}_output_*"))) + assert output_dirs, f"No output directories found in {session_copy}" - assert os.path.isdir(out_dir), f"Missing output directory: {out_dir}" - assert os.path.exists(out_fp), f"Missing storesList.csv: {out_fp}" + out_dir = None + for d in output_dirs: + if os.path.exists(os.path.join(d, "storesList.csv")): + out_dir = d + break + assert out_dir is not None, f"No storesList.csv found in any output directory under {session_copy}" - with open(out_fp, newline="") as f: - reader = csv.reader(f) - rows = list(reader) + out_fp = os.path.join(out_dir, "storesList.csv") + assert os.path.exists(out_fp), f"Missing storesList.csv: {out_fp}" - assert len(rows) == 2, f"Expected 2 rows (storenames, names_for_storenames), got {len(rows)}" - assert rows[0] == list(storenames_map.keys()), "Row 0 (storenames) mismatch" - assert rows[1] == list(storenames_map.values()), "Row 1 (names_for_storenames) mismatch" + with open(out_fp, newline="") as f: + reader = csv.reader(f) + rows = list(reader) + + assert len(rows) == 2, f"Expected 2 rows (storenames, names_for_storenames), got {len(rows)}" + assert rows[0] == 
list(storenames_map.keys()), "Row 0 (storenames) mismatch" + assert rows[1] == list(storenames_map.values()), "Row 1 (names_for_storenames) mismatch" From 58092e591276f0afc74f4cb28cb74fdcc899c478 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 16:15:15 -0700 Subject: [PATCH 49/83] added npm to step 2 --- src/guppy/saveStoresList.py | 46 +++++++++++++++++++++++++++++-------- tests/test_step2.py | 9 ++++++++ 2 files changed, 45 insertions(+), 10 deletions(-) diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 6a21ffd..5e60962 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -586,7 +586,7 @@ def check_channels(state): return unique_state.shape[0], unique_state # function to decide NPM timestamps unit (seconds, ms or us) -def decide_ts_unit_for_npm(df): +def decide_ts_unit_for_npm(df, timestamp_col=None, time_unit=None, headless=False): col_names = np.array(list(df.columns)) col_names_ts = [''] for name in col_names: @@ -595,6 +595,15 @@ def decide_ts_unit_for_npm(df): ts_unit = 'seconds' if len(col_names_ts)>2: + # Headless path: auto-select column/unit without any UI + if headless: + # Choose provided column if valid, otherwise the first timestamp-like column found + chosen = timestamp_col if (isinstance(timestamp_col, str) and timestamp_col in df.columns) else col_names_ts[1] + df.insert(1, 'Timestamp', df[chosen]) + df = df.drop(col_names_ts[1:], axis=1) + valid_units = {'seconds', 'milliseconds', 'microseconds'} + ts_unit = time_unit if (isinstance(time_unit, str) and time_unit in valid_units) else 'seconds' + return df, ts_unit #def comboBoxSelected(event): # print(event.widget.get()) @@ -744,10 +753,19 @@ def read_doric(filepath): # and recognize type of 'csv' files either from # Neurophotometrics, Doric systems or custom made 'csv' files # and read data accordingly -def import_np_doric_csv(filepath, isosbestic_control, num_ch): +def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=None): insertLog("If it exists, importing either NPM or Doric or csv file based on the structure of file", logging.DEBUG) + # Headless configuration (used to avoid any UI prompts when running tests) + headless = bool(os.environ.get('GUPPY_BASE_DIR')) + npm_timestamp_col = None + npm_time_unit = None + npm_split_events = None + if isinstance(inputParameters, dict): + npm_timestamp_col = inputParameters.get('npm_timestamp_col') + npm_time_unit = inputParameters.get('npm_time_unit') + npm_split_events = inputParameters.get('npm_split_events') path = sorted(glob.glob(os.path.join(filepath, '*.csv'))) + \ sorted(glob.glob(os.path.join(filepath, '*.doric'))) path_chev = glob.glob(os.path.join(filepath, '*chev*')) @@ -882,16 +900,19 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch): elif flag=='event_np': type_val = np.array(df.iloc[:,1]) type_val_unique = np.unique(type_val) - window = tk.Tk() - if len(type_val_unique)>1: - response = messagebox.askyesno('Multiple event TTLs', 'Based on the TTL file,\ + if headless: + response = 1 if bool(npm_split_events) else 0 + else: + window = tk.Tk() + if len(type_val_unique)>1: + response = messagebox.askyesno('Multiple event TTLs', 'Based on the TTL file,\ it looks like TTLs \ belongs to multipe behavior type. 
\ Do you want to create multiple files for each \ behavior type ?') - else: - response = 0 - window.destroy() + else: + response = 0 + window.destroy() if response==1: timestamps = np.array(df.iloc[:,0]) for j in range(len(type_val_unique)): @@ -910,7 +931,12 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch): event_from_filename.append('event'+str(0)) else: file = f'file{str(i)}_' - df, ts_unit = decide_ts_unit_for_npm(df) + df, ts_unit = decide_ts_unit_for_npm( + df, + timestamp_col=npm_timestamp_col, + time_unit=npm_time_unit, + headless=headless + ) df, indices_dict, num_channels = decide_indices(file, df, flag) keys = list(indices_dict.keys()) for k in range(len(keys)): @@ -1007,7 +1033,7 @@ def execute(inputParameters): for i in folderNames: filepath = os.path.join(inputParameters['abspath'], i) data = readtsq(filepath) - event_name, flag = import_np_doric_csv(filepath, isosbestic_control, num_ch) + event_name, flag = import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=inputParameters) saveStorenames(inputParameters, data, event_name, flag, filepath) insertLog('#'*400, logging.INFO) except Exception as e: diff --git a/tests/test_step2.py b/tests/test_step2.py index 51b153b..e8b6871 100644 --- a/tests/test_step2.py +++ b/tests/test_step2.py @@ -43,12 +43,21 @@ "PrtN": "port_entries_dms", }, ), + ( + "SampleData_Neurophotometrics/1442", + { + "file0_chev1": "control_region1", + "file0_chod1": "signal_region1", + "eventTrue": "ttl_true_region1", + }, + ), ], ids=[ "csv_generic", "doric_csv", "tdt_clean", "tdt_with_artifacts", + "neurophotometrics_csv", ], ) def test_step2(tmp_path, session_subdir, storenames_map): From 31dc80df90b861a38d1e2f1d3770e9d1fd9d8123 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 16:19:13 -0700 Subject: [PATCH 50/83] added npm to step 3 --- tests/test_step3.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_step3.py b/tests/test_step3.py index 7e66b53..597150e 100644 --- a/tests/test_step3.py +++ b/tests/test_step3.py @@ -52,12 +52,21 @@ def storenames_map(): "PrtN": "port_entries_dms", }, ), + ( + "SampleData_Neurophotometrics/1442", + { + "file0_chev1": "control_region1", + "file0_chod1": "signal_region1", + "eventTrue": "ttl_true_region1", + }, + ), ], ids=[ "csv_generic", "doric_csv", "tdt_clean", "tdt_with_artifacts", + "neurophotometrics_csv", ], ) def test_step3(tmp_path, storenames_map, session_subdir): From fb2055732993800b644b3e2faef7c6bb18085c0c Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 16:31:39 -0700 Subject: [PATCH 51/83] added the rest of the data modalities for step 4 --- tests/test_step4.py | 100 +++++++++++++++++++++++++++++++------------- 1 file changed, 72 insertions(+), 28 deletions(-) diff --git a/tests/test_step4.py b/tests/test_step4.py index 7aba868..4cd3aaf 100644 --- a/tests/test_step4.py +++ b/tests/test_step4.py @@ -7,25 +7,70 @@ from guppy.testing.api import step2, step3, step4 -@pytest.fixture(scope="function") -def region(): - return "region" - -@pytest.fixture(scope="function") -def ttl_display_name(): - return "ttl" - -@pytest.fixture(scope="function") -def storenames_map(region, ttl_display_name): - return { - "Sample_Control_Channel": f"control_{region}", - "Sample_Signal_Channel": f"signal_{region}", - "Sample_TTL": f"{ttl_display_name}", - } - - +@pytest.mark.parametrize( + "session_subdir, storenames_map, expected_region, expected_ttl", + [ + ( + "SampleData_csv", + { + "Sample_Control_Channel": "control_region", + 
"Sample_Signal_Channel": "signal_region", + "Sample_TTL": "ttl", + }, + "region", + "ttl", + ), + ( + "SampleData_Doric", + { + "AIn-1 - Dem (ref)": "control_region", + "AIn-1 - Dem (da)": "signal_region", + "DI/O-1": "ttl", + }, + "region", + "ttl", + ), + ( + "SampleData_Clean/Photo_63_207-181030-103332", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + "dms", + "port_entries_dms", + ), + ( + "SampleData_with_artifacts/Photo_048_392-200728-121222", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + "dms", + "port_entries_dms", + ), + ( + "SampleData_Neurophotometrics/1442", + { + "file0_chev1": "control_region1", + "file0_chod1": "signal_region1", + "eventTrue": "ttl_true_region1", + }, + "region1", + "ttl_true_region1", + ), + ], + ids=[ + "csv_generic", + "doric_csv", + "tdt_clean", + "tdt_with_artifacts", + "neurophotometrics_csv", + ], +) @pytest.mark.filterwarnings("ignore::UserWarning") -def test_step4(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): +def test_step4(tmp_path, monkeypatch, session_subdir, storenames_map, expected_region, expected_ttl): """ Full integration test for Step 4 (Extract timestamps and signal) using real CSV sample data, isolated to a temporary workspace to avoid mutating shared sample data. @@ -41,7 +86,7 @@ def test_step4(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): """ # Use the CSV sample session src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" - src_session = os.path.join(src_base_dir, "SampleData_csv") + src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): pytest.skip(f"Sample data not available at expected path: {src_session}") @@ -52,15 +97,14 @@ def test_step4(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): # Stage a clean copy of the session into a temporary workspace tmp_base = tmp_path / "data_root" tmp_base.mkdir(parents=True, exist_ok=True) - session_copy = tmp_base / "SampleData_csv" + dest_name = os.path.basename(src_session) + session_copy = tmp_base / dest_name shutil.copytree(src_session, session_copy) - # Remove any copied artifacts in the temp session - for d in glob.glob(os.path.join(session_copy, "*_output_*")): - try: - shutil.rmtree(d) - except FileNotFoundError: - pass + # Remove any copied artifacts in the temp session (match only this session's output dirs) + for d in glob.glob(os.path.join(session_copy, f"{dest_name}_output_*")): + assert os.path.isdir(d), f"Expected output directory for cleanup, got non-directory: {d}" + shutil.rmtree(d) params_fp = session_copy / "GuPPyParamtersUsed.json" if params_fp.exists(): params_fp.unlink() @@ -88,13 +132,13 @@ def test_step4(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): assert os.path.exists(stores_fp), "Missing storesList.csv after Step 2/3/4" # Ensure timeCorrection_.hdf5 exists with 'timestampNew' - timecorr = os.path.join(out_dir, f"timeCorrection_{region}.hdf5") + timecorr = os.path.join(out_dir, f"timeCorrection_{expected_region}.hdf5") assert os.path.exists(timecorr), f"Missing {timecorr}" with h5py.File(timecorr, "r") as f: assert "timestampNew" in f, f"Expected 'timestampNew' dataset in {timecorr}" # If TTLs exist, check their per-region 'ts' outputs - ttl_fp = os.path.join(out_dir, f"{ttl_display_name}_{region}.hdf5") + ttl_fp = os.path.join(out_dir, f"{expected_ttl}_{expected_region}.hdf5") assert os.path.exists(ttl_fp), f"Missing TTL-aligned file 
{ttl_fp}" with h5py.File(ttl_fp, "r") as f: assert "ts" in f, f"Expected 'ts' dataset in {ttl_fp}" From 2664ab53f2b3b9affb03ab15be947950131cd44f Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 16:35:50 -0700 Subject: [PATCH 52/83] added the rest of the data modalities for step 5 --- tests/test_step5.py | 114 ++++++++++++++++++++++++++++++-------------- 1 file changed, 78 insertions(+), 36 deletions(-) diff --git a/tests/test_step5.py b/tests/test_step5.py index 8eb9a6d..27f5f7a 100644 --- a/tests/test_step5.py +++ b/tests/test_step5.py @@ -8,27 +8,70 @@ from guppy.testing.api import step2, step3, step4, step5 -@pytest.fixture(scope="function") -def region(): - return "region" - - -@pytest.fixture(scope="function") -def ttl_display_name(): - return "ttl" - - -@pytest.fixture(scope="function") -def storenames_map(region, ttl_display_name): - return { - "Sample_Control_Channel": f"control_{region}", - "Sample_Signal_Channel": f"signal_{region}", - "Sample_TTL": f"{ttl_display_name}", - } - - +@pytest.mark.parametrize( + "session_subdir, storenames_map, expected_region, expected_ttl", + [ + ( + "SampleData_csv", + { + "Sample_Control_Channel": "control_region", + "Sample_Signal_Channel": "signal_region", + "Sample_TTL": "ttl", + }, + "region", + "ttl", + ), + ( + "SampleData_Doric", + { + "AIn-1 - Dem (ref)": "control_region", + "AIn-1 - Dem (da)": "signal_region", + "DI/O-1": "ttl", + }, + "region", + "ttl", + ), + ( + "SampleData_Clean/Photo_63_207-181030-103332", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + "dms", + "port_entries_dms", + ), + ( + "SampleData_with_artifacts/Photo_048_392-200728-121222", + { + "Dv1A": "control_dms", + "Dv2A": "signal_dms", + "PrtN": "port_entries_dms", + }, + "dms", + "port_entries_dms", + ), + ( + "SampleData_Neurophotometrics/1442", + { + "file0_chev1": "control_region1", + "file0_chod1": "signal_region1", + "eventTrue": "ttl_true_region1", + }, + "region1", + "ttl_true_region1", + ), + ], + ids=[ + "csv_generic", + "doric_csv", + "tdt_clean", + "tdt_with_artifacts", + "neurophotometrics_csv", + ], +) @pytest.mark.filterwarnings("ignore::UserWarning") -def test_step5(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): +def test_step5(tmp_path, monkeypatch, session_subdir, storenames_map, expected_region, expected_ttl): """ Full integration test for Step 5 (PSTH Computation) using real CSV sample data, isolated to a temporary workspace to avoid mutating shared sample data. @@ -44,9 +87,9 @@ def test_step5(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): - Assertions confirm creation and basic readability of PSTH-related outputs from Step 5. - Defaults are used for input parameters; PSTH computation defaults to z_score. 
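+    - Expected output names are built from the TTL and region display names,
+      e.g. "<ttl>_<region>_z_score_<region>.h5" for the PSTH and
+      "peak_AUC_<ttl>_<region>_z_score_<region>.csv" for the peak/AUC summary.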
""" - # Use the CSV sample session + # Use the sample session src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" - src_session = os.path.join(src_base_dir, "SampleData_csv") + src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): pytest.skip(f"Sample data not available at expected path: {src_session}") @@ -57,15 +100,14 @@ def test_step5(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): # Stage a clean copy of the session into a temporary workspace tmp_base = tmp_path / "data_root" tmp_base.mkdir(parents=True, exist_ok=True) - session_copy = tmp_base / "SampleData_csv" + dest_name = os.path.basename(src_session) + session_copy = tmp_base / dest_name shutil.copytree(src_session, session_copy) - # Remove any copied artifacts in the temp session - for d in glob.glob(os.path.join(session_copy, "*_output_*")): - try: - shutil.rmtree(d) - except FileNotFoundError: - pass + # Remove any copied artifacts in the temp session (match only this session's output dirs) + for d in glob.glob(os.path.join(session_copy, f"{dest_name}_output_*")): + assert os.path.isdir(d), f"Expected output directory for cleanup, got non-directory: {d}" + shutil.rmtree(d) params_fp = session_copy / "GuPPyParamtersUsed.json" if params_fp.exists(): params_fp.unlink() @@ -96,10 +138,10 @@ def test_step5(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): assert os.path.exists(stores_fp), "Missing storesList.csv after Steps 2–5" # Expected PSTH outputs (defaults compute z_score PSTH) - psth_h5 = os.path.join(out_dir, f"{ttl_display_name}_{region}_z_score_{region}.h5") - psth_baseline_uncorr_h5 = os.path.join(out_dir, f"{ttl_display_name}_{region}_baselineUncorrected_z_score_{region}.h5") - peak_auc_h5 = os.path.join(out_dir, f"peak_AUC_{ttl_display_name}_{region}_z_score_{region}.h5") - peak_auc_csv = os.path.join(out_dir, f"peak_AUC_{ttl_display_name}_{region}_z_score_{region}.csv") + psth_h5 = os.path.join(out_dir, f"{expected_ttl}_{expected_region}_z_score_{expected_region}.h5") + psth_baseline_uncorr_h5 = os.path.join(out_dir, f"{expected_ttl}_{expected_region}_baselineUncorrected_z_score_{expected_region}.h5") + peak_auc_h5 = os.path.join(out_dir, f"peak_AUC_{expected_ttl}_{expected_region}_z_score_{expected_region}.h5") + peak_auc_csv = os.path.join(out_dir, f"peak_AUC_{expected_ttl}_{expected_region}_z_score_{expected_region}.csv") # Assert file creation assert os.path.exists(psth_h5), f"Missing PSTH HDF5: {psth_h5}" @@ -114,9 +156,9 @@ def test_step5(tmp_path, monkeypatch, region, ttl_display_name, storenames_map): assert "mean" in df.columns, f"'mean' column missing in {psth_h5}" # Additional artifacts from transients frequency/amplitude computation (Step 5 side-effect) - freq_amp_h5 = os.path.join(out_dir, f"freqAndAmp_z_score_{region}.h5") - freq_amp_csv = os.path.join(out_dir, f"freqAndAmp_z_score_{region}.csv") - trans_occ_csv = os.path.join(out_dir, f"transientsOccurrences_z_score_{region}.csv") + freq_amp_h5 = os.path.join(out_dir, f"freqAndAmp_z_score_{expected_region}.h5") + freq_amp_csv = os.path.join(out_dir, f"freqAndAmp_z_score_{expected_region}.csv") + trans_occ_csv = os.path.join(out_dir, f"transientsOccurrences_z_score_{expected_region}.csv") assert os.path.exists(freq_amp_h5), f"Missing freq/amp HDF5: {freq_amp_h5}" assert os.path.exists(freq_amp_csv), f"Missing freq/amp CSV: {freq_amp_csv}" assert os.path.exists(trans_occ_csv), f"Missing transients occurrences CSV: {trans_occ_csv}" From 
08e5799842b979a81199b7cf62ae9900c70ec792 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Fri, 29 Aug 2025 18:12:39 -0700 Subject: [PATCH 53/83] updated run-tests.yml --- .github/workflows/run-tests.yml | 35 ++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 5fd96d4..598df76 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -11,4 +11,37 @@ on: description: 'List of OS versions to use in matrix, as JSON string' required: true type: string - workflow_dispatch: \ No newline at end of file + workflow_dispatch: + +jobs: + run: + name: ${{ matrix.os }} Python ${{ matrix.python-version }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + python-version: ${{ fromJson(inputs.python-versions) }} + os: ${{ fromJson(inputs.os-versions) }} + steps: + - uses: actions/checkout@v5 + - name: Setup Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install pip + run: python -m pip install -U pip # Official recommended way + + - name: Install GuPPy + run: python -m pip install "." + + - name: Install pytest + run: python -m pip install pytest + + - name: Prepare data for tests + uses: ./.github/actions/load-data + with: + rclone-config: ${{ secrets.RCLONE_CONFIG }} + + - name: Run tests + run: pytest tests -vv -rsx -n auto --dist loadscope \ No newline at end of file From 20dc6e8b59a89b75c1eed461f7056d8f40efdab2 Mon Sep 17 00:00:00 2001 From: Paul Adkisson Date: Mon, 1 Sep 2025 14:49:16 -0400 Subject: [PATCH 54/83] Added load-data action (#156) --- .github/actions/load-data/action.yml | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 .github/actions/load-data/action.yml diff --git a/.github/actions/load-data/action.yml b/.github/actions/load-data/action.yml new file mode 100644 index 0000000..514840e --- /dev/null +++ b/.github/actions/load-data/action.yml @@ -0,0 +1,33 @@ +name: 'Prepare Datasets' +description: 'Restores data from caches or downloads it from Google Drive.' 
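+# The dataset hash computed below keys the cache, so any change to the shared
+# Google Drive sample data invalidates the cache and forces a fresh download.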
+inputs: + rclone-config: + description: 'Rclone configuration' + required: true +runs: + using: 'composite' + steps: + - name: Setup Rclone + uses: AnimMouse/setup-rclone@v1 + with: + rclone_config: ${{ inputs.rclone-config }} + + - name: Get dataset version hash + shell: bash + run: | + HASH=$(rclone lsl remote:"SampleData" --drive-shared-with-me) + echo "DATASET_HASH=$HASH" >> $GITHUB_OUTPUT + + - name: Cache datasets + uses: actions/cache@v4 + id: cache-datasets + with: + path: ./testing_data + key: ephys-datasets-${{ steps.ephys.outputs.DATASET_HASH }} + enableCrossOsArchive: true + + - if: ${{ steps.cache-datasets.outputs.cache-hit != 'true' }} + name: Download datasets from Google Drive + shell: bash + run: | + rclone copy remote:"SampleData" ./testing_data --drive-shared-with-me \ No newline at end of file From 0ad6de1c943ce18e62990d588b4a8fc12575931c Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 11:51:02 -0700 Subject: [PATCH 55/83] merge main --- .github/workflows/auto-publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/auto-publish.yml b/.github/workflows/auto-publish.yml index ac4d922..966d79d 100644 --- a/.github/workflows/auto-publish.yml +++ b/.github/workflows/auto-publish.yml @@ -1,4 +1,4 @@ -# Automatically builds and publishes Python package to TestPyPI when a GitHub release is published. +# Automatically builds and publishes Python package to PyPI when a GitHub release is published. # Uses build library to create distribution files from pyproject.toml configuration, # then uploads to TestPyPI using official PyPA GitHub Action with Trusted Publishing. # Trusted Publishing eliminates the need for API tokens by using cryptographic attestations From 70b72276e657ff9c74f3bd0a1beb07ece7a84a21 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 12:01:13 -0700 Subject: [PATCH 56/83] added inputs to the workflow dispatch --- .github/workflows/run-tests.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 598df76..6322385 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -12,6 +12,17 @@ on: required: true type: string workflow_dispatch: + inputs: + python-versions: + description: 'List of Python versions to use in matrix, as JSON string' + required: true + type: string + default: '["3.10", "3.11", "3.12", "3.13"]' + os-versions: + description: 'List of OS versions to use in matrix, as JSON string' + required: true + type: string + default: '["ubuntu-latest", "windows-latest", "macos-latest"]' jobs: run: From 1f9a4903de9a7001e181b220a397136e08a600f6 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 13:44:24 -0700 Subject: [PATCH 57/83] switched to rclone lsjson --- .github/actions/load-data/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/load-data/action.yml b/.github/actions/load-data/action.yml index 514840e..ac0b772 100644 --- a/.github/actions/load-data/action.yml +++ b/.github/actions/load-data/action.yml @@ -15,7 +15,7 @@ runs: - name: Get dataset version hash shell: bash run: | - HASH=$(rclone lsl remote:"SampleData" --drive-shared-with-me) + HASH=$(rclone lsjson remote:"SampleData" --drive-shared-with-me --recursive) echo "DATASET_HASH=$HASH" >> $GITHUB_OUTPUT - name: Cache datasets From db38acf819535970e457308fd847c7c70e9616ab Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 13:48:49 -0700 Subject: 
[PATCH 58/83] fixed hashing --- .github/actions/load-data/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/load-data/action.yml b/.github/actions/load-data/action.yml index ac0b772..87ad451 100644 --- a/.github/actions/load-data/action.yml +++ b/.github/actions/load-data/action.yml @@ -15,7 +15,7 @@ runs: - name: Get dataset version hash shell: bash run: | - HASH=$(rclone lsjson remote:"SampleData" --drive-shared-with-me --recursive) + HASH=$(rclone lsjson remote:"SampleData" --drive-shared-with-me --recursive | sha256sum | cut -d' ' -f1) echo "DATASET_HASH=$HASH" >> $GITHUB_OUTPUT - name: Cache datasets From 4274192d7c78f6728908c662fe2cd87cb369fb79 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 14:06:54 -0700 Subject: [PATCH 59/83] updated dependency groups to include test --- .github/workflows/run-tests.yml | 9 ++++----- pyproject.toml | 7 +++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 6322385..9cbd404 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -43,11 +43,10 @@ jobs: - name: Install pip run: python -m pip install -U pip # Official recommended way - - name: Install GuPPy - run: python -m pip install "." - - - name: Install pytest - run: python -m pip install pytest + - name: Install GuPPy with testing requirements + run: | + python -m pip install "." + python -m pip install --group test - name: Prepare data for tests uses: ./.github/actions/load-data diff --git a/pyproject.toml b/pyproject.toml index aa0a3c9..822137f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,6 +56,13 @@ dependencies = [ "tables", ] +[dependency-groups] +test = [ + "pytest", + "pytest-cov", + "pytest-xdist" # Runs tests on parallel +] + [project.scripts] guppy = "guppy.main:main" From 58848bf0149e15b798d3ede83472b5ffe0a021b8 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 14:26:09 -0700 Subject: [PATCH 60/83] DEBUG commented out plt.switch_backend --- src/guppy/preprocess.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 3f472b6..8c51c9d 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -17,7 +17,7 @@ from matplotlib.widgets import MultiCursor from pathlib import Path from .combineDataFn import processTimestampsForCombiningData -plt.switch_backend('TKAgg') +# plt.switch_backend('TKAgg') def takeOnlyDirs(paths): removePaths = [] From 7909b04507b92adffcd38cd7f60d94f51ba7ab11 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 14:30:06 -0700 Subject: [PATCH 61/83] Only set matplotlib backend if not in CI environment --- src/guppy/preprocess.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 8c51c9d..4d14f12 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -17,7 +17,10 @@ from matplotlib.widgets import MultiCursor from pathlib import Path from .combineDataFn import processTimestampsForCombiningData -# plt.switch_backend('TKAgg') + +# Only set matplotlib backend if not in CI environment +if not os.getenv('CI'): + plt.switch_backend('TKAgg') def takeOnlyDirs(paths): removePaths = [] From 507163b9e75b25c4d0ecc405213b7c7e70f81bdd Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 14:30:28 -0700 Subject: [PATCH 62/83] updated cache key --- .github/actions/load-data/action.yml | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/load-data/action.yml b/.github/actions/load-data/action.yml index 87ad451..f8cd5e1 100644 --- a/.github/actions/load-data/action.yml +++ b/.github/actions/load-data/action.yml @@ -23,7 +23,7 @@ runs: id: cache-datasets with: path: ./testing_data - key: ephys-datasets-${{ steps.ephys.outputs.DATASET_HASH }} + key: ${{ steps.ephys.outputs.DATASET_HASH }} enableCrossOsArchive: true - if: ${{ steps.cache-datasets.outputs.cache-hit != 'true' }} From 5dc12ee6c2f5af6c10fb00757076aa751b431662 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 14:33:41 -0700 Subject: [PATCH 63/83] updated cache key --- .github/actions/load-data/action.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/actions/load-data/action.yml b/.github/actions/load-data/action.yml index f8cd5e1..f41dfcd 100644 --- a/.github/actions/load-data/action.yml +++ b/.github/actions/load-data/action.yml @@ -13,6 +13,7 @@ runs: rclone_config: ${{ inputs.rclone-config }} - name: Get dataset version hash + id: hash shell: bash run: | HASH=$(rclone lsjson remote:"SampleData" --drive-shared-with-me --recursive | sha256sum | cut -d' ' -f1) @@ -23,7 +24,7 @@ runs: id: cache-datasets with: path: ./testing_data - key: ${{ steps.ephys.outputs.DATASET_HASH }} + key: ${{ steps.hash.outputs.DATASET_HASH }} enableCrossOsArchive: true - if: ${{ steps.cache-datasets.outputs.cache-hit != 'true' }} From 61209c56e95325bb118323865d9e69dbcffa0da5 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 1 Sep 2025 14:57:29 -0700 Subject: [PATCH 64/83] updated testing data location --- .gitignore | 4 +++- tests/test_step2.py | 3 ++- tests/test_step3.py | 3 ++- tests/test_step4.py | 3 ++- tests/test_step5.py | 3 ++- 5 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index f2c8949..c845f0c 100755 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,6 @@ z-score_methods.tgn GuPPy/runFiberPhotometryAnalysis.ipynb .vscode/ *.egg-info/ -.clinerules/ \ No newline at end of file +.clinerules/ + +testing_data/ \ No newline at end of file diff --git a/tests/test_step2.py b/tests/test_step2.py index e8b6871..310cb42 100644 --- a/tests/test_step2.py +++ b/tests/test_step2.py @@ -2,6 +2,7 @@ import os import glob import shutil +from pathlib import Path import pytest @@ -70,7 +71,7 @@ def test_step2(tmp_path, session_subdir, storenames_map): - Asserts storesList.csv exists and exactly matches the provided mapping (2xN) """ # Source sample data - src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_base_dir = str(Path(".") / "testing_data") src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): pytest.skip(f"Sample data not available at expected path: {src_session}") diff --git a/tests/test_step3.py b/tests/test_step3.py index 597150e..86eaddc 100644 --- a/tests/test_step3.py +++ b/tests/test_step3.py @@ -5,6 +5,7 @@ import h5py import pytest +from pathlib import Path from guppy.testing.api import step2, step3 @@ -82,7 +83,7 @@ def test_step3(tmp_path, storenames_map, session_subdir): - Runs Step 3 headlessly and verifies per-storename HDF5 outputs exist in the temp copy (never touching the original sample path). 
""" - src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_base_dir = str(Path(".") / "testing_data") src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): diff --git a/tests/test_step4.py b/tests/test_step4.py index 4cd3aaf..b48fa9a 100644 --- a/tests/test_step4.py +++ b/tests/test_step4.py @@ -4,6 +4,7 @@ import h5py import pytest +from pathlib import Path from guppy.testing.api import step2, step3, step4 @@ -85,7 +86,7 @@ def test_step4(tmp_path, monkeypatch, session_subdir, storenames_map, expected_r - Assertions confirm creation of key HDF5 outputs expected from Step 4. """ # Use the CSV sample session - src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_base_dir = str(Path(".") / "testing_data") src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): pytest.skip(f"Sample data not available at expected path: {src_session}") diff --git a/tests/test_step5.py b/tests/test_step5.py index 27f5f7a..84f2ba6 100644 --- a/tests/test_step5.py +++ b/tests/test_step5.py @@ -4,6 +4,7 @@ import pytest import pandas as pd +from pathlib import Path from guppy.testing.api import step2, step3, step4, step5 @@ -88,7 +89,7 @@ def test_step5(tmp_path, monkeypatch, session_subdir, storenames_map, expected_r - Defaults are used for input parameters; PSTH computation defaults to z_score. """ # Use the sample session - src_base_dir = "/Users/pauladkisson/Documents/CatalystNeuro/Guppy/GDriveSampleData" + src_base_dir = str(Path(".") / "testing_data") src_session = os.path.join(src_base_dir, session_subdir) if not os.path.isdir(src_session): pytest.skip(f"Sample data not available at expected path: {src_session}") From c1a629c3aa77f17978458eb7dea88009cac0a260 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 2 Sep 2025 11:35:57 -0700 Subject: [PATCH 65/83] fixed bug with npm params --- src/guppy/saveStoresList.py | 10 +++++----- src/guppy/testing/api.py | 28 ++++++++++++++++++++++++---- 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 5e60962..7d83b66 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -586,7 +586,7 @@ def check_channels(state): return unique_state.shape[0], unique_state # function to decide NPM timestamps unit (seconds, ms or us) -def decide_ts_unit_for_npm(df, timestamp_col=None, time_unit=None, headless=False): +def decide_ts_unit_for_npm(df, timestamp_col=0, time_unit=None, headless=False): col_names = np.array(list(df.columns)) col_names_ts = [''] for name in col_names: @@ -598,7 +598,7 @@ def decide_ts_unit_for_npm(df, timestamp_col=None, time_unit=None, headless=Fals # Headless path: auto-select column/unit without any UI if headless: # Choose provided column if valid, otherwise the first timestamp-like column found - chosen = timestamp_col if (isinstance(timestamp_col, str) and timestamp_col in df.columns) else col_names_ts[1] + chosen = col_names_ts[timestamp_col] df.insert(1, 'Timestamp', df[chosen]) df = df.drop(col_names_ts[1:], axis=1) valid_units = {'seconds', 'milliseconds', 'microseconds'} @@ -763,9 +763,9 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No npm_time_unit = None npm_split_events = None if isinstance(inputParameters, dict): - npm_timestamp_col = inputParameters.get('npm_timestamp_col') - npm_time_unit = inputParameters.get('npm_time_unit') - npm_split_events = 
inputParameters.get('npm_split_events') + npm_timestamp_col = inputParameters.get('npm_timestamp_col', 0) + npm_time_unit = inputParameters.get('npm_time_unit', 'seconds') + npm_split_events = inputParameters.get('npm_split_events', True) path = sorted(glob.glob(os.path.join(filepath, '*.csv'))) + \ sorted(glob.glob(os.path.join(filepath, '*.doric'))) path_chev = glob.glob(os.path.join(filepath, '*chev*')) diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index 6ffe01f..fd4e31e 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -69,7 +69,7 @@ def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: template._hooks["onclickProcess"]() -def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dict[str, str]) -> None: +def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dict[str, str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 2 (Save Storenames) via the actual Panel-backed logic. @@ -148,11 +148,16 @@ def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dic # Inject storenames mapping for headless execution input_params["storenames_map"] = dict(storenames_map) + # Add npm parameters + input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_time_unit"] = npm_time_unit + input_params["npm_split_events"] = npm_split_events + # Call the underlying Step 2 executor (now headless-aware) execute(input_params) -def step3(*, base_dir: str, selected_folders: Iterable[str]) -> None: +def step3(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 3 (Read Raw Data) via the actual Panel-backed logic, headlessly. @@ -213,11 +218,16 @@ def step3(*, base_dir: str, selected_folders: Iterable[str]) -> None: template._widgets["files_1"].value = abs_sessions input_params = template._hooks["getInputParameters"]() + # Inject explicit NPM parameters (match Step 2 style) + input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_time_unit"] = npm_time_unit + input_params["npm_split_events"] = npm_split_events + # Call the underlying Step 3 worker directly (no subprocess) readRawData(input_params) -def step4(*, base_dir: str, selected_folders: Iterable[str]) -> None: +def step4(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 4 (Extract timestamps and signal) via the Panel-backed logic, headlessly. @@ -278,11 +288,16 @@ def step4(*, base_dir: str, selected_folders: Iterable[str]) -> None: template._widgets["files_1"].value = abs_sessions input_params = template._hooks["getInputParameters"]() + # Inject explicit NPM parameters (match Step 2 style) + input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_time_unit"] = npm_time_unit + input_params["npm_split_events"] = npm_split_events + # Call the underlying Step 4 worker directly (no subprocess) extractTsAndSignal(input_params) -def step5(*, base_dir: str, selected_folders: Iterable[str]) -> None: +def step5(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 5 (PSTH Computation) via the Panel-backed logic, headlessly. 
@@ -343,6 +358,11 @@ def step5(*, base_dir: str, selected_folders: Iterable[str]) -> None: template._widgets["files_1"].value = abs_sessions input_params = template._hooks["getInputParameters"]() + # Inject explicit NPM parameters (match Step 2 style) + input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_time_unit"] = npm_time_unit + input_params["npm_split_events"] = npm_split_events + # Call the underlying Step 5 worker directly (no subprocess) psthForEachStorename(input_params) From a4e12accb750ce58a4bc187377181379830f8139 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 2 Sep 2025 11:44:22 -0700 Subject: [PATCH 66/83] added extra assertions to test_step2 to ensure npm creates the requisite .csvs --- tests/test_step2.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/test_step2.py b/tests/test_step2.py index 310cb42..add82ec 100644 --- a/tests/test_step2.py +++ b/tests/test_step2.py @@ -118,3 +118,19 @@ def test_step2(tmp_path, session_subdir, storenames_map): assert len(rows) == 2, f"Expected 2 rows (storenames, names_for_storenames), got {len(rows)}" assert rows[0] == list(storenames_map.keys()), "Row 0 (storenames) mismatch" assert rows[1] == list(storenames_map.values()), "Row 1 (names_for_storenames) mismatch" + + # Additional NPM assertions: ensure Step 2 created the expected CSV files for Neurophotometrics + if session_subdir == "SampleData_Neurophotometrics/1442": + expected_files = [ + "eventTrue.csv", + "eventFalse.csv", + "file0_chev1.csv", + "file0_chev2.csv", + "file0_chev3.csv", + "file0_chod1.csv", + "file0_chod2.csv", + "file0_chod3.csv", + ] + for rel in expected_files: + fp = os.path.join(session_copy, rel) + assert os.path.exists(fp), f"Missing expected NPM file at Step 2: {fp}" From 2a32bc60bf054613b28b90949fbdc8ca5bb699b3 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 2 Sep 2025 16:24:36 -0700 Subject: [PATCH 67/83] updated default os to windows-2022 --- .github/workflows/run-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 9cbd404..985c46a 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -22,7 +22,7 @@ on: description: 'List of OS versions to use in matrix, as JSON string' required: true type: string - default: '["ubuntu-latest", "windows-latest", "macos-latest"]' + default: '["ubuntu-latest", "windows-2022", "macos-latest"]' jobs: run: From ac86e35d0f48cc5e1c4c714c5506b520b8c246c0 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 2 Sep 2025 16:42:52 -0700 Subject: [PATCH 68/83] updated pr-tests --- .github/workflows/all_os_versions.txt | 1 + .github/workflows/all_python_versions.txt | 1 + .github/workflows/pr-tests.yml | 45 ++++++++++++++++++++++- .github/workflows/run-tests.yml | 5 +++ 4 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/all_os_versions.txt create mode 100644 .github/workflows/all_python_versions.txt diff --git a/.github/workflows/all_os_versions.txt b/.github/workflows/all_os_versions.txt new file mode 100644 index 0000000..1bcf5cd --- /dev/null +++ b/.github/workflows/all_os_versions.txt @@ -0,0 +1 @@ +["ubuntu-latest", "macos-latest", "windows-2022"] \ No newline at end of file diff --git a/.github/workflows/all_python_versions.txt b/.github/workflows/all_python_versions.txt new file mode 100644 index 0000000..350c415 --- /dev/null +++ b/.github/workflows/all_python_versions.txt @@ -0,0 +1 @@ +["3.10", "3.11", 
"3.12", "3.13"] \ No newline at end of file diff --git a/.github/workflows/pr-tests.yml b/.github/workflows/pr-tests.yml index cf703c7..9f0195a 100644 --- a/.github/workflows/pr-tests.yml +++ b/.github/workflows/pr-tests.yml @@ -10,4 +10,47 @@ on: concurrency: group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true \ No newline at end of file + cancel-in-progress: true + +jobs: + load_python_and_os_versions: + runs-on: ubuntu-latest + outputs: + ALL_PYTHON_VERSIONS: ${{ steps.load_python_versions.outputs.python_versions }} + ALL_OS_VERSIONS: ${{ steps.load_os_versions.outputs.os_versions }} + steps: + - uses: actions/checkout@v4 + - id: load_python_versions + run: echo "python_versions=$(cat ./.github/workflows/all_python_versions.txt)" >> "$GITHUB_OUTPUT" + - id: load_os_versions + run: echo "os_versions=$(cat ./.github/workflows/all_os_versions.txt)" >> "$GITHUB_OUTPUT" + - name: Debugging + run: | + echo "Loaded Python versions: ${{ steps.load_python_versions.outputs.python_versions }}" + echo "Loaded OS versions: ${{ steps.load_os_versions.outputs.os_versions }}" + + assess-file-changes: + uses: ./.github/workflows/assess-file-changes.yml + + run-tests: + needs: [assess-file-changes, load_python_and_os_versions] + if: ${{ needs.assess-file-changes.outputs.SOURCE_CHANGED == 'true' }} + uses: ./.github/workflows/run-tests.yml + secrets: + RCLONE_CONFIG: ${{ secrets.RCLONE_CONFIG }} + with: # Ternary operator: condition && value_if_true || value_if_false + python-versions: ${{ github.event.pull_request.draft == true && '["3.10"]' || needs.load_python_and_os_versions.outputs.ALL_PYTHON_VERSIONS }} + os-versions: ${{ github.event.pull_request.draft == true && '["ubuntu-latest"]' || needs.load_python_and_os_versions.outputs.ALL_OS_VERSIONS }} + + check-final-status: + name: All tests passing + if: always() + needs: + - run-tests + runs-on: ubuntu-latest + steps: + - name: Decide whether all jobs succeeded or at least one failed + uses: re-actors/alls-green@release/v1 + with: + allowed-skips: run-tests + jobs: ${{ toJSON(needs) }} \ No newline at end of file diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 985c46a..1c38dc6 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -11,6 +11,11 @@ on: description: 'List of OS versions to use in matrix, as JSON string' required: true type: string + secrets: + RCLONE_CONFIG: + description: 'RClone configuration file content' + required: true + type: string workflow_dispatch: inputs: python-versions: From d6a33e42b89512e5e683f2740231eab96560101f Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 2 Sep 2025 16:44:02 -0700 Subject: [PATCH 69/83] updated pr-tests --- .github/workflows/run-tests.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 1c38dc6..06103f0 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -13,9 +13,7 @@ on: type: string secrets: RCLONE_CONFIG: - description: 'RClone configuration file content' required: true - type: string workflow_dispatch: inputs: python-versions: From b05382df7fb1a5c37367e3917715dd651043fa34 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Tue, 2 Sep 2025 16:46:01 -0700 Subject: [PATCH 70/83] removed assess-file-changes --- .github/workflows/pr-tests.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.github/workflows/pr-tests.yml b/.github/workflows/pr-tests.yml index 9f0195a..7f67d6f 
100644 --- a/.github/workflows/pr-tests.yml +++ b/.github/workflows/pr-tests.yml @@ -29,12 +29,8 @@ jobs: echo "Loaded Python versions: ${{ steps.load_python_versions.outputs.python_versions }}" echo "Loaded OS versions: ${{ steps.load_os_versions.outputs.os_versions }}" - assess-file-changes: - uses: ./.github/workflows/assess-file-changes.yml - run-tests: - needs: [assess-file-changes, load_python_and_os_versions] - if: ${{ needs.assess-file-changes.outputs.SOURCE_CHANGED == 'true' }} + needs: [load_python_and_os_versions] uses: ./.github/workflows/run-tests.yml secrets: RCLONE_CONFIG: ${{ secrets.RCLONE_CONFIG }} From 00b0e67987091e2003323fcec6bd28451c546d19 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Wed, 3 Sep 2025 12:41:55 -0700 Subject: [PATCH 71/83] fixed data cache hash --- .github/actions/load-data/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/load-data/action.yml b/.github/actions/load-data/action.yml index f41dfcd..b78fe20 100644 --- a/.github/actions/load-data/action.yml +++ b/.github/actions/load-data/action.yml @@ -16,7 +16,7 @@ runs: id: hash shell: bash run: | - HASH=$(rclone lsjson remote:"SampleData" --drive-shared-with-me --recursive | sha256sum | cut -d' ' -f1) + HASH=$(rclone lsjson remote:"SampleData" --drive-shared-with-me --recursive | sed 's/,$//' | sort | sha256sum | cut -d' ' -f1) echo "DATASET_HASH=$HASH" >> $GITHUB_OUTPUT - name: Cache datasets From 35e5731238b20d48a9212ac11539454fb39ad01a Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Mon, 15 Sep 2025 17:23:25 -0700 Subject: [PATCH 72/83] updated npm_timestamp to use name instead of index --- src/guppy/saveStoresList.py | 15 +++++++++------ src/guppy/testing/api.py | 16 ++++++++-------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 7d83b66..95f12f9 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -586,7 +586,7 @@ def check_channels(state): return unique_state.shape[0], unique_state # function to decide NPM timestamps unit (seconds, ms or us) -def decide_ts_unit_for_npm(df, timestamp_col=0, time_unit=None, headless=False): +def decide_ts_unit_for_npm(df, timestamp_column_name=None, time_unit=None, headless=False): col_names = np.array(list(df.columns)) col_names_ts = [''] for name in col_names: @@ -597,8 +597,11 @@ def decide_ts_unit_for_npm(df, timestamp_col=0, time_unit=None, headless=False): if len(col_names_ts)>2: # Headless path: auto-select column/unit without any UI if headless: - # Choose provided column if valid, otherwise the first timestamp-like column found - chosen = col_names_ts[timestamp_col] + if timestamp_column_name is not None: + assert timestamp_column_name in col_names_ts, f"Provided timestamp_column_name '{timestamp_column_name}' not found in columns {col_names_ts[1:]}" + chosen = timestamp_column_name + else: + chosen = col_names_ts[0] df.insert(1, 'Timestamp', df[chosen]) df = df.drop(col_names_ts[1:], axis=1) valid_units = {'seconds', 'milliseconds', 'microseconds'} @@ -759,11 +762,11 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No logging.DEBUG) # Headless configuration (used to avoid any UI prompts when running tests) headless = bool(os.environ.get('GUPPY_BASE_DIR')) - npm_timestamp_col = None + npm_timestamp_column_name = None npm_time_unit = None npm_split_events = None if isinstance(inputParameters, dict): - npm_timestamp_col = inputParameters.get('npm_timestamp_col', 0) + 
npm_timestamp_column_name = inputParameters.get('npm_timestamp_column_name') npm_time_unit = inputParameters.get('npm_time_unit', 'seconds') npm_split_events = inputParameters.get('npm_split_events', True) path = sorted(glob.glob(os.path.join(filepath, '*.csv'))) + \ @@ -933,7 +936,7 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No file = f'file{str(i)}_' df, ts_unit = decide_ts_unit_for_npm( df, - timestamp_col=npm_timestamp_col, + timestamp_column_name=npm_timestamp_column_name, time_unit=npm_time_unit, headless=headless ) diff --git a/src/guppy/testing/api.py b/src/guppy/testing/api.py index fd4e31e..bc8b239 100644 --- a/src/guppy/testing/api.py +++ b/src/guppy/testing/api.py @@ -69,7 +69,7 @@ def step1(*, base_dir: str, selected_folders: Iterable[str]) -> None: template._hooks["onclickProcess"]() -def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dict[str, str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: +def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dict[str, str], npm_timestamp_column_name: str | None = None, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 2 (Save Storenames) via the actual Panel-backed logic. @@ -149,7 +149,7 @@ def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dic input_params["storenames_map"] = dict(storenames_map) # Add npm parameters - input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_timestamp_column_name"] = npm_timestamp_column_name input_params["npm_time_unit"] = npm_time_unit input_params["npm_split_events"] = npm_split_events @@ -157,7 +157,7 @@ def step2(*, base_dir: str, selected_folders: Iterable[str], storenames_map: dic execute(input_params) -def step3(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: +def step3(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_column_name: str | None = None, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 3 (Read Raw Data) via the actual Panel-backed logic, headlessly. @@ -219,7 +219,7 @@ def step3(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: input_params = template._hooks["getInputParameters"]() # Inject explicit NPM parameters (match Step 2 style) - input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_timestamp_column_name"] = npm_timestamp_column_name input_params["npm_time_unit"] = npm_time_unit input_params["npm_split_events"] = npm_split_events @@ -227,7 +227,7 @@ def step3(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: readRawData(input_params) -def step4(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: +def step4(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_column_name: str | None = None, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 4 (Extract timestamps and signal) via the Panel-backed logic, headlessly. 
@@ -289,7 +289,7 @@ def step4(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: input_params = template._hooks["getInputParameters"]() # Inject explicit NPM parameters (match Step 2 style) - input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_timestamp_column_name"] = npm_timestamp_column_name input_params["npm_time_unit"] = npm_time_unit input_params["npm_split_events"] = npm_split_events @@ -297,7 +297,7 @@ def step4(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: extractTsAndSignal(input_params) -def step5(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: int = 0, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: +def step5(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_column_name: str | None = None, npm_time_unit: str = "seconds", npm_split_events: bool = True) -> None: """ Run pipeline Step 5 (PSTH Computation) via the Panel-backed logic, headlessly. @@ -359,7 +359,7 @@ def step5(*, base_dir: str, selected_folders: Iterable[str], npm_timestamp_col: input_params = template._hooks["getInputParameters"]() # Inject explicit NPM parameters (match Step 2 style) - input_params["npm_timestamp_col"] = npm_timestamp_col + input_params["npm_timestamp_column_name"] = npm_timestamp_column_name input_params["npm_time_unit"] = npm_time_unit input_params["npm_split_events"] = npm_split_events From 96facf3e1732cbfb994a1164f8652f2a8001391d Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 18 Sep 2025 09:33:23 -0700 Subject: [PATCH 73/83] disable parallel execution for ci tests --- .github/workflows/run-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 06103f0..22aa937 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -57,4 +57,4 @@ jobs: rclone-config: ${{ secrets.RCLONE_CONFIG }} - name: Run tests - run: pytest tests -vv -rsx -n auto --dist loadscope \ No newline at end of file + run: pytest tests -vv -rsx # -n auto --dist loadscope # TODO: re-enable parallel execution when logging issues with Windows are resolved \ No newline at end of file From b649247fb3c9996221acbb826fabe8462c87b14a Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 16 Oct 2025 10:50:54 -0700 Subject: [PATCH 74/83] First pass at centralizing the logging. 
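Modules that previously carried a local insertLog() helper now import a shared logger from the new src/guppy/logging_config.py, which resolves the log directory with platformdirs.user_log_dir("guppy", "LernerLab"), honors a GUPPY_LOG_LEVEL environment variable, and adds file plus optional console handlers exactly once.

A minimal usage sketch (assuming the package is importable as `guppy`, as the tests do; the log messages below are illustrative only, not actual call sites):

    from guppy.logging_config import get_log_file, logger, setup_logging

    # setup_logging() already runs once at module import; calling it again is a
    # no-op because the guppy logger's existing handlers are reused.
    setup_logging(level=None, console_output=True)

    logger.debug("Creating chunks for multiprocessing.")
    logger.info("Data from tsq file fetched.")
    print("Logs are written to:", get_log_file())
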
--- pyproject.toml | 1 + src/guppy/computeCorr.py | 34 +------ src/guppy/computePsth.py | 62 ++++-------- src/guppy/findTransientsFreqAndAmp.py | 57 +++-------- src/guppy/logging_config.py | 84 ++++++++++++++++ src/guppy/preprocess.py | 134 ++++++++++---------------- src/guppy/readTevTsq.py | 106 +++++++------------- src/guppy/saveStoresList.py | 93 ++++++------------ src/guppy/savingInputParameters.py | 47 ++------- src/guppy/visualizePlot.py | 28 +----- 10 files changed, 249 insertions(+), 397 deletions(-) create mode 100644 src/guppy/logging_config.py diff --git a/pyproject.toml b/pyproject.toml index 822137f..0d6d6ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,6 +52,7 @@ dependencies = [ "pandas", "panel", "param", + "platformdirs", "scipy", "tables", ] diff --git a/src/guppy/computeCorr.py b/src/guppy/computeCorr.py index 404e9df..3ea82c1 100644 --- a/src/guppy/computeCorr.py +++ b/src/guppy/computeCorr.py @@ -3,34 +3,11 @@ import re import math import h5py -import logging import numpy as np import pandas as pd from scipy import signal from pathlib import Path - -def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - infoLog - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) +from .logging_config import logger def make_dir(filepath): op = os.path.join(filepath, "cross_correlation_output") @@ -49,7 +26,7 @@ def read_hdf5(event, filepath, key): with h5py.File(op, 'r') as f: arr = np.asarray(f[key]) else: - insertLog(f"{event}.hdf5 file does not exist", logging.ERROR) + logger.error(f"{event}.hdf5 file does not exist") raise Exception('{}.hdf5 file does not exist'.format(event)) return arr @@ -129,8 +106,7 @@ def getCorrCombinations(filepath, inputParameters): corr_info = list() if len(names)<=1: - insertLog("Cross-correlation cannot be computed because only one signal is present.", - logging.INFO) + logger.info("Cross-correlation cannot be computed because only one signal is present.") print("Cross-correlation cannot be computed because only one signal is present.") return corr_info, type elif len(names)==2: @@ -175,7 +151,7 @@ def computeCrossCorrelation(filepath, event, inputParameters): else: for i in range(1, len(corr_info)): print("Computing cross-correlation for event {}...".format(event)) - insertLog(f"Computing cross-correlation for event {event}", logging.DEBUG) + logger.debug(f"Computing cross-correlation for event {event}") for j in range(len(type)): psth_a = read_Df(filepath, event+'_'+corr_info[i-1], type[j]+'_'+corr_info[i-1]) psth_b = read_Df(filepath, event+'_'+corr_info[i], type[j]+'_'+corr_info[i]) @@ -191,5 +167,5 @@ def computeCrossCorrelation(filepath, event, inputParameters): cross_corr = helperCrossCorrelation(arr_A, arr_B, sample_rate) cols.append('timestamps') create_Df(make_dir(filepath), 'corr_'+event, type[j]+'_'+corr_info[i-1]+'_'+corr_info[i], cross_corr, cols) - insertLog(f"Cross-correlation for event {event} computed.", logging.INFO) + logger.info(f"Cross-correlation for event {event} computed.") print("Cross-correlation for event {} computed.".format(event)) diff 
--git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index bbbac51..298c3f0 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -8,7 +8,6 @@ import h5py import math import subprocess -import logging import numpy as np import pandas as pd from itertools import repeat @@ -20,6 +19,7 @@ from .computeCorr import computeCrossCorrelation from .computeCorr import getCorrCombinations from .computeCorr import make_dir +from .logging_config import logger def takeOnlyDirs(paths): removePaths = [] @@ -28,29 +28,6 @@ def takeOnlyDirs(paths): removePaths.append(p) return list(set(paths)-set(removePaths)) -def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - infoLog - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) - def writeToFile(value: str): with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(value) @@ -390,7 +367,7 @@ def storenamePsth(filepath, event, inputParameters): else: for i in range(len(path)): print("Computing PSTH for event {}...".format(event)) - insertLog(f"Computing PSTH for event {event}", logging.DEBUG) + logger.debug(f"Computing PSTH for event {event}") basename = (os.path.basename(path[i])).split('.')[0] name_1 = basename.split('_')[-1] control = read_hdf5('control_'+name_1, os.path.dirname(path[i]), 'data') @@ -409,7 +386,7 @@ def storenamePsth(filepath, event, inputParameters): create_Df(filepath, event+'_'+name_1+'_baselineUncorrected', basename, psth_baselineUncorrected, columns=cols) # extra create_Df(filepath, event+'_'+name_1, basename, psth, columns=cols) - insertLog(f"PSTH for event {event} computed.", logging.INFO) + logger.info(f"PSTH for event {event} computed.") print("PSTH for event {} computed.".format(event)) @@ -422,11 +399,11 @@ def helperPSTHPeakAndArea(psth_mean, timestamps, sampling_rate, peak_startPoint, peak_endPoint = peak_endPoint[~np.isnan(peak_endPoint)] if peak_startPoint.shape[0]!=peak_endPoint.shape[0]: - insertLog('Number of Peak Start Time and Peak End Time are unequal.', logging.ERROR) + logger.error('Number of Peak Start Time and Peak End Time are unequal.') raise Exception('Number of Peak Start Time and Peak End Time are unequal.') if np.less_equal(peak_endPoint, peak_startPoint).any()==True: - insertLog('Peak End Time is lesser than or equal to Peak Start Time. Please check the Peak parameters window.', logging.ERROR) + logger.error('Peak End Time is lesser than or equal to Peak Start Time. Please check the Peak parameters window.') raise Exception('Peak End Time is lesser than or equal to Peak Start Time. 
Please check the Peak parameters window.') @@ -477,7 +454,7 @@ def findPSTHPeakAndArea(filepath, event, inputParameters): else: for i in range(len(path)): print('Computing peak and area for PSTH mean signal for event {}...'.format(event)) - insertLog(f"Computing peak and area for PSTH mean signal for event {event}", logging.DEBUG) + logger.debug(f"Computing peak and area for PSTH mean signal for event {event}") basename = (os.path.basename(path[i])).split('.')[0] name_1 = basename.split('_')[-1] sampling_rate = read_hdf5('timeCorrection_'+name_1, filepath, 'sampling_rate')[0] @@ -496,7 +473,7 @@ def findPSTHPeakAndArea(filepath, event, inputParameters): index = [fileName[0]+'_'+s for s in psth_mean_bin_names] create_Df_area_peak(filepath, peak_area, event+'_'+name_1+'_'+basename, index=index) # columns=['peak', 'area'] create_csv_area_peak(filepath, peak_area, event+'_'+name_1+'_'+basename, index=index) - insertLog(f"Peak and Area for PSTH mean signal for event {event} computed.", logging.INFO) + logger.info(f"Peak and Area for PSTH mean signal for event {event} computed.") print('Peak and Area for PSTH mean signal for event {} computed.'.format(event)) def makeAverageDir(filepath): @@ -533,7 +510,7 @@ def averageForGroup(folderNames, event, inputParameters): event = event.replace("/","_") print("Averaging group of data...") - insertLog("Averaging group of data", logging.DEBUG) + logger.debug("Averaging group of data") path = [] abspath = inputParameters['abspath'] selectForComputePsth = inputParameters['selectForComputePsth'] @@ -593,7 +570,7 @@ def averageForGroup(folderNames, event, inputParameters): psth_bins.append(df[bins_cols]) if len(psth)==0: - insertLog('Somthing is wrong with the file search pattern.', logging.WARNING) + logger.warning('Somthing is wrong with the file search pattern.') print("Somthing is wrong with the file search pattern.") continue @@ -635,7 +612,7 @@ def averageForGroup(folderNames, event, inputParameters): index.append(list(df.index)) if len(arr)==0: - insertLog('Somthing is wrong with the file search pattern.', logging.WARNING) + logger.warning('Somthing is wrong with the file search pattern.') print("Somthing is wrong with the file search pattern.") continue index = list(np.concatenate(index)) @@ -674,7 +651,7 @@ def averageForGroup(folderNames, event, inputParameters): columns.append('timestamps') create_Df(make_dir(op), 'corr_'+event, type[i]+'_'+corr_info[k-1]+'_'+corr_info[k], corr, columns=columns) - insertLog('Group of data averaged.', logging.INFO) + logger.info('Group of data averaged.') print("Group of data averaged.") @@ -697,15 +674,15 @@ def psthForEachStorename(inputParameters): if numProcesses==0: numProcesses = mp.cpu_count() elif numProcesses>mp.cpu_count(): - insertLog('Warning : # of cores parameter set is greater than the cores available \ - available in your machine', logging.WARNING) + logger.warning('Warning : # of cores parameter set is greater than the cores available \ + available in your machine') print('Warning : # of cores parameter set is greater than the cores available \ available in your machine') numProcesses = mp.cpu_count()-1 print("Average for group : ", average) - insertLog(f"Average for group : {average}", logging.INFO) + logger.info(f"Average for group : {average}") # for average following if statement will be executed if average==True: @@ -737,8 +714,7 @@ def psthForEachStorename(inputParameters): inputParameters['step'] += 1 else: - insertLog('Not a single folder name is provided in folderNamesForAvg in 
inputParamters File.', - logging.ERROR) + logger.error('Not a single folder name is provided in folderNamesForAvg in inputParamters File.') raise Exception('Not a single folder name is provided in folderNamesForAvg in inputParamters File.') # for individual analysis following else statement will be executed @@ -768,7 +744,7 @@ def psthForEachStorename(inputParameters): storesListPath = np.concatenate(storesListPath) writeToFile(str((storesListPath.shape[0]+storesListPath.shape[0]+1)*10)+'\n'+str(10)+'\n') for i in range(len(folderNames)): - insertLog(f"Computing PSTH, Peak and Area for each event in {folderNames[i]}", logging.DEBUG) + logger.debug(f"Computing PSTH, Peak and Area for each event in {folderNames[i]}") storesListPath = takeOnlyDirs(glob.glob(os.path.join(folderNames[i], '*_output_*'))) for j in range(len(storesListPath)): filepath = storesListPath[j] @@ -789,7 +765,7 @@ def psthForEachStorename(inputParameters): writeToFile(str(10+((inputParameters['step']+1)*10))+'\n') inputParameters['step'] += 1 - insertLog(f"PSTH, Area and Peak are computed for all events in {folderNames[i]}.", logging.INFO) + logger.info(f"PSTH, Area and Peak are computed for all events in {folderNames[i]}.") print("PSTH, Area and Peak are computed for all events.") return inputParameters @@ -797,11 +773,11 @@ def main(input_parameters): try: inputParameters = psthForEachStorename(input_parameters) subprocess.call([sys.executable, "-m", "guppy.findTransientsFreqAndAmp", json.dumps(inputParameters)]) - insertLog('#'*400, logging.INFO) + logger.info('#'*400) except Exception as e: with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(str(-1)+"\n") - insertLog(str(e), logging.ERROR) + logger.error(str(e)) raise e if __name__ == "__main__": diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index 96caaaf..1ac05a9 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -4,7 +4,6 @@ import h5py import json import math -import logging import numpy as np import pandas as pd import multiprocessing as mp @@ -13,6 +12,7 @@ from itertools import repeat from pathlib import Path from .preprocess import get_all_stores_for_combining_data +from .logging_config import logger def takeOnlyDirs(paths): removePaths = [] @@ -21,29 +21,6 @@ def takeOnlyDirs(paths): removePaths.append(p) return list(set(paths)-set(removePaths)) -def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - infoLog - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) - def writeToFile(value: str): with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(value) @@ -58,7 +35,7 @@ def read_hdf5(event, filepath, key): with h5py.File(op, 'r') as f: arr = np.asarray(f[key]) else: - insertLog(f"{event}.hdf5 file does not exist", logging.ERROR) + logger.error(f"{event}.hdf5 file does not exist") raise Exception('{}.hdf5 file does not exist'.format(event)) return arr @@ -109,7 +86,7 @@ def processChunks(arrValues, arrIndexes, highAmpFilt, 
transientsThresh): def createChunks(z_score, sampling_rate, window): print('Creating chunks for multiprocessing...') - insertLog('Creating chunks for multiprocessing.', logging.DEBUG) + logger.debug('Creating chunks for multiprocessing.') windowPoints = math.ceil(sampling_rate*window) remainderPoints = math.ceil((sampling_rate*window) - (z_score.shape[0]%windowPoints)) @@ -128,9 +105,9 @@ def createChunks(z_score, sampling_rate, window): z_score_chunks = padded_z_score.reshape(int(reshape), -1) z_score_chunks_index = z_score_index.reshape(int(reshape), -1) else: - insertLog('Reshaping values should be integer.', logging.ERROR) + logger.error('Reshaping values should be integer.') raise Exception('Reshaping values should be integer.') - insertLog('Chunks are created for multiprocessing.', logging.INFO) + logger.info('Chunks are created for multiprocessing.') print('Chunks are created for multiprocessing.') return z_score_chunks, z_score_chunks_index @@ -191,7 +168,7 @@ def visuzlize_peaks(filepath, z_score, timestamps, peaksIndex): def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_count()): print('Calculating frequency and amplitude of transients in z-score data....') - insertLog('Calculating frequency and amplitude of transients in z-score data.', logging.DEBUG) + logger.debug('Calculating frequency and amplitude of transients in z-score data.') selectForTransientsComputation = inputParameters['selectForTransientsComputation'] highAmpFilt = inputParameters['highAmpFilt'] transientsThresh = inputParameters['transientsThresh'] @@ -230,7 +207,7 @@ def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_cou create_csv(filepath, peaks_occurrences, 'transientsOccurrences_'+basename+'.csv', index=np.arange(peaks_occurrences.shape[0]),columns=['timestamps', 'amplitude']) visuzlize_peaks(path[i], z_score, ts, peaksInd) - insertLog('Frequency and amplitude of transients in z_score data are calculated.', logging.INFO) + logger.info('Frequency and amplitude of transients in z_score data are calculated.') print('Frequency and amplitude of transients in z_score data are calculated.') @@ -246,7 +223,7 @@ def makeAverageDir(filepath): def averageForGroup(folderNames, inputParameters): print('Combining results for frequency and amplitude of transients in z-score data...') - insertLog('Combining results for frequency and amplitude of transients in z-score data.', logging.DEBUG) + logger.debug('Combining results for frequency and amplitude of transients in z-score data.') path = [] abspath = inputParameters['abspath'] selectForTransientsComputation = inputParameters['selectForTransientsComputation'] @@ -301,7 +278,7 @@ def averageForGroup(folderNames, inputParameters): arr = np.asarray(arr) create_Df(op, arr, temp_path[j][1], index=fileName, columns=['freq (events/min)', 'amplitude']) create_csv(op, arr, 'freqAndAmp_'+temp_path[j][1]+'.csv', index=fileName, columns=['freq (events/min)', 'amplitude']) - insertLog('Results for frequency and amplitude of transients in z-score data are combined.', logging.INFO) + logger.info('Results for frequency and amplitude of transients in z-score data are combined.') print('Results for frequency and amplitude of transients in z-score data are combined.') def executeFindFreqAndAmp(inputParameters): @@ -319,8 +296,8 @@ def executeFindFreqAndAmp(inputParameters): if numProcesses==0: numProcesses = mp.cpu_count() elif numProcesses>mp.cpu_count(): - insertLog('Warning : # of cores parameter set is greater than the cores available 
\ - available in your machine', logging.WARNING) + logger.warning('Warning : # of cores parameter set is greater than the cores available \ + available in your machine') print('Warning : # of cores parameter set is greater than the cores available \ available in your machine') numProcesses = mp.cpu_count()-1 @@ -336,8 +313,7 @@ def executeFindFreqAndAmp(inputParameters): writeToFile(str(10+((inputParameters['step']+1)*10))+'\n') inputParameters['step'] += 1 else: - insertLog('Not a single folder name is provided in folderNamesForAvg in inputParamters File.', - logging.ERROR) + logger.error('Not a single folder name is provided in folderNamesForAvg in inputParamters File.') raise Exception('Not a single folder name is provided in folderNamesForAvg in inputParamters File.') @@ -358,8 +334,7 @@ def executeFindFreqAndAmp(inputParameters): plt.show() else: for i in range(len(folderNames)): - insertLog(f"Finding transients in z-score data of {folderNames[i]} and calculating frequency and amplitude.", - logging.DEBUG) + logger.debug(f"Finding transients in z-score data of {folderNames[i]} and calculating frequency and amplitude.") filepath = folderNames[i] storesListPath = takeOnlyDirs(glob.glob(os.path.join(filepath, '*_output_*'))) for j in range(len(storesListPath)): @@ -368,7 +343,7 @@ def executeFindFreqAndAmp(inputParameters): findFreqAndAmp(filepath, inputParameters, window=moving_window, numProcesses=numProcesses) writeToFile(str(10+((inputParameters['step']+1)*10))+'\n') inputParameters['step'] += 1 - insertLog('Transients in z-score data found and frequency and amplitude are calculated.', logging.INFO) + logger.info('Transients in z-score data found and frequency and amplitude are calculated.') plt.show() print('Transients in z-score data found and frequency and amplitude are calculated.') @@ -377,9 +352,9 @@ def executeFindFreqAndAmp(inputParameters): if __name__ == "__main__": try: executeFindFreqAndAmp(json.loads(sys.argv[1])) - insertLog('#'*400, logging.INFO) + logger.info('#'*400) except Exception as e: with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(str(-1)+"\n") - insertLog(str(e), logging.ERROR) + logger.error(str(e)) raise e diff --git a/src/guppy/logging_config.py b/src/guppy/logging_config.py new file mode 100644 index 0000000..d90e812 --- /dev/null +++ b/src/guppy/logging_config.py @@ -0,0 +1,84 @@ +"""Centralized logging configuration for GuPPy. + +This module provides a standardized logging setup that writes to platform-appropriate +log directories following OS conventions: +- Windows: %APPDATA%/LernerLab/guppy/Logs/ +- macOS: ~/Library/Logs/LernerLab/guppy/ +- Linux: ~/.local/state/LernerLab/guppy/log/ + +The logger can be imported and used across all GuPPy modules. +""" + +import logging +import os +from pathlib import Path +from platformdirs import user_log_dir + + +def get_log_file(): + """Get the platform-appropriate log file path. + + Returns + ------- + Path + Path to the guppy.log file in the platform-appropriate log directory. + """ + log_dir = Path(user_log_dir("guppy", "LernerLab")) + log_dir.mkdir(parents=True, exist_ok=True) + return log_dir / "guppy.log" + + +def setup_logging(*, level=None, console_output=True): + """Configure centralized logging for GuPPy. + + Parameters + ---------- + level : int, optional + Logging level (e.g., logging.DEBUG, logging.INFO). If None, uses + environment variable GUPPY_LOG_LEVEL or defaults to INFO. + console_output : bool, optional + Whether to also output logs to console. 
Default is True. + + Returns + ------- + logging.Logger + Configured logger instance for GuPPy. + """ + # Determine log level + if level is None: + env_level = os.environ.get('GUPPY_LOG_LEVEL', 'INFO').upper() + level = getattr(logging, env_level, logging.INFO) + + # Get log file path + log_file = get_log_file() + + # Configure root logger for guppy + logger = logging.getLogger("guppy") + logger.setLevel(level) + + # Prevent duplicate handlers if setup_logging is called multiple times + if logger.handlers: + return logger + + # Create formatter + formatter = logging.Formatter( + '%(asctime)s %(name)s %(levelname)s %(message)s', + datefmt='%Y-%m-%d %H:%M:%S' + ) + + # File handler + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + + # Console handler (optional) + if console_output: + console_handler = logging.StreamHandler() + console_handler.setFormatter(formatter) + logger.addHandler(console_handler) + + return logger + + +# Initialize logger at module import +logger = setup_logging() diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index e9cb5b8..6bba777 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -5,7 +5,6 @@ import time import re import fnmatch -import logging import numpy as np import pandas as pd import h5py @@ -17,6 +16,7 @@ from matplotlib.widgets import MultiCursor from pathlib import Path from .combineDataFn import processTimestampsForCombiningData +from .logging_config import logger # Only set matplotlib backend if not in CI environment if not os.getenv('CI'): @@ -29,29 +29,6 @@ def takeOnlyDirs(paths): removePaths.append(p) return list(set(paths)-set(removePaths)) -def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - infoLog - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) - def writeToFile(value: str): with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(value) @@ -97,7 +74,7 @@ def helper_create_control_channel(signal, timestamps, window): try: popt, pcov = curve_fit(curveFitFn, timestamps, filtered_signal, p0) except Exception as e: - insertLog(str(e), logging.ERROR) + logger.error(str(e)) print(e) #print('Curve Fit Parameters : ', popt) @@ -116,7 +93,7 @@ def create_control_channel(filepath, arr, window=5001): for i in range(storesList.shape[0]): event_name, event = storesList[i], storenames[i] if 'control' in event_name.lower() and 'cntrl' in event.lower(): - insertLog('Creating control channel from signal channel using curve-fitting', logging.DEBUG) + logger.debug('Creating control channel from signal channel using curve-fitting') print('Creating control channel from signal channel using curve-fitting') name = event_name.split('_')[-1] signal = read_hdf5('signal_'+name, filepath, 'data') @@ -134,7 +111,7 @@ def create_control_channel(filepath, arr, window=5001): } df = pd.DataFrame(d) df.to_csv(os.path.join(os.path.dirname(filepath), event.lower()+'.csv'), index=False) - insertLog('Control channel from signal channel created using curve-fitting', 
logging.INFO) + logger.info('Control channel from signal channel created using curve-fitting') print('Control channel from signal channel created using curve-fitting') @@ -153,13 +130,11 @@ def add_control_channel(filepath, arr): new_str = 'signal_'+str(name).lower() find_signal = [True for i in storesList if i==new_str] if len(find_signal)>1: - insertLog('Error in naming convention of files or Error in storesList file', logging.ERROR) + logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') if len(find_signal)==0: - insertLog("Isosbectic control channel parameter is set to False and still \ - storeslist file shows there is control channel present", - logging.ERROR - ) + logger.error("Isosbectic control channel parameter is set to False and still \ + storeslist file shows there is control channel present") raise Exception("Isosbectic control channel parameter is set to False and still \ storeslist file shows there is control channel present") else: @@ -200,7 +175,7 @@ def read_hdf5(event, filepath, key): with h5py.File(op, 'r') as f: arr = np.asarray(f[key]) else: - insertLog(f"{event}.hdf5 file does not exist", logging.ERROR) + logger.error(f"{event}.hdf5 file does not exist") raise Exception('{}.hdf5 file does not exist'.format(event)) return arr @@ -261,8 +236,7 @@ def check_cntrl_sig_length(filepath, channels_arr, storenames, storesList): def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): print("Correcting timestamps by getting rid of the first {} seconds and convert timestamps to seconds...".format(timeForLightsTurnOn)) - insertLog(f"Correcting timestamps by getting rid of the first {timeForLightsTurnOn} seconds and convert timestamps to seconds", - logging.DEBUG) + logger.debug(f"Correcting timestamps by getting rid of the first {timeForLightsTurnOn} seconds and convert timestamps to seconds") storenames = storesList[0,:] storesList = storesList[1,:] @@ -275,8 +249,7 @@ def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): try: arr = np.asarray(arr).reshape(2,-1) except: - insertLog('Error in saving stores list file or spelling mistake for control or signal', - logging.ERROR) + logger.error('Error in saving stores list file or spelling mistake for control or signal') raise Exception('Error in saving stores list file or spelling mistake for control or signal') indices = check_cntrl_sig_length(filepath, arr, storenames, storesList) @@ -288,7 +261,7 @@ def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): idx = np.where(storesList==indices[i])[0] if idx.shape[0]==0: - insertLog(f"{arr[0,i]} does not exist in the stores list file.", logging.ERROR) + logger.error(f"{arr[0,i]} does not exist in the stores list file.") raise Exception('{} does not exist in the stores list file.'.format(arr[0,i])) timestamp = read_hdf5(storenames[idx][0], filepath, 'timestamps') @@ -302,10 +275,10 @@ def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): write_hdf5(np.asarray(sampling_rate), 'timeCorrection_'+name_1, filepath, 'sampling_rate') else: - insertLog('Error in naming convention of files or Error in storesList file', logging.ERROR) + logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') - insertLog("Timestamps corrected and converted to seconds.", logging.INFO) + logger.info("Timestamps corrected and converted to 
seconds.") print("Timestamps corrected and converted to seconds.") @@ -314,8 +287,7 @@ def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList): print("Correcting timestamps by getting rid of the first {} seconds and convert timestamps to seconds...".format(timeForLightsTurnOn)) - insertLog(f"Correcting timestamps by getting rid of the first {timeForLightsTurnOn} seconds and convert timestamps to seconds", - logging.DEBUG) + logger.debug(f"Correcting timestamps by getting rid of the first {timeForLightsTurnOn} seconds and convert timestamps to seconds") storenames = storesList[0,:] storesList = storesList[1,:] @@ -329,7 +301,7 @@ def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList): try: arr = np.asarray(arr).reshape(2,-1) except: - insertLog('Error in saving stores list file or spelling mistake for control or signal', logging.ERROR) + logger.error('Error in saving stores list file or spelling mistake for control or signal') raise Exception('Error in saving stores list file or spelling mistake for control or signal') indices = check_cntrl_sig_length(filepath, arr, storenames, storesList) @@ -341,7 +313,7 @@ def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList): idx = np.where(storesList==indices[i])[0] if idx.shape[0]==0: - insertLog(f"{arr[0,i]} does not exist in the stores list file.", logging.ERROR) + logger.error(f"{arr[0,i]} does not exist in the stores list file.") raise Exception('{} does not exist in the stores list file.'.format(arr[0,i])) timestamp = read_hdf5(storenames[idx][0], filepath, 'timestamps') @@ -365,10 +337,10 @@ def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList): write_hdf5(correctionIndex, 'timeCorrection_'+name_1, filepath, 'correctionIndex') write_hdf5(np.asarray([sampling_rate]), 'timeCorrection_'+name_1, filepath, 'sampling_rate') else: - insertLog('Error in naming convention of files or Error in storesList file', logging.ERROR) + logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') - insertLog("Timestamps corrected and converted to seconds.", logging.INFO) + logger.info("Timestamps corrected and converted to seconds.") print("Timestamps corrected and converted to seconds.") #return timeRecStart, correctionIndex, timestampNew @@ -419,7 +391,7 @@ def applyCorrection(filepath, timeForLightsTurnOn, event, displayName, naming): def decide_naming_convention_and_applyCorrection(filepath, timeForLightsTurnOn, event, displayName, storesList): print("Applying correction of timestamps to the data and event timestamps...") - insertLog("Applying correction of timestamps to the data and event timestamps", logging.DEBUG) + logger.debug("Applying correction of timestamps to the data and event timestamps") storesList = storesList[1,:] arr = [] @@ -437,10 +409,10 @@ def decide_naming_convention_and_applyCorrection(filepath, timeForLightsTurnOn, if name_1==name_2: applyCorrection(filepath, timeForLightsTurnOn, event, displayName, name_1) else: - insertLog('Error in naming convention of files or Error in storesList file', logging.ERROR) + logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') - insertLog("Timestamps corrections applied to the data and event timestamps.", logging.INFO) + logger.info("Timestamps corrections 
applied to the data and event timestamps.") print("Timestamps corrections applied to the data and event timestamps.") @@ -530,7 +502,7 @@ def onclick(event): ix, iy = event.xdata, event.ydata print('x = %d, y = %d'%( ix, iy)) - insertLog(f"x = {ix}, y = {iy}", logging.INFO) + logger.info(f"x = {ix}, y = {iy}") y1_max, y1_min = np.amax(y1), np.amin(y1) y2_max, y2_min = np.amax(y2), np.amin(y2) @@ -555,7 +527,7 @@ def onclick(event): if len(coords)>0: print('x = %d, y = %d; deleted'%( coords[-1][0], coords[-1][1])) - insertLog(f"x = {coords[-1][0]}, y = {coords[-1][1]}; deleted", logging.INFO) + logger.info(f"x = {coords[-1][0]}, y = {coords[-1][1]}; deleted") del coords[-1] ax1.lines[-1].remove() ax2.lines[-1].remove() @@ -571,7 +543,7 @@ def plt_close_event(event): name_1 = plot_name[0].split('_')[-1] np.save(os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy'), coords) print('Coordinates file saved at {}'.format(os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy'))) - insertLog(f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}", logging.INFO) + logger.info(f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}") fig.canvas.mpl_disconnect(cid) coords = [] @@ -593,7 +565,7 @@ def visualizeControlAndSignal(filepath, removeArtifacts): path = sorted(path_1 + path_2, key=str.casefold) if len(path)%2 != 0: - insertLog('There are not equal number of Control and Signal data', logging.ERROR) + logger.error('There are not equal number of Control and Signal data') raise Exception('There are not equal number of Control and Signal data') path = np.asarray(path).reshape(2,-1) @@ -625,7 +597,7 @@ def decide_naming_convention(filepath): path = sorted(path_1 + path_2, key=str.casefold) if len(path)%2 != 0: - insertLog('There are not equal number of Control and Signal data', logging.ERROR) + logger.error('There are not equal number of Control and Signal data') raise Exception('There are not equal number of Control and Signal data') path = np.asarray(path).reshape(2,-1) @@ -644,7 +616,7 @@ def fetchCoords(filepath, naming, data): coords = np.load(os.path.join(filepath, 'coordsForPreProcessing_'+naming+'.npy'))[:,0] if coords.shape[0] % 2 != 0: - insertLog('Number of values in coordsForPreProcessing file is not even.', logging.ERROR) + logger.error('Number of values in coordsForPreProcessing file is not even.') raise Exception('Number of values in coordsForPreProcessing file is not even.') coords = coords.reshape(-1,2) @@ -752,7 +724,7 @@ def removeTTLs(filepath, event, naming): def addingNaNtoChunksWithArtifacts(filepath, events): print("Replacing chunks with artifacts by NaN values.") - insertLog("Replacing chunks with artifacts by NaN values.", logging.DEBUG) + logger.debug("Replacing chunks with artifacts by NaN values.") storesList = events[1,:] path = decide_naming_convention(filepath) @@ -776,15 +748,15 @@ def addingNaNtoChunksWithArtifacts(filepath, events): write_hdf5(ts, storesList[i]+'_'+name, filepath, 'ts') else: - insertLog('Error in naming convention of files or Error in storesList file', logging.ERROR) + logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') - insertLog("Chunks with artifacts are replaced by NaN values.", logging.INFO) + logger.info("Chunks with artifacts are replaced by NaN values.") # main function to align timestamps for control, signal and event timestamps for 
artifacts removal def processTimestampsForArtifacts(filepath, timeForLightsTurnOn, events): print("Processing timestamps to get rid of artifacts using concatenate method...") - insertLog("Processing timestamps to get rid of artifacts using concatenate method", logging.DEBUG) + logger.debug("Processing timestamps to get rid of artifacts using concatenate method") storesList = events[1,:] path = decide_naming_convention(filepath) @@ -813,9 +785,9 @@ def processTimestampsForArtifacts(filepath, timeForLightsTurnOn, events): #timestamp_dict[name] = timestampNew write_hdf5(timestampNew, 'timeCorrection_'+name, filepath, 'timestampNew') else: - insertLog('Error in naming convention of files or Error in storesList file', logging.ERROR) + logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') - insertLog("Timestamps processed, artifacts are removed and good chunks are concatenated.", logging.INFO) + logger.info("Timestamps processed, artifacts are removed and good chunks are concatenated.") print("Timestamps processed, artifacts are removed and good chunks are concatenated.") @@ -876,8 +848,8 @@ def z_score_computation(dff, timestamps, inputParameters): elif zscore_method=='baseline z-score': idx = np.where((timestamps>baseline_start) & (timestamps1: - insertLog('Two tsq files are present at the location.', - logging.ERROR) + logger.error('Two tsq files are present at the location.') raise Exception('Two tsq files are present at the location.') elif len(path)==0: - insertLog("\033[1m"+"tsq file not found."+"\033[1m", logging.INFO) + logger.info("\033[1m"+"tsq file not found."+"\033[1m") print("\033[1m"+"tsq file not found."+"\033[1m") return 0, 0 else: @@ -78,14 +54,14 @@ def readtsq(filepath): # creating dataframe of the data df = pd.DataFrame(tsq) - insertLog("Data from tsq file fetched.", logging.INFO) + logger.info("Data from tsq file fetched.") print("Data from tsq file fetched.") return df, flag # function to check if doric file exists def check_doric(filepath): print("Checking if doric file exists.") - insertLog('Checking if doric file exists', logging.DEBUG) + logger.debug('Checking if doric file exists') path = glob.glob(os.path.join(filepath, '*.csv')) + \ glob.glob(os.path.join(filepath, '*.doric')) @@ -108,13 +84,13 @@ def check_doric(filepath): pass if len(flag_arr)>1: - insertLog('Two doric files are present at the same location', logging.ERROR) + logger.error('Two doric files are present at the same location') raise Exception('Two doric files are present at the same location') if len(flag_arr)==0: - insertLog("\033[1m"+"Doric file not found."+"\033[1m", logging.ERROR) + logger.error("\033[1m"+"Doric file not found."+"\033[1m") print("\033[1m"+"Doric file not found."+"\033[1m") return 0 - insertLog('Doric file found.', logging.INFO) + logger.info('Doric file found.') print('Doric file found.') return flag_arr[0] @@ -163,11 +139,9 @@ def write_hdf5(data, event, filepath, key): # function to read event timestamps csv file. 
def import_csv(filepath, event, outputPath): print("\033[1m"+"Trying to read data for {} from csv file.".format(event)+"\033[0m") - insertLog("\033[1m"+"Trying to read data for {} from csv file.".format(event)+"\033[0m", - logging.DEBUG) + logger.debug("\033[1m"+"Trying to read data for {} from csv file.".format(event)+"\033[0m") if not os.path.exists(os.path.join(filepath, event+'.csv')): - insertLog("\033[1m"+"No csv file found for event {}".format(event)+"\033[0m", - logging.ERROR) + logger.error("\033[1m"+"No csv file found for event {}".format(event)+"\033[0m") raise Exception("\033[1m"+"No csv file found for event {}".format(event)+"\033[0m") df = pd.read_csv(os.path.join(filepath, event+'.csv'), index_col=False) @@ -178,28 +152,24 @@ def import_csv(filepath, event, outputPath): arr1 = np.array(['timestamps', 'data', 'sampling_rate']) arr2 = np.char.lower(np.array(key)) if (np.sort(arr1)==np.sort(arr2)).all()==False: - insertLog("\033[1m"+"Column names should be timestamps, data and sampling_rate"+"\033[0m", - logging.ERROR) + logger.error("\033[1m"+"Column names should be timestamps, data and sampling_rate"+"\033[0m") raise Exception("\033[1m"+"Column names should be timestamps, data and sampling_rate"+"\033[0m") if len(key)==1: if key[0].lower()!='timestamps': - insertLog("\033[1m"+"Column names should be timestamps, data and sampling_rate"+"\033[0m", - logging.ERROR) + logger.error("\033[1m"+"Column names should be timestamps, data and sampling_rate"+"\033[0m") raise Exception("\033[1m"+"Column name should be timestamps"+"\033[0m") if len(key)!=3 and len(key)!=1: - insertLog("\033[1m"+"Number of columns in csv file should be either three or one. Three columns if \ - the file is for control or signal data or one column if the file is for event TTLs."+"\033[0m", - logging.ERROR) + logger.error("\033[1m"+"Number of columns in csv file should be either three or one. Three columns if \ + the file is for control or signal data or one column if the file is for event TTLs."+"\033[0m") raise Exception("\033[1m"+"Number of columns in csv file should be either three or one. 
Three columns if \ the file is for control or signal data or one column if the file is for event TTLs."+"\033[0m") for i in range(len(key)): write_hdf5(data[key[i]].dropna(), event, outputPath, key[i].lower()) - insertLog("\033[1m"+"Reading data for {} from csv file is completed.".format(event)+"\033[0m", - logging.INFO) + logger.info("\033[1m"+"Reading data for {} from csv file is completed.".format(event)+"\033[0m") print("\033[1m"+"Reading data for {} from csv file is completed.".format(event)+"\033[0m") return data, key @@ -232,10 +202,8 @@ def check_data(S, filepath, event, outputPath): if S['sampling_rate']==0 and np.all(diff==diff[0])==False: print("\033[1m"+"Data in event {} belongs to multiple behavior".format(event)+"\033[0m") print("\033[1m"+"Create timestamp files for individual new event and change the stores list file."+"\033[0m") - insertLog("\033[1m"+"Data in event {} belongs to multiple behavior".format(event)+"\033[0m", - logging.INFO) - insertLog("\033[1m"+"Create timestamp files for individual new event and change the stores list file."+"\033[0m", - logging.DEBUG) + logger.info("\033[1m"+"Data in event {} belongs to multiple behavior".format(event)+"\033[0m") + logger.debug("\033[1m"+"Create timestamp files for individual new event and change the stores list file."+"\033[0m") i_d = np.unique(S['data']) for i in range(i_d.shape[0]): new_S = dict() @@ -257,9 +225,8 @@ def check_data(S, filepath, event, outputPath): pass else: np.savetxt(os.path.join(outputPath, 'storesList.csv'), storesList, delimiter=",", fmt='%s') - insertLog("\033[1m"+"Timestamp files for individual new event are created \ - and the stores list file is changed."+"\033[0m", - logging.INFO) + logger.info("\033[1m"+"Timestamp files for individual new event are created \ + and the stores list file is changed."+"\033[0m") @@ -267,7 +234,7 @@ def check_data(S, filepath, event, outputPath): def readtev(data, filepath, event, outputPath): print("Reading data for event {} ...".format(event)) - insertLog("Reading data for event {} ...".format(event), logging.DEBUG) + logger.debug("Reading data for event {} ...".format(event)) tevfilepath = glob.glob(os.path.join(filepath, '*.tev')) if len(tevfilepath)>1: raise Exception('Two tev files are present at the location.') @@ -349,7 +316,7 @@ def readtev(data, filepath, event, outputPath): check_data(S, filepath, event, outputPath) print("Data for event {} fetched and stored.".format(event)) - insertLog("Data for event {} fetched and stored.".format(event), logging.INFO) + logger.info("Data for event {} fetched and stored.".format(event)) # function to execute readtev function using multiprocessing to make it faster @@ -427,7 +394,7 @@ def access_data_doricV6(doric_file, storesList, outputPath): regex = re.compile('(.*?)'+str(storesList[0,i])+'(.*?)') idx = [i for i in range(len(decide_path)) if regex.match(decide_path[i])] if len(idx)>1: - insertLog('More than one string matched (which should not be the case)', logging.ERROR) + logger.error('More than one string matched (which should not be the case)') raise Exception('More than one string matched (which should not be the case)') idx = idx[0] data = np.array(doric_file[decide_path[idx]]) @@ -440,8 +407,7 @@ def access_data_doricV6(doric_file, storesList, outputPath): regex = re.compile('(.*?)'+storesList[0,i]+'$') idx = [i for i in range(len(decide_path)) if regex.match(decide_path[i])] if len(idx)>1: - insertLog('More than one string matched (which should not be the case)', - logging.ERROR) + logger.error('More than 
one string matched (which should not be the case)') raise Exception('More than one string matched (which should not be the case)') idx = idx[0] ttl = np.array(doric_file[decide_path[idx]]) @@ -455,8 +421,7 @@ def execute_import_doric(filepath, storesList, flag, outputPath): if flag=='doric_csv': path = glob.glob(os.path.join(filepath, '*.csv')) if len(path)>1: - insertLog('An error occurred : More than one Doric csv file present at the location', - logging.ERROR) + logger.error('An error occurred : More than one Doric csv file present at the location') raise Exception('More than one Doric csv file present at the location') else: df = pd.read_csv(path[0], header=1, index_col=False) @@ -478,8 +443,7 @@ def execute_import_doric(filepath, storesList, flag, outputPath): else: path = glob.glob(os.path.join(filepath, '*.doric')) if len(path)>1: - insertLog('An error occurred : More than one Doric file present at the location', - logging.ERROR) + logger.error('An error occurred : More than one Doric file present at the location') raise Exception('More than one Doric file present at the location') else: with h5py.File(path[0], 'r') as f: @@ -494,7 +458,7 @@ def readRawData(inputParameters): print('### Reading raw data... ###') - insertLog('### Reading raw data... ###', logging.DEBUG) + logger.debug('### Reading raw data... ###') # get input parameters inputParameters = inputParameters folderNames = inputParameters['folderNames'] @@ -505,8 +469,8 @@ def readRawData(inputParameters): elif numProcesses>mp.cpu_count(): print('Warning : # of cores parameter set is greater than the cores available \ available in your machine') - insertLog('Warning : # of cores parameter set is greater than the cores available \ - available in your machine', logging.WARNING) + logger.warning('Warning : # of cores parameter set is greater than the cores available \ + available in your machine') numProcesses = mp.cpu_count()-1 for i in range(len(folderNames)): filepath = folderNames[i] @@ -517,7 +481,7 @@ def readRawData(inputParameters): for i in range(len(folderNames)): filepath = folderNames[i] print(filepath) - insertLog(f"### Reading raw data for folder {folderNames[i]}", logging.DEBUG) + logger.debug(f"### Reading raw data for folder {folderNames[i]}") storesListPath = takeOnlyDirs(glob.glob(os.path.join(filepath, '*_output_*'))) # reading tsq file data, flag = readtsq(filepath) @@ -546,20 +510,20 @@ def readRawData(inputParameters): writeToFile(str(10+((step+1)*10))+'\n') step += 1 - insertLog(f"### Raw data for folder {folderNames[i]} fetched", logging.INFO) + logger.info(f"### Raw data for folder {folderNames[i]} fetched") print("### Raw data fetched and saved.") - insertLog('Raw data fetched and saved.', logging.INFO) - insertLog("#" * 400, logging.INFO) + logger.info('Raw data fetched and saved.') + logger.info("#" * 400) def main(input_parameters): print('run') try: readRawData(input_parameters) - insertLog('#'*400, logging.INFO) + logger.info('#'*400) except Exception as e: with open(os.path.join(os.path.expanduser('~'), 'pbSteps.txt'), 'a') as file: file.write(str(-1)+"\n") - insertLog(f"An error occurred: {e}", logging.ERROR) + logger.error(f"An error occurred: {e}") raise e if __name__ == "__main__": diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 1faeb6c..5be6974 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -17,9 +17,9 @@ import holoviews as hv import warnings import socket -import logging import tkinter as tk from tkinter import ttk, StringVar, 
messagebox +from .logging_config import logger #hv.extension() pn.extension() @@ -44,29 +44,6 @@ def takeOnlyDirs(paths): removePaths.append(p) return list(set(paths)-set(removePaths)) -def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - infoLog - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) - # function to show location for over-writing or creating a new stores list file. def show_dir(filepath): i = 1 @@ -113,8 +90,7 @@ def readtsq(filepath): 'offsets': offsets}, align=True) path = glob.glob(os.path.join(filepath, '*.tsq')) if len(path)>1: - insertLog('Two tsq files are present at the location.', - logging.ERROR) + logger.error('Two tsq files are present at the location.') raise Exception('Two tsq files are present at the location.') elif len(path)==0: return 0 @@ -128,8 +104,7 @@ def readtsq(filepath): # function to show GUI and save def saveStorenames(inputParameters, data, event_name, flag, filepath): - insertLog('Saving stores list file.', - logging.DEBUG) + logger.debug('Saving stores list file.') # getting input parameters inputParameters = inputParameters @@ -139,8 +114,8 @@ def saveStorenames(inputParameters, data, event_name, flag, filepath): op = make_dir(filepath) arr = np.asarray([list(storenames_map.keys()), list(storenames_map.values())], dtype=str) np.savetxt(os.path.join(op, 'storesList.csv'), arr, delimiter=",", fmt='%s') - insertLog(f"Storeslist file saved at {op}", logging.INFO) - insertLog('Storeslist : \n'+str(arr), logging.INFO) + logger.info(f"Storeslist file saved at {op}") + logger.info('Storeslist : \n'+str(arr)) return # reading storenames from the data fetched using 'readtsq' function @@ -496,16 +471,14 @@ def save_button(event=None): if np.where(arr2=="")[0].size>0: alert.object = '#### Alert !! \n Empty string in the list names_for_storenames.' - insertLog('Empty string in the list names_for_storenames.', - logging.ERROR) + logger.error('Empty string in the list names_for_storenames.') raise Exception('Empty string in the list names_for_storenames.') else: alert.object = '#### No alerts !!' if arr1.shape[0]!=arr2.shape[0]: alert.object = '#### Alert !! \n Length of list storenames and names_for_storenames is not equal.' - insertLog('Length of list storenames and names_for_storenames is not equal.', - logging.ERROR) + logger.error('Length of list storenames and names_for_storenames is not equal.') raise Exception('Length of list storenames and names_for_storenames is not equal.') else: alert.object = '#### No alerts !!' 
@@ -544,9 +517,8 @@ def save_button(event=None): np.savetxt(os.path.join(select_location.value, 'storesList.csv'), arr, delimiter=",", fmt='%s') path.value = os.path.join(select_location.value, 'storesList.csv') - insertLog(f"Storeslist file saved at {select_location.value}", - logging.INFO) - insertLog('Storeslist : \n'+str(arr), logging.INFO) + logger.info(f"Storeslist file saved at {select_location.value}") + logger.info('Storeslist : \n'+str(arr)) # Connect button callbacks @@ -584,9 +556,8 @@ def check_channels(state): state = state.astype(int) unique_state = np.unique(state[2:12]) if unique_state.shape[0]>3: - insertLog("Looks like there are more than 3 channels in the file. Reading of these files\ - are not supported. Reach out to us if you get this error message.", - logging.ERROR) + logger.error("Looks like there are more than 3 channels in the file. Reading of these files\ + are not supported. Reach out to us if you get this error message.") raise Exception("Looks like there are more than 3 channels in the file. Reading of these files\ are not supported. Reach out to us if you get this error message.") @@ -650,9 +621,8 @@ def decide_ts_unit_for_npm(df, timestamp_column_name=None, time_unit=None, headl else: messagebox.showerror('All options not selected', 'All the options for timestamps \ were not selected. Please select appropriate options') - insertLog('All the options for timestamps \ - were not selected. Please select appropriate options', - logging.ERROR) + logger.error('All the options for timestamps \ + were not selected. Please select appropriate options') raise Exception('All the options for timestamps \ were not selected. Please select appropriate options') if holdComboboxValues['time_unit'].get(): @@ -665,9 +635,8 @@ def decide_ts_unit_for_npm(df, timestamp_column_name=None, time_unit=None, headl else: messagebox.showerror('All options not selected', 'All the options for timestamps \ were not selected. Please select appropriate options') - insertLog('All the options for timestamps \ - were not selected. Please select appropriate options', - logging.ERROR) + logger.error('All the options for timestamps \ + were not selected. Please select appropriate options') raise Exception('All the options for timestamps \ were not selected. 
Please select appropriate options') else: @@ -680,10 +649,9 @@ def decide_ts_unit_for_npm(df, timestamp_column_name=None, time_unit=None, headl def decide_indices(file, df, flag, num_ch=2): ch_name = [file+'chev', file+'chod', file+'chpr'] if len(ch_name)=2: flag = 'data_np' else: - insertLog('Number of columns in csv file does not make sense.', - logging.ERROR) + logger.error('Number of columns in csv file does not make sense.') raise Exception('Number of columns in csv file does not make sense.') @@ -1018,13 +981,11 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No df_chpr.at[0,'sampling_rate'] = df_chev['sampling_rate'][0] df_chpr.to_csv(path_chpr[j], index=False) else: - insertLog('Number of channels should be same for all regions.', - logging.ERROR) + logger.error('Number of channels should be same for all regions.') raise Exception('Number of channels should be same for all regions.') else: pass - insertLog('Importing of either NPM or Doric or csv file is done.', - logging.INFO) + logger.info('Importing of either NPM or Doric or csv file is done.') return event_from_filename, flag_arr @@ -1045,7 +1006,7 @@ def execute(inputParameters): data = readtsq(filepath) event_name, flag = import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=inputParameters) saveStorenames(inputParameters, data, event_name, flag, filepath) - insertLog('#'*400, logging.INFO) + logger.info('#'*400) except Exception as e: - insertLog(str(e), logging.ERROR) + logger.error(str(e)) raise e diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 4adeebb..d70a971 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -6,7 +6,6 @@ import panel as pn import numpy as np import pandas as pd -import logging import tkinter as tk from tkinter import ttk from tkinter import filedialog @@ -14,16 +13,11 @@ from pathlib import Path from .visualizePlot import visualizeResults from .saveStoresList import execute +from .logging_config import logger def savingInputParameters(): pn.extension() - log_file = os.path.join(Path.home(), 'guppy.log') - if os.path.exists(log_file): - os.remove(log_file) - else: - pass - # Determine base folder path (headless-friendly via env var) base_dir_env = os.environ.get('GUPPY_BASE_DIR') is_headless = base_dir_env and os.path.isdir(base_dir_env) @@ -52,28 +46,6 @@ def select_folder(): current_dir = os.getcwd() - def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) - def make_dir(filepath): op = os.path.join(filepath, 'inputParameters') if not os.path.exists(op): @@ -376,8 +348,7 @@ def checkSameLocation(arr, abspath): abspath = np.asarray(abspath) abspath = np.unique(abspath) if len(abspath)>1: - insertLog('All the folders selected should be at the same location', - logging.ERROR) + logger.error('All the folders selected should be at the same location') raise Exception('All the folders selected should be at the same location') return abspath @@ -385,8 
+356,7 @@ def checkSameLocation(arr, abspath): def getAbsPath(): arr_1, arr_2 = files_1.value, files_2.value if len(arr_1)==0 and len(arr_2)==0: - insertLog('No folder is selected for analysis', - logging.ERROR) + logger.error('No folder is selected for analysis') raise Exception('No folder is selected for analysis') abspath = [] @@ -397,15 +367,13 @@ def getAbsPath(): abspath = np.unique(abspath) if len(abspath)>1: - insertLog('All the folders selected should be at the same location', - logging.ERROR) + logger.error('All the folders selected should be at the same location') raise Exception('All the folders selected should be at the same location') return abspath def onclickProcess(event=None): - insertLog('Saving Input Parameters file.', - logging.DEBUG) + logger.debug('Saving Input Parameters file.') abspath = getAbsPath() analysisParameters = { "combine_data": combine_data.value, @@ -435,10 +403,9 @@ def onclickProcess(event=None): for folder in files_1.value: with open(os.path.join(folder, 'GuPPyParamtersUsed.json'), 'w') as f: json.dump(analysisParameters, f, indent=4) - insertLog(f"Input Parameters file saved at {folder}", - logging.INFO) + logger.info(f"Input Parameters file saved at {folder}") - insertLog('#'*400, logging.INFO) + logger.info('#'*400) #path.value = (os.path.join(op, 'inputParameters.json')).replace('\\', '/') print('Input Parameters File Saved.') diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 992bdd2..2ee9a3b 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -3,7 +3,6 @@ import param import re import math -import logging import numpy as np import pandas as pd import socket @@ -17,6 +16,7 @@ import matplotlib.pyplot as plt from pathlib import Path from .preprocess import get_all_stores_for_combining_data +from .logging_config import logger import panel as pn pn.extension() @@ -40,29 +40,6 @@ def takeOnlyDirs(paths): removePaths.append(p) return list(set(paths)-set(removePaths)) -def insertLog(text, level): - file = os.path.join(Path.home(), 'guppy.log') - format = logging.Formatter('%(asctime)s %(levelname)s %(message)s') - infoLog = logging.FileHandler(file) - infoLog.setFormatter(format) - infoLog - logger = logging.getLogger(file) - logger.setLevel(level) - - if not logger.handlers: - logger.addHandler(infoLog) - if level == logging.DEBUG: - logger.debug(text) - if level == logging.INFO: - logger.info(text) - if level == logging.ERROR: - logger.exception(text) - if level == logging.WARNING: - logger.warning(text) - - infoLog.close() - logger.removeHandler(infoLog) - # read h5 file as a dataframe def read_Df(filepath, event, name): event = event.replace("\\","_") @@ -102,8 +79,7 @@ def helper_plots(filepath, event, name, inputParameters): # note when there are no behavior event TTLs if len(event)==0: - insertLog("\033[1m"+"There are no behavior event TTLs present to visualize.".format(event)+"\033[0m", - logging.WARNING) + logger.warning("\033[1m"+"There are no behavior event TTLs present to visualize.".format(event)+"\033[0m") print("\033[1m"+"There are no behavior event TTLs present to visualize.".format(event)+"\033[0m") return 0 From bced65e39eaf7ae5bbaed01f4ba6169d646f2427 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 16 Oct 2025 11:48:33 -0700 Subject: [PATCH 75/83] Updated logging to follow best practice of module-level loggers --- src/guppy/computeCorr.py | 4 +++- src/guppy/computePsth.py | 4 +++- src/guppy/findTransientsFreqAndAmp.py | 4 +++- src/guppy/logging_config.py | 18 +++++------------- 
src/guppy/main.py | 7 ++++++- src/guppy/preprocess.py | 4 +++- src/guppy/readTevTsq.py | 4 +++- src/guppy/saveStoresList.py | 4 +++- src/guppy/savingInputParameters.py | 4 +++- src/guppy/visualizePlot.py | 5 ++++- 10 files changed, 36 insertions(+), 22 deletions(-) diff --git a/src/guppy/computeCorr.py b/src/guppy/computeCorr.py index 3ea82c1..98b8129 100644 --- a/src/guppy/computeCorr.py +++ b/src/guppy/computeCorr.py @@ -7,7 +7,9 @@ import pandas as pd from scipy import signal from pathlib import Path -from .logging_config import logger +import logging + +logger = logging.getLogger(__name__) def make_dir(filepath): op = os.path.join(filepath, "cross_correlation_output") diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index 298c3f0..ca3d860 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -19,7 +19,9 @@ from .computeCorr import computeCrossCorrelation from .computeCorr import getCorrCombinations from .computeCorr import make_dir -from .logging_config import logger +import logging + +logger = logging.getLogger(__name__) def takeOnlyDirs(paths): removePaths = [] diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index 1ac05a9..bd083f1 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -12,7 +12,9 @@ from itertools import repeat from pathlib import Path from .preprocess import get_all_stores_for_combining_data -from .logging_config import logger +import logging + +logger = logging.getLogger(__name__) def takeOnlyDirs(paths): removePaths = [] diff --git a/src/guppy/logging_config.py b/src/guppy/logging_config.py index d90e812..2de2e14 100644 --- a/src/guppy/logging_config.py +++ b/src/guppy/logging_config.py @@ -6,7 +6,8 @@ - macOS: ~/Library/Logs/LernerLab/guppy/ - Linux: ~/.local/state/LernerLab/guppy/log/ -The logger can be imported and used across all GuPPy modules. +Call setup_logging() once at application startup before importing other modules. +Each module should then create its own logger using: logger = logging.getLogger(__name__) """ import logging @@ -31,6 +32,8 @@ def get_log_file(): def setup_logging(*, level=None, console_output=True): """Configure centralized logging for GuPPy. + This should be called once at application startup, before importing other modules. + Parameters ---------- level : int, optional @@ -38,11 +41,6 @@ def setup_logging(*, level=None, console_output=True): environment variable GUPPY_LOG_LEVEL or defaults to INFO. console_output : bool, optional Whether to also output logs to console. Default is True. - - Returns - ------- - logging.Logger - Configured logger instance for GuPPy. """ # Determine log level if level is None: @@ -58,7 +56,7 @@ def setup_logging(*, level=None, console_output=True): # Prevent duplicate handlers if setup_logging is called multiple times if logger.handlers: - return logger + return # Create formatter formatter = logging.Formatter( @@ -76,9 +74,3 @@ def setup_logging(*, level=None, console_output=True): console_handler = logging.StreamHandler() console_handler.setFormatter(formatter) logger.addHandler(console_handler) - - return logger - - -# Initialize logger at module import -logger = setup_logging() diff --git a/src/guppy/main.py b/src/guppy/main.py index 038114c..070b324 100644 --- a/src/guppy/main.py +++ b/src/guppy/main.py @@ -1,6 +1,11 @@ """ Main entry point for GuPPy (Guided Photometry Analysis in Python) """ +from . 
import logging_config + +# Logging must be configured before importing application modules so that module-level loggers inherit the proper handlers and formatters +logging_config.setup_logging() + import panel as pn from .savingInputParameters import savingInputParameters @@ -10,4 +15,4 @@ def main(): pn.serve(template, show=True) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 6bba777..6450fce 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -16,7 +16,9 @@ from matplotlib.widgets import MultiCursor from pathlib import Path from .combineDataFn import processTimestampsForCombiningData -from .logging_config import logger +import logging + +logger = logging.getLogger(__name__) # Only set matplotlib backend if not in CI environment if not os.getenv('CI'): diff --git a/src/guppy/readTevTsq.py b/src/guppy/readTevTsq.py index 7f4c490..a98b0be 100755 --- a/src/guppy/readTevTsq.py +++ b/src/guppy/readTevTsq.py @@ -12,7 +12,9 @@ from numpy import int32, uint32, uint8, uint16, float64, int64, int32, float32 import multiprocessing as mp from pathlib import Path -from .logging_config import logger +import logging + +logger = logging.getLogger(__name__) def takeOnlyDirs(paths): removePaths = [] diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 5be6974..29b4e42 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -19,11 +19,13 @@ import socket import tkinter as tk from tkinter import ttk, StringVar, messagebox -from .logging_config import logger +import logging #hv.extension() pn.extension() +logger = logging.getLogger(__name__) + def scanPortsAndFind(start_port=5000, end_port=5200, host='127.0.0.1'): while True: port = randint(start_port, end_port) diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index d70a971..0f99473 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -13,7 +13,9 @@ from pathlib import Path from .visualizePlot import visualizeResults from .saveStoresList import execute -from .logging_config import logger +import logging + +logger = logging.getLogger(__name__) def savingInputParameters(): pn.extension() diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 2ee9a3b..37a97ff 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -16,10 +16,13 @@ import matplotlib.pyplot as plt from pathlib import Path from .preprocess import get_all_stores_for_combining_data -from .logging_config import logger +import logging +import panel as pn import panel as pn pn.extension() +logger = logging.getLogger(__name__) + def scanPortsAndFind(start_port=5000, end_port=5200, host='127.0.0.1'): while True: port = randint(start_port, end_port) From e2edcb40febeb48a6f08fdf16b2d6c3da6306cc2 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 16 Oct 2025 12:14:54 -0700 Subject: [PATCH 76/83] Updated run-tests --- .github/workflows/run-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index e00d8ee..ae8fe85 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -57,4 +57,4 @@ jobs: rclone-config: ${{ secrets.RCLONE_CONFIG }} - name: Run tests - run: pytest tests -vv -rsx # -n auto --dist loadscope # TODO: re-enable parallel execution when logging issues with Windows are resolved + run: pytest tests -vv -rsx -n 
auto --dist loadscope From 90a3d8afe1946387344549b46c2ed2ad2ce22125 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 16 Oct 2025 12:59:53 -0700 Subject: [PATCH 77/83] replaced prints with logging --- src/guppy/combineDataFn.py | 48 +++++---------------------- src/guppy/computeCorr.py | 5 +-- src/guppy/computePsth.py | 32 +++++------------- src/guppy/findTransientsFreqAndAmp.py | 20 ++++------- src/guppy/preprocess.py | 44 +++++------------------- src/guppy/readTevTsq.py | 39 ++++++---------------- src/guppy/saveStoresList.py | 12 +++---- src/guppy/savingInputParameters.py | 14 ++++---- src/guppy/visualizePlot.py | 16 ++++----- 9 files changed, 62 insertions(+), 168 deletions(-) diff --git a/src/guppy/combineDataFn.py b/src/guppy/combineDataFn.py index 961efa8..a420a79 100755 --- a/src/guppy/combineDataFn.py +++ b/src/guppy/combineDataFn.py @@ -5,6 +5,9 @@ import h5py import re import fnmatch +import logging + +logger = logging.getLogger(__name__) def find_files(path, glob_path, ignore_case = False): rule = re.compile(fnmatch.translate(glob_path), re.IGNORECASE) if ignore_case \ @@ -109,7 +112,7 @@ def eliminateTs(filepath, timeForLightsTurnOn, event, sampling_rate, naming): else: ts = np.array([]) - #print("total time : ", tsNew[-1]) + #logger.info("total time : ", tsNew[-1]) if len(tsNew_arr)==0: sub = tsNew[0]-timeForLightsTurnOn tsNew_arr = np.concatenate((tsNew_arr, tsNew-sub)) @@ -122,13 +125,13 @@ def eliminateTs(filepath, timeForLightsTurnOn, event, sampling_rate, naming): tsNew_arr = np.concatenate((tsNew_arr, new_tsNew+(1/sampling_rate))) ts_arr = np.concatenate((ts_arr, new_ts+(1/sampling_rate))) - #print(event) - #print(ts_arr) + #logger.info(event) + #logger.info(ts_arr) return ts_arr def processTimestampsForCombiningData(filepath, timeForLightsTurnOn, events, sampling_rate): - print("Processing timestamps for combining data...") + logger.debug("Processing timestamps for combining data...") storesList = events[1,:] @@ -163,39 +166,4 @@ def processTimestampsForCombiningData(filepath, timeForLightsTurnOn, events, sam np.savetxt(os.path.join(filepath[k][0], 'combine_storesList.csv'), events, delimiter=",", fmt='%s') - print("Timestamps processed and data is combined.") - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file + logger.info("Timestamps processed and data is combined.") diff --git a/src/guppy/computeCorr.py b/src/guppy/computeCorr.py index 98b8129..306b06b 100644 --- a/src/guppy/computeCorr.py +++ b/src/guppy/computeCorr.py @@ -109,7 +109,6 @@ def getCorrCombinations(filepath, inputParameters): corr_info = list() if len(names)<=1: logger.info("Cross-correlation cannot be computed because only one signal is present.") - print("Cross-correlation cannot be computed because only one signal is present.") return corr_info, type elif len(names)==2: corr_info = names @@ -152,8 +151,7 @@ def computeCrossCorrelation(filepath, event, inputParameters): return else: for i in range(1, len(corr_info)): - print("Computing cross-correlation for event {}...".format(event)) - logger.debug(f"Computing cross-correlation for event {event}") + logger.debug(f"Computing cross-correlation for event {event}...") for j in range(len(type)): psth_a = read_Df(filepath, event+'_'+corr_info[i-1], type[j]+'_'+corr_info[i-1]) psth_b = read_Df(filepath, event+'_'+corr_info[i], type[j]+'_'+corr_info[i]) @@ -170,4 +168,3 @@ def computeCrossCorrelation(filepath, event, inputParameters): cols.append('timestamps') create_Df(make_dir(filepath), 'corr_'+event, 
type[j]+'_'+corr_info[i-1]+'_'+corr_info[i], cross_corr, cols) logger.info(f"Cross-correlation for event {event} computed.") - print("Cross-correlation for event {} computed.".format(event)) diff --git a/src/guppy/computePsth.py b/src/guppy/computePsth.py index ca3d860..b08764c 100755 --- a/src/guppy/computePsth.py +++ b/src/guppy/computePsth.py @@ -193,7 +193,7 @@ def baselineCorrection(filepath, arr, timeAxis, baselineStart, baselineEnd): baselineStrtPt = np.where(timeAxis>=baselineStart)[0] baselineEndPt = np.where(timeAxis>=baselineEnd)[0] - #print(baselineStrtPt[0], baselineEndPt[0]) + #logger.info(baselineStrtPt[0], baselineEndPt[0]) if baselineStart==0 and baselineEnd==0: return arr @@ -226,7 +226,7 @@ def helper_psth(z_score, event, filepath, # avoid writing same data to same file in multi-processing #if not os.path.exists(os.path.join(filepath, 'ts_psth.h5')): - # print('file not exists') + # logger.info('file not exists') # create_Df(filepath, 'ts_psth', '', timeAxis) # time.sleep(2) @@ -247,7 +247,7 @@ def helper_psth(z_score, event, filepath, # skip the event if there are no TTLs if len(ts)==0: new_ts = np.array([]) - print(f"Warning : No TTLs present for {event}. This will cause an error in Visualization step") + logger.info(f"Warning : No TTLs present for {event}. This will cause an error in Visualization step") else: new_ts = [ts[0]] for i in range(1, ts.shape[0]): @@ -368,8 +368,7 @@ def storenamePsth(filepath, event, inputParameters): return 0 else: for i in range(len(path)): - print("Computing PSTH for event {}...".format(event)) - logger.debug(f"Computing PSTH for event {event}") + logger.info(f"Computing PSTH for event {event}...") basename = (os.path.basename(path[i])).split('.')[0] name_1 = basename.split('_')[-1] control = read_hdf5('control_'+name_1, os.path.dirname(path[i]), 'data') @@ -389,7 +388,6 @@ def storenamePsth(filepath, event, inputParameters): create_Df(filepath, event+'_'+name_1+'_baselineUncorrected', basename, psth_baselineUncorrected, columns=cols) # extra create_Df(filepath, event+'_'+name_1, basename, psth, columns=cols) logger.info(f"PSTH for event {event} computed.") - print("PSTH for event {} computed.".format(event)) def helperPSTHPeakAndArea(psth_mean, timestamps, sampling_rate, peak_startPoint, peak_endPoint): @@ -455,8 +453,7 @@ def findPSTHPeakAndArea(filepath, event, inputParameters): return 0 else: for i in range(len(path)): - print('Computing peak and area for PSTH mean signal for event {}...'.format(event)) - logger.debug(f"Computing peak and area for PSTH mean signal for event {event}") + logger.info(f"Computing peak and area for PSTH mean signal for event {event}...") basename = (os.path.basename(path[i])).split('.')[0] name_1 = basename.split('_')[-1] sampling_rate = read_hdf5('timeCorrection_'+name_1, filepath, 'sampling_rate')[0] @@ -476,7 +473,6 @@ def findPSTHPeakAndArea(filepath, event, inputParameters): create_Df_area_peak(filepath, peak_area, event+'_'+name_1+'_'+basename, index=index) # columns=['peak', 'area'] create_csv_area_peak(filepath, peak_area, event+'_'+name_1+'_'+basename, index=index) logger.info(f"Peak and Area for PSTH mean signal for event {event} computed.") - print('Peak and Area for PSTH mean signal for event {} computed.'.format(event)) def makeAverageDir(filepath): @@ -511,8 +507,7 @@ def averageForGroup(folderNames, event, inputParameters): event = event.replace("\\","_") event = event.replace("/","_") - print("Averaging group of data...") - logger.debug("Averaging group of data") + logger.debug("Averaging 
group of data...") path = [] abspath = inputParameters['abspath'] selectForComputePsth = inputParameters['selectForComputePsth'] @@ -558,7 +553,7 @@ def averageForGroup(folderNames, event, inputParameters): bins_cols = [] temp_path = new_path[i] for j in range(len(temp_path)): - #print(os.path.join(temp_path[j][0], temp_path[j][1]+'_{}.h5'.format(temp_path[j][2]))) + #logger.info(os.path.join(temp_path[j][0], temp_path[j][1]+'_{}.h5'.format(temp_path[j][2]))) if not os.path.exists(os.path.join(temp_path[j][0], temp_path[j][1]+'_{}.h5'.format(temp_path[j][2]))): continue else: @@ -573,7 +568,6 @@ def averageForGroup(folderNames, event, inputParameters): if len(psth)==0: logger.warning('Somthing is wrong with the file search pattern.') - print("Somthing is wrong with the file search pattern.") continue if len(bins_cols)>0: @@ -615,7 +609,6 @@ def averageForGroup(folderNames, event, inputParameters): if len(arr)==0: logger.warning('Somthing is wrong with the file search pattern.') - print("Somthing is wrong with the file search pattern.") continue index = list(np.concatenate(index)) new_df = pd.concat(arr, axis=0) #os.path.join(filepath, 'peak_AUC_'+name+'.csv') @@ -654,14 +647,11 @@ def averageForGroup(folderNames, event, inputParameters): create_Df(make_dir(op), 'corr_'+event, type[i]+'_'+corr_info[k-1]+'_'+corr_info[k], corr, columns=columns) logger.info('Group of data averaged.') - print("Group of data averaged.") def psthForEachStorename(inputParameters): - print("Computing PSTH, Peak and Area for each event...") - - + logger.info("Computing PSTH, Peak and Area for each event...") inputParameters = inputParameters @@ -678,12 +668,8 @@ def psthForEachStorename(inputParameters): elif numProcesses>mp.cpu_count(): logger.warning('Warning : # of cores parameter set is greater than the cores available \ available in your machine') - print('Warning : # of cores parameter set is greater than the cores available \ - available in your machine') numProcesses = mp.cpu_count()-1 - - print("Average for group : ", average) logger.info(f"Average for group : {average}") # for average following if statement will be executed @@ -768,7 +754,7 @@ def psthForEachStorename(inputParameters): writeToFile(str(10+((inputParameters['step']+1)*10))+'\n') inputParameters['step'] += 1 logger.info(f"PSTH, Area and Peak are computed for all events in {folderNames[i]}.") - print("PSTH, Area and Peak are computed for all events.") + logger.info("PSTH, Area and Peak are computed for all events.") return inputParameters def main(input_parameters): diff --git a/src/guppy/findTransientsFreqAndAmp.py b/src/guppy/findTransientsFreqAndAmp.py index bd083f1..0ac43ce 100755 --- a/src/guppy/findTransientsFreqAndAmp.py +++ b/src/guppy/findTransientsFreqAndAmp.py @@ -87,8 +87,7 @@ def processChunks(arrValues, arrIndexes, highAmpFilt, transientsThresh): def createChunks(z_score, sampling_rate, window): - print('Creating chunks for multiprocessing...') - logger.debug('Creating chunks for multiprocessing.') + logger.debug('Creating chunks for multiprocessing...') windowPoints = math.ceil(sampling_rate*window) remainderPoints = math.ceil((sampling_rate*window) - (z_score.shape[0]%windowPoints)) @@ -110,7 +109,6 @@ def createChunks(z_score, sampling_rate, window): logger.error('Reshaping values should be integer.') raise Exception('Reshaping values should be integer.') logger.info('Chunks are created for multiprocessing.') - print('Chunks are created for multiprocessing.') return z_score_chunks, z_score_chunks_index @@ -129,7 +127,7 @@ def 
calculate_freq_amp(arr, z_score, z_score_chunks_index, timestamps): peaksInd = peaksInd.ravel() peaksInd = peaksInd.astype(int) - #print(timestamps) + #logger.info(timestamps) freq = peaksAmp.shape[0]/((timestamps[-1]-timestamps[0])/60) return freq, peaksAmp, peaksInd @@ -169,8 +167,7 @@ def visuzlize_peaks(filepath, z_score, timestamps, peaksIndex): def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_count()): - print('Calculating frequency and amplitude of transients in z-score data....') - logger.debug('Calculating frequency and amplitude of transients in z-score data.') + logger.debug('Calculating frequency and amplitude of transients in z-score data....') selectForTransientsComputation = inputParameters['selectForTransientsComputation'] highAmpFilt = inputParameters['highAmpFilt'] transientsThresh = inputParameters['transientsThresh'] @@ -210,7 +207,6 @@ def findFreqAndAmp(filepath, inputParameters, window=15, numProcesses=mp.cpu_cou index=np.arange(peaks_occurrences.shape[0]),columns=['timestamps', 'amplitude']) visuzlize_peaks(path[i], z_score, ts, peaksInd) logger.info('Frequency and amplitude of transients in z_score data are calculated.') - print('Frequency and amplitude of transients in z_score data are calculated.') @@ -224,8 +220,7 @@ def makeAverageDir(filepath): def averageForGroup(folderNames, inputParameters): - print('Combining results for frequency and amplitude of transients in z-score data...') - logger.debug('Combining results for frequency and amplitude of transients in z-score data.') + logger.debug('Combining results for frequency and amplitude of transients in z-score data...') path = [] abspath = inputParameters['abspath'] selectForTransientsComputation = inputParameters['selectForTransientsComputation'] @@ -281,11 +276,10 @@ def averageForGroup(folderNames, inputParameters): create_Df(op, arr, temp_path[j][1], index=fileName, columns=['freq (events/min)', 'amplitude']) create_csv(op, arr, 'freqAndAmp_'+temp_path[j][1]+'.csv', index=fileName, columns=['freq (events/min)', 'amplitude']) logger.info('Results for frequency and amplitude of transients in z-score data are combined.') - print('Results for frequency and amplitude of transients in z-score data are combined.') def executeFindFreqAndAmp(inputParameters): - print('Finding transients in z-score data and calculating frequency and amplitude....') + logger.info('Finding transients in z-score data and calculating frequency and amplitude....') inputParameters = inputParameters @@ -300,8 +294,6 @@ def executeFindFreqAndAmp(inputParameters): elif numProcesses>mp.cpu_count(): logger.warning('Warning : # of cores parameter set is greater than the cores available \ available in your machine') - print('Warning : # of cores parameter set is greater than the cores available \ - available in your machine') numProcesses = mp.cpu_count()-1 if average==True: @@ -348,7 +340,7 @@ def executeFindFreqAndAmp(inputParameters): logger.info('Transients in z-score data found and frequency and amplitude are calculated.') plt.show() - print('Transients in z-score data found and frequency and amplitude are calculated.') + logger.info('Transients in z-score data found and frequency and amplitude are calculated.') if __name__ == "__main__": diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py index 6450fce..d1db61a 100755 --- a/src/guppy/preprocess.py +++ b/src/guppy/preprocess.py @@ -77,9 +77,9 @@ def helper_create_control_channel(signal, timestamps, window): popt, pcov = curve_fit(curveFitFn, 
timestamps, filtered_signal, p0) except Exception as e: logger.error(str(e)) - print(e) + logger.info(e) - #print('Curve Fit Parameters : ', popt) + #logger.info('Curve Fit Parameters : ', popt) control = curveFitFn(timestamps,*popt) return control @@ -96,7 +96,6 @@ def create_control_channel(filepath, arr, window=5001): event_name, event = storesList[i], storenames[i] if 'control' in event_name.lower() and 'cntrl' in event.lower(): logger.debug('Creating control channel from signal channel using curve-fitting') - print('Creating control channel from signal channel using curve-fitting') name = event_name.split('_')[-1] signal = read_hdf5('signal_'+name, filepath, 'data') timestampNew = read_hdf5('timeCorrection_'+name, filepath, 'timestampNew') @@ -114,7 +113,6 @@ def create_control_channel(filepath, arr, window=5001): df = pd.DataFrame(d) df.to_csv(os.path.join(os.path.dirname(filepath), event.lower()+'.csv'), index=False) logger.info('Control channel from signal channel created using curve-fitting') - print('Control channel from signal channel created using curve-fitting') # function to add control channel when there is no @@ -237,7 +235,6 @@ def check_cntrl_sig_length(filepath, channels_arr, storenames, storesList): # function to correct timestamps after eliminating first few seconds of the data (for csv data) def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): - print("Correcting timestamps by getting rid of the first {} seconds and convert timestamps to seconds...".format(timeForLightsTurnOn)) logger.debug(f"Correcting timestamps by getting rid of the first {timeForLightsTurnOn} seconds and convert timestamps to seconds") storenames = storesList[0,:] storesList = storesList[1,:] @@ -281,14 +278,12 @@ def timestampCorrection_csv(filepath, timeForLightsTurnOn, storesList): raise Exception('Error in naming convention of files or Error in storesList file') logger.info("Timestamps corrected and converted to seconds.") - print("Timestamps corrected and converted to seconds.") # function to correct timestamps after eliminating first few seconds of the data (for TDT data) def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList): - print("Correcting timestamps by getting rid of the first {} seconds and convert timestamps to seconds...".format(timeForLightsTurnOn)) logger.debug(f"Correcting timestamps by getting rid of the first {timeForLightsTurnOn} seconds and convert timestamps to seconds") storenames = storesList[0,:] storesList = storesList[1,:] @@ -343,7 +338,6 @@ def timestampCorrection_tdt(filepath, timeForLightsTurnOn, storesList): raise Exception('Error in naming convention of files or Error in storesList file') logger.info("Timestamps corrected and converted to seconds.") - print("Timestamps corrected and converted to seconds.") #return timeRecStart, correctionIndex, timestampNew @@ -392,7 +386,6 @@ def applyCorrection(filepath, timeForLightsTurnOn, event, displayName, naming): # and apply timestamps correction using the function applyCorrection def decide_naming_convention_and_applyCorrection(filepath, timeForLightsTurnOn, event, displayName, storesList): - print("Applying correction of timestamps to the data and event timestamps...") logger.debug("Applying correction of timestamps to the data and event timestamps") storesList = storesList[1,:] @@ -415,7 +408,6 @@ def decide_naming_convention_and_applyCorrection(filepath, timeForLightsTurnOn, raise Exception('Error in naming convention of files or Error in storesList file') logger.info("Timestamps 
corrections applied to the data and event timestamps.") - print("Timestamps corrections applied to the data and event timestamps.") @@ -502,8 +494,6 @@ def onclick(event): if event.key == ' ': ix, iy = event.xdata, event.ydata - print('x = %d, y = %d'%( - ix, iy)) logger.info(f"x = {ix}, y = {iy}") y1_max, y1_min = np.amax(y1), np.amin(y1) y2_max, y2_min = np.amax(y2), np.amin(y2) @@ -527,8 +517,6 @@ def onclick(event): elif event.key == 'd': if len(coords)>0: - print('x = %d, y = %d; deleted'%( - coords[-1][0], coords[-1][1])) logger.info(f"x = {coords[-1][0]}, y = {coords[-1][1]}; deleted") del coords[-1] ax1.lines[-1].remove() @@ -544,7 +532,6 @@ def plt_close_event(event): if coords and len(coords)>0: name_1 = plot_name[0].split('_')[-1] np.save(os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy'), coords) - print('Coordinates file saved at {}'.format(os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy'))) logger.info(f"Coordinates file saved at {os.path.join(filepath, 'coordsForPreProcessing_'+name_1+'.npy')}") fig.canvas.mpl_disconnect(cid) coords = [] @@ -655,7 +642,7 @@ def eliminateData(filepath, timeForLightsTurnOn, event, sampling_rate, naming): arr = np.concatenate((arr, temp)) ts_arr = np.concatenate((ts_arr, new_ts+(1/sampling_rate))) - #print(arr.shape, ts_arr.shape) + #logger.info(arr.shape, ts_arr.shape) return arr, ts_arr @@ -725,7 +712,6 @@ def removeTTLs(filepath, event, naming): def addingNaNtoChunksWithArtifacts(filepath, events): - print("Replacing chunks with artifacts by NaN values.") logger.debug("Replacing chunks with artifacts by NaN values.") storesList = events[1,:] @@ -757,8 +743,7 @@ def addingNaNtoChunksWithArtifacts(filepath, events): # main function to align timestamps for control, signal and event timestamps for artifacts removal def processTimestampsForArtifacts(filepath, timeForLightsTurnOn, events): - print("Processing timestamps to get rid of artifacts using concatenate method...") - logger.debug("Processing timestamps to get rid of artifacts using concatenate method") + logger.debug("Processing timestamps to get rid of artifacts using concatenate method...") storesList = events[1,:] path = decide_naming_convention(filepath) @@ -790,7 +775,6 @@ def processTimestampsForArtifacts(filepath, timeForLightsTurnOn, events): logger.error('Error in naming convention of files or Error in storesList file') raise Exception('Error in naming convention of files or Error in storesList file') logger.info("Timestamps processed, artifacts are removed and good chunks are concatenated.") - print("Timestamps processed, artifacts are removed and good chunks are concatenated.") # function to compute deltaF/F using fitted control channel and filtered signal channel @@ -878,7 +862,7 @@ def helper_z_score(control, signal, filepath, name, inputParameters): #helpe tsNew = read_hdf5('timeCorrection_'+name, filepath, 'timestampNew') coords_path = os.path.join(filepath, 'coordsForPreProcessing_'+name+'.npy') - print("Remove Artifacts : ", removeArtifacts) + logger.info("Remove Artifacts : ", removeArtifacts) if (control==0).all()==True: control = np.zeros(tsNew.shape[0]) @@ -943,7 +927,6 @@ def helper_z_score(control, signal, filepath, name, inputParameters): #helpe # compute z-score and deltaF/F and save it to hdf5 file def compute_z_score(filepath, inputParameters): - print("Computing z-score for each of the data...") logger.debug(f"Computing z-score for each of the data in {filepath}") remove_artifacts = inputParameters['removeArtifacts'] @@ -990,7 +973,6 
@@ def compute_z_score(filepath, inputParameters): raise Exception('Error in naming convention of files or Error in storesList file') logger.info(f"z-score for the data in {filepath} computed.") - print("z-score for the data computed.") @@ -1065,8 +1047,7 @@ def get_all_stores_for_combining_data(folderNames): # it will combine the data, do timestamps processing and save the combined data in the first output folder. def combineData(folderNames, inputParameters, storesList): - print("Combining Data from different data files...") - logger.debug("Combining Data from different data files") + logger.debug("Combining Data from different data files...") timeForLightsTurnOn = inputParameters['timeForLightsTurnOn'] op_folder = [] for i in range(len(folderNames)): @@ -1100,7 +1081,6 @@ def combineData(folderNames, inputParameters, storesList): # processing timestamps for combining the data processTimestampsForCombiningData(op, timeForLightsTurnOn, storesList, sampling_rate[0]) logger.info("Data is combined from different data files.") - print("Data is combined from different data files.") return op @@ -1131,8 +1111,7 @@ def execute_zscore(folderNames, inputParameters): storesList = np.genfromtxt(os.path.join(filepath, 'storesList.csv'), dtype='str', delimiter=',').reshape(2,-1) if remove_artifacts==True: - print("Removing Artifacts from the data and correcting timestamps...") - logger.debug("Removing Artifacts from the data and correcting timestamps.") + logger.debug("Removing Artifacts from the data and correcting timestamps...") compute_z_score(filepath, inputParameters) if artifactsRemovalMethod=='concatenate': processTimestampsForArtifacts(filepath, timeForLightsTurnOn, storesList) @@ -1140,7 +1119,6 @@ def execute_zscore(folderNames, inputParameters): addingNaNtoChunksWithArtifacts(filepath, storesList) visualizeControlAndSignal(filepath, remove_artifacts) logger.info("Artifacts from the data are removed and timestamps are corrected.") - print("Artifacts from the data are removed and timestamps are corrected.") else: compute_z_score(filepath, inputParameters) visualizeControlAndSignal(filepath, remove_artifacts) @@ -1158,13 +1136,11 @@ def execute_zscore(folderNames, inputParameters): plt.show() logger.info("Signal data and event timestamps are extracted.") - print("Signal data and event timestamps are extracted.") def extractTsAndSignal(inputParameters): - print("Extracting signal data and event timestamps...") - logger.debug("Extracting signal data and event timestamps") + logger.debug("Extracting signal data and event timestamps...") inputParameters = inputParameters #storesList = np.genfromtxt(inputParameters['storesListPath'], dtype='str', delimiter=',') @@ -1179,10 +1155,6 @@ def extractTsAndSignal(inputParameters): logger.info(f"Remove Artifacts : {remove_artifacts}") logger.info(f"Combine Data : {combine_data}") logger.info(f"Isosbestic Control Channel : {isosbestic_control}") - - print("Remove Artifacts : ", remove_artifacts) - print("Combine Data : ", combine_data) - print("Isosbestic Control Channel : ", isosbestic_control) storesListPath = [] for i in range(len(folderNames)): storesListPath.append(takeOnlyDirs(glob.glob(os.path.join(folderNames[i], '*_output_*')))) diff --git a/src/guppy/readTevTsq.py b/src/guppy/readTevTsq.py index a98b0be..3a44b2e 100755 --- a/src/guppy/readTevTsq.py +++ b/src/guppy/readTevTsq.py @@ -29,7 +29,6 @@ def writeToFile(value: str): # functino to read tsq file def readtsq(filepath): - print("Trying to read tsq file.") logger.debug("Trying to read tsq 
file.") names = ('size', 'type', 'name', 'chan', 'sort_code', 'timestamp', 'fp_loc', 'strobe', 'format', 'frequency') @@ -44,7 +43,6 @@ def readtsq(filepath): raise Exception('Two tsq files are present at the location.') elif len(path)==0: logger.info("\033[1m"+"tsq file not found."+"\033[1m") - print("\033[1m"+"tsq file not found."+"\033[1m") return 0, 0 else: path = path[0] @@ -57,12 +55,10 @@ def readtsq(filepath): df = pd.DataFrame(tsq) logger.info("Data from tsq file fetched.") - print("Data from tsq file fetched.") return df, flag # function to check if doric file exists def check_doric(filepath): - print("Checking if doric file exists.") logger.debug('Checking if doric file exists') path = glob.glob(os.path.join(filepath, '*.csv')) + \ glob.glob(os.path.join(filepath, '*.doric')) @@ -90,10 +86,8 @@ def check_doric(filepath): raise Exception('Two doric files are present at the same location') if len(flag_arr)==0: logger.error("\033[1m"+"Doric file not found."+"\033[1m") - print("\033[1m"+"Doric file not found."+"\033[1m") return 0 logger.info('Doric file found.') - print('Doric file found.') return flag_arr[0] @@ -140,7 +134,6 @@ def write_hdf5(data, event, filepath, key): # function to read event timestamps csv file. def import_csv(filepath, event, outputPath): - print("\033[1m"+"Trying to read data for {} from csv file.".format(event)+"\033[0m") logger.debug("\033[1m"+"Trying to read data for {} from csv file.".format(event)+"\033[0m") if not os.path.exists(os.path.join(filepath, event+'.csv')): logger.error("\033[1m"+"No csv file found for event {}".format(event)+"\033[0m") @@ -172,7 +165,6 @@ def import_csv(filepath, event, outputPath): write_hdf5(data[key[i]].dropna(), event, outputPath, key[i].lower()) logger.info("\033[1m"+"Reading data for {} from csv file is completed.".format(event)+"\033[0m") - print("\033[1m"+"Reading data for {} from csv file is completed.".format(event)+"\033[0m") return data, key @@ -190,7 +182,7 @@ def save_dict_to_hdf5(S, event, outputPath): # function to check event data (checking whether event timestamps belongs to same event or multiple events) def check_data(S, filepath, event, outputPath): - #print("Checking event storename data for creating multiple event names from single event storename...") + #logger.info("Checking event storename data for creating multiple event names from single event storename...") new_event = event.replace("\\","") new_event = event.replace("/","") diff = np.diff(S['data']) @@ -202,8 +194,6 @@ def check_data(S, filepath, event, outputPath): return 0 if S['sampling_rate']==0 and np.all(diff==diff[0])==False: - print("\033[1m"+"Data in event {} belongs to multiple behavior".format(event)+"\033[0m") - print("\033[1m"+"Create timestamp files for individual new event and change the stores list file."+"\033[0m") logger.info("\033[1m"+"Data in event {} belongs to multiple behavior".format(event)+"\033[0m") logger.debug("\033[1m"+"Create timestamp files for individual new event and change the stores list file."+"\033[0m") i_d = np.unique(S['data']) @@ -235,7 +225,6 @@ def check_data(S, filepath, event, outputPath): # function to read tev file def readtev(data, filepath, event, outputPath): - print("Reading data for event {} ...".format(event)) logger.debug("Reading data for event {} ...".format(event)) tevfilepath = glob.glob(os.path.join(filepath, '*.tev')) if len(tevfilepath)>1: @@ -260,16 +249,16 @@ def readtev(data, filepath, event, outputPath): eventNew = np.array(list(event)) - #print(allnames) - #print(eventNew) + 
#logger.info(allnames) + #logger.info(eventNew) row = ismember(data['name'], event) if sum(row)==0: - print("\033[1m"+"Requested store name "+event+" not found (case-sensitive)."+"\033[0m") - print("\033[1m"+"File contains the following TDT store names:"+"\033[0m") - print("\033[1m"+str(allnames)+"\033[0m") - print("\033[1m"+"TDT store name "+str(event)+" not found."+"\033[0m") + logger.info("\033[1m"+"Requested store name "+event+" not found (case-sensitive)."+"\033[0m") + logger.info("\033[1m"+"File contains the following TDT store names:"+"\033[0m") + logger.info("\033[1m"+str(allnames)+"\033[0m") + logger.info("\033[1m"+"TDT store name "+str(event)+" not found."+"\033[0m") import_csv(filepath, event, outputPath) return 0 @@ -317,7 +306,6 @@ def readtev(data, filepath, event, outputPath): check_data(S, filepath, event, outputPath) - print("Data for event {} fetched and stored.".format(event)) logger.info("Data for event {} fetched and stored.".format(event)) @@ -331,16 +319,16 @@ def execute_readtev(data, filepath, event, outputPath, numProcesses=mp.cpu_count #p.starmap(readtev, zip(repeat(data), repeat(filepath), event, repeat(outputPath))) #p.close() #p.join() - print("Time taken = {0:.5f}".format(time.time() - start)) + logger.info("Time taken = {0:.5f}".format(time.time() - start)) def execute_import_csv(filepath, event, outputPath, numProcesses=mp.cpu_count()): - #print("Reading data for event {} ...".format(event)) + #logger.info("Reading data for event {} ...".format(event)) start = time.time() with mp.Pool(numProcesses) as p: p.starmap(import_csv, zip(repeat(filepath), event, repeat(outputPath))) - print("Time taken = {0:.5f}".format(time.time() - start)) + logger.info("Time taken = {0:.5f}".format(time.time() - start)) def access_data_doricV1(doric_file, storesList, outputPath): keys = list(doric_file['Traces']['Console'].keys()) @@ -459,7 +447,6 @@ def execute_import_doric(filepath, storesList, flag, outputPath): def readRawData(inputParameters): - print('### Reading raw data... ###') logger.debug('### Reading raw data... 
###') # get input parameters inputParameters = inputParameters @@ -469,8 +456,6 @@ def readRawData(inputParameters): if numProcesses==0: numProcesses = mp.cpu_count() elif numProcesses>mp.cpu_count(): - print('Warning : # of cores parameter set is greater than the cores available \ - available in your machine') logger.warning('Warning : # of cores parameter set is greater than the cores available \ available in your machine') numProcesses = mp.cpu_count()-1 @@ -482,7 +467,6 @@ def readRawData(inputParameters): step = 0 for i in range(len(folderNames)): filepath = folderNames[i] - print(filepath) logger.debug(f"### Reading raw data for folder {folderNames[i]}") storesListPath = takeOnlyDirs(glob.glob(os.path.join(filepath, '*_output_*'))) # reading tsq file @@ -513,12 +497,11 @@ def readRawData(inputParameters): writeToFile(str(10+((step+1)*10))+'\n') step += 1 logger.info(f"### Raw data for folder {folderNames[i]} fetched") - print("### Raw data fetched and saved.") logger.info('Raw data fetched and saved.') logger.info("#" * 400) def main(input_parameters): - print('run') + logger.info('run') try: readRawData(input_parameters) logger.info('#'*400) diff --git a/src/guppy/saveStoresList.py b/src/guppy/saveStoresList.py index 29b4e42..4d0355c 100755 --- a/src/guppy/saveStoresList.py +++ b/src/guppy/saveStoresList.py @@ -513,7 +513,7 @@ def save_button(event=None): json.dump(storenames_cache, f, indent=4) arr = np.asarray([arr1, arr2]) - print(arr) + logger.info(arr) if not os.path.exists(select_location.value): os.mkdir(select_location.value) @@ -588,7 +588,7 @@ def decide_ts_unit_for_npm(df, timestamp_column_name=None, time_unit=None, headl ts_unit = time_unit if (isinstance(time_unit, str) and time_unit in valid_units) else 'seconds' return df, ts_unit #def comboBoxSelected(event): - # print(event.widget.get()) + # logger.info(event.widget.get()) window = tk.Tk() window.title('Select appropriate options for timestamps') @@ -779,7 +779,7 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No df = df.drop(['Time(s)'], axis=1) event_from_filename.extend(list(df.columns)) flag = 'doric_csv' - print(flag) + logger.info(flag) else: df = pd.read_csv(path[i], index_col=False) # with warnings.catch_warnings(): @@ -795,7 +795,7 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No continue else: colnames, value = check_header(df) - #print(len(colnames), len(value)) + #logger.info(len(colnames), len(value)) # check dataframe structure and read data accordingly if len(value)>0: @@ -850,7 +850,7 @@ def import_np_doric_csv(filepath, isosbestic_control, num_ch, inputParameters=No pass flag_arr.append(flag) - print(flag) + logger.info(flag) if flag=='event_csv' or flag=='data_csv': name = os.path.basename(path[i]).split('.')[0] event_from_filename.append(name) @@ -1000,7 +1000,7 @@ def execute(inputParameters): isosbestic_control = inputParameters['isosbestic_control'] num_ch = inputParameters['noChannels'] - print(folderNames) + logger.info(folderNames) try: for i in folderNames: diff --git a/src/guppy/savingInputParameters.py b/src/guppy/savingInputParameters.py index 0f99473..98f9267 100644 --- a/src/guppy/savingInputParameters.py +++ b/src/guppy/savingInputParameters.py @@ -26,7 +26,7 @@ def savingInputParameters(): if is_headless: global folder_path folder_path = base_dir_env - print(f"Folder path set to {folder_path} (from GUPPY_BASE_DIR)") + logger.info(f"Folder path set to {folder_path} (from GUPPY_BASE_DIR)") else: # Create the main 
window folder_selection = tk.Tk() @@ -36,11 +36,11 @@ def select_folder(): global folder_path folder_path = filedialog.askdirectory(title="Select the folder path where your data is located") if folder_path: - print(f"Folder path set to {folder_path}") + logger.info(f"Folder path set to {folder_path}") folder_selection.destroy() else: folder_path = os.path.expanduser('~') - print(f"Folder path set to {folder_path}") + logger.info(f"Folder path set to {folder_path}") select_button = ttk.Button(folder_selection, text="Select a Folder", command=select_folder) select_button.pack(pady=5) @@ -69,7 +69,7 @@ def psthComputation(): def readPBIncrementValues(progressBar): - print("Read progress bar increment values function started...") + logger.info("Read progress bar increment values function started...") file_path = os.path.join(os.path.expanduser('~'), 'pbSteps.txt') if os.path.exists(file_path): os.remove(file_path) @@ -99,13 +99,13 @@ def readPBIncrementValues(progressBar): time.sleep(0.001) except Exception as e: # Handle other exceptions that may occur - print(f"An error occurred while reading the file: {e}") + logger.info(f"An error occurred while reading the file: {e}") break if increment==maximum: os.remove(file_path) break - print("Read progress bar increment values stopped.") + logger.info("Read progress bar increment values stopped.") # progress bars = PB read_progress = pn.indicators.Progress(name='Progress', value=100, max=100, width=300) @@ -410,7 +410,7 @@ def onclickProcess(event=None): logger.info('#'*400) #path.value = (os.path.join(op, 'inputParameters.json')).replace('\\', '/') - print('Input Parameters File Saved.') + logger.info('Input Parameters File Saved.') def onclickStoresList(event=None): inputParameters = getInputParameters() diff --git a/src/guppy/visualizePlot.py b/src/guppy/visualizePlot.py index 37a97ff..c98bbe8 100755 --- a/src/guppy/visualizePlot.py +++ b/src/guppy/visualizePlot.py @@ -83,7 +83,6 @@ def helper_plots(filepath, event, name, inputParameters): # note when there are no behavior event TTLs if len(event)==0: logger.warning("\033[1m"+"There are no behavior event TTLs present to visualize.".format(event)+"\033[0m") - print("\033[1m"+"There are no behavior event TTLs present to visualize.".format(event)+"\033[0m") return 0 @@ -220,7 +219,7 @@ def save_hm_plots(self): plot = self.results_hm['plot'] op = self.results_hm['op'] save_opts = self.save_options_heatmap - print(save_opts) + logger.info(save_opts) if save_opts=='save_svg_format': p = hv.render(plot, backend='bokeh') p.output_backend = 'svg' @@ -360,7 +359,7 @@ def contPlot(self): df1 = self.df_new[self.event_selector] #height = self.Heigth_Plot #width = self.Width_Plot - #print(height, width) + #logger.info(height, width) if self.y == 'All': if self.Y_Limit==None: self.Y_Limit = (np.nanmin(np.asarray(df1))-0.5, np.nanmax(np.asarray(df1))-0.5) @@ -545,7 +544,6 @@ def heatmap(self): view = Viewer() - print('view') #PSTH plot options psth_checkbox = pn.Param(view.param.select_trials_checkbox, widgets={ @@ -613,12 +611,11 @@ def heatmap(self): width_heatmap, height_heatmap, save_options_heatmap, pn.Column(pn.Spacer(height=25), save_hm)), pn.Row(view.heatmap, heatmap_y_parameters)) # - print('app') + logger.info('app') template = pn.template.MaterialTemplate(title='Visualization GUI') number = scanPortsAndFind(start_port=5000, end_port=5200) - app = pn.Tabs(('PSTH', line_tab), ('Heat Map', hm_tab)) @@ -670,7 +667,7 @@ def createPlots(filepath, event, inputParameters): if average==True: - print('average') 
+        logger.info('average')
         helper_plots(filepath, name_arr, '', inputParameters)
     else:
         helper_plots(filepath, event, name_arr, inputParameters)
@@ -684,7 +681,7 @@ def visualizeResults(inputParameters):
 
     average = inputParameters['visualizeAverageResults']
 
-    print(average)
+    logger.info(average)
 
     folderNames = inputParameters['folderNames']
     folderNamesForAvg = inputParameters['folderNamesForAvg']
@@ -726,7 +723,6 @@ def visualizeResults(inputParameters):
 
         filepath = folderNames[i]
         storesListPath = takeOnlyDirs(glob.glob(os.path.join(filepath, '*_output_*')))
-        print(storesListPath)
         for j in range(len(storesListPath)):
             filepath = storesListPath[j]
             storesList = np.genfromtxt(os.path.join(filepath, 'storesList.csv'), dtype='str', delimiter=',').reshape(2,-1)
@@ -734,5 +730,5 @@ def visualizeResults(inputParameters):
             createPlots(filepath, storesList[1,:], inputParameters)
 
 
-#print(sys.argv[1:])
+#logger.info(sys.argv[1:])
 #visualizeResults(sys.argv[1:][0])

From 2c7047da90fc56fd8d01ad60dd231a67f0e81f51 Mon Sep 17 00:00:00 2001
From: Paul Adkisson-Floro
Date: Fri, 17 Oct 2025 07:05:32 +1100
Subject: [PATCH 78/83] Update src/guppy/preprocess.py

---
 src/guppy/preprocess.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/guppy/preprocess.py b/src/guppy/preprocess.py
index d1db61a..be7c0bf 100755
--- a/src/guppy/preprocess.py
+++ b/src/guppy/preprocess.py
@@ -77,7 +77,6 @@ def helper_create_control_channel(signal, timestamps, window):
         popt, pcov = curve_fit(curveFitFn, timestamps, filtered_signal, p0)
     except Exception as e:
         logger.error(str(e))
-        logger.info(e)
 
     #logger.info('Curve Fit Parameters : ', popt)
     control = curveFitFn(timestamps,*popt)

From a62f66516d05df3ab285e661a9f794897c13b440 Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Mon, 27 Oct 2025 16:50:55 -0700
Subject: [PATCH 79/83] Updated the paths in test_step2.

---
 tests/test_step2.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/test_step2.py b/tests/test_step2.py
index add82ec..5c5eaaf 100644
--- a/tests/test_step2.py
+++ b/tests/test_step2.py
@@ -13,7 +13,7 @@
     "session_subdir, storenames_map",
     [
         (
-            "SampleData_csv",
+            "SampleData_csv/sample_data_csv_1",
             {
                 "Sample_Control_Channel": "control_region",
                 "Sample_Signal_Channel": "signal_region",
@@ -21,7 +21,7 @@
             },
         ),
         (
-            "SampleData_Doric",
+            "SampleData_Doric/sample_doric_1",
             {
                 "AIn-1 - Dem (ref)": "control_region",
                 "AIn-1 - Dem (da)": "signal_region",
@@ -45,7 +45,7 @@
             },
         ),
        (
-            "SampleData_Neurophotometrics/1442",
+            "SampleData_Neurophotometrics/sampleData_NPM_4",
             {
                 "file0_chev1": "control_region1",
                 "file0_chod1": "signal_region1",

From cd0779742fd1457fe27113c91867d44e854e8dba Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Mon, 27 Oct 2025 17:10:53 -0700
Subject: [PATCH 80/83] Updated the paths in the rest of the tests.
---
 tests/test_step2.py | 2 +-
 tests/test_step3.py | 6 +++---
 tests/test_step4.py | 6 +++---
 tests/test_step5.py | 6 +++---
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/tests/test_step2.py b/tests/test_step2.py
index 5c5eaaf..7e11287 100644
--- a/tests/test_step2.py
+++ b/tests/test_step2.py
@@ -21,7 +21,7 @@
             },
         ),
         (
-            "SampleData_Doric/sample_doric_1",
+            "SampleData_Doric/sample_doric_2",
             {
                 "AIn-1 - Dem (ref)": "control_region",
                 "AIn-1 - Dem (da)": "signal_region",
diff --git a/tests/test_step3.py b/tests/test_step3.py
index 86eaddc..cf46c42 100644
--- a/tests/test_step3.py
+++ b/tests/test_step3.py
@@ -22,7 +22,7 @@ def storenames_map():
     "session_subdir, storenames_map",
     [
         (
-            "SampleData_csv",
+            "SampleData_csv/sample_data_csv_1",
             {
                 "Sample_Control_Channel": "control_region",
                 "Sample_Signal_Channel": "signal_region",
@@ -30,7 +30,7 @@ def storenames_map():
             },
         ),
         (
-            "SampleData_Doric",
+            "SampleData_Doric/sample_doric_2",
             {
                 "AIn-1 - Dem (ref)": "control_region",
                 "AIn-1 - Dem (da)": "signal_region",
@@ -54,7 +54,7 @@ def storenames_map():
             },
         ),
         (
-            "SampleData_Neurophotometrics/1442",
+            "SampleData_Neurophotometrics/sampleData_NPM_4",
             {
                 "file0_chev1": "control_region1",
                 "file0_chod1": "signal_region1",
diff --git a/tests/test_step4.py b/tests/test_step4.py
index b48fa9a..be83740 100644
--- a/tests/test_step4.py
+++ b/tests/test_step4.py
@@ -12,7 +12,7 @@
     "session_subdir, storenames_map, expected_region, expected_ttl",
     [
         (
-            "SampleData_csv",
+            "SampleData_csv/sample_data_csv_1",
             {
                 "Sample_Control_Channel": "control_region",
                 "Sample_Signal_Channel": "signal_region",
@@ -22,7 +22,7 @@
             "ttl",
         ),
         (
-            "SampleData_Doric",
+            "SampleData_Doric/sample_doric_2",
             {
                 "AIn-1 - Dem (ref)": "control_region",
                 "AIn-1 - Dem (da)": "signal_region",
@@ -52,7 +52,7 @@
             "port_entries_dms",
         ),
         (
-            "SampleData_Neurophotometrics/1442",
+            "SampleData_Neurophotometrics/sampleData_NPM_4",
             {
                 "file0_chev1": "control_region1",
                 "file0_chod1": "signal_region1",
diff --git a/tests/test_step5.py b/tests/test_step5.py
index 84f2ba6..59ae5ba 100644
--- a/tests/test_step5.py
+++ b/tests/test_step5.py
@@ -13,7 +13,7 @@
     "session_subdir, storenames_map, expected_region, expected_ttl",
     [
         (
-            "SampleData_csv",
+            "SampleData_csv/sample_data_csv_1",
             {
                 "Sample_Control_Channel": "control_region",
                 "Sample_Signal_Channel": "signal_region",
@@ -23,7 +23,7 @@
             "ttl",
         ),
         (
-            "SampleData_Doric",
+            "SampleData_Doric/sample_doric_2",
             {
                 "AIn-1 - Dem (ref)": "control_region",
                 "AIn-1 - Dem (da)": "signal_region",
@@ -53,7 +53,7 @@
             "port_entries_dms",
         ),
         (
-            "SampleData_Neurophotometrics/1442",
+            "SampleData_Neurophotometrics/sampleData_NPM_4",
             {
                 "file0_chev1": "control_region1",
                 "file0_chod1": "signal_region1",

From 592701af24b40f2626ac4328a4642c7c9b40ecec Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Tue, 28 Oct 2025 17:32:22 -0700
Subject: [PATCH 81/83] Added option to export logs to a more user-friendly location.
---
 src/guppy/logging_config.py | 25 ++++++++++++++++++++++++-
 src/guppy/main.py | 29 +++++++++++++++++++++++++++--
 2 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/src/guppy/logging_config.py b/src/guppy/logging_config.py
index 2de2e14..063db3c 100644
--- a/src/guppy/logging_config.py
+++ b/src/guppy/logging_config.py
@@ -13,7 +13,9 @@
 import logging
 import os
 from pathlib import Path
-from platformdirs import user_log_dir
+from platformdirs import user_log_dir, user_desktop_dir
+import shutil
+from datetime import datetime
 
 
 def get_log_file():
@@ -74,3 +76,24 @@ def setup_logging(*, level=None, console_output=True):
         console_handler = logging.StreamHandler()
         console_handler.setFormatter(formatter)
         logger.addHandler(console_handler)
+
+
+def export_log_file():
+    """Export the GuPPy log file to Desktop with a timestamped name.
+
+    The log file is copied from its hidden platform-specific location to the user's
+    Desktop with a clear, timestamped filename (e.g., guppy_log_20251028_165530.log).
+    If the log file does not exist, an error message is printed.
+    """
+    log_file = get_log_file()
+    if not log_file.exists():
+        print(f"Error: Log file not found at {log_file}")
+        print("The log file may not exist yet. Try running GuPPy first to generate logs.")
+        return
+
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    export_filename = f"guppy_log_{timestamp}.log"
+    desktop_dir = Path(user_desktop_dir())
+    export_path = desktop_dir / export_filename
+    shutil.copy2(log_file, export_path)
+    print(f"Log file successfully exported to {export_path}")
diff --git a/src/guppy/main.py b/src/guppy/main.py
index 070b324..96479a6 100644
--- a/src/guppy/main.py
+++ b/src/guppy/main.py
@@ -6,13 +6,38 @@
 # Logging must be configured before importing application modules so that module-level loggers inherit the proper handlers and formatters
 logging_config.setup_logging()
 
+import argparse
 import panel as pn
 from .savingInputParameters import savingInputParameters
 
-def main():
-    """Main entry point for GuPPy"""
+def serve_app():
+    """Serve the GuPPy application using Panel."""
     template = savingInputParameters()
     pn.serve(template, show=True)
 
+def main():
+    """Main entry point for GuPPy.
+
+    Supports command-line flags:
+    - --export-logs: Export the log file to Desktop for sharing with support
+    - (no flags): Launch the GUI application
+    """
+    parser = argparse.ArgumentParser(
+        description="GuPPy - Guided Photometry Analysis in Python"
+    )
+    parser.add_argument(
+        '--export-logs',
+        action='store_true',
+        help='Export log file to Desktop with timestamped name for support purposes'
+    )
+
+    args = parser.parse_args()
+
+    if args.export_logs:
+        logging_config.export_log_file()
+        return
+
+    serve_app()
+
 if __name__ == "__main__":
     main()

From c5a1456234d4b3acbef4bf2fb6d32eec3125e81a Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Tue, 28 Oct 2025 17:35:36 -0700
Subject: [PATCH 82/83] Corrected documentation.
---
 src/guppy/logging_config.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/guppy/logging_config.py b/src/guppy/logging_config.py
index 063db3c..40f03e1 100644
--- a/src/guppy/logging_config.py
+++ b/src/guppy/logging_config.py
@@ -2,9 +2,9 @@
 This module provides a standardized logging setup that writes to platform-appropriate
 log directories following OS conventions:
-- Windows: %APPDATA%/LernerLab/guppy/Logs/
-- macOS: ~/Library/Logs/LernerLab/guppy/
-- Linux: ~/.local/state/LernerLab/guppy/log/
+- Windows:
+- macOS: /Users/<username>/Library/Logs/guppy/guppy.log
+- Linux:
 
 Call setup_logging() once at application startup before importing other modules.
 Each module should then create its own logger using:
     logger = logging.getLogger(__name__)

From a105ad63fb83e3ddc63d6dbd2400f538bd318303 Mon Sep 17 00:00:00 2001
From: pauladkisson
Date: Thu, 30 Oct 2025 09:04:01 -0700
Subject: [PATCH 83/83] Updated docstring to contain accurate log_file paths for each OS

---
 src/guppy/logging_config.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/guppy/logging_config.py b/src/guppy/logging_config.py
index 40f03e1..2e47062 100644
--- a/src/guppy/logging_config.py
+++ b/src/guppy/logging_config.py
@@ -2,9 +2,9 @@
 This module provides a standardized logging setup that writes to platform-appropriate
 log directories following OS conventions:
-- Windows:
+- Windows: C:/Users/<username>/AppData/Local/LernerLab/guppy/Logs/guppy.log
 - macOS: /Users/<username>/Library/Logs/guppy/guppy.log
-- Linux:
+- Linux: /home/<username>/.local/state/guppy/log/guppy.log
 
 Call setup_logging() once at application startup before importing other modules.
 Each module should then create its own logger using:
     logger = logging.getLogger(__name__)