diff --git a/.github/workflows/Build.yml b/.github/workflows/Build.yml index 736c29a1..707f6653 100644 --- a/.github/workflows/Build.yml +++ b/.github/workflows/Build.yml @@ -1,20 +1,30 @@ -name: Build (& Test) +name: Build and Deploy on: workflow_dispatch: inputs: - debug_enabled: - description: 'Run workflow with ssh debugging' + upload_anaconda: + description: 'Upload build to anaconda' + required: false + default: 'skip' + type: choice + options: + - 'pre-release' + - 'main' + - 'skip' + upload_pypi: + description: 'Upload build to pypi' required: false default: false - release: - types: [published] - - schedule: - - cron: '0 0 * * 0' + type: boolean + deploy_docs: + description: 'Deploy docs to github pages' + required: false + default: false + type: boolean env: PACKAGE_NAME: pymer4 - DEPLOY_PY_VER: 3.8 # only this job deploys docs, anaconda.org, pypi + DEPLOY_PY_VER: 3.8 # only this job runner deploys docs and pypi package DEPLOY_OS: ubuntu-latest CONDA_BLD_PATH: /tmp/ci_conda_bld @@ -29,18 +39,11 @@ jobs: runs-on: ${{ matrix.os }} continue-on-error: ${{ matrix.experimental }} strategy: - fail-fast: false + fail-fast: true matrix: - py_ver: [3.8, 3.9] + py_ver: [3.8, 3.9, '3.10', 3.11] os: [ubuntu-latest, macos-latest] - experimental: [false] - include: - - py_ver: 3.7 - os: macos-latest - experimental: true - - py_ver: 3.7 - os: ubuntu-latest - experimental: false + experimental: [true] outputs: # tarballs are py3X job-specific @@ -89,12 +92,11 @@ jobs: conda config --show | grep bld_path conda info conda-build ./conda/ --python=$PY_VER -c https://conda.anaconda.org/conda-forge/ --verify - tarball=$(conda build --python=$PY_VER conda --output | tail -1) + tarball=$(conda-build --python=$PY_VER conda --output | tail -1) + echo "conda build tarball" $tarball if [[ $OS == "Linux" ]]; then \ conda convert -p win-64 -o $CONDA_BLD_PATH $tarball; \ fi - echo "conda build tarball" $tarball - echo "::set-output name=conda-tarball::$tarball" echo "{conda-tarball}={$tarball}" >> $GITHUB_OUTPUT # ------------------------------------------------------------ @@ -107,6 +109,7 @@ jobs: run: | conda create -n env_$PY_VER python=$PY_VER $PACKAGE_NAME -c $CONDA_BLD_PATH -c conda-forge -c defaults conda activate env_$PY_VER + conda info | grep active pip install -r requirements-dev.txt python -c "from pymer4.test_install import test_install; test_install()" @@ -116,6 +119,8 @@ jobs: PY_VER: ${{ matrix.py_ver }} run: | conda activate env_$PY_VER + conda info | grep active + black --version black --check --verbose . pytest pymer4/tests @@ -127,60 +132,63 @@ jobs: if: ${{ matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS }} run: | conda activate env_$PY_VER + conda info | grep active conda install sphinx sphinx_bootstrap_theme sphinx-gallery -c conda-forge cd docs && make clean && make html touch _build/html/.nojekyll - # 5b. Deploy docs (only for 3.8 which handles deployment) - # Only runs when a PR is merged into master or there's a direct push to master + # 5b. 
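One gotcha in the tarball step above: the `$GITHUB_OUTPUT` file replaces the deprecated `::set-output` command, but it expects plain `key=value` lines, so the brace-wrapped `echo "{conda-tarball}={$tarball}"` form writes literal braces and the later `steps.conda-bld.outputs.conda-tarball` lookup will come back empty. A minimal sketch of the intended lines:

```bash
# Capture the built tarball path and expose it as a step output.
# GITHUB_OUTPUT expects key=value with no braces around either side.
tarball=$(conda-build --python=$PY_VER conda --output | tail -1)
echo "conda-tarball=$tarball" >> "$GITHUB_OUTPUT"
```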
Deploy docs + # Only runs on the 3.8 runner when there's a release - name: Deploy docs env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: ${{ (matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS) && ((github.event_name == 'push' && github.ref == 'refs/heads/master') || (github.event.pull_request.merged && github.base_ref == 'master' && github.head_ref == 'dev')) }} + if: ${{ (matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS) && (github.event_name == 'release' || inputs.deploy_docs)}} uses: crazy-max/ghaction-github-pages@v2 with: target_branch: gh-pages build_dir: docs/_build/html - # 6. Build package for PyPi (only for 3.8 linux which handles deployment) + # 6. Build package for PyPi + # Only runs on the 3.8 runner - name: Build for Pypi + env: + PY_VER: ${{ matrix.py_ver }} if: ${{ matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS }} run: | conda activate env_$PY_VER + conda info | grep active pip install build python -m build --sdist --wheel --outdir dist/ # 7. Deploy package to Pypi - # Only runs when a manual github release is created + # Only runs on the 3.8 runner when there's a release - name: PyPi deploy - if: ${{ matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS && github.event_name == 'release'}} + if: ${{ (matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS) && (github.event_name == 'release' || inputs.upload_pypi)}} uses: pypa/gh-action-pypi-publish@master with: password: ${{ secrets.PYPI_API_TOKEN }} - # 8a. Deploy package to main conda channel but each OS uploads their own and linux uploads for linux and windows - # Only runs when a manual github release is created + # 8a. Deploy package to main conda main channel + # Runs when a github release is created, but can also be triggered manually - name: Conda main deploy - if: ${{ github.event_name == 'release' }} + if: ${{ (github.event_name == 'release') || (inputs.upload_anaconda == 'main') }} env: ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} run: | echo 'Conda release on main channel' + conda install anaconda-client anaconda -t "$ANACONDA_TOKEN" upload $CONDA_BLD_PATH/**/${PACKAGE_NAME}*.tar.bz2 -l "main" # OR ----- - # 8b. Deploy package to pre-release conda channel (only for 3.8 which handles deployment) but each OS uploads their own and linux uploads for linux and windows - # Only runs when a PR is merged into master or there's a direct push to master + # 8b. Deploy package to pre-release conda channel + # Only runs when triggered manually - name: Conda pre-release deploy - if: ${{ ((github.event_name == 'push' && github.ref == 'refs/heads/master') || (github.event.pull_request.merged && github.base_ref == 'master' && github.head_ref == 'dev')) }} + if: ${{ inputs.upload_anaconda == 'pre-release' }} env: ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} run: | echo 'Conda release on pre-release channel' + conda install anaconda-client anaconda -t "$ANACONDA_TOKEN" upload $CONDA_BLD_PATH/**/${PACKAGE_NAME}*.tar.bz2 -l "pre-release" - - # N. 
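Both deploy steps above install `anaconda-client` inline and push every tarball under the build path to a channel label. The same upload can be exercised from a local shell; this is a sketch that assumes `ANACONDA_TOKEN` is exported and that packages were built into the workflow's `/tmp/ci_conda_bld` path:

```bash
# Upload all built pymer4 tarballs to the pre-release label on anaconda.org
conda install -y anaconda-client
anaconda -t "$ANACONDA_TOKEN" upload /tmp/ci_conda_bld/**/pymer4*.tar.bz2 -l "pre-release"
```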
Optionall debug via ssh if workflow is run manually with debug_enabled = true - - name: Setup SSH session - if: ${{ github.event_name == 'workflow_dispatch' && github.events.inputs.debug_enabled }} - uses: mxschmitt/action-tmate@v3 + \ No newline at end of file diff --git a/.github/workflows/Build_noarch.yml b/.github/workflows/Build_noarch.yml new file mode 100644 index 00000000..e2cff91b --- /dev/null +++ b/.github/workflows/Build_noarch.yml @@ -0,0 +1,137 @@ +name: Build and Deploy noarch +on: + workflow_dispatch: + inputs: + upload_anaconda: + description: 'Upload build to anaconda' + required: false + default: 'skip' + type: choice + options: + - 'pre-release' + - 'main' + - 'skip' + +env: + PACKAGE_NAME: pymer4 + DEPLOY_PY_VER: 3.8 # only this job runner deploys docs and pypi package + DEPLOY_OS: ubuntu-latest + CONDA_BLD_PATH: /tmp/ci_conda_bld + +defaults: + run: + # login shell to source the conda hook in .bash_profile + shell: + bash -l {0} + +jobs: + ci: + runs-on: ${{ matrix.os }} + continue-on-error: ${{ matrix.experimental }} + strategy: + fail-fast: true + matrix: + py_ver: [3.9] + os: [ubuntu-latest] + experimental: [true] + + outputs: + # tarballs are py3X job-specific + conda-tarball: ${{ steps.conda-bld.outputs.conda-tarball }} + + steps: + + # ------------------------------------------------------------ + # 0. Print some basic github action info + - name: diagnostic info + run: | + echo "OS: ${{ matrix.os }}" + echo "Python: ${{ matrix.py_ver }}" + echo "Conda build path: $CONDA_BLD_PATH" + echo "Deploy OS: $DEPLOY_OS" + echo "Deploy Python: $DEPLOY_PY_VER" + echo "GA event name: ${{ github.event_name }}" + echo "GA ref: ${{ github.ref }}" + + # ------------------------------------------------------------ + # Step up miniconda + - name: Download Miniconda + uses: conda-incubator/setup-miniconda@059455a698430d8b68fa317268fa2e3da3492a98 + with: + miniconda-version: "latest" + python-version: ${{ matrix.py_ver }} + + # ------------------------------------------------------------ + # Get code + - name: Checkout code + uses: actions/checkout@v2 + + # ------------------------------------------------------------ + # Setup conda build environment and build package + # env defined here are just for convenience when writing bash commands + - name: Setup and build package + env: + OS: ${{ runner.os }} + PY_VER: ${{ matrix.py_ver }} + run: | + conda config --set always_yes yes --set changeps1 no + conda config --set bld_path $CONDA_BLD_PATH + conda install -n base -q conda-build + conda deactivate + echo "# ----------------BUILDING---------------------------------" + conda config --show | grep bld_path + conda info + conda-build ./conda_noarch -c https://conda.anaconda.org/conda-forge --verify + tarball=$(conda-build ./conda_noarch --output | tail -1) + echo "conda build tarball" $tarball + echo "{conda-tarball}={$tarball}" >> $GITHUB_OUTPUT + + # ------------------------------------------------------------ + # Create new conda env and install package locally + # Test installation worked + # Get black and pytest from pip because black version on conda lags behind + - name: Test installation + env: + PY_VER: ${{ matrix.py_ver }} + run: | + conda create -n env_$PY_VER python=$PY_VER $PACKAGE_NAME -c $CONDA_BLD_PATH -c conda-forge -c defaults + conda activate env_$PY_VER + conda info | grep active + pip install -r requirements-dev.txt + python -c "from pymer4.test_install import test_install; test_install()" + + # 4. 
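Because the `conda_noarch` recipe is marked `noarch: python`, this workflow builds a single platform-independent tarball instead of one package per OS/Python pair. The build step can be reproduced locally with roughly the following, assuming `conda-build` is available in the base environment:

```bash
# Mirror the "Setup and build package" step outside of CI
conda config --set bld_path /tmp/ci_conda_bld
conda install -n base -q -y conda-build
conda-build ./conda_noarch -c https://conda.anaconda.org/conda-forge --verify
tarball=$(conda-build ./conda_noarch --output | tail -1)
echo "conda build tarball: $tarball"
```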
Run code tests + - name: Run Test Suite + env: + PY_VER: ${{ matrix.py_ver }} + run: | + conda activate env_$PY_VER + conda info | grep active + black --version + black --check --verbose . + pytest pymer4/tests + + # 8a. Deploy package to main conda main channel + # Runs when a github release is created, but can also be triggered manually + - name: Conda main deploy + if: ${{ (github.event_name == 'release') || (inputs.upload_anaconda == 'main') }} + env: + ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} + run: | + echo 'Conda release on main channel' + conda install anaconda-client + anaconda -t "$ANACONDA_TOKEN" upload $CONDA_BLD_PATH/**/${PACKAGE_NAME}*.tar.bz2 -l "main" + + # OR ----- + + # 8b. Deploy package to pre-release conda channel + # Only runs when triggered manually + - name: Conda pre-release deploy + if: ${{ inputs.upload_anaconda == 'pre-release' }} + env: + ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} + run: | + echo 'Conda release on pre-release channel' + conda install anaconda-client + anaconda -t "$ANACONDA_TOKEN" upload $CONDA_BLD_PATH/**/${PACKAGE_NAME}*.tar.bz2 -l "pre-release" + \ No newline at end of file diff --git a/.github/workflows/Docs.yml b/.github/workflows/Docs.yml new file mode 100644 index 00000000..05876d90 --- /dev/null +++ b/.github/workflows/Docs.yml @@ -0,0 +1,71 @@ +name: Manual Deploy Docs +on: + workflow_dispatch: + +env: + PACKAGE_NAME: pymer4 + DEPLOY_PY_VER: 3.8 # only this job deploys docs, anaconda.org, pypi + DEPLOY_OS: ubuntu-latest + CONDA_BLD_PATH: /tmp/ci_conda_bld + +defaults: + run: + # login shell to source the conda hook in .bash_profile + shell: + bash -l {0} + +jobs: + ci: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + py_ver: [3.8] + os: [ubuntu-latest] + + steps: + + # ------------------------------------------------------------ + # Step up miniconda + - name: Download Miniconda + uses: conda-incubator/setup-miniconda@059455a698430d8b68fa317268fa2e3da3492a98 + with: + miniconda-version: "latest" + + # ------------------------------------------------------------ + # Get code + - name: Checkout code + uses: actions/checkout@v2 + + + # ------------------------------------------------------------ + # Setup conda build environment and build package + # env defined here are just for convenience when writing bash commands + - name: Install package + env: + PY_VER: ${{ matrix.py_ver }} + run: | + conda config --set always_yes yes --set changeps1 no + conda create -n pkg_test -c conda-forge python=$PY_VER 'r-lmerTest' 'r-emmeans' 'rpy2' + + conda activate pkg_test + pip install -r requirements-dev.txt + pip install . 
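Once the docs build below completes, the rendered site sits in `docs/_build/html`. A quick way to inspect it before the gh-pages deploy (the port choice is arbitrary):

```bash
# Serve the built documentation locally for inspection
python -m http.server 8000 --directory docs/_build/html
```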
+ + # ------------------------------------------------------------ + # Build docs + - name: Build Docs + run: | + conda activate pkg_test + python -c "from pymer4.test_install import test_install; test_install()" + cd docs && make clean && make html + touch _build/html/.nojekyll + + # Deploy docs + - name: Deploy docs + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + uses: crazy-max/ghaction-github-pages@v2 + with: + target_branch: gh-pages + build_dir: docs/_build/html diff --git a/.github/workflows/Install.yml b/.github/workflows/Install.yml new file mode 100644 index 00000000..1c65b75c --- /dev/null +++ b/.github/workflows/Install.yml @@ -0,0 +1,41 @@ +name: Test install conda pre-release +on: [workflow_dispatch] + +env: + PACKAGE_NAME: pymer4 + +defaults: + run: + # login shell to source the conda hook in .bash_profile + shell: + bash -l {0} + +jobs: + ci: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + py_ver: [3.8] + os: [ubuntu-latest, macos-latest, windows-latest] + + steps: + + # ------------------------------------------------------------ + # Step up miniconda + - name: Download Miniconda + uses: conda-incubator/setup-miniconda@059455a698430d8b68fa317268fa2e3da3492a98 + with: + miniconda-version: "latest" + python-version: ${{ matrix.py_ver }} + + # ------------------------------------------------------------ + # Setup conda build environment and build package + # env defined here are just for convenience when writing bash commands + - name: Install package from pre-release + run: | + conda config --set always_yes yes --set changeps1 no + conda create -n pkg_test -c 'ejolly/label/pre-release' -c conda-forge pymer4 + conda activate pkg_test + python -c "from pymer4.test_install import test_install; test_install()" + diff --git a/.github/workflows/Tests.yml b/.github/workflows/Tests.yml index ef1d9449..6ed5594c 100644 --- a/.github/workflows/Tests.yml +++ b/.github/workflows/Tests.yml @@ -9,11 +9,6 @@ on: - master - main workflow_dispatch: - inputs: - debug_enabled: - description: 'Run workflow with ssh debugging' - required: false - default: false schedule: - cron: '0 0 * * 0' @@ -33,7 +28,7 @@ jobs: strategy: fail-fast: true matrix: - py_ver: [3.7, 3.8, 3.9] + py_ver: [3.8, 3.9, '3.10', 3.11] os: [ubuntu-latest] steps: @@ -71,4 +66,13 @@ jobs: run: | conda activate pkg_test python -c "from pymer4.test_install import test_install; test_install()" + black --version + black --check --verbose . pytest pymer4/tests + + # ------------------------------------------------------------ + # Build docs + - name: Build Docs + run: | + conda activate pkg_test + cd docs && make clean && make html diff --git a/.github/workflows/manual_deploy_docs.yml b/.github/workflows/manual_deploy_docs.yml deleted file mode 100644 index 4d994d84..00000000 --- a/.github/workflows/manual_deploy_docs.yml +++ /dev/null @@ -1,124 +0,0 @@ -name: manual deploy docs -on: [workflow_dispatch] - -env: - PACKAGE_NAME: pymer4 - DEPLOY_PY_VER: 3.8 # only this job deploys docs, anaconda.org, pypi - DEPLOY_OS: ubuntu-latest - CONDA_BLD_PATH: /tmp/ci_conda_bld - -defaults: - run: - # login shell to source the conda hook in .bash_profile - shell: - bash -l {0} - -jobs: - ci: - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - py_ver: [3.7, 3.8, 3.9] - os: [ubuntu-latest, macos-11] - - outputs: - # tarballs are py3X job-specific - conda-tarball: ${{ steps.conda-bld.outputs.conda-tarball }} - - steps: - - # ------------------------------------------------------------ - # 0. 
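The Tests workflow above now runs the formatting check, the test suite, and a docs build on every push or PR against master. Running the same gate locally before pushing saves CI round-trips; a sketch assuming the `pkg_test` environment from that workflow already exists:

```bash
# The same gate Tests.yml runs: formatting check, tests, then a docs build
conda activate pkg_test
black --check --verbose .
pytest pymer4/tests
cd docs && make clean && make html
```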
Print some basic github action info - - name: diagnostic info - run: | - echo "OS: ${{ matrix.os }}" - echo "Python: ${{ matrix.py_ver }}" - echo "Conda build path: $CONDA_BLD_PATH" - echo "Deploy OS: $DEPLOY_OS" - echo "Deploy Python: $DEPLOY_PY_VER" - echo "GA event name: ${{ github.event_name }}" - echo "GA ref: ${{ github.ref }}" - - # ------------------------------------------------------------ - # 1. Grab git repo, setup miniconda environment and packages required to build - - uses: actions/checkout@v2 - - name: Setup Miniconda + Checkout code - run: | - echo "GIT_ABBREV_COMMIT=_g${GITHUB_SHA:0:8}" >> $GITHUB_ENV - - if [[ ${{ runner.os }} == Linux ]]; then \ - miniconda_url='https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh'; \ - fi - if [[ ${{ runner.os }} == macOS ]]; then \ - miniconda_url='https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh'; \ - fi - - wget $miniconda_url -O $HOME/miniconda.sh - bash ~/miniconda.sh -b -p $HOME/miniconda - hash -r - $HOME/miniconda/bin/conda shell.bash hook >> ~/.bash_profile - source ~/.bash_profile - - hash -r - conda config --set always_yes yes --set changeps1 no - conda config --set bld_path $CONDA_BLD_PATH - conda install -n base -q conda-build conda-verify anaconda-client - echo "# ------------------------------------------------------------" - conda info -a - - # ------------------------------------------------------------ - # 2. Build the conda package and tarballs for each OS - # env defined here are just for convenience when writing bash commands - - name: Build package - id: conda-bld - env: - OS: ${{ runner.os }} - PY_VER: ${{ matrix.py_ver }} - run: | - conda build --python=$PY_VER -c conda-forge -c defaults conda - tarball=$(conda build --python=$PY_VER conda --output | tail -1) - if [[ $OS == "Linux" ]]; then \ - conda convert -p win-64 -o $CONDA_BLD_PATH $tarball; \ - fi - echo "conda build tarball" $tarball - echo "::set-output name=conda-tarball::$tarball" - - # ------------------------------------------------------------ - # 3. Create new conda env and install package locally - # Test installation worked - # Get black and pytest from pip because black version on conda lags behind - - name: Create and test fresh installation - env: - PY_VER: ${{ matrix.py_ver }} - run: | - conda create -n env_$PY_VER python=$PY_VER $PACKAGE_NAME 'blas=*=mkl' -c $CONDA_BLD_PATH -c conda-forge -c defaults - conda activate env_$PY_VER - conda install black pytest-cov -c conda-forge - conda list - lscpu - python -c 'import numpy; numpy.show_config()' - python -c "from pymer4.test_install import test_install; test_install()" - - # 5a. Build docs (only for 3.8 which handles deployment) - # Will also run on PRs which serves as another layer of testing - - name: Build docs - env: - PY_VER: ${{ matrix.py_ver }} - if: ${{ matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS }} - run: | - conda activate env_$PY_VER - conda install sphinx sphinx_bootstrap_theme sphinx-gallery -c conda-forge - cd docs && make clean && make html - touch _build/html/.nojekyll - - # 5b. 
Deploy docs (only for 3.8 which handles deployment) - # Only runs when a PR is merged into master or there's a direct push to master - - name: Deploy docs - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: ${{ (matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS) && ((github.event_name == 'push' && github.ref == 'refs/heads/master') || (github.event.pull_request.merged && github.base_ref == 'master' && github.head_ref == 'dev')) }} - uses: crazy-max/ghaction-github-pages@v2 - with: - target_branch: gh-pages - build_dir: docs/_build/html diff --git a/.github/workflows/manual_upload_conda.yml b/.github/workflows/manual_upload_conda.yml deleted file mode 100644 index 0f6a0a1e..00000000 --- a/.github/workflows/manual_upload_conda.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: manual release conda -on: [workflow_dispatch] - -env: - PACKAGE_NAME: pymer4 - DEPLOY_PY_VER: 3.8 # only this job deploys docs, anaconda.org, pypi - DEPLOY_OS: ubuntu-latest - CONDA_BLD_PATH: /tmp/ci_conda_bld - -defaults: - run: - # login shell to source the conda hook in .bash_profile - shell: - bash -l {0} - -jobs: - ci: - runs-on: ${{ matrix.os }} - strategy: - matrix: - py_ver: [3.7, 3.8, 3.9] - os: [ubuntu-latest, macos-11] # Intel macs - - outputs: - # tarballs are py3X job-specific - conda-tarball: ${{ steps.conda-bld.outputs.conda-tarball }} - - steps: - - # ------------------------------------------------------------ - # 0. Print some basic github action info - - name: diagnostic info - run: | - echo "OS: ${{ matrix.os }}" - echo "Python: ${{ matrix.py_ver }}" - echo "Conda build path: $CONDA_BLD_PATH" - echo "Deploy OS: $DEPLOY_OS" - echo "Deploy Python: $DEPLOY_PY_VER" - echo "GA event name: ${{ github.event_name }}" - echo "GA ref: ${{ github.ref }}" - - # ------------------------------------------------------------ - # 1. Grab git repo, setup miniconda environment and packages required to build - - uses: actions/checkout@v2 - - name: Setup Miniconda + Checkout code - run: | - echo "GIT_ABBREV_COMMIT=_g${GITHUB_SHA:0:8}" >> $GITHUB_ENV - - if [[ ${{ runner.os }} == Linux ]]; then \ - miniconda_url='https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh'; \ - fi - if [[ ${{ runner.os }} == macOS ]]; then \ - miniconda_url='https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh'; \ - fi - - wget $miniconda_url -O $HOME/miniconda.sh - bash ~/miniconda.sh -b -p $HOME/miniconda - hash -r - $HOME/miniconda/bin/conda shell.bash hook >> ~/.bash_profile - source ~/.bash_profile - - hash -r - conda config --set always_yes yes --set changeps1 no - conda config --set bld_path $CONDA_BLD_PATH - conda install -n base -q conda-build conda-verify anaconda-client - echo "# ------------------------------------------------------------" - conda info -a - - # ------------------------------------------------------------ - # 2. Build the conda package and tarballs for each OS - # env defined here are just for convenience when writing bash commands - - name: Build package - id: conda-bld - env: - OS: ${{ runner.os }} - PY_VER: ${{ matrix.py_ver }} - run: | - conda build --python=$PY_VER -c conda-forge -c defaults conda - tarball=$(conda build --python=$PY_VER conda --output | tail -1) - if [[ $OS == "Linux" ]]; then \ - conda convert -p win-64 -o $CONDA_BLD_PATH $tarball; \ - fi - echo "conda build tarball" $tarball - echo "::set-output name=conda-tarball::$tarball" - - - # 8a. 
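Note that no Windows runner builds packages in either the old or the new workflows: win-64 packages come from converting the Linux build with `conda convert`. A minimal sketch of that conversion, assuming `$tarball` points at a linux-64 build:

```bash
# Cross-convert the linux-64 tarball to a win-64 package in the build path
conda convert -p win-64 -o /tmp/ci_conda_bld "$tarball"
```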
Deploy package to main conda channel (only for 3.8 which handles deployment) but each OS uploads their own and linux uploads for linux and windows - - name: Conda main deploy - env: - ANACONDA_TOKEN: ${{ secrets.ANACONDA_TOKEN }} - run: | - echo 'Conda release on main channel' - anaconda -t "$ANACONDA_TOKEN" upload $CONDA_BLD_PATH/**/${PACKAGE_NAME}*.tar.bz2 -l "main" diff --git a/.github/workflows/manual_upload_pypi.yml b/.github/workflows/manual_upload_pypi.yml deleted file mode 100644 index 40e3b8e2..00000000 --- a/.github/workflows/manual_upload_pypi.yml +++ /dev/null @@ -1,99 +0,0 @@ -name: manual release pypi -on: [workflow_dispatch] - -env: - PACKAGE_NAME: pymer4 - DEPLOY_PY_VER: 3.8 # only this job deploys docs, anaconda.org, pypi - DEPLOY_OS: ubuntu-latest - CONDA_BLD_PATH: /tmp/ci_conda_bld - -defaults: - run: - # login shell to source the conda hook in .bash_profile - shell: - bash -l {0} - -jobs: - ci: - runs-on: ${{ matrix.os }} - strategy: - matrix: - py_ver: [3.7, 3.8, 3.9] - os: [ubuntu-latest, macos-11] # Intel macs - - outputs: - # tarballs are py3X job-specific - conda-tarball: ${{ steps.conda-bld.outputs.conda-tarball }} - - steps: - - # ------------------------------------------------------------ - # 0. Print some basic github action info - - name: diagnostic info - run: | - echo "OS: ${{ matrix.os }}" - echo "Python: ${{ matrix.py_ver }}" - echo "Conda build path: $CONDA_BLD_PATH" - echo "Deploy OS: $DEPLOY_OS" - echo "Deploy Python: $DEPLOY_PY_VER" - echo "GA event name: ${{ github.event_name }}" - echo "GA ref: ${{ github.ref }}" - - # ------------------------------------------------------------ - # 1. Grab git repo, setup miniconda environment and packages required to build - - uses: actions/checkout@v2 - - name: Setup Miniconda + Checkout code - run: | - echo "GIT_ABBREV_COMMIT=_g${GITHUB_SHA:0:8}" >> $GITHUB_ENV - - if [[ ${{ runner.os }} == Linux ]]; then \ - miniconda_url='https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh'; \ - fi - if [[ ${{ runner.os }} == macOS ]]; then \ - miniconda_url='https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh'; \ - fi - - wget $miniconda_url -O $HOME/miniconda.sh - bash ~/miniconda.sh -b -p $HOME/miniconda - hash -r - $HOME/miniconda/bin/conda shell.bash hook >> ~/.bash_profile - source ~/.bash_profile - - hash -r - conda config --set always_yes yes --set changeps1 no - conda config --set bld_path $CONDA_BLD_PATH - conda install -n base -q conda-build conda-verify anaconda-client - echo "# ------------------------------------------------------------" - conda info -a - - # ------------------------------------------------------------ - # 2. Build the conda package and tarballs for each OS - # env defined here are just for convenience when writing bash commands - - name: Build package - id: conda-bld - env: - OS: ${{ runner.os }} - PY_VER: ${{ matrix.py_ver }} - run: | - conda build --python=$PY_VER -c conda-forge -c defaults conda - tarball=$(conda build --python=$PY_VER conda --output | tail -1) - if [[ $OS == "Linux" ]]; then \ - conda convert -p win-64 -o $CONDA_BLD_PATH $tarball; \ - fi - echo "conda build tarball" $tarball - echo "::set-output name=conda-tarball::$tarball" - - # 6. Build package for PyPi (only for 3.8 linux which handles deployment) - - name: Build for Pypi - if: ${{ matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS }} - run: | - conda activate env_$PY_VER - pip install build - python -m build --sdist --wheel --outdir dist/ - - # 7. 
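The PyPI artifacts are ordinary sdist/wheel builds, so they can be produced and sanity-checked locally before a release; `twine check` here is an extra step beyond what the workflow runs:

```bash
# Build the sdist/wheel exactly as the workflow does, then validate metadata
pip install build twine
python -m build --sdist --wheel --outdir dist/
twine check dist/*
```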
Deploy package to Pypi (only need to do this once on linux) - - name: PyPi deploy - if: ${{ matrix.py_ver == env.DEPLOY_PY_VER && matrix.os == env.DEPLOY_OS }} - uses: pypa/gh-action-pypi-publish@master - with: - password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.gitignore b/.gitignore index 7b8cab6c..7050fdd3 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ *.DS_Store *.ipynb_checkpoints *.pyc -build/* +build/ docs/_build/ docs/modules/ .cache/* @@ -19,7 +19,10 @@ dev/ pytest.ini env/* conda/build/* +conda_noarch/build/* .pytest_cache .tox .venv environment.yml +*.pickle +*.joblib diff --git a/README.md b/README.md index 411632ac..ac772c40 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ [![Downloads](https://pepy.tech/badge/pymer4)](https://pepy.tech/project/pymer4) [![DOI](http://joss.theoj.org/papers/10.21105/joss.00862/status.svg)](https://doi.org/10.21105/joss.00862) [![DOI](https://zenodo.org/badge/90598701.svg)](https://zenodo.org/record/1523205) -![Python Versions](https://img.shields.io/badge/python-3.6%20%7C%203.7%20%7C%203.8-blue) +![Python Versions](https://img.shields.io/badge/python-3.8%20%7C%203.9%20%7C%203.10%20%7C%203.11-blue) [![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/ejolly/pymer4/issues) # Pymer4 diff --git a/conda/meta.yaml b/conda/meta.yaml index ea74e9df..6bb0f8b6 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -11,16 +11,12 @@ source: build: script: pip install . -vv --no-deps - # abandoned conda build GIT_BUILD_STRING env variable - string: py{{environ.get("CONDA_PY", "XX")}}{{environ.get("GIT_ABBREV_COMMIT", "no_git_abbrev_commit") }}_{{ environ.get("PKG_BUILDNUM", "no_pkg_buildnum") }} + string: py{{environ.get("CONDA_PY", "XX")}}{{environ.get("GIT_DESCRIBE_HASH", "no_git_abbrev_commit") }}_{{ environ.get("PKG_BUILDNUM", "no_pkg_buildnum") }} requirements: host: - python {{ python }} - # - r-base - # - r-lmerTest - # - r-base - pip - numpy >=1.20 @@ -36,11 +32,7 @@ requirements: - patsy - joblib - scipy - - deepdish - scikit-learn - # {% for req in data.get('install_requires', []) %} - # - {{ req }} - # {% endfor %} test: imports: @@ -59,6 +51,6 @@ about: doc_url: dev_url: -# extra: -# recipe-maintainers: -# - your-github-id-here +extra: + recipe-maintainers: + - ejolly diff --git a/conda_noarch/meta.yaml b/conda_noarch/meta.yaml new file mode 100644 index 00000000..74251cf4 --- /dev/null +++ b/conda_noarch/meta.yaml @@ -0,0 +1,56 @@ +{% set name = "pymer4" %} +{% set data = load_setup_py_data() %} + +package: + name: "{{ name|lower }}" + version: "{{ data.get('version') }}" + +source: + git_url: ../ # to enable GIT_X_Y env vars + path_url: ../ # so conda build will grab source to read version + +build: + noarch: python + script: pip install . 
-vv --no-deps + string: {{environ.get("GIT_DESCRIBE_HASH", "no_git_abbrev_commit") }}_{{ environ.get("PKG_BUILDNUM", "no_pkg_buildnum") }} + +requirements: + + host: + - pip + - numpy >=1.20 + + run: + - python + - r-lmerTest + - r-emmeans + - pandas >=1.1.0 + - numpy >=1.20 + - rpy2 >=3.5.3 + - seaborn + - matplotlib + - patsy + - joblib + - scipy + - scikit-learn + +test: + imports: + - pymer4 + - pymer4.models + requires: + - pytest + - black + +about: + home: "https://eshinjolly.com/pymer4/" + license: MIT + license_family: MIT + license_file: + summary: "pymer4: all the convenience of lme4 in python" + doc_url: + dev_url: + +extra: + recipe-maintainers: + - ejolly diff --git a/docs/auto_examples/auto_examples_jupyter.zip b/docs/auto_examples/auto_examples_jupyter.zip index 6b9900a3..8951829a 100644 Binary files a/docs/auto_examples/auto_examples_jupyter.zip and b/docs/auto_examples/auto_examples_jupyter.zip differ diff --git a/docs/auto_examples/auto_examples_python.zip b/docs/auto_examples/auto_examples_python.zip index 7a0ca359..059425be 100644 Binary files a/docs/auto_examples/auto_examples_python.zip and b/docs/auto_examples/auto_examples_python.zip differ diff --git a/docs/auto_examples/example_01_basic_usage.ipynb b/docs/auto_examples/example_01_basic_usage.ipynb index afbcc247..55cd981d 100644 --- a/docs/auto_examples/example_01_basic_usage.ipynb +++ b/docs/auto_examples/example_01_basic_usage.ipynb @@ -15,7 +15,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\n1. Basic Usage Guide\n====================\n" + "\n# 1. Basic Usage Guide\n" ] }, { @@ -40,7 +40,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Standard regression models\n------------------------------------\nFitting a standard regression model is accomplished using the :code:`Lm` model class in :code:`pymer4`. All we need to do is initialize a model with a formula, some data, and call its :code:`.fit()` method.\n\nBy default the output of :code:`.fit()` has been formated to be a blend of :code:`summary()` in R and :code:`.summary()` from `statsmodels `_. This includes metadata about the model, data, and overall fit as well as estimates and inference results of model terms.\n\n" + "## Standard regression models\nFitting a standard regression model is accomplished using the :code:`Lm` model class in :code:`pymer4`. All we need to do is initialize a model with a formula, some data, and call its :code:`.fit()` method.\n\nBy default the output of :code:`.fit()` has been formated to be a blend of :code:`summary()` in R and :code:`.summary()` from [statsmodels](http://www.statsmodels.org/dev/index.html/). 
This includes metadata about the model, data, and overall fit as well as estimates and inference results of model terms.\n\n" ] }, { @@ -123,7 +123,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Robust and WLS estimation\n-------------------------\n:code:`Lm` models can also perform inference using robust-standard errors or perform weight-least-squares (experimental feature) for models with categorical predictors (equivalent to Welch's t-test).\n\n" + "## Robust and WLS estimation\n:code:`Lm` models can also perform inference using robust-standard errors or perform weight-least-squares (experimental feature) for models with categorical predictors (equivalent to Welch's t-test).\n\n" ] }, { @@ -152,7 +152,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Multi-level models\n----------------------------\nFitting a multi-level model works similarly and actually just calls :code:`lmer` or :code:`glmer` in R behind the scenes. The corresponding output is also formatted to be very similar to output of :code:`summary()` in R.\n\n" + "## Multi-level models\nFitting a multi-level model works similarly and actually just calls :code:`lmer` or :code:`glmer` in R behind the scenes. The corresponding output is also formatted to be very similar to output of :code:`summary()` in R.\n\n" ] }, { @@ -253,7 +253,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Two-stage summary statistics models\n-----------------------------------\nFitting :code:`Lm2` models are also very similar\n\n" + "## Two-stage summary statistics models\nFitting :code:`Lm2` models are also very similar\n\n" ] }, { @@ -300,7 +300,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Model Persistence\n-----------------\nAll pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using the `HDF5 format `_ using the `deepdish `_ python library. This ensures near universal accesibility on different machines and operating systems. Therefore all filenames must end with :code:`.h5` or :code:`.hdf5`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`.\n\nTo persist models you can use the dedicated :code:`save_model` and :code:`load_model` functions from the :code:`pymer4.io` module\n\n" + "## Model Persistence\nAll pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using [Joblib](https://joblib.readthedocs.io/en/latest/persistence.html#persistence) Therefore all filenames must end with :code:`.joblib`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`.\n\nPrior to version 0.8.1 models were saved to HDF5 files using [deepdish](https://github.com/uchicago-cs/deepdish/) but this library is no longer maintained. 
If you have old models saved as :code:`.h5` or :code:`.hdf5` files you should use the same version of pymer4 that you used to estimate those models.\n\nTo persist models you can use the dedicated :code:`save_model` and :code:`load_model` functions from the :code:`pymer4.io` module\n\n" ] }, { @@ -311,14 +311,14 @@ }, "outputs": [], "source": [ - "# Import functions\nfrom pymer4.io import save_model, load_model\n\n# Save the Lm2 model above\nsave_model(model, \"mymodel.h5\")\n# Load it back up\nmodel = load_model(\"mymodel.h5\")\n# Check that it looks the same\nprint(model)" + "# Import functions\nfrom pymer4.io import save_model, load_model\n\n# Save the Lm2 model above\nsave_model(model, \"mymodel.joblib\")\n# Load it back up\nmodel = load_model(\"mymodel.joblib\")\n# Check that it looks the same\nprint(model)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Wrap Up\n-------\nThis was a quick overview of the 3 major model classes in :code:`pymer4`. However, it's highly recommended to check out the API to see *all* the features and options that each model class has including things like permutation-based inference (:code:`Lm` and :code:`Lm2` models) and fine-grain control of optimizer and tolerance settings (:code:`Lmer` models).\n\n" + "## Wrap Up\nThis was a quick overview of the 3 major model classes in :code:`pymer4`. However, it's highly recommended to check out the API to see *all* the features and options that each model class has including things like permutation-based inference (:code:`Lm` and :code:`Lm2` models) and fine-grain control of optimizer and tolerance settings (:code:`Lmer` models).\n\n" ] } ], @@ -338,7 +338,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3" + "version": "3.8.15" } }, "nbformat": 4, diff --git a/docs/auto_examples/example_01_basic_usage.py b/docs/auto_examples/example_01_basic_usage.py index 59ba41b6..88d908f4 100644 --- a/docs/auto_examples/example_01_basic_usage.py +++ b/docs/auto_examples/example_01_basic_usage.py @@ -156,7 +156,9 @@ ############################################################################### # Model Persistence # ----------------- -# All pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using the `HDF5 format `_ using the `deepdish `_ python library. This ensures near universal accesibility on different machines and operating systems. Therefore all filenames must end with :code:`.h5` or :code:`.hdf5`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`. +# All pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using `Joblib `_ Therefore all filenames must end with :code:`.joblib`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`. +# +# Prior to version 0.8.1 models were saved to HDF5 files using `deepdish `_ but this library is no longer maintained. If you have old models saved as :code:`.h5` or :code:`.hdf5` files you should use the same version of pymer4 that you used to estimate those models. 
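The joblib round-trip is easy to smoke-test from the shell. A minimal sketch, assuming the sample data shipped with the package is reachable via `pymer4.utils.get_resource_path` as in the tutorials:

```bash
# Fit a small model, persist it with joblib, and reload it
python - <<'PY'
import os
import pandas as pd
from pymer4.utils import get_resource_path
from pymer4.models import Lm
from pymer4.io import save_model, load_model

df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
model = Lm("DV ~ IV2", data=df)
model.fit()
save_model(model, "mymodel.joblib")
same_model = load_model("mymodel.joblib")
print(same_model)
PY
```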
# # To persist models you can use the dedicated :code:`save_model` and :code:`load_model` functions from the :code:`pymer4.io` module @@ -164,9 +166,9 @@ from pymer4.io import save_model, load_model # Save the Lm2 model above -save_model(model, "mymodel.h5") +save_model(model, "mymodel.joblib") # Load it back up -model = load_model("mymodel.h5") +model = load_model("mymodel.joblib") # Check that it looks the same print(model) diff --git a/docs/auto_examples/example_01_basic_usage.py.md5 b/docs/auto_examples/example_01_basic_usage.py.md5 index e3e65e03..f44ff187 100644 --- a/docs/auto_examples/example_01_basic_usage.py.md5 +++ b/docs/auto_examples/example_01_basic_usage.py.md5 @@ -1 +1 @@ -4a918132ecf9367d13d96d3823c35cb8 \ No newline at end of file +b56f42c7a0051017f9fba41a12809193 \ No newline at end of file diff --git a/docs/auto_examples/example_01_basic_usage.rst b/docs/auto_examples/example_01_basic_usage.rst index 3ef6afeb..6312ad7d 100644 --- a/docs/auto_examples/example_01_basic_usage.rst +++ b/docs/auto_examples/example_01_basic_usage.rst @@ -1,17 +1,28 @@ + +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "auto_examples/example_01_basic_usage.py" +.. LINE NUMBERS ARE GIVEN BELOW. + .. only:: html .. note:: :class: sphx-glr-download-link-note - Click :ref:`here ` to download the full example code - .. rst-class:: sphx-glr-example-title + Click :ref:`here ` + to download the full example code + +.. rst-class:: sphx-glr-example-title - .. _sphx_glr_auto_examples_example_01_basic_usage.py: +.. _sphx_glr_auto_examples_example_01_basic_usage.py: 1. Basic Usage Guide ==================== +.. GENERATED FROM PYTHON SOURCE LINES 7-15 + :code:`pymer4` comes with sample data for testing purposes which we'll utilize for most of the tutorials. This sample data has: @@ -21,6 +32,7 @@ This sample data has: Let's check it out below: +.. GENERATED FROM PYTHON SOURCE LINES 15-27 .. code-block:: default @@ -42,8 +54,6 @@ Let's check it out below: .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Group IV1 DV_l DV IV2 IV3 @@ -56,12 +66,15 @@ Let's check it out below: +.. GENERATED FROM PYTHON SOURCE LINES 28-33 + Standard regression models ------------------------------------ Fitting a standard regression model is accomplished using the :code:`Lm` model class in :code:`pymer4`. All we need to do is initialize a model with a formula, some data, and call its :code:`.fit()` method. By default the output of :code:`.fit()` has been formated to be a blend of :code:`summary()` in R and :code:`.summary()` from `statsmodels `_. This includes metadata about the model, data, and overall fit as well as estimates and inference results of model terms. +.. GENERATED FROM PYTHON SOURCE LINES 33-43 .. code-block:: default @@ -81,8 +94,6 @@ By default the output of :code:`.fit()` has been formated to be a blend of :code .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Formula: DV~IV1+IV2 @@ -105,8 +116,11 @@ By default the output of :code:`.fit()` has been formated to be a blend of :code +.. GENERATED FROM PYTHON SOURCE LINES 44-45 + All information about the model as well as data, residuals, estimated coefficients, etc are saved as attributes and can be accessed like this: +.. GENERATED FROM PYTHON SOURCE LINES 45-49 .. code-block:: default @@ -120,8 +134,6 @@ All information about the model as well as data, residuals, estimated coefficien .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none 5061.3629635837815 @@ -129,6 +141,7 @@ All information about the model as well as data, residuals, estimated coefficien +.. GENERATED FROM PYTHON SOURCE LINES 50-54 .. code-block:: default @@ -142,8 +155,6 @@ All information about the model as well as data, residuals, estimated coefficien .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none [-3.79994762 6.94860187 -8.32917613 1.19463387 -5.8271851 -6.88457421 @@ -152,8 +163,11 @@ All information about the model as well as data, residuals, estimated coefficien +.. GENERATED FROM PYTHON SOURCE LINES 55-56 + A copy of the dataframe used to estimate the model with added columns for residuals and fits are are available at :code:`model.data`. Residuals and fits can also be directly accessed using :code:`model.residuals` and :code:`model.fits` respectively +.. GENERATED FROM PYTHON SOURCE LINES 56-60 .. code-block:: default @@ -167,8 +181,6 @@ A copy of the dataframe used to estimate the model with added columns for residu .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Group IV1 DV_l DV IV2 IV3 fits residuals @@ -181,8 +193,11 @@ A copy of the dataframe used to estimate the model with added columns for residu +.. GENERATED FROM PYTHON SOURCE LINES 61-62 + This makes it easy to assess overall model fit visually, for example using seaborn +.. GENERATED FROM PYTHON SOURCE LINES 62-69 .. code-block:: default @@ -196,26 +211,28 @@ This makes it easy to assess overall model fit visually, for example using seabo -.. image:: /auto_examples/images/sphx_glr_example_01_basic_usage_001.png - :alt: example 01 basic usage - :class: sphx-glr-single-img +.. image-sg:: /auto_examples/images/sphx_glr_example_01_basic_usage_001.png + :alt: example 01 basic usage + :srcset: /auto_examples/images/sphx_glr_example_01_basic_usage_001.png + :class: sphx-glr-single-img .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - + +.. GENERATED FROM PYTHON SOURCE LINES 70-73 + Robust and WLS estimation ------------------------- :code:`Lm` models can also perform inference using robust-standard errors or perform weight-least-squares (experimental feature) for models with categorical predictors (equivalent to Welch's t-test). +.. GENERATED FROM PYTHON SOURCE LINES 73-77 .. code-block:: default @@ -229,8 +246,6 @@ Robust and WLS estimation .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Formula: DV~IV1+IV2 @@ -253,6 +268,7 @@ Robust and WLS estimation +.. GENERATED FROM PYTHON SOURCE LINES 78-86 .. code-block:: default @@ -270,8 +286,6 @@ Robust and WLS estimation .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Formula: DV~IV3 @@ -293,10 +307,13 @@ Robust and WLS estimation +.. GENERATED FROM PYTHON SOURCE LINES 87-90 + Multi-level models ---------------------------- Fitting a multi-level model works similarly and actually just calls :code:`lmer` or :code:`glmer` in R behind the scenes. The corresponding output is also formatted to be very similar to output of :code:`summary()` in R. +.. GENERATED FROM PYTHON SOURCE LINES 90-100 .. code-block:: default @@ -316,24 +333,25 @@ Fitting a multi-level model works similarly and actually just calls :code:`lmer` .. rst-class:: sphx-glr-script-out - Out: - .. 
code-block:: none + Model failed to converge with max|grad| = 0.00358015 (tol = 0.002, component 1) + + Linear mixed model fit by REML [’lmerMod’] Formula: DV~IV2+(IV2|Group) Family: gaussian Inference: parametric Number of observations: 564 Groups: {'Group': 47.0} - Log-likelihood: -2249.281 AIC: 4498.562 + Log-likelihood: -2249.281 AIC: 4510.562 Random effects: Name Var Std - Group (Intercept) 203.474 14.264 + Group (Intercept) 203.390 14.261 Group IV2 0.136 0.369 - Residual 121.535 11.024 + Residual 121.537 11.024 IV1 IV2 Corr Group (Intercept) IV2 -0.585 @@ -341,14 +359,17 @@ Fitting a multi-level model works similarly and actually just calls :code:`lmer` Fixed effects: Estimate 2.5_ci 97.5_ci SE DF T-stat P-val Sig - (Intercept) 10.301 4.805 15.797 2.804 20.179 3.674 0.001 ** - IV2 0.682 0.556 0.808 0.064 42.389 10.598 0.000 *** + (Intercept) 10.300 4.806 15.795 2.804 20.183 3.674 0.001 ** + IV2 0.682 0.556 0.808 0.064 42.402 10.599 0.000 *** +.. GENERATED FROM PYTHON SOURCE LINES 101-102 + Similar to :code:`Lm` models, :code:`Lmer` models save details in model attributes and have additional methods that can be called using the same syntax as described above. +.. GENERATED FROM PYTHON SOURCE LINES 102-106 .. code-block:: default @@ -362,17 +383,16 @@ Similar to :code:`Lm` models, :code:`Lmer` models save details in model attribut .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Estimate 2.5_ci 97.5_ci SE DF T-stat P-val Sig - (Intercept) 10.301072 4.805390 15.796755 2.803971 20.178790 3.673744 1.488978e-03 ** - IV2 0.682124 0.555968 0.808279 0.064366 42.388551 10.597508 1.720351e-13 *** + (Intercept) 10.300430 4.805524 15.795335 2.803575 20.182531 3.674034 1.487605e-03 ** + IV2 0.682128 0.555987 0.808268 0.064359 42.402062 10.598847 1.706855e-13 *** +.. GENERATED FROM PYTHON SOURCE LINES 107-113 .. code-block:: default @@ -388,20 +408,19 @@ Similar to :code:`Lm` models, :code:`Lmer` models save details in model attribut .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none (Intercept) IV2 - 0 4.481913 0.885171 - 1 17.992555 0.622114 - 2 8.705373 0.838071 - 3 10.142647 0.865350 - 4 10.072354 0.182063 + 1 4.482095 0.885138 + 2 17.991023 0.622143 + 3 8.706144 0.838055 + 4 10.143487 0.865341 + 5 10.071328 0.182101 +.. GENERATED FROM PYTHON SOURCE LINES 114-118 .. code-block:: default @@ -415,22 +434,23 @@ Similar to :code:`Lm` models, :code:`Lmer` models save details in model attribut .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - (Intercept) IV2 - 0 -5.819160 0.203048 - 1 7.691483 -0.060010 - 2 -1.595700 0.155947 - 3 -0.158426 0.183227 - 4 -0.228718 -0.500060 + X.Intercept. IV2 + 1 -5.818335 0.203011 + 2 7.690593 -0.059985 + 3 -1.594286 0.155927 + 4 -0.156943 0.183213 + 5 -0.229102 -0.500026 +.. GENERATED FROM PYTHON SOURCE LINES 119-120 + :code:`Lmer` models also have some basic plotting abilities that :code:`Lm` models do not +.. GENERATED FROM PYTHON SOURCE LINES 120-124 .. code-block:: default @@ -441,24 +461,26 @@ Similar to :code:`Lm` models, :code:`Lmer` models save details in model attribut -.. image:: /auto_examples/images/sphx_glr_example_01_basic_usage_002.png - :alt: example 01 basic usage - :class: sphx-glr-single-img +.. image-sg:: /auto_examples/images/sphx_glr_example_01_basic_usage_002.png + :alt: example 01 basic usage + :srcset: /auto_examples/images/sphx_glr_example_01_basic_usage_002.png + :class: sphx-glr-single-img .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - + + +.. 
GENERATED FROM PYTHON SOURCE LINES 125-126 Plot coefficients for each group/cluster as separate regressions +.. GENERATED FROM PYTHON SOURCE LINES 126-128 .. code-block:: default @@ -467,28 +489,32 @@ Plot coefficients for each group/cluster as separate regressions -.. image:: /auto_examples/images/sphx_glr_example_01_basic_usage_003.png - :alt: example 01 basic usage - :class: sphx-glr-single-img +.. image-sg:: /auto_examples/images/sphx_glr_example_01_basic_usage_003.png + :alt: example 01 basic usage + :srcset: /auto_examples/images/sphx_glr_example_01_basic_usage_003.png + :class: sphx-glr-single-img .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - + + +.. GENERATED FROM PYTHON SOURCE LINES 129-130 Because :code:`Lmer` models rely on R, they have also some extra arguments to the :code:`.fit()` method for controlling things like optimizer behavior, as well as additional methods such for post-hoc tests and ANOVAs. See tutorial 2 for information about this functionality. +.. GENERATED FROM PYTHON SOURCE LINES 132-135 + Two-stage summary statistics models ----------------------------------- Fitting :code:`Lm2` models are also very similar +.. GENERATED FROM PYTHON SOURCE LINES 135-145 .. code-block:: default @@ -508,10 +534,10 @@ Fitting :code:`Lm2` models are also very similar .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none + /Users/Esh/Documents/pypackages/pymer4/pymer4/stats.py:657: RuntimeWarning: invalid value encountered in double_scalars + return 1 - (rss / tss) Formula: DV~IV2 Family: gaussian @@ -529,8 +555,11 @@ Fitting :code:`Lm2` models are also very similar +.. GENERATED FROM PYTHON SOURCE LINES 146-147 + Like :code:`Lmer` models, :code:`Lm2` models also store group/cluster level estimates and have some basic plotting functionality +.. GENERATED FROM PYTHON SOURCE LINES 147-151 .. code-block:: default @@ -544,8 +573,6 @@ Like :code:`Lmer` models, :code:`Lm2` models also store group/cluster level esti .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none Intercept IV2 @@ -559,6 +586,7 @@ Like :code:`Lmer` models, :code:`Lm2` models also store group/cluster level esti +.. GENERATED FROM PYTHON SOURCE LINES 152-156 .. code-block:: default @@ -569,28 +597,32 @@ Like :code:`Lmer` models, :code:`Lm2` models also store group/cluster level esti -.. image:: /auto_examples/images/sphx_glr_example_01_basic_usage_004.png - :alt: example 01 basic usage - :class: sphx-glr-single-img +.. image-sg:: /auto_examples/images/sphx_glr_example_01_basic_usage_004.png + :alt: example 01 basic usage + :srcset: /auto_examples/images/sphx_glr_example_01_basic_usage_004.png + :class: sphx-glr-single-img .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none - + + +.. GENERATED FROM PYTHON SOURCE LINES 157-164 Model Persistence ----------------- -All pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using the `HDF5 format `_ using the `deepdish `_ python library. This ensures near universal accesibility on different machines and operating systems. Therefore all filenames must end with :code:`.h5` or :code:`.hdf5`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`. +All pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. 
anything accessible with the '.' syntax. Models are saved and loaded using `Joblib `_ Therefore all filenames must end with :code:`.joblib`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`. + +Prior to version 0.8.1 models were saved to HDF5 files using `deepdish `_ but this library is no longer maintained. If you have old models saved as :code:`.h5` or :code:`.hdf5` files you should use the same version of pymer4 that you used to estimate those models. To persist models you can use the dedicated :code:`save_model` and :code:`load_model` functions from the :code:`pymer4.io` module +.. GENERATED FROM PYTHON SOURCE LINES 164-175 .. code-block:: default @@ -599,9 +631,9 @@ To persist models you can use the dedicated :code:`save_model` and :code:`load_m from pymer4.io import save_model, load_model # Save the Lm2 model above - save_model(model, "mymodel.h5") + save_model(model, "mymodel.joblib") # Load it back up - model = load_model("mymodel.h5") + model = load_model("mymodel.joblib") # Check that it looks the same print(model) @@ -611,8 +643,6 @@ To persist models you can use the dedicated :code:`save_model` and :code:`load_m .. rst-class:: sphx-glr-script-out - Out: - .. code-block:: none pymer4.models.Lm2(fitted=True, formula=DV~IV2, family=gaussian, group=Group) @@ -620,6 +650,8 @@ To persist models you can use the dedicated :code:`save_model` and :code:`load_m +.. GENERATED FROM PYTHON SOURCE LINES 176-179 + Wrap Up ------- This was a quick overview of the 3 major model classes in :code:`pymer4`. However, it's highly recommended to check out the API to see *all* the features and options that each model class has including things like permutation-based inference (:code:`Lm` and :code:`Lm2` models) and fine-grain control of optimizer and tolerance settings (:code:`Lmer` models). @@ -627,23 +659,18 @@ This was a quick overview of the 3 major model classes in :code:`pymer4`. Howeve .. _sphx_glr_download_auto_examples_example_01_basic_usage.py: +.. only:: html -.. only :: html - - .. container:: sphx-glr-footer - :class: sphx-glr-footer-example - - - - .. container:: sphx-glr-download sphx-glr-download-python + .. container:: sphx-glr-footer sphx-glr-footer-example - :download:`Download Python source code: example_01_basic_usage.py ` + .. container:: sphx-glr-download sphx-glr-download-python + :download:`Download Python source code: example_01_basic_usage.py ` - .. container:: sphx-glr-download sphx-glr-download-jupyter + .. container:: sphx-glr-download sphx-glr-download-jupyter - :download:`Download Jupyter notebook: example_01_basic_usage.ipynb ` + :download:`Download Jupyter notebook: example_01_basic_usage.ipynb ` .. 
only:: html diff --git a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_001.png b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_001.png index 54792e31..7ba6d0f1 100644 Binary files a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_001.png and b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_001.png differ diff --git a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_002.png b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_002.png index 7a84bddb..9a441f5e 100644 Binary files a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_002.png and b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_002.png differ diff --git a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_003.png b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_003.png index 173d1955..a31e2cb2 100644 Binary files a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_003.png and b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_003.png differ diff --git a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_004.png b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_004.png index ef8dfcf4..5fafff1d 100644 Binary files a/docs/auto_examples/images/sphx_glr_example_01_basic_usage_004.png and b/docs/auto_examples/images/sphx_glr_example_01_basic_usage_004.png differ diff --git a/docs/auto_examples/images/thumb/sphx_glr_example_01_basic_usage_thumb.png b/docs/auto_examples/images/thumb/sphx_glr_example_01_basic_usage_thumb.png index 7368954d..b28fac18 100644 Binary files a/docs/auto_examples/images/thumb/sphx_glr_example_01_basic_usage_thumb.png and b/docs/auto_examples/images/thumb/sphx_glr_example_01_basic_usage_thumb.png differ diff --git a/docs/auto_examples/sg_execution_times.rst b/docs/auto_examples/sg_execution_times.rst index 6cbd4ee6..e6908070 100644 --- a/docs/auto_examples/sg_execution_times.rst +++ b/docs/auto_examples/sg_execution_times.rst @@ -5,16 +5,16 @@ Computation times ================= -**00:11.920** total execution time for **auto_examples** files: +**00:02.957** total execution time for **auto_examples** files: +-------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_example_05_misc_stats.py` (``example_05_misc_stats.py``) | 00:04.245 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_example_01_basic_usage.py` (``example_01_basic_usage.py``) | 00:02.957 | 0.0 MB | +-------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_example_02_categorical.py` (``example_02_categorical.py``) | 00:02.812 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_example_02_categorical.py` (``example_02_categorical.py``) | 00:00.000 | 0.0 MB | +-------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_example_01_basic_usage.py` (``example_01_basic_usage.py``) | 00:02.487 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_example_03_posthoc.py` (``example_03_posthoc.py``) | 00:00.000 | 0.0 MB | +-------------------------------------------------------------------------------------------------+-----------+--------+ -| :ref:`sphx_glr_auto_examples_example_03_posthoc.py` (``example_03_posthoc.py``) | 00:01.915 | 0.0 MB | +| :ref:`sphx_glr_auto_examples_example_04_simulating_data.py` (``example_04_simulating_data.py``) | 00:00.000 | 0.0 MB | 
+-------------------------------------------------------------------------------------------------+-----------+--------+
-| :ref:`sphx_glr_auto_examples_example_04_simulating_data.py` (``example_04_simulating_data.py``) | 00:00.461 | 0.0 MB |
+| :ref:`sphx_glr_auto_examples_example_05_misc_stats.py` (``example_05_misc_stats.py``) | 00:00.000 | 0.0 MB |
+-------------------------------------------------------------------------------------------------+-----------+--------+
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 8b241392..1e4ef33f 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -1,37 +1,51 @@
 Contributing
 ============
-Maintaining this package is tricky because of its inter-language operability. In particular this requires keeping up with API changes to Python packages (e.g. pandas), R packages (e.g. lmerTest) as well as changes in rpy2 (which tend to break between versions), the interface package between them. For these reasons contributions are **always** welcome! Checkout the `development roadmap on Trello `_. Also note the diagram and explanation below which illustrate how code development cycles work and how automated deployment is handled through Travis CI.
+Maintaining this package is tricky because of its inter-language interoperability. In particular this requires keeping up with API changes to Python packages (e.g. pandas), R packages (e.g. lmerTest) as well as changes in rpy2 (which tend to break between versions), the interface package between them. For these reasons contributions are **always** welcome! Check out the `development roadmap on Github `_. Also note the explanation below, which illustrates how code development cycles work and how automated deployment is handled through Github Actions.

 Development Cycle and workflow
 ------------------------------
-All work proceeds on the :code:`dev` branch. This can include direct commits and PRs from other (non-:code:`master`) branches.
+All automation for testing, documentation, and packaging is handled through Github Actions. We use separate workflows to handle testing and packaging.

-Each new *pre-release* should proceed by opening a pull-request (PR) the against :code:`master` branch. Merging this PR will automatically trigger a :code:`M.N.P.devX` release to :code:`ejolly/label/pre-release` on `anaconda cloud `_ via Travis. Direct pushes to :code:`master` should be rare and primarily constitute documentation or devops changes rather than changes to the code base. These direct pushes will also trigger a pre-release.
+Testing
++++++++
+Any pushes or PRs against the :code:`master` branch will trigger the **Tests** GA workflow. This is a simple workflow that:

-Each new *stable* release should follow the following steps, the first of which can occur in two ways:
+- sets up a :code:`conda` environment with required :code:`R` dependencies
+- installs the latest code from the :code:`master` branch
+- runs tests and builds documentation (as an additional testing layer)

-- Step 1: drop :code:`.devX` from version string (in :code:`pymer4/version.py`) and update the release notes page in the docs via **either**:
+Packaging Stable Releases
++++++++++++++++++++++++++
+A stable release can be installed from :code:`pip` or from :code:`conda` using the :code:`-c ejolly` channel flag. Packaging a stable release requires building 3 artifacts:

-    - **Pre-merge** at least one commit in :code:`dev` for the PR against master, such that the merge will include updating the version string.
In the illustration below, this is depicted by the dashed borders around the final merge into :code:`master`.
+1. Conda packages for multiple platforms uploaded to the main :code:`ejolly` channel on anaconda cloud
+2. A pip installable package uploaded to Pypi
+3. Documentation site deployed to github pages

-    - **Post-merge** making a final commit to :code:`master` updating the version string. In the illustration below, this is depicted by the "final push" dashed commit circle on :code:`master` right before "tagged manual release" of :code:`v0.7.2`.
+To create a new release:

-- Step 2: manually trigger a release on Github using a :code:`vM.N.P` version string where :code:`M.N.P` matches Step 1. In the illustration below, this is depicted by the right-most, salmon-colored "tagged manual release" commit to :code:`master`.
+1. Publish a new release via github
+2. Manually trigger the **Build** and **Build_noarch** workflows and enable uploading to the main channel on anaconda, uploading to pypi, and deploying documentation

-    - **Note:** this is the version string entered *on Github* when publishing the release and should specifically contain a :code:`v` prefix.
+*Note: Previously this process was automated to trigger when a github release is made, but this seems to be unreliable as the commit hash is missing and causes runners to not find the built tarballs*

-- Step 3: immediately bump the version string on :code:`dev` to :code:`M.N.P+1.dev0` where :code:`M.N.P` refers to the version string from Step 1. In the illustration below, an example is shown on the left-side of the diagram immediately to the right of the "tagged manual release" of :code:`v0.7.1` on :code:`master`.
+Packaging Development Releases
+++++++++++++++++++++++++++++++
+Development releases can be installed directly from the :code:`master` branch on github using :code:`pip install git+https://github.com/ejolly/pymer4.git` or conda using the :code:`-c ejolly/label/pre-release` channel flag.

-Adhering to this workflow makes it much easier to *automatically* ensure that a stable version of :code:`pymer4` is always available on conda and pypi, while a development version is available on the conda pre-release label and github master branch. Feel free to ask questions, make suggestions, or contribute changes/additions on `github `_. If you do so, please follow the guidelines below for structuring contributions.
+A development release only includes 1 artifact:

-.. image:: pymer4_dev_cycle.jpeg
-    :width: 800
+1. Conda packages for multiple platforms uploaded to the :code:`ejolly/label/pre-release` channel on anaconda cloud
+
+Development releases are created the same way as stable releases using the same **Build** and **Build_noarch** workflows, but choosing the "pre-release" option for uploading to anaconda cloud and disabling pypi and documentation deploys. The default options for these workflows will simply build packages but perform no uploading at all, which can be useful for testing package builds.
+
+Updating deployed documentation
++++++++++++++++++++++++++++++++
+To deploy only documentation changes you can use *either* the **Build** workflow with the documentation deploy enabled, or the **Docs** workflow, which is a bit faster as it skips package building.

 Code Guidelines
 ---------------
-Please fork and make pull requests from the `development branch `_ on github. This branch will usually have additions and bug fixes not in master and is easier to integrate with contributions.
- Please use the `black `_ code formatter for styling code. An easy way to check if code is formatted properly is to use a `git pre-commit hook `_. After installing black, just create a file called :code:`.git/hooks/pre-commit` and put the following inside:

.. code-block:: bash

@@ -48,7 +62,7 @@ Please be sure to include tests with any code submissions and verify they pass u

 Versioning Guidelines
 ---------------------
-The current :code:`pymer4` scheme is `PEP 440 `_ compliant with two and only two forms of version strings: :code:`M.N.P` and :code:`M.N.P.devX`. These are pattern matched to automate builds and deployment using the following regular expression: :code:`r"^\d+\.\d+\.\d+(\.dev\d+){0,1}$"`.
+The current :code:`pymer4` scheme is `PEP 440 `_ compliant with two and only two forms of version strings: :code:`M.N.P` and :code:`M.N.P.devX`. Versions with the :code:`.devX` designation denote development versions, typically on the :code:`master` branch or :code:`conda` pre-release channel.

 This simplified scheme is not illustrated in the PEP 440 examples, but if it were, it would be described as "major.minor.micro" with development releases. To illustrate, the version sequence would look like this:

@@ -61,10 +75,6 @@ This simplifed scheme is not illustrated in the PEP 440 examples, but if was it
 The third digit(s) in the :code:`pymer4` scheme, i.e. PEP 440 "micro," are not strictly necessary but are useful for semantically versioned "patch" designations. The :code:`.devX` extension on the other hand denotes a sequence of incremental work in progress like the alpha, beta, developmental, release candidate system without the alphabet soup.

-PEP 440 specifies four categories of public release: "Any given release will be a "final release", "pre-release", "post-release" or "developmental release." The :code:`pymer4` scheme simplifies this to two release categories: final releases versioned :code:`M.N.P`, and developmental releases, versioned :code:`M.N.P.devX`.
-
-In this way, the PEP 440 "pre-release" of a stable version :code:`M.N.P` is realized as a :code:`pymer4` :code:`M.N.P.devX` release while a PEP 440 "final release" is realized as a :code:`pymer4` :code:`M.N.P+1` release.
-
 Documentation Guidelines
 ------------------------
 Documentation is written with `sphinx `_ using the `bootstrap theme `_. Tutorial usage of package features is written using `sphinx gallery `_.
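As a quick illustration of the two allowed version-string forms, the regular expression quoted in the older guidelines text above can be applied directly. A minimal sketch in plain Python (the pattern is the one from the guidelines; the candidate strings are made up for illustration):

```python
import re

# Matches exactly M.N.P or M.N.P.devX, the only two forms the pymer4 scheme allows
VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(\.dev\d+){0,1}$")

for candidate in ["0.8.1", "0.8.2.dev0", "0.8", "0.8.1rc1"]:
    status = "valid" if VERSION_RE.match(candidate) else "invalid"
    print(f"{candidate}: {status}")

# 0.8.1: valid
# 0.8.2.dev0: valid
# 0.8: invalid
# 0.8.1rc1: invalid
```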
diff --git a/docs/index.rst b/docs/index.rst
index 42d607b0..d48f49f4 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -25,7 +25,7 @@ Pymer4
 .. image:: https://zenodo.org/badge/90598701.svg
     :target: https://zenodo.org/record/1523205

-.. image:: https://img.shields.io/badge/python-3.6%20%7C%203.7%20%7C%203.8-blue
+.. image:: https://img.shields.io/badge/python-3.8%20%7C%203.9%20%7C%203.10%20%7C%203.11-blue

 .. raw:: html

diff --git a/docs/new.rst b/docs/new.rst
index 877402c3..6def9f9b 100644
--- a/docs/new.rst
+++ b/docs/new.rst
@@ -2,8 +2,20 @@ What's New
 ==========
 Historically :code:`pymer4` versioning was a bit all over the place but has settled down since 0.5.0. This page includes the most notable updates between versions, but github is the best place to check out more details and `releases `_.

+0.8.1
+-----
+- **Compatibility Updates:**
+    - This version includes a :code:`noarch` build that should be installable on arm-based macOS platforms (e.g. M1, M2, etc)
+    - This version drops support for Python 3.7 and adds support for 3.9-3.11
+- **Breaking changes:**
+    - This version also uses :code:`joblib` for model saving and loading and drops support for HDF5 files previously handled with the :code:`deepdish` library, as that library is no longer actively maintained. This means that 0.8.1 will **not** be able to load models saved with earlier versions of :code:`pymer4`!
+- **Fixes:**
+    - `#119 `_
+    - `#122 `_
+    - `#125 `_
+
 0.8.0
 -----
 - **NOTE:**
     - there was no 0.7.9 release as there were enough major changes to warrant a new minor release version
diff --git a/examples/example_01_basic_usage.py b/examples/example_01_basic_usage.py
index 59ba41b6..576e3cf1 100644
--- a/examples/example_01_basic_usage.py
+++ b/examples/example_01_basic_usage.py
@@ -115,6 +115,22 @@
 # Get group level deviates from population level coefficients (i.e. rfx)
 print(model.ranef.head(5))

+###############################################################################
+
+# Get group level deviates from population level coefficients (also called conditional modes) as well as their
+# associated conditional standard deviations in the form of a dataframe
+
+# :code:`model.ranef_df` is a dataframe containing information about the random effects. The dataframe contains the following columns:
+# grpvar: grouping variable
+# term: random-effects term, e.g. 1 for “(Intercept)” in R
+# grp: level of the grouping variable (e.g., which Subject)
+# condval: value of the conditional mean (similar to above)
+# condsd: conditional standard deviation.
+# This is obtained from R with the following R code: as.data.frame(ranef(model_fit, condVar=TRUE))
+# where model_fit is the fitted lmer/glmer model.
+
+print(model.ranef_df)
+
 ###############################################################################

 # :code:`Lmer` models also have some basic plotting abilities that :code:`Lm` models do not
@@ -156,7 +172,9 @@
 ###############################################################################
 # Model Persistence
 # -----------------
-# All pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using the `HDF5 format `_ using the `deepdish `_ python library. This ensures near universal accesibility on different machines and operating systems. Therefore all filenames must end with :code:`.h5` or :code:`.hdf5`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the HDF5 file. This is the R model object readable in R using :code:`readRDS`.
+# All pymer4 models can be saved and loaded from disk. Doing so will persist *all* model attributes and data i.e. anything accessible with the '.' syntax. Models are saved and loaded using `Joblib `_. Therefore all filenames must end with :code:`.joblib`. For :code:`Lmer` models, an additional file ending in :code:`.rds` will be saved in the same directory as the joblib file. This is the R model object readable in R using :code:`readRDS`.
+#
+# Prior to version 0.8.1 models were saved to HDF5 files using `deepdish `_ but this library is no longer maintained. If you have old models saved as :code:`.h5` or :code:`.hdf5` files you should use the same version of pymer4 that you used to estimate those models.
 #
 # To persist models you can use the dedicated :code:`save_model` and :code:`load_model` functions from the :code:`pymer4.io` module.

@@ -164,9 +182,9 @@
 from pymer4.io import save_model, load_model

 # Save the Lm2 model above
-save_model(model, "mymodel.h5")
+save_model(model, "mymodel.joblib")

 # Load it back up
-model = load_model("mymodel.h5")
+model = load_model("mymodel.joblib")

 # Check that it looks the same
 print(model)
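Since each row of the :code:`ranef_df` attribute added in the example file above pairs a conditional mode (:code:`condval`) with its conditional standard deviation (:code:`condsd`), one natural use is a rough normal-approximation interval per group level. A minimal sketch, assuming :code:`model` is an already-fitted :code:`Lmer` model; the 1.96 multiplier is just the usual 95% normal quantile, not something pymer4 computes for you:

```python
# Approximate 95% intervals around the group-level deviates (BLUPs/conditional modes)
blups = model.ranef_df.copy()
blups["lower"] = blups["condval"] - 1.96 * blups["condsd"]
blups["upper"] = blups["condval"] + 1.96 * blups["condsd"]
print(blups[["grpvar", "term", "grp", "condval", "lower", "upper"]].head())
```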
diff --git a/examples/mymodel.h5 b/examples/mymodel.h5
deleted file mode 100644
index 4fa19df3..00000000
Binary files a/examples/mymodel.h5 and /dev/null differ
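The checked-in :code:`mymodel.h5` artifact deleted above is superseded by files created at runtime: a :code:`.joblib` file for every model, plus a sibling :code:`.rds` file for :code:`Lmer` models. A minimal sketch of the resulting on-disk layout (assumes :code:`model` is a fitted :code:`Lmer` model):

```python
from pathlib import Path
from pymer4.io import save_model

# Writes the pickled python object via joblib...
save_model(model, "mymodel.joblib")
assert Path("mymodel.joblib").exists()

# ...and, for Lmer models, the underlying R object next to it,
# readable from R with readRDS("mymodel.rds")
assert Path("mymodel.rds").exists()
```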
diff --git a/pymer4/io.py b/pymer4/io.py
index 8073d68f..bad107d6 100644
--- a/pymer4/io.py
+++ b/pymer4/io.py
@@ -1,203 +1,59 @@
 __all__ = ["save_model", "load_model"]
 import os
-from .models import Lm, Lm2, Lmer
+from .models import Lmer
 from rpy2.robjects.packages import importr
-import deepdish as dd
-import pandas as pd
-import warnings
-from tables import NaturalNameWarning
-from pathlib import Path
+from joblib import dump, load

 base = importr("base")


-def save_model(model, filepath, compression="zlib", **kwargs):
+def save_model(model, filepath, **kwargs):
     """
-    Function for saving pymer4 models. All models are saved in .h5 or .hdf5 files so filepath extensions should include this. For Lmer models an additional filepath.robj file will be created to retain all R objects.
+    Function for saving pymer4 models. All models are saved using joblib.dump, so
+    filepath extensions should end with .joblib. For Lmer models an additional
+    filepath.rds file will be created to retain all R objects.

     Args:
         model (pymer4.models): an instance of a pymer4 model
-        filepath (str): full filepath string ending with .h5 or .hd5f
-        compression (string): what kind of compression to use; zlib is the default which should be universally accessible, but for example 'blosc' will be faster and produce smaller files. See more here: https://bit.ly/33x9JD7
-        kwargs: optional keyword arguments to deepdish.io.save
+        filepath (str): full filepath string ending with .joblib
+        kwargs: optional keyword arguments to joblib.dump
     """

-    if isinstance(filepath, Path):
-        rds_file = filepath.parent / filepath.name.replace(".h5", "rds")
-        filepath = str(filepath)
-    else:
-        rds_file = filepath.replace(".h5", ".rds")
+    filepath = str(filepath)
+    if not filepath.endswith(".joblib"):
+        raise IOError("filepath must end with .joblib")

-    if filepath.endswith(".h5") or filepath.endswith(".hdf5"):
+    rds_file = filepath.replace(".joblib", ".rds")

-        # Separate out model attributes that are not pandas dataframes (or lists conatins dataframes) or R model objects
-        simple_atts, data_atts = {}, {}
-        for k, v in vars(model).items():
-            skip = False
-            if k == "model_obj":
-                skip = True
-            elif isinstance(v, pd.DataFrame):
-                skip = True
-            elif isinstance(v, list):
-                if any([isinstance(elem, pd.DataFrame) for elem in v]):
-                    skip = True
-            if not skip:
-                simple_atts[k] = v
-            else:
-                data_atts[k] = v
-        simple_atts["model_class"] = model.__class__.__name__
-
-        # Now deal with other attributes
-        data_atts_separated = {}
-        for k, v in data_atts.items():
-            if k != "model_obj":
-                # Deconstruct pandas dataframes
-                if isinstance(v, pd.DataFrame):
-                    cols, idx = _df_meta_to_arr(v)
-                    vals = v.values
-                    dtypes = v.dtypes.to_dict()
-                    data_atts_separated[f"df_cols__{k}"] = cols
-                    data_atts_separated[f"df_idx__{k}"] = idx
-                    data_atts_separated[f"df_vals__{k}"] = vals
-                    data_atts_separated[f"df_dtypes__{k}"] = dtypes
-                elif isinstance(v, list):
-                    for i, elem in enumerate(v):
-                        if isinstance(elem, pd.DataFrame):
-                            cols, idx = _df_meta_to_arr(elem)
-                            vals = elem.values
-                            dtypes = elem.dtypes.to_dict()
-                            data_atts_separated[f"list_{i}_cols__{k}"] = cols
-                            data_atts_separated[f"list_{i}_idx__{k}"] = idx
-                            data_atts_separated[f"list_{i}_vals__{k}"] = vals
-                            data_atts_separated[f"list_{i}_dtypes__{k}"] = dtypes
-                        else:
-                            raise TypeError(
-                                f"Value is list but list item is {type(elem)} not pd.DataFrame"
-                            )
-
-        # Combine all attributes into a single dict and save with dd
-        model_atts = {}
-        model_atts["simple_atts"] = simple_atts
-        model_atts["data_atts"] = data_atts_separated
-        with warnings.catch_warnings():
-            warnings.simplefilter("ignore", category=FutureWarning)
-            warnings.simplefilter("ignore", category=NaturalNameWarning)
-            dd.io.save(filepath, model_atts, compression=compression, **kwargs)
-        assert os.path.exists(filepath)
-
-        # Now deal with model object in R if needed
-        if model.model_obj is not None:
-            base.saveRDS(model.model_obj, rds_file)
-            assert os.path.exists(rds_file)
-    else:
-        raise IOError("filepath must end with .h5 or .hdf5")
+    # Save the python object
+    dump(model, filepath, **kwargs)
+    assert os.path.exists(filepath)

+    # Now deal with model object in R if needed
+    if model.model_obj is not None:
+        base.saveRDS(model.model_obj, rds_file)
+        assert os.path.exists(rds_file)


 def load_model(filepath):
     """
-    Function for loading pymer4 models. A file path ending in .h5 or .hdf5 should be provided. For Lmer models an additional filepath.robj should be located in the same directory.
+    Function for loading pymer4 models. A file path ending in .joblib should be provided. For Lmer models an additional filepath.rds should be located in the same directory.
Args: model (pymer4.models): an instance of a pymer4 model - filepath (str): full filepath string ending with .h5 or .hd5f + filepath (str): full filepath string ending with .joblib """ - if isinstance(filepath, Path): - rds_file = filepath.parent / filepath.name.replace(".h5", "rds") - filepath = str(filepath) - else: - rds_file = filepath.replace(".h5", ".rds") - - if filepath.endswith(".h5") or filepath.endswith(".hdf5"): - if not os.path.exists(filepath): - raise IOError("File not found!") + filepath = str(filepath) + if not filepath.endswith(".joblib"): + raise IOError("filepath must end with .joblib") - # Load h5 first - model_atts = dd.io.load(filepath) - # Figure out what kind of model we're dealing with - if model_atts["simple_atts"]["model_class"] == "Lmer": - model = Lmer("", []) - elif model_atts["simple_atts"]["model_class"] == "Lm2": - model = Lm2("", [], "") - elif model_atts["simple_atts"]["model_class"] == "Lm": - model = Lm("", []) + rds_file = filepath.replace(".joblib", ".rds") - # Set top level attributes - for k, v in model_atts["simple_atts"].items(): - if k != "model_class": - setattr(model, k, v) - # Make sure the model formula is a python string string so that rpy2 doesn't complain - model.formula = str(model.formula) + # Load python object + model = load(filepath) - # Set data attributes - # Container for already set items - completed = [] - for k, v in model_atts["data_atts"].items(): - # Re-assembe dataframes - if k.startswith("df_"): - # First check if we haven't set it yet - if k not in completed: - # Get the id of this deconstructed df - item_name = k.split("__")[-1] - vals_name = f"df_vals__{item_name}" - cols_name = f"df_cols__{item_name}" - idx_name = f"df_idx__{item_name}" - dtype_name = f"df_dtypes__{item_name}" - # Reconstruct the dataframe - df = pd.DataFrame( - model_atts["data_atts"][vals_name], - columns=[ - e.decode("utf-8") if isinstance(e, bytes) else e - for e in model_atts["data_atts"][cols_name] - ], - index=[ - e.decode("utf-8") if isinstance(e, bytes) else e - for e in model_atts["data_atts"][idx_name] - ], - ).astype(model_atts["data_atts"][dtype_name]) - setattr(model, item_name, df) - # Add it to the list of completed items - completed.extend([item_name, vals_name, idx_name, dtype_name]) - # Same idea for list items - elif k.startswith("list_"): - if k not in completed: - # Get the id of the deconstructed list - item_name = k.split("__")[-1] - item_idx = [e for e in k.split("__")[0] if e.isdigit()][0] - vals_name = f"list_{item_idx}_vals__{item_name}" - cols_name = f"list_{item_idx}_cols__{item_name}" - idx_name = f"list_{item_idx}_idx__{item_name}" - dtype_name = f"list_{item_idx}_dtypes__{item_name}" - # Reconstruct the dataframe - df = pd.DataFrame( - model_atts["data_atts"][vals_name], - columns=[ - e.decode("utf-8") if isinstance(e, bytes) else e - for e in model_atts["data_atts"][cols_name] - ], - index=[ - e.decode("utf-8") if isinstance(e, bytes) else e - for e in model_atts["data_atts"][idx_name] - ], - ).astype(model_atts["data_atts"][dtype_name]) - # Check if the list already exists if so just append to it - if hasattr(model, item_name): - current_items = getattr(model, item_name) - if current_items is not None: - current_items += [df] - setattr(model, item_name, current_items) - else: - setattr(model, item_name, [df]) - # Otherwise create it - else: - setattr(model, item_name, [df]) - # Add to the list of completed items - completed.extend([item_name, vals_name, idx_name, dtype_name]) - # Now deal with model object in R if 
needed - if isinstance(model, Lmer): - model.model_obj = base.readRDS(rds_file) - return model - else: - raise IOError("filepath must end with .h5 or .hdf5") + # Now deal with model object in R if needed + if isinstance(model, Lmer): + model.model_obj = base.readRDS(rds_file) + return model diff --git a/pymer4/models/Lm.py b/pymer4/models/Lm.py index 747eb2ee..d343ce99 100644 --- a/pymer4/models/Lm.py +++ b/pymer4/models/Lm.py @@ -435,7 +435,7 @@ def fit( # Since we're bootstrapping coefficients themselves we don't need the robust info anymore boot_betas = par_for( delayed(_chunk_boot_ols_coefs)( - dat=self.data, + dat=ddat, formula=self.formula, weights=weights, seed=seeds[i], diff --git a/pymer4/models/Lm2.py b/pymer4/models/Lm2.py index c2f7f5bb..4df33f79 100644 --- a/pymer4/models/Lm2.py +++ b/pymer4/models/Lm2.py @@ -210,11 +210,11 @@ def fit( betas = np.array([e["betas"] for e in out]) fits = np.concatenate([e["pred"] for e in out], axis=0) residuals = np.concatenate([e["res"] for e in out], axis=0) + self.residuals = residuals + self.data["residuals"] = residuals + self.fits = fits + self.data["fits"] = fits - self.residuals = residuals - self.data["residuals"] = residuals - self.fits = fits - self.data["fits"] = fits # Get the model matrix formula from patsy to make it more reliable to set the results dataframe index like Lmer _, x = dmatrices(self.formula, self.data, 1, return_type="dataframe") self.design_matrix = x @@ -299,6 +299,7 @@ def fit( ] ] self.fitted = True + self.iscorrs = to_corrs # Fit statistics if "Intercept" in self.design_matrix.columns: @@ -313,31 +314,30 @@ def fit( # don't compute anything from the second-level fits or residuals since those # are just univariate mean tests. - # Method 1: "naive" over the whole dataset - self.rsquared = rsquared(fits, residuals, center_tss) - self.rsquared_adj = rsquared_adj( - self.rsquared, len(residuals), len(residuals) - x.shape[1], center_tss - ) - - # Method 2: calculated separately group. Potentially useful for inspecting 1st - # level model fits - separate_results = [(e["pred"], e["res"]) for e in out] - self.rsquared_per_group = np.array( - [rsquared(e[0], e[1], center_tss) for e in separate_results] - ) - self.rsquared_adj_per_group = np.array( - [ - rsquared_adj( - self.rsquared_per_group[i], - len(separate_results[i][0]), - len(separate_results[i][0]) - x.shape[1], - center_tss, - ) - for i in range(len(self.rsquared_per_group)) - ] - ) + if not self.iscorrs: + # Method 1: "naive" over the whole dataset + self.rsquared = rsquared(fits, residuals, center_tss) + self.rsquared_adj = rsquared_adj( + self.rsquared, len(residuals), len(residuals) - x.shape[1], center_tss + ) - self.iscorrs = to_corrs + # Method 2: calculated separately group. 
Potentially useful for inspecting 1st
+            # level model fits
+            separate_results = [(e["pred"], e["res"]) for e in out]
+            self.rsquared_per_group = np.array(
+                [rsquared(e[0], e[1], center_tss) for e in separate_results]
+            )
+            self.rsquared_adj_per_group = np.array(
+                [
+                    rsquared_adj(
+                        self.rsquared_per_group[i],
+                        len(separate_results[i][0]),
+                        len(separate_results[i][0]) - x.shape[1],
+                        center_tss,
+                    )
+                    for i in range(len(self.rsquared_per_group))
+                ]
+            )

         if summarize:
             return self.summary()
diff --git a/pymer4/models/Lmer.py b/pymer4/models/Lmer.py
index d15e1f31..c82b1281 100644
--- a/pymer4/models/Lmer.py
+++ b/pymer4/models/Lmer.py
@@ -67,6 +67,7 @@ class Lmer(object):
         fits (numpy.ndarray): model fits/predictions
         model_obj (lmer model): rpy2 lmer model object
         factors (dict): factors used to fit the model if any
+        ranef_df (pd.DataFrame): Contains the best linear unbiased predictors (BLUPs, also called conditional modes) and the conditional standard deviations of the random effects

     """

@@ -108,6 +109,7 @@ def __init__(self, formula, data, family="gaussian"):
         self.sig_type = None
         self.factors_prev_ = None
         self.contrasts = None
+        self.ranef_df = None

     def __repr__(self):
         out = "{}(fitted = {}, formula = {}, family = {})".format(
@@ -318,7 +320,7 @@ def fit(
             n_boot (int): number of bootstrap intervals if bootstrapped confidence intervals are requested; default 500
             factors (dict): dictionary with column names specified as keys and values as a list for dummy/treatment/polynomial contrast or a dict with keys as factor levels and values as desired comparisons in human readable format. See examples below
             permute (int): if non-zero, computes parameter significance tests by permuting test statistics rather than parametrically. Permutation is done by shuffling observations within clusters to respect random effects structure of data.
-            ordered (bool): whether factors should be treated as ordered polynomial contrasts; this will parameterize a model with K-1 orthogonal polynomial regressors beginning with a linear contrast based on the factor order provided; default is False
+            ordered (bool): whether factors should be treated as ordered polynomial contrasts; this will parameterize a model with K-1 orthogonal polynomial regressors beginning with a linear contrast based on the factor order provided. Ordering applies to **all** contrasts; default is False
             summarize/summary (bool): whether to print a model summary after fitting; default is True
             verbose (bool): whether to print when and which model and confidence interval are being fitted
             REML (bool): whether to fit using restricted maximum likelihood estimation instead of maximum likelihood estimation; default True
@@ -825,6 +827,30 @@ def fit(
             else:
                 self.ranef = R2pandas(ranefs[0]).drop(columns=["index"])

+            # Cluster (e.g. subject) level random deviations and their associated conditional standard deviations
+
+            rstring = """
+            function(model){
+            ranef_df <- as.data.frame(ranef(model, condVar=TRUE))
+            }
+            """
+
+            get_ranef_r_df = robjects.r(rstring)
+            ranef_r_df = get_ranef_r_df(self.model_obj)
+
+            # Convert the R dataframe to a pandas dataframe
+            self.ranef_df = robjects.pandas2ri.rpy2py(ranef_r_df)
+
+            # ranef_r_df[2] is an R FactorVector whose values arrive as integer factor codes,
+            # so we replace the integer codes in the grp column with their associated string levels.
+            # [factor - 1] is used because factor is an R index which starts at 1 (Python indexing starts at 0).
+            self.ranef_df["grp"] = [
+                ranef_r_df[2].levels[factor - 1] for factor in ranef_r_df[2]
+            ]
+            # The R row names are preserved as a string index, but they are just integers,
+            # so we reset the index.
+            self.ranef_df.reset_index(drop=True, inplace=True)
+
             # Model residuals
             rstring = """
             function(model){
@@ -1587,6 +1613,7 @@ def confint(
         boot_type="perc",
         quiet=False,
         oldnames=False,
+        seed=None,
     ):
         """
         Compute confidence intervals on the parameters of a Lmer object (this is a wrapper for confint.merMod in lme4).
@@ -1604,6 +1631,7 @@ def confint(
             quiet (bool): (logical) suppress messages about computationally intensive profiling?
             oldnames: (logical) use old-style names for variance-covariance parameters, e.g. ".sig01", rather than newer (more informative) names such as "sd_(Intercept)|Subject"?
+            seed (int): seed to be passed to bootMer for repeatability.

         Returns:
             pd.DataFrame: confidence intervals for the parameters of interest
@@ -1654,6 +1682,7 @@ def _f(x):
             + method
             + """'"""
             + ((""",zeta=""" + str(zeta)) if zeta is not None else """""")
+            + ((""",seed=""" + str(seed)) if seed is not None else """""")
             + """,nsim="""
             + str(nsim)
             + """,boot.type='"""
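With :code:`seed` now threaded through to lme4's :code:`bootMer`, bootstrapped confidence intervals become reproducible. A minimal sketch mirroring the call pattern from the test added in :code:`test_models.py` below (assumes :code:`model` is an already-fitted :code:`Lmer` model; :code:`nsim` is kept tiny just so the sketch runs quickly):

```python
# Same seed -> identical bootstrap draws -> identical intervals
ci_a = model.confint(method="boot", nsim=10, seed=42)
ci_b = model.confint(method="boot", nsim=10, seed=42)
assert ci_a.equals(ci_b)

# Omitting the seed leaves the intervals stochastic across runs
ci_c = model.confint(method="boot", nsim=10)
```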
+ self.ranef_df["grp"] = [ + ranef_r_df[2].levels[factor - 1] for factor in ranef_r_df[2] + ] + # The R indices are automatically preserved as strings but they are just integers which do not need to be as + # strings. We therefore reset the index. + self.ranef_df.reset_index(drop=True, inplace=True) + # Model residuals rstring = """ function(model){ @@ -1587,6 +1613,7 @@ def confint( boot_type="perc", quiet=False, oldnames=False, + seed=None, ): """ Compute confidence intervals on the parameters of a Lmer object (this is a wrapper for confint.merMod in lme4). @@ -1604,6 +1631,7 @@ def confint( quiet (bool): (logical) suppress messages about computationally intensive profiling? oldnames: (logical) use old-style names for variance-covariance parameters, e.g. ".sig01", rather than newer (more informative) names such as "sd_(Intercept)|Subject"? + seed (int): seed to be passed to bootMer for repeatability. Returns: pd.DataFrame: confidence intervals for the parameters of interest @@ -1654,6 +1682,7 @@ def _f(x): + method + """'""" + ((""",zeta=""" + str(zeta)) if zeta is not None else """""") + + ((""",seed=""" + str(seed)) if seed is not None else """""") + """,nsim=""" + str(nsim) + """,boot.type='""" diff --git a/pymer4/resources/ranef_as_dataframe_correct_results.csv b/pymer4/resources/ranef_as_dataframe_correct_results.csv new file mode 100644 index 00000000..6bdc07ac --- /dev/null +++ b/pymer4/resources/ranef_as_dataframe_correct_results.csv @@ -0,0 +1,48 @@ +grpvar,term,grp,condval,condsd +Group,1,1,2.86581381106489,1.06657399430703 +Group,1,2,-0.226996797847915,1.06657399430703 +Group,1,3,1.20774518274478,1.06657399430703 +Group,1,4,-1.00554252019402,1.06657399430703 +Group,1,5,-0.560592873023175,1.06657399430703 +Group,1,6,-0.902087477732985,1.06657399430703 +Group,1,7,-1.00554252019402,1.06657399430703 +Group,1,8,0.854021714782246,1.06657399430703 +Group,1,9,-0.923459966353692,1.06657399430703 +Group,1,10,-0.0761506302168548,1.06657399430703 +Group,1,11,-0.203765850055764,1.06657399430703 +Group,1,14,4.30225933586703,1.06657399430703 +Group,1,15,-1.00554252019402,1.06657399430703 +Group,1,16,-0.609687542402631,1.06657399430703 +Group,1,17,-1.00554252019402,1.06657399430703 +Group,1,18,-1.00554252019402,1.06657399430703 +Group,1,19,-0.0193122942275899,1.06657399430703 +Group,1,20,0.300642834252139,1.06657399430703 +Group,1,21,0.499201977587342,1.06657399430703 +Group,1,22,-1.00554252019402,1.06657399430703 +Group,1,23,-1.00554252019402,1.06657399430703 +Group,1,24,-1.00554252019402,1.06657399430703 +Group,1,27,-0.400454357497617,1.06657399430703 +Group,1,29,-0.862801365997165,1.06657399430703 +Group,1,30,-0.343770884433095,1.06657399430703 +Group,1,31,4.30225933586703,1.06657399430703 +Group,1,32,0.00918430522941495,1.06657399430703 +Group,1,33,-1.00554252019402,1.06657399430703 +Group,1,34,-0.0419237382835018,1.06657399430703 +Group,1,35,0.629656559195606,1.06657399430703 +Group,1,36,0.217023671774479,1.06657399430703 +Group,1,37,-0.71554311630898,1.06657399430703 +Group,1,38,-1.00554252019402,1.06657399430703 +Group,1,39,-0.0504417715542911,1.06657399430703 +Group,1,40,-0.745201291776092,1.06657399430703 +Group,1,42,-0.286416286657419,1.06657399430703 +Group,1,43,0.51422466999546,1.06657399430703 +Group,1,44,-0.699204037003285,1.06657399430703 +Group,1,45,-0.625562032934102,1.06657399430703 +Group,1,46,4.30225933586703,1.06657399430703 +Group,1,47,0.229878075087127,1.06657399430703 +Group,1,48,0.987670968660583,1.06657399430703 +Group,1,49,0.484385821116152,1.06657399430703 
+Group,1,50,-1.00554252019402,1.06657399430703 +Group,1,51,-1.00554252019402,1.06657399430703 +Group,1,52,-0.340802522263727,1.06657399430703 +Group,1,53,-1.00554252019402,1.06657399430703 diff --git a/pymer4/tests/conftest.py b/pymer4/tests/conftest.py index 11d0c5c9..751d2a80 100644 --- a/pymer4/tests/conftest.py +++ b/pymer4/tests/conftest.py @@ -14,3 +14,13 @@ def gammas(): @fixture(scope="module") def df(): return pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) + + +@fixture(scope="module") +def ranef_as_dataframe_correct_results(): + # grp is an int in the sample data but in general grp can be a string. + # Therefore, we read grp as an object dtype to match the R output. + return pd.read_csv( + os.path.join(get_resource_path(), "ranef_as_dataframe_correct_results.csv"), + dtype={"grp": object}, + ) diff --git a/pymer4/tests/test_io.py b/pymer4/tests/test_io.py new file mode 100644 index 00000000..6133b13b --- /dev/null +++ b/pymer4/tests/test_io.py @@ -0,0 +1,46 @@ +from pymer4.models import Lm, Lm2, Lmer +from pymer4.io import load_model, save_model + + +def test_saveload_lmer(df, tmp_path): + + model = Lmer("DV ~ IV3 + IV2 + (IV2|Group) + (1|IV3)", data=df) + model.fit(summarize=False) + output_file = tmp_path / "model.joblib" + rds_file = tmp_path / "model.rds" + + save_model(model, output_file) + assert output_file.exists() + assert rds_file.exists() + + m = load_model(output_file) + assert m.coefs.equals(model.coefs) + assert m.data.equals(model.data) + + +def test_saveload_lm(df, tmp_path): + + model = Lm("DV ~ IV1 + IV3", data=df) + model.fit(summarize=False) + output_file = tmp_path / "model.joblib" + + save_model(model, output_file) + assert output_file.exists() + + m = load_model(output_file) + assert m.coefs.equals(model.coefs) + assert m.data.equals(model.data) + + +def test_saveload_lm2(df, tmp_path): + + model = Lm2("DV ~ IV3 + IV2", group="Group", data=df) + model.fit(summarize=False) + output_file = tmp_path / "model.joblib" + + save_model(model, output_file) + assert output_file.exists() + + m = load_model(output_file) + assert m.coefs.equals(model.coefs) + assert m.data.equals(model.data) diff --git a/pymer4/tests/test_models.py b/pymer4/tests/test_models.py index a227ec74..cc4ef0d6 100644 --- a/pymer4/tests/test_models.py +++ b/pymer4/tests/test_models.py @@ -217,6 +217,14 @@ def test_gaussian_lmm(df): # ci for random effects should be estimates by bootstrapping assert boot_confint["2.5 %"].isna().sum() == 0 + # test seed for confint + boot_df1 = model.confint(method="boot", nsim=10) + boot_df2 = model.confint(method="boot", nsim=10) + assert not boot_df1.equals(boot_df2) + boot_df1 = model.confint(method="boot", nsim=10, seed=123) + boot_df2 = model.confint(method="boot", nsim=10, seed=123) + assert boot_df1.equals(boot_df2) + # Smoketest for old_optimizer model.fit(summarize=False, old_optimizer=True) diff --git a/pymer4/tests/test_stats.py b/pymer4/tests/test_stats.py index 68e3382b..17a36263 100644 --- a/pymer4/tests/test_stats.py +++ b/pymer4/tests/test_stats.py @@ -225,3 +225,11 @@ def test_lrt(df): ) pd.testing.assert_frame_equal(r_lrt_ml_sub, r_lrt_ml_sub, check_dtype=False) + +def test_ranef_as_data_frame(df, ranef_as_dataframe_correct_results): + model = Lmer("IV1 ~ (1|Group)", data=df) + model.fit(summarize=False) + + pd.testing.assert_frame_equal( + ranef_as_dataframe_correct_results, model.ranef_df, check_exact=False, rtol=1e-5 + ) \ No newline at end of file diff --git a/pymer4/utils.py b/pymer4/utils.py index d22165dd..e5c2a3ae 
100644 --- a/pymer4/utils.py +++ b/pymer4/utils.py @@ -408,7 +408,7 @@ def _logregress(x, y, all_stats=True): # Design matrix already has intercept. We want no regularization and the newton # solver to match as closely with R - model = LogisticRegression(penalty="none", solver="newton-cg", fit_intercept=False) + model = LogisticRegression(penalty=None, solver="newton-cg", fit_intercept=False) _ = model.fit(x, y) b = model.coef_ fits = model.decision_function(x) diff --git a/pymer4/version.py b/pymer4/version.py index ce0bc3ae..cf6ccb24 100644 --- a/pymer4/version.py +++ b/pymer4/version.py @@ -1,4 +1,4 @@ """Specifies current version of pymer4 to be used by setup.py and __init__.py """ -__version__ = "0.7.8" +__version__ = "0.8.2" diff --git a/requirements.txt b/requirements.txt index 99c525a5..81b3ea02 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,5 +6,4 @@ matplotlib>=3.0 patsy>=0.5.1 joblib>=0.14 scipy>=1.4.0 -deepdish>=0.3.6 scikit-learn>=1.0 diff --git a/setup.cfg b/setup.cfg index d00b507a..8753e106 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [metadata] -description-file = README.md +description_file = README.md [bdist_wheel] universal=1 diff --git a/setup.py b/setup.py index 30a1e3ee..b669ac68 100644 --- a/setup.py +++ b/setup.py @@ -22,9 +22,10 @@ long_description="pymer4 is a Python package to make it simple to perform multi-level modeling by interfacing with the popular R package lme4. pymer4 is also capable of fitting a variety of standard regression models with robust, bootstrapped, and permuted estimators", keywords=["statistics", "multi-level-modeling", "regression", "analysis"], classifiers=[ - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Operating System :: OS Independent", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License",