Skip to content
This repository was archived by the owner on Feb 8, 2025. It is now read-only.

DOC: Adds a tract profile plot to the ALS example. #125

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,9 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install
run: |
python -m pip install --upgrade pip --use-feature=2020-resolver
python -m pip install coveralls --use-feature=2020-resolver
python -m pip install .[all] --use-feature=2020-resolver
python -m pip install --upgrade pip
python -m pip install coveralls
python -m pip install .[all]
python -m pip install https://github.com/bboe/coveralls-python/archive/github_actions.zip
- name: Lint
run: |
Expand Down
2 changes: 2 additions & 0 deletions afqinsight/plot.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,8 @@ def plot_tract_profiles(
)
group_by = np.ones(X.shape[0])
group_by_name = None
else:
group_by = np.asarray(group_by)

figs = {}

Expand Down
43 changes: 34 additions & 9 deletions examples/plot_als_classification.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,15 @@
DOI: 10.1371/journal.pcbi.1009136

"""
import os.path as op
import matplotlib.pyplot as plt
import numpy as np

from afqinsight import AFQDataset
from afqinsight import make_afq_classifier_pipeline
from afqinsight.datasets import download_sarica
from afqinsight.plot import plot_tract_profiles


from groupyr.decomposition import GroupPCA

Expand All @@ -40,15 +44,23 @@
#############################################################################
# Fetch data from Sarica et al.
# -----------------------------
# As a shortcut, we have incorporated a few studies into the software. In these
# cases, a :class:`AFQDataset` class instance can be initialized using the
# :func:`AFQDataset.from_study` static method. This expects the name of one of
# the studies that are supported (see the method documentation for the list of
# these studies). By passing `"sarica"`, we request that the software download
# the data from this study and initialize an object for us from this data.


afqdata = AFQDataset.from_study("sarica")
# The :func:`download_sarica` function downloads the data used in this
# example and places it in the `~/.cache/afq-insight/sarica` directory.
# If the directory does not exist, it is created. The data follows the format
# expected by the :func:`load_afq_data` function: a file called `nodes.csv` that
# contains AFQ tract profiles and a file called `subjects.csv` that contains
# information about the subjects. The two files are linked through the
# `subjectID` column that should exist in both of them. For more information
# about this format, see also the `AFQ-Browser documentation
# <https://yeatmanlab.github.io/AFQ-Browser/dataformat.html>`_ (items 2 and 3).

workdir = download_sarica()
afqdata = AFQDataset.from_files(
fn_nodes=op.join(workdir, "nodes.csv"),
fn_subjects=op.join(workdir, "subjects.csv"),
dwi_metrics=["md", "fa"],
target_cols=["class"],
)

# Examine the data
# ----------------
Expand All @@ -61,6 +73,19 @@
group_names = afqdata.group_names
subjects = afqdata.subjects

# Visualize the data
# ------------------
# We can visualize the data using the :func:`plot_tract_profiles` function. We
# tell the function to use the `y` variable that we created as a grouping
# variable, so that we get separate tract profile lines for the participants
# with ALS and the controls. These plots are produced with means and 95%
# confidence intervals, separately for mean diffusivity and for fractional
# anisotropy.

plot_tract_profiles(
afqdata,
group_by=y,
)


# Reduce data dimensionality
# --------------------------
# Here we reduce computation time by taking the first 10 principal components of
Expand Down