diff --git a/.gitignore b/.gitignore
index 7970718..6bbfe35 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@ notebooks/.ipynb_checkpoints
 .DS_Store
 .ipynb_checkpoints
 /notebooks/eval/cleaned/tests/data
+/tmp/
diff --git a/envs/csi_pip_req_minimal.txt b/envs/csi_pip_req_minimal.txt
new file mode 100644
index 0000000..9d41fcf
--- /dev/null
+++ b/envs/csi_pip_req_minimal.txt
@@ -0,0 +1,32 @@
+# Do not forget to install the csi package (this repo) after installing the following
+anndata==0.9.1
+colorcet==3.0.1
+joblib==1.1.1
+matplotlib==3.7.1
+networkx==2.8.4
+numpy==1.24.3
+pandas==1.5.3
+#rpy2==3.5.11
+sacred==0.8.4
+scanpy==1.9.3
+scipy==1.9.3
+seaborn==0.12.2
+seml==0.3.7
+setuptools==66.0.0
+statsmodels==0.13.5
+torch==1.13.1
+torchmetrics==0.11.4
+typing_extensions==4.5.0
+adjustText==0.8
+pytorch_lightning==1.6.5
+#scib-metrics==0.4.0
+#scib-metrics==0.5.1
+scib-metrics==0.3.3
+scvi-tools==0.17.3
+scikit-learn==1.2.2
+leidenalg==0.10.2
+jax==0.4.10
+jaxlib==0.4.10
+ml-dtypes==0.1.0
+harmonypy==0.0.10
+
diff --git a/envs/perturb_pip_req_minimal.txt b/envs/perturb_pip_req_minimal.txt
new file mode 100644
index 0000000..6e20fce
--- /dev/null
+++ b/envs/perturb_pip_req_minimal.txt
@@ -0,0 +1,28 @@
+scgen==2.1.0
+
+anndata==0.9.1
+colorcet==3.0.1
+joblib==1.1.1
+matplotlib==3.7.1
+networkx==2.8.4
+numpy==1.24.3
+pandas==1.5.3
+#rpy2==3.5.11
+sacred==0.8.4
+scanpy==1.9.3
+scipy==1.9.3
+seaborn==0.12.2
+seml==0.3.7
+setuptools==66.0.0
+statsmodels==0.13.5
+torch==1.13.1
+torchmetrics==0.11.4
+typing_extensions==4.5.0
+adjustText==0.8
+pytorch_lightning==1.6.5
+#scib-metrics==0.4.0
+scib-metrics==0.5.1
+scvi-tools==0.17.3
+scikit-learn==1.2.2
+
+
diff --git a/envs/pipelines_pip_req_minimal.txt b/envs/pipelines_pip_req_minimal.txt
new file mode 100644
index 0000000..7d02bd0
--- /dev/null
+++ b/envs/pipelines_pip_req_minimal.txt
@@ -0,0 +1,5 @@
+pandas==1.5.3
+wandb==0.17.5
+seml==0.3.7
+numpy==1.24.3
+
diff --git a/envs/r_env_readme.txt b/envs/r_env_readme.txt
new file mode 100644
index 0000000..a867a31
--- /dev/null
+++ b/envs/r_env_readme.txt
@@ -0,0 +1,21 @@
+# We used an apptainer sandbox based on rocker/ml-verse
+```
+apptainer build --sandbox my_ml_verse docker://rocker/ml-verse
+```
+
+
+# Install python packages
+```
+pip install anndata
+```
+
+# Install R packages inside R:
+
+```
+install.packages("argparse")
+install.packages("Seurat")
+install.packages("harmony")
+install.packages("anndata")
+install.packages("reticulate")
+```
+
diff --git a/envs/scglue_pip_req_minimal.txt b/envs/scglue_pip_req_minimal.txt
new file mode 100644
index 0000000..15759e0
--- /dev/null
+++ b/envs/scglue_pip_req_minimal.txt
@@ -0,0 +1,28 @@
+scglue==0.3.2
+
+anndata==0.9.1
+colorcet==3.0.1
+joblib==1.1.1
+matplotlib==3.7.1
+networkx==2.8.4
+numpy==1.24.3
+pandas==1.5.3
+#rpy2==3.5.11
+sacred==0.8.4
+scanpy==1.9.3
+scipy==1.9.3
+seaborn==0.12.2
+seml==0.3.7
+setuptools==66.0.0
+statsmodels==0.13.5
+torch==1.13.1
+torchmetrics==0.11.4
+typing_extensions==4.5.0
+adjustText==0.8
+pytorch_lightning==1.6.5
+#scib-metrics==0.4.0
+scib-metrics==0.5.1
+scvi-tools==0.17.3
+scikit-learn==1.2.2
+
+
diff --git a/envs/sysvi_pip_req_minimal.txt b/envs/sysvi_pip_req_minimal.txt
new file mode 100644
index 0000000..99f8089
--- /dev/null
+++ b/envs/sysvi_pip_req_minimal.txt
@@ -0,0 +1,29 @@
+anndata==0.11.1
+colorcet==3.1.0
+joblib==1.4.2
+matplotlib==3.9.3
+networkx==3.4.2
+numpy==1.26.4
+pandas==2.2.3
+#rpy2
+sacred==0.8.7
+scanpy==1.10.4
+scipy==1.14.1
+seaborn==0.13.2
+seml==0.5.4
+statsmodels==0.14.4
+torch==2.5.1
+torchmetrics==1.6.0
+typing_extensions==4.12.2
+adjustText==1.3.0
+# pytorch_lightning
+#scib-metrics
+scib-metrics==0.5.1
+# scvi-tools
+
+# git+https://github.com/Hrovatin/scvi-tools.git@main
+git+https://github.com/Hrovatin/scvi-tools.git@stable
+
+scikit-learn==1.6.0
+leidenalg==0.10.2
+
diff --git a/notebooks/data/retina_atlas_sc_sn.py b/notebooks/data/retina_atlas_sc_sn.py
new file mode 100644
index 0000000..fdb8d2c
--- /dev/null
+++ b/notebooks/data/retina_atlas_sc_sn.py
@@ -0,0 +1,437 @@
+# ---
+# jupyter:
+#   jupytext:
+#     formats: ipynb,py:percent
+#     text_representation:
+#       extension: .py
+#       format_name: percent
+#       format_version: '1.3'
+#     jupytext_version: 1.16.3
+#   kernelspec:
+#     display_name: csi
+#     language: python
+#     name: csi
+# ---
+
+# %%
+# %load_ext autoreload
+# %autoreload 2
+
+# %%
+import scanpy as sc
+import pandas as pd
+import numpy as np
+from collections import defaultdict
+from scipy.io import mmread
+from scipy.sparse import csr_matrix
+import pickle as pkl
+
+from sklearn.metrics.pairwise import euclidean_distances
+from scipy.stats import mannwhitneyu
+from statsmodels.stats.multitest import multipletests
+
+import gc
+
+from matplotlib import rcParams
+import matplotlib.pyplot as plt
+import seaborn as sb
+
+import scib_metrics as sm
+import sys
+import os
+sys.path.append('/'.join(os.getcwd().split('/')[:-1]+['eval','cleaned','']))
+from metrics import ilisi, asw_batch
+
+
+# %%
+path_data = '/home/moinfar/data/human_retina_atlas/'
+path_sc_data = path_data + 'human_retina_atlas_scrna_all.h5ad'
+path_sn_data = path_data + 'human_retina_atlas_snrna_all.h5ad'
+path_save = path_data + 'human_retina_atlas_sc_sn_hvg.h5ad'
+
+# %% [markdown]
+# # Load data
+
+# %% [markdown]
+# # Combine adatas for training
+
+# %% [markdown]
+# Sn needs to be subset to the same type of fat as sc, as otherwise there are a lot of unmatched cts. In sc we don't need to remove cancer patients as this does not seem to affect fat.
+
+# %%
+# PP sc
+adata_sc=sc.read(path_sc_data)
+adata_sc.X = adata_sc.raw.X
+del adata_sc.raw
+adata_sc.obs['system'] = 0
+adata_sc
+
+# %%
+# Subset to expr genes and normalise
+adata_sc = adata_sc[:,np.array((adata_sc.X>0).sum(axis=0)>20).ravel()].copy()
+adata_sc.layers['counts']=adata_sc.X.copy()
+sc.pp.normalize_total(adata_sc, target_sum=1e4)
+sc.pp.log1p(adata_sc)
+# HVG
+sc.pp.highly_variable_genes(
+    adata=adata_sc, n_top_genes=3000, flavor='cell_ranger', batch_key='donor_id', subset=True)
+
+adata_sc
+
+# %%
+adata_sc.write(path_data + '_tmp_sc_hvg.h5ad')
+
+# %%
+adata_sc = sc.read(path_data + '_tmp_sc_hvg.h5ad')
+adata_sc
+
+# %%
+
+# %%
+
+# %%
+# PP sn
+adata_sn=sc.read(path_sn_data)
+adata_sn.X = adata_sn.raw.X
+del adata_sn.raw
+adata_sn.obs['system'] = 1
+adata_sn
+
+# %%
+# Subset to expr genes and normalise
+adata_sn=adata_sn[:,np.array((adata_sn.X>0).sum(axis=0)>20).ravel()].copy()
+adata_sn.layers['counts']=adata_sn.X.copy()
+sc.pp.normalize_total(adata_sn, target_sum=1e4)
+sc.pp.log1p(adata_sn)
+# HVG
+sc.pp.highly_variable_genes(
+    adata=adata_sn, n_top_genes=3000, flavor='cell_ranger', batch_key='donor_id', subset=True)
+
+adata_sn
+
+# %%
+adata_sn.write(path_data + '_tmp_sn_hvg.h5ad')
+
+# %%
+adata_sn = sc.read(path_data + '_tmp_sn_hvg.h5ad')
+adata_sn
+
+# %%
+
+# %%
+# Shared HVGs
+shared_hvgs=list(set(adata_sc.var_names) & set(adata_sn.var_names))
+len(shared_hvgs)
+
+# %%
+
+# %% [markdown]
+# Match cell type names
+
+# %%
+adata_sc.obs.cell_type.value_counts(), adata_sn.obs.cell_type.value_counts()
+
+# %%
+assert len(set(adata_sn.obs.donor_id.unique()).intersection(set(adata_sc.obs.donor_id.unique()))) == 0
+
+# %% [markdown]
+# Joint adata
+
+# %%
+# Subset to shared HVGs and concat
+adata=sc.concat([adata_sc[:,shared_hvgs], adata_sn[:,shared_hvgs]],
+                join='outer',
+                index_unique='_', keys=['sc','sn'])
+adata
+
+# %%
+pd.crosstab(adata.obs.cell_type, adata.obs.system)
+
+# %%
+# N samples and cells per system
+display(adata.obs.groupby('system')['donor_id'].nunique())
+display(adata.obs.groupby('system').size())
+
+# %%
+
+# %% [markdown]
+# Add PCA for scGLUE
+
+# %%
+# PCA and clusters per system
+n_pcs=15
+X_pca_system=[]
+for system in adata.obs.system.unique():
+    adata_sub=adata[adata.obs.system==system,:].copy()
+    sc.pp.scale(adata_sub)
+    sc.pp.pca(adata_sub, n_comps=n_pcs)
+    X_pca_system.append(pd.DataFrame(adata_sub.obsm['X_pca'],index=adata_sub.obs_names))
+del adata_sub
+X_pca_system=pd.concat(X_pca_system)
+adata.obsm['X_pca_system']=X_pca_system.loc[adata.obs_names,:].values
+
+# %% [markdown]
+# ### Save
+
+# %%
+adata
+
+# %%
+adata.write(path_save)
+
+# %%
+
+# %% [markdown]
+# # Non-integrated embedding
+
+# %%
+adata = sc.read(path_save)
+# Reuse path_save as the output directory from here on
+path_save = path_data
+
+# %%
+# Non-integrated embedding
+n_pcs=15
+cells_eval=np.random.RandomState(seed=0).permutation(adata.obs_names)[:100000]
+adata_temp=adata[cells_eval,:].copy()
+sc.pp.scale(adata_temp)
+sc.pp.pca(adata_temp, n_comps=n_pcs)
+sc.pp.neighbors(adata_temp, use_rep='X_pca')
+sc.tl.umap(adata_temp)
+
+# %%
+# Slimmed down data for saving
+adata_embed=sc.AnnData(adata_temp.obsm['X_pca'],obs=adata_temp.obs)
+for k in ['pca','neighbors','umap']:
+    adata_embed.uns[k]=adata_temp.uns[k]
+adata_embed.obsm['X_umap']=adata_temp.obsm['X_umap']
+for k in ['distances', 'connectivities']:
+    adata_embed.obsp[k]=adata_temp.obsp[k]
+display(adata_embed)
+
+# %%
+# Save
+adata_embed.write(path_save + 'human_retina_atlas_sc_sn_hvg_embed.h5ad')
+
+# %% [markdown]
+# # Integration metrics on non-integrated data
+
+# %%
+# Reload
+adata_embed=sc.read(path_save + 'human_retina_atlas_sc_sn_hvg_embed.h5ad')
+
+# %%
+# Check ranges of individual PCs
+rcParams['figure.figsize']=(6,2)
+_=plt.boxplot(adata_embed.X)
+plt.ylabel('PCA value')
+plt.xlabel('PCs')
+
+# %%
+# Compute ASW
+asw, asw_macro, asw_data_label=asw_batch(
+    X=adata_embed.X,
+    batches=adata_embed.obs['system'],
+    labels=adata_embed.obs['cell_type'])
+
+# %%
+asws={
+    'asw_micro':asw,
+    'asw_macro':asw_macro,
+    'asw_data_label':asw_data_label
+}
+for k,v in asws.items():
+    print(k)
+    print(v)
+    print('\n')
+
+# %%
+pkl.dump({'asw_batch':asws},open(path_save+'human_retina_atlas_sc_sn_hvg_embed_integrationMetrics.pkl','wb'))
+
+# %% [markdown]
+# # Moran's I for eval
+# Find genes appropriate for computing Moran's I for evaluation in every sample-cell type group (of sufficient size), by computing Moran's I on per-sample non-integrated data. This can then also be used as a reference later on to compute the relative preservation of Moran's I.
+#
+
+# %%
+adata_path = path_data + 'human_retina_atlas_sc_sn_hvg.h5ad'
+adata=sc.read(adata_path)
+adata
+
+# %%
+# Potential groups to compute Moran's I on (batch-system and group)
+pd.crosstab(adata.obs.donor_id, adata.obs.cell_type)
+
+# %%
+# Filtered groups based on N cells
+groups=adata.obs.groupby(['cell_type', 'system', 'donor_id']).size()
+groups=groups[groups>=500]
+display(groups)
+print('N cell types', groups.index.get_level_values('cell_type').nunique())
+
+# %%
+# Compute Moran's I per group
+data=[]
+for group in groups.index:
+    # Group adata
+    print(group)
+    adata_sub=adata[
+        (adata.obs.cell_type==group[0]).values&\
+        (adata.obs.system==group[1]).values&\
+        (adata.obs.donor_id==group[2]).values,:].copy()
+    # Remove lowly expr genes before Moran's I computation as they are less likely to be relevant
+    # As this is done per small cell group (within sample+cell type) on HVGs, there are not many genes left (200-500),
+    # so all of them can be used for Moran's I computation
+    sc.pp.filter_genes(adata_sub, min_cells=adata_sub.shape[0]*0.1)
+    # Compute embedding of group
+    sc.pp.pca(adata_sub, n_comps=15)
+    sc.pp.neighbors(adata_sub, n_pcs=15)
+    # Compute I
+    morans_i=sc.metrics._morans_i._morans_i(
+        g=adata_sub.obsp['connectivities'],
+        vals=adata_sub.X.T)
+    # Save data
+    morans_i=pd.DataFrame({'morans_i':morans_i},index=adata_sub.var_names)
+    morans_i['group']=group[0]
+    morans_i['system']=group[1]
+    morans_i['batch']=group[2]
+    data.append(morans_i)
+data=pd.concat(data,axis=0)
+
+# %%
+# Moran's I distn across groups
+sb.catplot(x='batch',y='morans_i',hue='system',row='group',data=data,kind='violin',
+           inner=None,height=3,aspect=5)
+
+# %%
+# Moran's I thr
+thr_mi=0.2
+
+# %%
+# N genes per group at the chosen thr
+data.groupby(['group','system','batch']).apply(lambda x: (x['morans_i']>=thr_mi).sum())
+
+# %%
+# N genes vs N cells in group
+rcParams['figure.figsize']=(4,4)
+sb.scatterplot(x='N cells',y='N genes',hue='level_0',style='system',
+               data=pd.concat(
+                   [data.groupby(['group','system','batch']).apply(lambda x: (x['morans_i']>=thr_mi).sum()).rename('N genes'),
+                    groups.rename('N cells')],axis=1).reset_index())
+plt.legend(bbox_to_anchor=(1,1))
+plt.xscale('log')
+
+# %% [markdown]
+# C: A thr of 0.2 seems to separate roughly between highly and lowly variable genes; it keeps at least some genes for every group and not too many in any of the groups.
+#
+# C: There is no clear bias between N cells in group and N genes.
+#
+# C: Selected genes may not be diverse though - they may capture the same pattern, and more subtle patterns may sit at lower Moran's I. A quick check is sketched below.
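+
+# %% [markdown]
+# A minimal sketch of such a redundancy check (illustration only; uses the first filtered group):
+# correlate the selected genes within one group - blocks of highly correlated genes would
+# indicate that they capture a single shared pattern.
+
+# %%
+ct,system,donor=groups.index[0]
+genes=data.query('group==@ct & system==@system & batch==@donor & morans_i>=@thr_mi').index
+adata_sub=adata[(adata.obs.cell_type==ct).values&
+                (adata.obs.system==system).values&
+                (adata.obs.donor_id==donor).values, list(genes)]
+# Densify only this small subset for the correlation computation
+X=adata_sub.X
+X=X.toarray() if hasattr(X,'toarray') else X
+corr=pd.DataFrame(np.corrcoef(X.T),index=genes,columns=genes)
+sb.clustermap(corr,cmap='coolwarm',vmin=-1,vmax=1,figsize=(5,5))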
+
+# %%
+# Prepare selected genes for saving (filtered genes & I per group)
+selected=list()
+for group,data_sub in data.groupby(['group','system','batch']):
+    group=dict(zip(['group','system','batch'],group))
+    group['genes']=(data_sub.query('morans_i>=@thr_mi')['morans_i']+1)/2
+    selected.append(group)
+
+# %%
+# Save
+pkl.dump(selected,open(path_data+'human_retina_atlas_sc_sn_hvg_moransiGenes.pkl','wb'))
+
+# %% [markdown]
+# # Batch effects within and between systems
+
+# %%
+adata_path = path_data + 'human_retina_atlas_sc_sn_hvg.h5ad'
+adata=sc.read(adata_path)
+adata
+
+# %%
+# Compute PCA on the whole data
+adata_scl=adata.copy()
+sc.pp.scale(adata_scl)
+n_pcs=15
+sc.pp.pca(adata_scl, n_comps=n_pcs)
+pca=pd.DataFrame(adata_scl.obsm['X_pca'],index=adata_scl.obs_names)
+del adata_scl
+
+# %%
+# Average PCA across system-batch-group pseudobulks.
+# Only use pseudobulks with at least 50 cells
+# Only use cell types with at least 3 samples per system
+pca[['system','batch','group']]=adata.obs[['system', 'donor_id', 'cell_type']]
+pca_pb=pca.groupby(['system','batch','group'])
+pca_mean=pca_pb.mean()
+pb_size=pca_pb.size()
+# Remove samples with too few cells
+filtered_pb=pb_size.index[pb_size>=50]
+# Get pbs/cts where both systems have enough samples
+n_samples_system=filtered_pb.to_frame().rename({'group':'group_col'},axis=1).groupby(
+    'group_col',observed=True)['system'].value_counts().rename('n_samples').reset_index()
+cts=set(n_samples_system.query('system==0 & n_samples>=3').group_col)&\
+    set(n_samples_system.query('system==1 & n_samples>=3').group_col)
+filtered_pb=filtered_pb[filtered_pb.get_level_values(2).isin(cts)]
+pca_mean=pca_mean.loc[filtered_pb,:]
+
+# %%
+# Compute per-ct distances of samples within and between systems
+distances={}
+for ct in cts:
+    pca_s0=pca_mean[(pca_mean.index.get_level_values(0)==0) &
+                    (pca_mean.index.get_level_values(2)==ct)]
+    pca_s1=pca_mean[(pca_mean.index.get_level_values(0)==1) &
+                    (pca_mean.index.get_level_values(2)==ct)]
+    d_s0=euclidean_distances(pca_s0)[np.triu_indices(pca_s0.shape[0],k=1)]
+    d_s1=euclidean_distances(pca_s1)[np.triu_indices(pca_s1.shape[0],k=1)]
+    d_s0s1=euclidean_distances(pca_s0,pca_s1).ravel()
+    distances[ct]={'s0':d_s0,'s1':d_s1,'s0s1':d_s0s1}
+
+# %%
+# Save distances
+pkl.dump(distances,open(path_data + 'human_retina_atlas_sc_sn_hvg_PcaSysBatchDist.pkl','wb'))
+
+# %%
+distances=pkl.load(open(path_data + 'human_retina_atlas_sc_sn_hvg_PcaSysBatchDist.pkl','rb'))
+
+# %%
+# Prepare df for plotting
+plot=[]
+for ct,dat in distances.items():
+    for comparison,dist in dat.items():
+        dist=pd.DataFrame(dist,columns=['dist'])
+        dist['group']=ct
+        dist['comparison']=comparison
+        plot.append(dist)
+plot=pd.concat(plot)
+
+# %%
+# Plot distances
+sb.catplot(x='comparison',y='dist',col='group',
+           data=plot.reset_index(drop=True),kind='swarm',
+           sharey=False, height=2.5,aspect=1 )
+
+# %% [markdown]
+# Evaluate statistical significance
+
+# %%
+# Compute significance of differences within and across systems
+signif=[]
+for ct,dat in distances.items():
+    for ref in ['s0','s1']:
+        u,p=mannwhitneyu( dat[ref],dat['s0s1'],alternative='less')
+        signif.append(dict( cell_type=ct,system=ref, u=u,pval=p,
+                           n_system=dat[ref].shape[0],n_crossystem=dat['s0s1'].shape[0]))
+signif=pd.DataFrame(signif)
+signif['padj']=multipletests(signif['pval'],method='fdr_bh')[1]
+
+# %%
+signif
+
+# %%
+# Save signif
+signif.to_csv(path_data + 'human_retina_atlas_sc_sn_hvg_PcaSysBatchDist_Signif.tsv',sep='\t',index=False)
+
+# %%
+
+# %%
diff --git a/notebooks/data/skin_mouse_human.py b/notebooks/data/skin_mouse_human.py
new file mode 100644
index 0000000..653b3b4
--- /dev/null
+++ b/notebooks/data/skin_mouse_human.py
@@ -0,0 +1,1140 @@
+# ---
+# jupyter:
+#   jupytext:
+#     formats: ipynb,py:percent
+#     text_representation:
+#       extension: .py
+#       format_name: percent
+#       format_version: '1.3'
+#     jupytext_version: 1.16.3
+#   kernelspec:
+#     display_name: csi
+#     language: python
+#     name: csi
+# ---
+
+# %%
+import scanpy as sc
+import pandas as pd
+import numpy as np
+from collections import defaultdict
+from scipy.io import mmread
+from scipy.sparse import csr_matrix
+import pickle as pkl
+from pathlib import Path
+
+from sklearn.metrics.pairwise import euclidean_distances
+from scipy.stats import mannwhitneyu
+from statsmodels.stats.multitest import multipletests
+
+import gc
+
+from matplotlib import rcParams
+import matplotlib.pyplot as plt
+import seaborn as sb
+
+import scib_metrics as sm
+import sys
+import os
+sys.path.append('/'.join(os.getcwd().split('/')[:-1]+['eval','cleaned','']))
+from metrics import ilisi,asw_batch
+
+# %%
+path_data='/home/moinfar/data/skin_mouse_human/'
+path_mm=path_data+'GSE222250.h5ad'
+path_hs=path_data+'HRA004936.h5ad'
+path_genes=path_data
+path_save=Path(path_data+'processed/')
+path_save.mkdir(parents=True, exist_ok=True)
+
+# %%
+# Orthologues
+orthology_info=pd.read_table(path_genes+'orthologues_ORGmus_musculus_ORG2homo_sapiens_V109.tsv'
+                            ).rename(
+    {'Gene name':'gs_mm','Human gene name':'gs_hs',
+     'Gene stable ID':'eid_mm','Human gene stable ID':'eid_hs'},axis=1)
+orthology_info[:3]
+
+# %% [markdown]
+# # One-to-one orthologues
+
+# %% [markdown]
+# ## Prepare data for integration
+
+# %%
+# One-to-one orthologues - keep only genes whose mm/hs entries do not appear in the table more than once
+oto_orthologues=orthology_info[~orthology_info.duplicated('eid_mm',keep=False).values &
+                               ~orthology_info.duplicated('eid_hs',keep=False).values]
+
+# %%
+oto_orthologues.shape
+
+# %% [markdown]
+# ### Mouse
+
+# %%
+adata_mm=sc.read(path_mm)
+
+# %%
+adata_mm
+
+# %%
+# TODO: check what to remove
+
+# %%
+# Add raw expression to X, remove lowly expr genes, and normalise
+adata_mm_raw=adata_mm.raw.to_adata()
+adata_mm=adata_mm_raw.copy()
+adata_mm.layers['counts']=adata_mm_raw[adata_mm.obs_names,adata_mm.var_names].X.copy()
+del adata_mm_raw
+adata_mm=adata_mm[:,np.array((adata_mm.X>0).sum(axis=0)>20).ravel()]
+adata_mm=adata_mm[:,[g for g in oto_orthologues.eid_mm if g in adata_mm.var_names]]
+sc.pp.normalize_total(adata_mm, target_sum=1e4)
+sc.pp.log1p(adata_mm)
+
+# %%
+adata_mm
+
+# %%
+# HVGs on the final cell subset
+# donor_id and batch are the same in this dataset
+sc.pp.highly_variable_genes(
+    adata=adata_mm, n_top_genes=3000, flavor='cell_ranger', batch_key='donor_id',
+    subset=True)
+adata_mm.shape
+
+# %%
+adata_mm
+
+# %% [markdown]
+# ### Human
+
+# %%
+adata_hs=sc.read(path_hs)
+
+# %%
+adata_hs
+
+# %%
+# Remove erythrocytes
+adata_hs = adata_hs[adata_hs.obs['cell_type'] != 'erythrocyte'].copy()
+adata_hs
+
+# %%
+adata_hs.X[:100, :100].max()
+
+# %%
+adata_hs.var.index[:3], oto_orthologues[:3]
+
+# %%
+# X already contains counts; remove lowly expr genes and normalise
+adata_hs=adata_hs[:,np.array((adata_hs.X>0).sum(axis=0)>20).ravel()]
+gs=set(adata_hs.var_names)
+adata_hs=adata_hs[:,[g for g in set(oto_orthologues.eid_hs.values) if g in gs]]
+adata_hs.layers['counts']=adata_hs.X.copy()
+sc.pp.normalize_total(adata_hs, target_sum=1e4) +sc.pp.log1p(adata_hs) + +# %% +# HVGs on the final cell subset +# donor_id and sample_id are equal in this data +sc.pp.highly_variable_genes( + adata=adata_hs, n_top_genes=3000, flavor='cell_ranger', batch_key='donor_id', + subset=True) +adata_hs.shape + +# %% +adata_hs + +# %% [markdown] +# ### Shared genes + +# %% +# Find shared HVGs +eids_hs=set(adata_hs.var_names) +eids_mm=set(adata_mm.var_names) +shared_orthologues=oto_orthologues.query('eid_hs in @eids_hs') +shared_orthologues=shared_orthologues.query('eid_mm in @eids_mm') +print('N shared:',shared_orthologues.shape[0]) + +# %% +# Subset adatas to shared HVGs +# This already ensures same gene order +adata_hs=adata_hs[:,shared_orthologues.eid_hs] +adata_mm=adata_mm[:,shared_orthologues.eid_mm] + + +# %% [markdown] +# ### Combine adatas of mm and hs + +# %% [markdown] +# Match cell types + +# %% +sorted(adata_hs.obs['cell_type'].str.lower().unique()) + +# %% +sorted(adata_mm.obs['cell_type'].str.lower().unique()) + +# %% +adata_mm.obs['cell_type_eval'] = adata_mm.obs['cell_type'].str.lower().replace({ + 'schwann cell': 'SOX10', + 'chondrocyte': 'Chondrocyte', + 'endothelial cell': 'Endothelial', + 'fibroblast': 'Fibroblast', + 'fibroblast of lymphatic vessel': 'Lymphatic', + 'innate lymphoid cell': 'CD3E & CPA3', # CD3E cluster + 'keratinocyte': 'Epidermal', + 'mast cell': 'CD3E & CPA3', # CPA3 cluster + 'neutrophil': 'Neutrophil', # Not sure if they should be separate (absent in human) or if in human they are present within the Immune cluster and should be merged there + 'pericyte': 'Pericyte', + 'professional antigen presenting cell': 'Immune', +}) +pd.crosstab(adata_mm.obs['cell_type'], adata_mm.obs['cell_type_eval']) + +# %% +adata_hs.obs['cell_type_eval']=adata_hs.obs['cell_type'].str.lower().replace({ + 'langerhans cell': 'Immune', # CD74 and others cluster + 'basal cell of epidermis': 'Epidermal', + 'cytotoxic t cell': 'CD3E & CPA3', # CD3E and CPA3 are two separate cell types, but in human they are not properly subclustered (in mouse they are), so I merged the annotation + 'dendritic cell, human': 'Immune', + 'endothelial cell': 'Endothelial', + 'endothelial cell of lymphatic vessel': 'Lymphatic', + 'fibroblast': 'Fibroblast', + 'fibroblast of papillary layer of dermis': 'Fibroblast', + 'granular cell of epidermis': 'Epidermal', + 'hair follicular keratinocyte': 'Epidermal', + 'helper t cell': 'CD3E & CPA3', + 'macrophage': 'Immune', + 'melanocyte of skin': 'SOX10', # Schwann & melanocyte cluster + 'monocyte': 'Immune', + 'natural killer cell': 'CD3E & CPA3', + 'pericyte': 'Pericyte', + 'prickle cell': 'Epidermal', + 'regulatory t cell': 'CD3E & CPA3', + 'skin fibroblast': 'Fibroblast', + 'suprabasal keratinocyte': 'Epidermal', +}) +pd.crosstab(adata_hs.obs['cell_type'], adata_hs.obs['cell_type_eval']) + +# %% +sorted(adata_mm.obs['cell_type_eval'].unique()) + +# %% +sorted(adata_hs.obs['cell_type_eval'].unique()) + +# %% + +# %% +# Prepare adatas for concat and concat +adata_hs_sub=adata_hs.copy() +adata_mm_sub=adata_mm.copy() + +# Human +obs_keep_hs=['donor_id', 'disease', 'sex', 'cell_type', 'cell_type_eval', 'sample_id'] +adata_hs_sub.obs=adata_hs_sub.obs[obs_keep_hs] +adata_hs_sub.obs.rename({'sample_id': 'batch'}, axis=1, inplace=True) +adata_hs_sub.obs.rename({c:'hs_'+c for c in adata_hs_sub.obs.columns + if c not in ['cell_type_eval', 'batch']}, + axis=1, inplace=True) +adata_hs_sub.obs['system']=1 +del adata_hs_sub.obsm +del adata_hs_sub.uns + +# Mouse 
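+# Keep matching obs columns; species-specific ones get an 'mm_' prefix below so they stay distinct after concat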
+obs_keep_mm=['donor_id', 'disease', 'cell_type','cell_type_eval', 'batch']
+adata_mm_sub.obs=adata_mm_sub.obs[obs_keep_mm]
+adata_mm_sub.obs.rename({'batch': 'batch'}, axis=1, inplace=True)  # no-op, kept for symmetry with the human block
+adata_mm_sub.obs.rename({c:'mm_'+c for c in adata_mm_sub.obs.columns
+                         if c not in ['cell_type_eval', 'batch']},
+                        axis=1, inplace=True)
+adata_mm_sub.obs['system']=0
+
+del adata_mm_sub.obsm
+del adata_mm_sub.uns
+
+# Concat
+adata_hs_sub.var['gene_symbol'] = adata_hs_sub.var.GeneSym
+adata_hs_sub.var['gs_hs'] = adata_hs_sub.var.GeneSym
+adata_hs_sub.var['gs_mm'] = adata_mm_sub.var.feature_name.tolist()
+adata_hs_sub.var['hs_eid'] = adata_hs_sub.var_names
+adata_hs_sub.var['mm_eid'] = adata_mm_sub.var_names.tolist()
+adata_hs_sub.var_names = adata_hs_sub.var['gene_symbol'].tolist()  # Possible since genes are order-matched across species
+adata_hs_sub.var = adata_hs_sub.var[['gene_symbol', 'gs_hs', 'gs_mm', 'hs_eid', 'mm_eid']]
+adata_mm_sub.var = adata_hs_sub.var
+
+adata=sc.concat([adata_mm_sub, adata_hs_sub], join='outer', merge='unique')
+
+del adata_mm_sub
+del adata_hs_sub
+gc.collect()
+
+# %%
+adata.var, adata.obs
+
+# %%
+pd.crosstab(adata.obs.cell_type_eval,adata.obs.system)
+
+# %%
+# N samples and cells per system
+display(adata.obs.groupby('system')['batch'].nunique())
+display(adata.obs.groupby('system').size())
+
+# %% [markdown]
+# Add PCA and leiden for scGLUE and Saturn
+
+# %%
+# PCA and clusters per system
+n_pcs=15
+X_pca_system=[]
+leiden_system=[]
+for system in adata.obs.system.unique():
+    adata_sub=adata[adata.obs.system==system,:].copy()
+    sc.pp.scale(adata_sub)
+    sc.pp.pca(adata_sub, n_comps=n_pcs)
+    sc.pp.neighbors(adata_sub, n_pcs=n_pcs)
+    sc.tl.leiden(adata_sub)
+    X_pca_system.append(pd.DataFrame(adata_sub.obsm['X_pca'],index=adata_sub.obs_names))
+    leiden_system.append(adata_sub.obs.apply(lambda x: str(x['system'])+'_'+x['leiden'],axis=1))
+del adata_sub
+X_pca_system=pd.concat(X_pca_system)
+leiden_system=pd.concat(leiden_system)
+adata.obsm['X_pca_system']=X_pca_system.loc[adata.obs_names,:].values
+adata.obs['leiden_system']=leiden_system.loc[adata.obs_names].values
+
+# %% [markdown]
+# ### Save
+
+# %%
+display(adata)
+
+# %%
+adata.write(path_save / 'skin_mm_hs_hvg.h5ad')
+
+# %%
+path_save / 'skin_mm_hs_hvg.h5ad'
+
+# %% [markdown]
+# #### Save in format for scGLUE/Saturn
+
+# %%
+# Save with gene symbols in var names for Saturn
+adata_mm_sub=adata[adata.obs.system==0,:]
+adata_mm_sub.var_names=adata_mm_sub.var['gs_mm']
+adata_mm_sub.write(path_save / 'skin_mm_hs_mmPart_hvg.h5ad')
+adata_hs_sub=adata[adata.obs.system==1,:]
+adata_hs_sub.var_names=adata_hs_sub.var['gs_hs']
+adata_hs_sub.write(path_save / 'skin_mm_hs_hsPart_hvg.h5ad')
+
+# %%
+adata_mm_sub
+
+# %%
+adata_hs_sub
+
+# %%
+adata.var.to_csv(path_save / 'combined_orthologuesHVG_geneMapping.tsv',index=False,sep='\t')
+
+# %% [markdown]
+# ## Non-integrated embedding
+
+# %%
+# Non-integrated embedding
+n_pcs=15
+cells_eval=np.random.RandomState(seed=0).permutation(adata.obs_names)[:100000]
+adata_temp=adata[cells_eval,:].copy()
+sc.pp.scale(adata_temp)
+sc.pp.pca(adata_temp, n_comps=n_pcs)
+sc.pp.neighbors(adata_temp, use_rep='X_pca')
+sc.tl.umap(adata_temp)
+
+# %%
+# Slimmed down data for saving
+adata_embed=sc.AnnData(adata_temp.obsm['X_pca'],obs=adata_temp.obs)
+for k in ['pca','neighbors','umap']:
+    adata_embed.uns[k]=adata_temp.uns[k]
+adata_embed.obsm['X_umap']=adata_temp.obsm['X_umap']
+for k in ['distances', 'connectivities']:
+    adata_embed.obsp[k]=adata_temp.obsp[k]
+display(adata_embed)
+
+# %%
+# Save
+adata_embed.write(path_save / 'skin_mm_hs_hvg_embed.h5ad')
+
+# %% [markdown]
+# ## Integration metrics on non-integrated data
+
+# %%
+# Reload
+adata_embed=sc.read(path_save / 'skin_mm_hs_hvg_embed.h5ad')
+
+# %%
+# Check ranges of individual PCs
+rcParams['figure.figsize']=(6,2)
+_=plt.boxplot(adata_embed.X)
+plt.ylabel('PCA value')
+plt.xlabel('PCs')
+
+# %%
+# Compute ASW
+asw, asw_macro, asw_data_label=asw_batch(
+    X=adata_embed.X,
+    batches=adata_embed.obs['system'],
+    labels=adata_embed.obs['cell_type_eval'])
+
+# %%
+asws={
+    'asw_micro':asw,
+    'asw_macro':asw_macro,
+    'asw_data_label':asw_data_label
+}
+for k,v in asws.items():
+    print(k)
+    print(v)
+    print('\n')
+
+# %%
+pkl.dump({'asw_batch':asws},open(path_save / 'skin_mm_hs_hvg_embed_integrationMetrics.pkl','wb'))
+
+# %% [markdown]
+# ## Moran's I for eval
+# Find genes appropriate for computing Moran's I for evaluation in every sample-cell type group (of sufficient size), by computing Moran's I on per-sample non-integrated data. This can then also be used as a reference later on to compute the relative preservation of Moran's I.
+
+# %%
+adata=sc.read(path_save / 'skin_mm_hs_hvg.h5ad')
+
+# %%
+# Potential groups to compute Moran's I on (batch-system and group)
+cols=pd.get_option('display.max_rows')
+pd.set_option('display.max_rows', 120)
+display(pd.crosstab(adata.obs.batch,adata.obs.cell_type_eval))
+pd.set_option('display.max_rows', cols)
+
+# %%
+# Filtered groups based on N cells and also remove the doublets
+groups=adata.obs.groupby(['cell_type_eval','system','batch']).size()
+groups=groups[(groups>=500).values&~groups.index.get_level_values(0).str.contains('\+')]
+rows=pd.get_option('display.max_rows')
+pd.set_option('display.max_rows', 250)
+display(groups)
+pd.set_option('display.max_rows', rows)
+print('N cell types',groups.index.get_level_values('cell_type_eval').nunique())
+
+# %%
+# Compute Moran's I per group
+data=[]
+for group in groups.index:
+    # Group adata
+    print(group)
+    adata_sub=adata[
+        (adata.obs.cell_type_eval==group[0]).values&\
+        (adata.obs.system==group[1]).values&\
+        (adata.obs.batch==group[2]).values,:].copy()
+    # Remove lowly expr genes before Moran's I computation as they are less likely to be relevant
+    # As this is done per small cell group (within sample+cell type) on HVGs, there are not many genes left (200-500),
+    # so all of them can be used for Moran's I computation
+    sc.pp.filter_genes(adata_sub, min_cells=adata_sub.shape[0]*0.1)
+    # Compute embedding of group
+    sc.pp.pca(adata_sub, n_comps=15)
+    sc.pp.neighbors(adata_sub, n_pcs=15)
+    # Compute I
+    morans_i=sc.metrics._morans_i._morans_i(
+        g=adata_sub.obsp['connectivities'],
+        vals=adata_sub.X.T)
+    # Save data
+    morans_i=pd.DataFrame({'morans_i':morans_i},index=adata_sub.var_names)
+    morans_i['group']=group[0]
+    morans_i['system']=group[1]
+    morans_i['batch']=group[2]
+    data.append(morans_i)
+data=pd.concat(data,axis=0)
+
+# %%
+# Moran's I distn across groups
+sb.catplot(x='batch',y='morans_i',hue='system',row='group',data=data,kind='violin',
+           inner=None,height=3,aspect=5)
+
+# %%
+# Moran's I thr
+thr_mi=0.2
+
+# %%
+# N genes per group at the chosen thr
+rows=pd.get_option('display.max_rows')
+pd.set_option('display.max_rows', 250)
+display(data.groupby(['group','system','batch']).apply(lambda x: (x['morans_i']>=thr_mi).sum()))
+pd.set_option('display.max_rows', rows)
+
+# %%
+# N genes vs N cells in group
+rcParams['figure.figsize']=(4,4)
+sb.scatterplot(x='N cells',y='N genes',hue='level_0',style='system',
+               data=pd.concat(
+                   [data.groupby(['group','system','batch']).apply(lambda x: (x['morans_i']>=thr_mi).sum()).rename('N genes'),
+                    groups.rename('N cells')],axis=1).reset_index(),s=20)
+plt.legend(bbox_to_anchor=(1,1))
+plt.xscale('log')
+
+# %% [markdown]
+# C: A thr of 0.2 keeps at least some genes for every group and not too many in any of the groups. Some groups could potentially use a lower/higher thr.
+#
+# C: There is no clear bias between N cells in group and N genes.
+#
+# C: Selected genes may not be diverse though - they may capture the same pattern, and more subtle patterns may sit at lower Moran's I.
+
+# %%
+# Prepare selected genes for saving (filtered genes & I per group)
+selected=list()
+for group,data_sub in data.groupby(['group','system','batch']):
+    group=dict(zip(['group','system','batch'],group))
+    group['genes']=(data_sub.query('morans_i>=@thr_mi')['morans_i']+1)/2
+    selected.append(group)
+
+# %%
+# Save
+pkl.dump(selected,open(path_save / 'skin_mm_hs_hvg_moransiGenes.pkl','wb'))
+
+# %% [markdown]
+# ## Batch effects within and between systems
+
+# %%
+adata=sc.read(path_save / 'skin_mm_hs_hvg.h5ad')
+
+# %%
+# Compute PCA on the whole data
+adata_scl=adata.copy()
+sc.pp.scale(adata_scl)
+n_pcs=15
+sc.pp.pca(adata_scl, n_comps=n_pcs)
+pca=pd.DataFrame(adata_scl.obsm['X_pca'],index=adata_scl.obs_names)
+del adata_scl
+
+# %%
+# Average PCA across system-batch-group pseudobulks.
+# Only use pseudobulks with at least 50 cells
+# Only use cell types with at least 3 samples per system
+pca[['system','batch','group']]=adata.obs[['system','batch', 'cell_type_eval']]
+pca_pb=pca.groupby(['system','batch','group'])
+pca_mean=pca_pb.mean()
+pb_size=pca_pb.size()
+# Remove samples with too few cells
+filtered_pb=pb_size.index[pb_size>=50]
+# Get pbs/cts where both systems have enough samples
+n_samples_system=filtered_pb.to_frame().rename({'group':'group_col'},axis=1).groupby(
+    'group_col',observed=True)['system'].value_counts().rename('n_samples').reset_index()
+cts=set(n_samples_system.query('system==0 & n_samples>=3').group_col)&\
+    set(n_samples_system.query('system==1 & n_samples>=3').group_col)
+filtered_pb=filtered_pb[filtered_pb.get_level_values(2).isin(cts)]
+pca_mean=pca_mean.loc[filtered_pb,:]
+
+# %%
+# Compute per-ct distances of samples within and between systems
+distances={}
+for ct in cts:
+    # Data for computing distances
+    pca_s0=pca_mean[(pca_mean.index.get_level_values(0)==0) &
+                    (pca_mean.index.get_level_values(2)==ct)]
+    pca_s1=pca_mean[(pca_mean.index.get_level_values(0)==1) &
+                    (pca_mean.index.get_level_values(2)==ct)]
+
+    # Distances for s0 - within or between datasets
+    d_s0=euclidean_distances(pca_s0)
+    triu=np.triu_indices(pca_s0.shape[0],k=1)
+    idx_map=dict(enumerate(pca_s0.index.get_level_values(1)))
+    idx_within=([],[])
+    idx_between=([],[])
+    for i,j in zip(*triu):
+        if idx_map[i]==idx_map[j]:
+            idx_list=idx_within
+        else:
+            idx_list=idx_between
+        idx_list[0].append(i)
+        idx_list[1].append(j)
+    d_s0_within=d_s0[idx_within]
+    d_s0_between=d_s0[idx_between]
+
+    # Distances for s1 and s0s1
+    d_s1=euclidean_distances(pca_s1)[np.triu_indices(pca_s1.shape[0],k=1)]
+    d_s0s1=euclidean_distances(pca_s0,pca_s1).ravel()
+    distances[ct]={'s0_within':d_s0_within,'s0_between':d_s0_between,'s1':d_s1,'s0s1':d_s0s1}
+
+# %%
+# Save distances
+pkl.dump(distances,open(path_save / 'skin_mm_hs_hvg_PcaSysBatchDist.pkl','wb'))
+
+# %%
+# Reload distances
+#distances=pkl.load(open(path_save+'combined_orthologuesHVG_PcaSysBatchDist.pkl','rb'))
+
+# %%
+# Prepare df for plotting
+plot=[]
+for ct,dat in distances.items():
+    for comparison,dist in dat.items():
+        dist=pd.DataFrame(dist,columns=['dist'])
+        dist['group']=ct
+        dist['comparison']=comparison
+        plot.append(dist)
+plot=pd.concat(plot)
+
+# %%
+# Plot distances
+sb.catplot(x='comparison',y='dist',col='group',
+           data=plot.reset_index(drop=True),kind='violin',
+           sharey=False, height=2.5,aspect=1.5)
+
+# %% [markdown]
+# Evaluate statistical significance
+
+# %%
+# Compute significance of differences within and across systems
+signif=[]
+for ct,dat in distances.items():
+    for ref in ['s0_within','s0_between','s1']:
+        n_system=dat[ref].shape[0]
+        if n_system>=3:
+            u,p=mannwhitneyu( dat[ref],dat['s0s1'],alternative='less')
+            signif.append(dict( cell_type=ct,system=ref, u=u,pval=p,
+                               n_system=n_system,n_crossystem=dat['s0s1'].shape[0]))
+signif=pd.DataFrame(signif)
+signif['padj']=multipletests(signif['pval'],method='fdr_bh')[1]
+
+# %%
+signif
+
+# %%
+# Save signif
+signif.to_csv(path_save / 'skin_mm_hs_hvg_PcaSysBatchDist_Signif.tsv',sep='\t',index=False)
+
+# %% [markdown]
+# # Include non-one-to-one orthologues (Saturn, GLUE)
+
+# %% [markdown]
+# ## Prepare data for integration
+
+# %%
+adata = sc.read(path_save / 'skin_mm_hs_hvg.h5ad')
+adata
+
+# %%
+adata[adata.obs['system'] == 1].obs['batch'].nunique()
+
+# %% [markdown]
+# ### Mouse
+
+# %%
+adata_mm=sc.read(path_mm)
+
+# %%
+# Genes with unique symbols (as Saturn embeddings are symbol based)
+unique_gs=set(adata_mm.var_names[~adata_mm.var.duplicated('feature_name', keep=False)])
+
+# %%
+len(unique_gs)
+
+# %%
+# Add raw expression to X, remove lowly expr genes, and normalise
+adata_mm_raw=adata_mm.raw.to_adata()
+adata_mm=adata_mm_raw.copy()
+adata_mm.layers['counts']=adata_mm_raw[adata_mm.obs_names,adata_mm.var_names].X.copy()
+del adata_mm_raw
+adata_mm=adata_mm[:,np.array((adata_mm.X>0).sum(axis=0)>20).ravel()]
+adata_mm=adata_mm[:,[g for g in adata_mm.var_names if g in unique_gs]]
+sc.pp.normalize_total(adata_mm, target_sum=1e4)
+sc.pp.log1p(adata_mm)
+
+# %%
+# Add gene symbols as var names for Saturn
+adata_mm.var['mm_eid'] = adata_mm.var_names
+adata_mm.var['gs_mm'] = adata_mm.var.feature_name
+adata_mm.var_names = adata_mm.var.feature_name.astype(str)
+
+# %%
+# HVGs on the final cell subset
+sc.pp.highly_variable_genes(
+    adata=adata_mm, n_top_genes=3000, flavor='cell_ranger', batch_key='donor_id',
+    subset=True)
+adata_mm.shape
+
+# %%
+adata_mm
+
+# %% [markdown]
+# ### Human
+
+# %%
+adata_hs=sc.read(path_hs)
+
+# %%
+# Remove erythrocytes
+adata_hs = adata_hs[adata_hs.obs['cell_type'] != 'erythrocyte'].copy()
+adata_hs
+
+# %%
+unique_gs=set(adata_hs.var_names[~adata_hs.var.duplicated('GeneSym', keep=False)])
+
+# %%
+# X already contains counts; remove lowly expr genes and normalise
+adata_hs=adata_hs[:,np.array((adata_hs.X>0).sum(axis=0)>20).ravel()]
+adata_hs.layers['counts']=adata_hs.X.copy()
+adata_hs=adata_hs[:,[g for g in adata_hs.var_names if g in unique_gs]]
+sc.pp.normalize_total(adata_hs, target_sum=1e4)
+sc.pp.log1p(adata_hs)
+
+# %%
+# Add gene symbols as var names for Saturn
+
+adata_hs.var['hs_eid'] = adata_hs.var_names
+adata_hs.var['gs_hs'] = adata_hs.var.GeneSym
+adata_hs.var_names = adata_hs.var.GeneSym.astype(str)
+
+# %%
+# HVGs on the final cell subset
+sc.pp.highly_variable_genes(
+    adata=adata_hs, n_top_genes=3000, flavor='cell_ranger', batch_key='donor_id',
+    subset=True)
+adata_hs.shape
+
+# %%
+adata_hs
+
+# %% [markdown]
+# ### Combine adatas of mm and hs
+
+# %% [markdown]
+# Map cell type names
+
+# %%
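+# Transfer the harmonised labels from the combined object built above;
+# obs names were preserved during concat, so it can be indexed with them directly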
+adata_mm.obs['cell_type_eval']=adata[adata_mm.obs.index].obs['cell_type_eval']
+adata_hs.obs['cell_type_eval']=adata[adata_hs.obs.index].obs['cell_type_eval']
+
+# %%
+# Prepare adatas
+
+# Human
+obs_keep_hs=['donor_id', 'disease', 'sex', 'cell_type', 'cell_type_eval', 'sample_id']
+adata_hs_sub=adata_hs.copy()
+adata_hs_sub.obs=adata_hs_sub.obs[obs_keep_hs]
+adata_hs_sub.obs.rename({'sample_id': 'batch'}, axis=1, inplace=True)
+adata_hs_sub.obs.rename({c:'hs_'+c for c in adata_hs_sub.obs.columns
+                         if c not in ['cell_type_eval','batch']},
+                        axis=1, inplace=True)
+adata_hs_sub.obs['system']=1
+del adata_hs_sub.obsm
+del adata_hs_sub.uns
+adata_hs_sub.var = adata_hs_sub.var[['hs_eid', 'gs_hs']]
+
+# Mouse
+adata_mm_sub=adata_mm.copy()
+obs_keep_mm=['donor_id', 'disease', 'cell_type','cell_type_eval', 'batch']
+adata_mm_sub.obs=adata_mm_sub.obs[obs_keep_mm]
+adata_mm_sub.obs.rename({'batch': 'batch'}, axis=1, inplace=True)  # no-op, kept for symmetry with the human block
+adata_mm_sub.obs.rename({c:'mm_'+c for c in adata_mm_sub.obs.columns
+                         if c not in ['cell_type_eval','batch']},
+                        axis=1, inplace=True)
+adata_mm_sub.obs['system']=0
+
+del adata_mm_sub.obsm
+del adata_mm_sub.uns
+adata_mm_sub.var = adata_mm_sub.var[['mm_eid', 'gs_mm']]
+
+# %%
+
+# %% [markdown]
+# PCA and clusters for scGLUE and Saturn
+
+# %%
+# PCA and clusters per system
+n_pcs=15
+for adata_temp in [adata_hs_sub,adata_mm_sub]:
+    adata_sub=adata_temp.copy()
+    sc.pp.scale(adata_sub)
+    sc.pp.pca(adata_sub, n_comps=n_pcs)
+    sc.pp.neighbors(adata_sub, n_pcs=n_pcs)
+    sc.tl.leiden(adata_sub)
+    adata_temp.obsm['X_pca_system']=adata_sub[adata_temp.obs_names,:].obsm['X_pca']
+    adata_temp.obs['leiden_system']=adata_sub.obs.apply(lambda x: str(x['system'])+'_'+x['leiden'],axis=1)
+del adata_sub
+
+# %%
+adata_mm_sub
+
+# %%
+adata_hs_sub
+
+# %% [markdown]
+# #### Save in format for scGLUE/Saturn
+
+# %%
+# Save with gene symbols in var names for Saturn
+adata_mm_sub.write(path_save / 'skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad')
+adata_hs_sub.write(path_save / 'skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad')
+
+# %%
+genes_mm=set(adata_mm_sub.var_names)
+genes_hs=set(adata_hs_sub.var_names)
+gene_mapping=orthology_info.query('gs_mm in @genes_mm and gs_hs in @genes_hs')[['gs_mm','gs_hs']]
+print(gene_mapping.shape[0])
+gene_mapping.to_csv(path_save / 'skin_mm_hs_hvg_nonortholHVG_geneMapping.tsv',index=False,sep='\t')
+
+
+# %%
+#adata_mm_sub=sc.read(path_save+'combined-mmPart_nonortholHVG.h5ad')
+#adata_hs_sub=sc.read(path_save+'combined-hsPart_nonortholHVG.h5ad')
+
+# %% [markdown]
+# ## Moran's I eval of non-orthol
+# To make it comparable, this must use the same set of genes as above for the one-to-one orthologues, so no new Moran's I values/genes are selected.
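+
+# %% [markdown]
+# Sketch of the intended downstream use of the saved reference (illustration only; the
+# per-system PCA `X_pca_system` stands in here for an integrated embedding): recompute
+# Moran's I for one group's selected genes on that embedding and compare to the stored
+# per-sample values.
+
+# %%
+selected=pkl.load(open(path_save / 'skin_mm_hs_hvg_moransiGenes.pkl','rb'))
+example=selected[0]
+mask=(adata.obs.cell_type_eval==example['group']).values&\
+    (adata.obs.system==example['system']).values&\
+    (adata.obs.batch==example['batch']).values
+adata_sub=adata[mask,example['genes'].index].copy()
+sc.pp.neighbors(adata_sub, use_rep='X_pca_system')
+morans_i_embed=sc.metrics._morans_i._morans_i(
+    g=adata_sub.obsp['connectivities'],
+    vals=adata_sub.X.T)
+# Rescale as for the reference and compute relative preservation per gene
+rel=((morans_i_embed+1)/2)/example['genes'].values
+pd.Series(rel,index=example['genes'].index).describe()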
+
+# %%
+
+# %%
+
+# %%
+
+# %% [markdown]
+# # Remove all diseased human samples as a use-case
+
+# %%
+def limit_to_human(adata):
+    # Keeps all mouse cells (hs_disease is NaN there) and only non-psoriasis human cells
+    return adata[adata.obs['hs_disease'] != "psoriasis"].copy()
+
+
+# %%
+adata = sc.read(path_save / 'skin_mm_hs_hvg.h5ad')
+adata = limit_to_human(adata)
+adata.write(path_save / 'limited_data_skin_mm_hs_hvg.h5ad')
+
+adata_mm_sub = sc.read(path_save / 'skin_mm_hs_mmPart_hvg.h5ad')
+adata_mm_sub = limit_to_human(adata_mm_sub)
+adata_mm_sub.write(path_save / 'limited_data_skin_mm_hs_mmPart_hvg.h5ad')
+
+adata_hs_sub = sc.read(path_save / 'skin_mm_hs_hsPart_hvg.h5ad')
+adata_hs_sub = limit_to_human(adata_hs_sub)
+adata_hs_sub.write(path_save / 'limited_data_skin_mm_hs_hsPart_hvg.h5ad')
+
+
+# %%
+
+# %% [markdown]
+# ## Non-integrated embedding
+
+# %%
+# Non-integrated embedding
+n_pcs=15
+cells_eval=np.random.RandomState(seed=0).permutation(adata.obs_names)[:100000]
+adata_temp=adata[cells_eval,:].copy()
+sc.pp.scale(adata_temp)
+sc.pp.pca(adata_temp, n_comps=n_pcs)
+sc.pp.neighbors(adata_temp, use_rep='X_pca')
+sc.tl.umap(adata_temp)
+
+# %%
+# Slimmed down data for saving
+adata_embed=sc.AnnData(adata_temp.obsm['X_pca'],obs=adata_temp.obs)
+for k in ['pca','neighbors','umap']:
+    adata_embed.uns[k]=adata_temp.uns[k]
+adata_embed.obsm['X_umap']=adata_temp.obsm['X_umap']
+for k in ['distances', 'connectivities']:
+    adata_embed.obsp[k]=adata_temp.obsp[k]
+display(adata_embed)
+
+# %%
+# Save
+adata_embed.write(path_save / 'limited_data_skin_mm_hs_hvg_embed.h5ad')
+
+# %% [markdown]
+# ## Integration metrics on non-integrated data
+
+# %%
+# Reload
+adata_embed=sc.read(path_save / 'limited_data_skin_mm_hs_hvg_embed.h5ad')
+
+# %%
+# Check ranges of individual PCs
+rcParams['figure.figsize']=(6,2)
+_=plt.boxplot(adata_embed.X)
+plt.ylabel('PCA value')
+plt.xlabel('PCs')
+
+# %%
+# Compute ASW
+asw, asw_macro, asw_data_label=asw_batch(
+    X=adata_embed.X,
+    batches=adata_embed.obs['system'],
+    labels=adata_embed.obs['cell_type_eval'])
+
+# %%
+asws={
+    'asw_micro':asw,
+    'asw_macro':asw_macro,
+    'asw_data_label':asw_data_label
+}
+for k,v in asws.items():
+    print(k)
+    print(v)
+    print('\n')
+
+# %%
+pkl.dump({'asw_batch':asws},open(path_save / 'limited_data_skin_mm_hs_hvg_embed_integrationMetrics.pkl','wb'))
+
+
+# %% [markdown]
+# ## Moran's I for eval
+# Find genes appropriate for computing Moran's I for evaluation in every sample-cell type group (of sufficient size), by computing Moran's I on per-sample non-integrated data. This can then also be used as a reference later on to compute the relative preservation of Moran's I.
+
+# %%
+adata=sc.read(path_save / 'limited_data_skin_mm_hs_hvg.h5ad')
+
+# %%
+# Potential groups to compute Moran's I on (batch-system and group)
+cols=pd.get_option('display.max_rows')
+pd.set_option('display.max_rows', 120)
+display(pd.crosstab(adata.obs.batch,adata.obs.cell_type_eval))
+pd.set_option('display.max_rows', cols)
+
+# %%
+# Filtered groups based on N cells and also remove the doublets
+groups=adata.obs.groupby(['cell_type_eval','system','batch']).size()
+groups=groups[(groups>=500).values&~groups.index.get_level_values(0).str.contains('\+')]
+rows=pd.get_option('display.max_rows')
+pd.set_option('display.max_rows', 250)
+display(groups)
+pd.set_option('display.max_rows', rows)
+print('N cell types',groups.index.get_level_values('cell_type_eval').nunique())
+
+# %%
+# Compute Moran's I per group
+data=[]
+for group in groups.index:
+    # Group adata
+    print(group)
+    adata_sub=adata[
+        (adata.obs.cell_type_eval==group[0]).values&\
+        (adata.obs.system==group[1]).values&\
+        (adata.obs.batch==group[2]).values,:].copy()
+    # Remove lowly expr genes before Moran's I computation as they are less likely to be relevant
+    # As this is done per small cell group (within sample+cell type) on HVGs, there are not many genes left (200-500),
+    # so all of them can be used for Moran's I computation
+    sc.pp.filter_genes(adata_sub, min_cells=adata_sub.shape[0]*0.1)
+    # Compute embedding of group
+    sc.pp.pca(adata_sub, n_comps=15)
+    sc.pp.neighbors(adata_sub, n_pcs=15)
+    # Compute I
+    morans_i=sc.metrics._morans_i._morans_i(
+        g=adata_sub.obsp['connectivities'],
+        vals=adata_sub.X.T)
+    # Save data
+    morans_i=pd.DataFrame({'morans_i':morans_i},index=adata_sub.var_names)
+    morans_i['group']=group[0]
+    morans_i['system']=group[1]
+    morans_i['batch']=group[2]
+    data.append(morans_i)
+data=pd.concat(data,axis=0)
+
+# %%
+# Moran's I distn across groups
+sb.catplot(x='batch',y='morans_i',hue='system',row='group',data=data,kind='violin',
+           inner=None,height=3,aspect=5)
+
+# %%
+# Moran's I thr
+thr_mi=0.2
+
+# %%
+# N genes per group at the chosen thr
+rows=pd.get_option('display.max_rows')
+pd.set_option('display.max_rows', 250)
+display(data.groupby(['group','system','batch']).apply(lambda x: (x['morans_i']>=thr_mi).sum()))
+pd.set_option('display.max_rows', rows)
+
+# %%
+# N genes vs N cells in group
+rcParams['figure.figsize']=(4,4)
+sb.scatterplot(x='N cells',y='N genes',hue='level_0',style='system',
+               data=pd.concat(
+                   [data.groupby(['group','system','batch']).apply(lambda x: (x['morans_i']>=thr_mi).sum()).rename('N genes'),
+                    groups.rename('N cells')],axis=1).reset_index(),s=20)
+plt.legend(bbox_to_anchor=(1,1))
+plt.xscale('log')
+
+# %% [markdown]
+# C: A thr of 0.2 keeps at least some genes for every group and not too many in any of the groups. Some groups could potentially use a lower/higher thr.
+#
+# C: There is no clear bias between N cells in group and N genes.
+#
+# C: Selected genes may not be diverse though - they may capture the same pattern, and more subtle patterns may sit at lower Moran's I.
+
+# %%
+# Prepare selected genes for saving (filtered genes & I per group)
+selected=list()
+for group,data_sub in data.groupby(['group','system','batch']):
+    group=dict(zip(['group','system','batch'],group))
+    group['genes']=(data_sub.query('morans_i>=@thr_mi')['morans_i']+1)/2
+    selected.append(group)
+
+# %%
+# Save
+pkl.dump(selected,open(path_save / 'limited_data_skin_mm_hs_hvg_moransiGenes.pkl','wb'))
+
+# %% [markdown]
+# ## Batch effects within and between systems
+
+# %%
+adata=sc.read(path_save / 'limited_data_skin_mm_hs_hvg.h5ad')
+
+# %%
+# Compute PCA on the whole data
+adata_scl=adata.copy()
+sc.pp.scale(adata_scl)
+n_pcs=15
+sc.pp.pca(adata_scl, n_comps=n_pcs)
+pca=pd.DataFrame(adata_scl.obsm['X_pca'],index=adata_scl.obs_names)
+del adata_scl
+
+# %%
+# Average PCA across system-batch-group pseudobulks.
+# Only use pseudobulks with at least 50 cells
+# Only use cell types with at least 3 samples per system
+pca[['system','batch','group']]=adata.obs[['system','batch', 'cell_type_eval']]
+pca_pb=pca.groupby(['system','batch','group'])
+pca_mean=pca_pb.mean()
+pb_size=pca_pb.size()
+# Remove samples with too few cells
+filtered_pb=pb_size.index[pb_size>=50]
+# Get pbs/cts where both systems have enough samples
+n_samples_system=filtered_pb.to_frame().rename({'group':'group_col'},axis=1).groupby(
+    'group_col',observed=True)['system'].value_counts().rename('n_samples').reset_index()
+cts=set(n_samples_system.query('system==0 & n_samples>=3').group_col)&\
+    set(n_samples_system.query('system==1 & n_samples>=3').group_col)
+filtered_pb=filtered_pb[filtered_pb.get_level_values(2).isin(cts)]
+pca_mean=pca_mean.loc[filtered_pb,:]
+
+# %%
+# Compute per-ct distances of samples within and between systems
+distances={}
+for ct in cts:
+    # Data for computing distances
+    pca_s0=pca_mean[(pca_mean.index.get_level_values(0)==0) &
+                    (pca_mean.index.get_level_values(2)==ct)]
+    pca_s1=pca_mean[(pca_mean.index.get_level_values(0)==1) &
+                    (pca_mean.index.get_level_values(2)==ct)]
+
+    # Distances for s0 - within or between datasets
+    d_s0=euclidean_distances(pca_s0)
+    triu=np.triu_indices(pca_s0.shape[0],k=1)
+    idx_map=dict(enumerate(pca_s0.index.get_level_values(1)))
+    idx_within=([],[])
+    idx_between=([],[])
+    for i,j in zip(*triu):
+        if idx_map[i]==idx_map[j]:
+            idx_list=idx_within
+        else:
+            idx_list=idx_between
+        idx_list[0].append(i)
+        idx_list[1].append(j)
+    d_s0_within=d_s0[idx_within]
+    d_s0_between=d_s0[idx_between]
+
+    # Distances for s1 and s0s1
+    d_s1=euclidean_distances(pca_s1)[np.triu_indices(pca_s1.shape[0],k=1)]
+    d_s0s1=euclidean_distances(pca_s0,pca_s1).ravel()
+    distances[ct]={'s0_within':d_s0_within,'s0_between':d_s0_between,'s1':d_s1,'s0s1':d_s0s1}
+
+# %%
+# Save distances
+pkl.dump(distances,open(path_save / 'limited_data_skin_mm_hs_hvg_PcaSysBatchDist.pkl','wb'))
+
+# %%
+# Reload distances
+#distances=pkl.load(open(path_save+'combined_orthologuesHVG_PcaSysBatchDist.pkl','rb'))
+
+# %%
+# Prepare df for plotting
+plot=[]
+for ct,dat in distances.items():
+    for comparison,dist in dat.items():
+        dist=pd.DataFrame(dist,columns=['dist'])
+        dist['group']=ct
+        dist['comparison']=comparison
+        plot.append(dist)
+plot=pd.concat(plot)
+
+# %%
+# Plot distances
+sb.catplot(x='comparison',y='dist',col='group',
+           data=plot.reset_index(drop=True),kind='violin',
+           sharey=False, height=2.5,aspect=1.5)
+
+# %% [markdown]
+# Evaluate statistical significance
+
+# %%
+# Compute significance of differences within and across systems
+signif=[]
+for ct,dat in distances.items():
+    for ref 
in ['s0_within','s0_between','s1']: + n_system=dat[ref].shape[0] + if n_system>=3: + u,p=mannwhitneyu( dat[ref],dat['s0s1'],alternative='less') + signif.append(dict( cell_type=ct,system=ref, u=u,pval=p, + n_system=n_system,n_crossystem=dat['s0s1'].shape[0])) +signif=pd.DataFrame(signif) +signif['padj']=multipletests(signif['pval'],method='fdr_bh')[1] + +# %% +signif + +# %% +# Save signif +signif.to_csv(path_save / 'limited_data_skin_mm_hs_hvg_PcaSysBatchDist_Signif.tsv',sep='\t',index=False) + +# %% + +# %% + +# %% [markdown] +# # Include non-one-to-one orthologues (Saturn, GLUE) + +# %% [markdown] +# ## Prepare data for integration + +# %% +adata_mm_sub = sc.read(path_save / 'skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad') +adata_hs_sub = sc.read(path_save / 'skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad') + +# %% +adata_hs_sub = limit_to_human(adata_hs_sub) + +# %% [markdown] +# PCA and clusters for scglue and saturn + +# %% +# PCA and clusters per system +n_pcs=15 +for adata_temp in [adata_hs_sub,adata_mm_sub]: + adata_sub=adata_temp.copy() + sc.pp.scale(adata_sub) + sc.pp.pca(adata_sub, n_comps=n_pcs) + sc.pp.neighbors(adata_sub, n_pcs=n_pcs) + sc.tl.leiden(adata_sub) + adata_temp.obsm['X_pca_system']=adata_sub[adata_temp.obs_names,:].obsm['X_pca'] + adata_temp.obs['leiden_system']=adata_sub.obs.apply(lambda x: str(x['system'])+'_'+x['leiden'],axis=1) +del adata_sub + +# %% [markdown] +# #### Save in format for scGLUE/Saturn + +# %% +# Save with gene symbols in var names for Saturn +adata_mm_sub.write(path_save / 'limited_data_skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad') +adata_hs_sub.write(path_save / 'limited_data_skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad') + +# %% +genes_mm=set(adata_mm_sub.var_names) +genes_hs=set(adata_hs_sub.var_names) +gene_mapping=orthology_info.query('gs_mm in @genes_mm and gs_hs in @genes_hs')[['gs_mm','gs_hs']] +print(gene_mapping.shape[0]) +gene_mapping.to_csv(path_save / 'limited_data_skin_mm_hs_hvg_nonortholHVG_geneMapping.tsv',index=False,sep='\t') + +# %% diff --git a/notebooks/eval/cleaned/analysis/bio_preservation_analysis_retina.py b/notebooks/eval/cleaned/analysis/bio_preservation_analysis_retina.py index d113796..24a203f 100644 --- a/notebooks/eval/cleaned/analysis/bio_preservation_analysis_retina.py +++ b/notebooks/eval/cleaned/analysis/bio_preservation_analysis_retina.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -22,6 +22,7 @@ import glob import os import gc +from pathlib import Path from matplotlib import rcParams import matplotlib.pyplot as plt @@ -29,7 +30,7 @@ import matplotlib.colors as mcolors # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' +path_data='/home/moinfar/io/csi/' path_names=path_data+'names_parsed/' path_fig=path_data+'figures/' path_ds='/om2/user/khrovati/data/datasets/d10_1016_j_cell_2020_08_013/' @@ -86,7 +87,7 @@ sc.pl.umap(embed,ax=ax,show=False,frameon=False) sc.pl.umap(embed[embed.obs.system==system,:],color='cell_type', groups=['astrocyte','retinal pigment epithelial cell'], - ax=ax,show=False,title=method+' '+system,frameon=False) + ax=ax,show=False,title=method+' '+str(system),frameon=False) if j==0: ax.get_legend().remove() @@ -210,6 +211,7 @@ # %% # Save embeds +Path(path_save_mueller).mkdir(parents=True, exist_ok=True) pkl.dump(embeds_sub,open(path_save_mueller+'density_topmodels.pkl','wb')) # %% @@ -413,6 +415,9 @@ except: pass +# %% 
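+# Make sure the output directory exists before the amacrine embeds are saved below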
+Path(path_save_amacrine).mkdir(parents=True, exist_ok=True) + # %% # Save scGEN embeds pkl.dump( @@ -433,3 +438,9 @@ # %% + +# %% + +# %% + +# %% diff --git a/notebooks/eval/cleaned/analysis/moransi_examples.py b/notebooks/eval/cleaned/analysis/moransi_examples.py index 65e3236..f5ea51e 100644 --- a/notebooks/eval/cleaned/analysis/moransi_examples.py +++ b/notebooks/eval/cleaned/analysis/moransi_examples.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -26,9 +26,8 @@ import gc # %% -path_data='/om2/user/khrovati/data/' -path_adata=path_data+'datasets/d10_1101_2022_12_22_521557/' -path_integration=path_data+'cross_system_integration/eval/pancreas_conditions_MIA_HPAP2/' +path_adata='/om2/user/khrovati/data/'+'datasets/d10_1101_2022_12_22_521557/' +path_integration='/home/moinfar/io/csi/'+'eval/pancreas_conditions_MIA_HPAP2/' path_save=path_integration+'integration_summary/moransi/' # %% @@ -110,3 +109,5 @@ # %% # Save embeddings pkl.dump(embeds,open(path_save+'pancreas_STZG1_healthyvar_topmodels.pkl','wb')) + +# %% diff --git a/notebooks/eval/cleaned/eval_summary.py b/notebooks/eval/cleaned/eval_summary.py index 2863c13..284639a 100644 --- a/notebooks/eval/cleaned/eval_summary.py +++ b/notebooks/eval/cleaned/eval_summary.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -20,6 +20,8 @@ import pickle as pkl import os import itertools +from pathlib import Path +import yaml from sklearn.preprocessing import minmax_scale @@ -33,7 +35,7 @@ from params_opt_maps import * # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' +path_data='/home/moinfar/io/csi/' path_eval=path_data+'eval/' path_names=path_data+'names_parsed/' @@ -42,6 +44,54 @@ params_opt_map=pkl.load(open(path_names+'params_opt_model.pkl','rb'))# Query to remove irrelevant runs accross all models for final model selection # Parameter values in specific models to be optimized param_opt_vals=pkl.load(open(path_names+'optimized_parameter_values.pkl','rb')) + +def load_integration_results(path_integration): + res=[] + metrics_data=[] + args_keys = set() + for run in glob.glob(path_integration+'*/'): + if (os.path.exists(run+'args.pkl') or os.path.exists(run+'args.yml')) and \ + os.path.exists(run+'scib_metrics.pkl') and \ + os.path.exists(run+'scib_metrics_scaled.pkl') and\ + os.path.exists(run+'scib_metrics_data.pkl'): + if os.path.exists(run+'args.pkl'): + args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) + if os.path.exists(run+'args.yml'): + args=pd.Series(yaml.safe_load(open(run+'args.yml','rb'))) + args_keys.update(args.keys().to_list()) + metrics=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) + metrics_scl=pd.Series(pkl.load(open(run+'scib_metrics_scaled.pkl','rb'))) + metrics_scl.index=metrics_scl.index.map(lambda x: x+'_scaled') + data=pd.concat([args,metrics,metrics_scl]) + name=run.split('/')[-2] + data.name=name + res.append(data) + metrics_data_sub=pkl.load(open(run+'scib_metrics_data.pkl','rb')) + metrics_data_sub['name']=name + metrics_data.append(metrics_data_sub) + res=pd.concat(res,axis=1).T + # Convert dict to str if some parameter is dict + def convert_dict(cell): + return cell if not isinstance(cell, dict) else str(cell) + res = res.applymap(convert_dict) + return res.drop_duplicates(args_keys) + +def 
prepare_results(res): + res = res.copy() + res['params_opt']=res.params_opt.replace(params_opt_correct_map) + res['param_opt_col']=res.params_opt.replace(param_opt_col_map) + res['param_opt_val']=res.apply( + lambda x: x[x['param_opt_col']] if x['param_opt_col'] is not None else 0,axis=1) + res['params_opt']=np.where(res.index.str.contains('harmonypy'), + res['params_opt'].replace({'harmony_theta': 'harmonypy_theta'}), + res['params_opt']) + res['param_opt_col']=np.where(res.index.str.contains('harmonypy'), + res['param_opt_col'].replace({'harmony_theta': 'harmonypy_theta'}), + res['param_opt_col']) + res['harmonypy_theta'] = res['harmony_theta'] + res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True) + return res + def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map): """ Find best runs for each method-and-parameter accross tunned params values. @@ -69,7 +119,6 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map res_query_sub='(('+' | '.join(res_query_sub)+f') & model in {models})' res_query.append(res_query_sub) res_query=' | '.join(res_query) - #print(res_query) res_sub=res_sub.query(res_query).copy() display(res_sub.groupby(['model','params_opt'],observed=True).size()) @@ -94,7 +143,10 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map top_setting=dict(zip(setting_cols,setting_means.index[setting_means.argmax()])) runs_data=res_model.query( f'params_opt=="{top_setting["params_opt"]}" & param_opt_val== {top_setting["param_opt_val"]}') - mid_run=runs_data.index[runs_data.overall_score==runs_data.overall_score.median()][0] + # This requires n_runs to be odd but some runs may fail, etc. + # mid_run=runs_data.index[runs_data.overall_score==runs_data.overall_score.median()][0] + # alternative: + mid_run=runs_data.index[np.argmin(np.abs(runs_data.overall_score-runs_data.overall_score.median()))] top_settings[model]=dict( params=top_setting, runs=list(runs_data.index),mid_run=mid_run) @@ -112,36 +164,25 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map # %% path_integration=path_eval+'pancreas_conditions_MIA_HPAP2/integration/' +Path(path_integration.rstrip('/')+'_summary/').mkdir(parents=True, exist_ok=True) # %% # Load integration results - params and metrics -res=[] -metrics_data=[] -for run in glob.glob(path_integration+'*/'): - if os.path.exists(run+'args.pkl') and \ - os.path.exists(run+'scib_metrics.pkl') and \ - os.path.exists(run+'scib_metrics_scaled.pkl') and\ - os.path.exists(run+'scib_metrics_data.pkl'): - args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) - metrics=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) - metrics_scl=pd.Series(pkl.load(open(run+'scib_metrics_scaled.pkl','rb'))) - metrics_scl.index=metrics_scl.index.map(lambda x: x+'_scaled') - data=pd.concat([args,metrics,metrics_scl]) - name=run.split('/')[-2] - data.name=name - res.append(data) - metrics_data_sub=pkl.load(open(run+'scib_metrics_data.pkl','rb')) - metrics_data_sub['name']=name - metrics_data.append(metrics_data_sub) -res=pd.concat(res,axis=1).T +res=load_integration_results(path_integration) # %% # Parse param that was optimised -res['params_opt']=res.params_opt.replace(params_opt_correct_map) -res['param_opt_col']=res.params_opt.replace(param_opt_col_map) -res['param_opt_val']=res.apply( - lambda x: x[x['param_opt_col']] if x['param_opt_col'] is not None else 0,axis=1) 
-res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True) +res = prepare_results(res) + +# %% +with open("/home/moinfar/io/csi/tmp/pancreas_metrics.pkl", "wb") as f: + pkl.dump(res, f) + +# %% +for path in path_integration + res[res.params_opt.replace(params_opt_map).astype(str).isin(['sysvi', 'sysvi_stable'])].index: + print(path + "/args.pkl", end=" ") + print(path + "/loss*", end=" ") + print(path + "/scib*", end=" ") # %% [markdown] # ### Best runs @@ -154,14 +195,18 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map print('Top settings') for model,setting in top_settings.items(): print(model) - print(tuple(setting['params'].values())) - print(setting['mid_run']) + print("params", tuple(setting['params'].values())) + print("runs", len(setting['runs']), "mid_run", setting['mid_run']) # %% # Save pkl.dump(top_runs,open(path_integration.rstrip('/')+'_summary/top_runs.pkl','wb')) pkl.dump(top_settings,open(path_integration.rstrip('/')+'_summary/top_settings.pkl','wb')) +# %% + +# %% + # %% [markdown] # ## Retina adult organoid @@ -170,35 +215,25 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map # %% path_integration=path_eval+'retina_adult_organoid/integration/' +Path(path_integration.rstrip('/')+'_summary/').mkdir(parents=True, exist_ok=True) # %% # Load integration results - params and metrics -res=[] -metrics_data=[] -for run in glob.glob(path_integration+'*/'): - if os.path.exists(run+'args.pkl') and \ - os.path.exists(run+'scib_metrics.pkl') and \ - os.path.exists(run+'scib_metrics_scaled.pkl') and\ - os.path.exists(run+'scib_metrics_data.pkl'): - args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) - metrics=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) - metrics_scl=pd.Series(pkl.load(open(run+'scib_metrics_scaled.pkl','rb'))) - metrics_scl.index=metrics_scl.index.map(lambda x: x+'_scaled') - data=pd.concat([args,metrics,metrics_scl]) - name=run.split('/')[-2] - data.name=name - res.append(data) - metrics_data_sub=pkl.load(open(run+'scib_metrics_data.pkl','rb')) - metrics_data_sub['name']=name - metrics_data.append(metrics_data_sub) -res=pd.concat(res,axis=1).T +res = load_integration_results(path_integration) # %% # Parse param that was optimised -res['param_opt_col']=res.params_opt.replace(param_opt_col_map) -res['param_opt_val']=res.apply( - lambda x: x[x['param_opt_col']] if x['param_opt_col'] is not None else 0,axis=1) -res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True) +res = prepare_results(res) + +# %% +with open("/home/moinfar/io/csi/tmp/retadorg_metrics.pkl", "wb") as f: + pkl.dump(res, f) + +# %% +for path in path_integration + res[res.params_opt.replace(params_opt_map).astype(str).isin(['vamp_cycle', 'sysvi', 'sysvi_stable'])].index: + print(path + "/args.pkl", end=" ") + print(path + "/loss*", end=" ") + print(path + "/scib*", end=" ") # %% [markdown] # ### Best runs @@ -211,8 +246,8 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map print('Top settings') for model,setting in top_settings.items(): print(model) - print(tuple(setting['params'].values())) - print(setting['mid_run']) + print("params", tuple(setting['params'].values())) + print("runs", len(setting['runs']), "mid_run", setting['mid_run']) # %% # Save @@ -237,37 +272,121 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map # %% path_integration=path_eval+'adipose_sc_sn_updated/integration/' 
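The nearest-to-median pick used for `mid_run` above is worth a quick sanity check: unlike the exact-equality lookup it replaces, it always returns a run even when the number of runs is even (e.g. after failed runs). A minimal sketch with made-up scores; the run names are illustrative only:

```python
import numpy as np
import pandas as pd

# Four runs (even count): the interpolated median (0.76) equals no run's score
scores = pd.Series([0.71, 0.74, 0.78, 0.80],
                   index=['run_a', 'run_b', 'run_c', 'run_d'])
assert len(scores.index[scores == scores.median()]) == 0  # exact lookup finds nothing

# argmin of |score - median| always yields a run; ties resolve to the first one
mid_run = scores.index[np.argmin(np.abs(scores - scores.median()))]
print(mid_run)  # 'run_b'
```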
+Path(path_integration.rstrip('/')+'_summary/').mkdir(parents=True, exist_ok=True) + +# %% +# Load integration results - params and metrics +res=load_integration_results(path_integration) + +# %% +# Parse param that was optimised +res = prepare_results(res) + +# %% +with open("/home/moinfar/io/csi/tmp/adipose_metrics.pkl", "wb") as f: + pkl.dump(res, f) + +# %% +for path in path_integration + res[res.params_opt.replace(params_opt_map).astype(str).isin(['vamp_cycle', 'sysvi', 'sysvi_stable'])].index: + print(path + "/args.pkl", end=" ") + print(path + "/loss*", end=" ") + print(path + "/scib*", end=" ") + +# %% [markdown] +# ### Best runs + +# %% +# Top runs/settings +top_runs,top_settings=get_top_runs(res) +print('Top runs') +display(top_runs) +print('Top settings') +for model,setting in top_settings.items(): + print(model) + print("params", tuple(setting['params'].values())) + print("runs", len(setting['runs']), "mid_run", setting['mid_run']) + +# %% +# Save +pkl.dump(top_runs,open(path_integration.rstrip('/')+'_summary/top_runs.pkl','wb')) +pkl.dump(top_settings,open(path_integration.rstrip('/')+'_summary/top_settings.pkl','wb')) + +# %% + +# %% + +# %% [markdown] +# ## Retina atlas sc sn + +# %% [markdown] +# Load data + +# %% +path_integration=path_eval+'retina_atlas_sc_sn/integration/' +Path(path_integration.rstrip('/')+'_summary/').mkdir(parents=True, exist_ok=True) + +# %% +# Load integration results - params and metrics +res=load_integration_results(path_integration) + +# %% +# Parse param that was optimised +res = prepare_results(res) + +# %% +with open("/home/moinfar/io/csi/tmp/retatlas_metrics.pkl", "wb") as f: + pkl.dump(res, f) + +# %% [markdown] +# ### Best runs + +# %% +# Top runs/settings +top_runs,top_settings=get_top_runs(res) +print('Top runs') +display(top_runs) +print('Top settings') +for model,setting in top_settings.items(): + print(model) + print("params", tuple(setting['params'].values())) + print("runs", len(setting['runs']), setting['runs'], "mid_run", setting['mid_run']) + +# %% +# Save +pkl.dump(top_runs,open(path_integration.rstrip('/')+'_summary/top_runs.pkl','wb')) +pkl.dump(top_settings,open(path_integration.rstrip('/')+'_summary/top_settings.pkl','wb')) + +# %% + +# %% + +# %% [markdown] +# ## Skin Mouse Human + +# %% [markdown] +# Load data + +# %% +path_integration=path_eval+'skin_mm_hs/integration/' +Path(path_integration.rstrip('/')+'_summary/').mkdir(parents=True, exist_ok=True) # %% # Load integration results - params and metrics -res=[] -metrics_data=[] -for run in glob.glob(path_integration+'*/'): - if os.path.exists(run+'args.pkl') and \ - os.path.exists(run+'scib_metrics.pkl') and \ - os.path.exists(run+'scib_metrics_scaled.pkl') and\ - os.path.exists(run+'scib_metrics_data.pkl'): - args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) - metrics=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) - metrics_scl=pd.Series(pkl.load(open(run+'scib_metrics_scaled.pkl','rb'))) - metrics_scl.index=metrics_scl.index.map(lambda x: x+'_scaled') - data=pd.concat([args,metrics,metrics_scl]) - name=run.split('/')[-2] - data.name=name - res.append(data) - metrics_data_sub=pkl.load(open(run+'scib_metrics_data.pkl','rb')) - metrics_data_sub['name']=name - metrics_data.append(metrics_data_sub) -res=pd.concat(res,axis=1).T +res=load_integration_results(path_integration) # %% # Parse param that was optimised -res['param_opt_col']=res.params_opt.replace(param_opt_col_map) -res['param_opt_val']=res.apply( - lambda x: (x[x['param_opt_col']] if not 
isinstance(x[x['param_opt_col']],dict) - else x[x['param_opt_col']]['weight_end']) - if x['param_opt_col'] is not None else 0,axis=1) -res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True) +res = prepare_results(res) + +# %% +with open("/home/moinfar/io/csi/tmp/skin_metrics.pkl", "wb") as f: + pkl.dump(res, f) + +# %% +for path in path_integration + res[res.params_opt.replace(params_opt_map).astype(str).isin(['vamp_cycle', 'sysvi', 'sysvi_stable'])].index: + print(path + "/args.pkl", end=" ") + print(path + "/loss*", end=" ") + print(path + "/scib*", end=" ") # %% [markdown] # ### Best runs @@ -280,10 +399,69 @@ def get_top_runs(res,param_opt_vals=param_opt_vals,params_opt_map=params_opt_map print('Top settings') for model,setting in top_settings.items(): print(model) - print(tuple(setting['params'].values())) - print(setting['mid_run']) + print("params", tuple(setting['params'].values())) + print("runs", len(setting['runs']), "mid_run", setting['mid_run']) # %% # Save pkl.dump(top_runs,open(path_integration.rstrip('/')+'_summary/top_runs.pkl','wb')) pkl.dump(top_settings,open(path_integration.rstrip('/')+'_summary/top_settings.pkl','wb')) + +# %% + +# %% + +# %% + +# %% + +# %% [markdown] +# ## Skin Mouse Human (without human disease) + +# %% [markdown] +# Load data + +# %% +path_integration=path_eval+'skin_mm_hs_limited/integration/' +Path(path_integration.rstrip('/')+'_summary/').mkdir(parents=True, exist_ok=True) + +# %% +# Load integration results - params and metrics +res=load_integration_results(path_integration) + +# %% +# Parse param that was optimised +res = prepare_results(res) + +# %% +with open("/home/moinfar/io/csi/tmp/skin_limited_metrics.pkl", "wb") as f: + pkl.dump(res, f) + +# %% +for path in path_integration + res[res.params_opt.replace(params_opt_map).astype(str).isin(['vamp_cycle', 'sysvi', 'sysvi_stable'])].index: + print(path + "/args.pkl", end=" ") + print(path + "/loss*", end=" ") + print(path + "/scib*", end=" ") + +# %% [markdown] +# ### Best runs + +# %% +# Top runs/settings +top_runs,top_settings=get_top_runs(res) +print('Top runs') +display(top_runs) +print('Top settings') +for model,setting in top_settings.items(): + print(model) + print("params", tuple(setting['params'].values())) + print("runs", len(setting['runs']), "mid_run", setting['mid_run']) + +# %% +# Save +pkl.dump(top_runs,open(path_integration.rstrip('/')+'_summary/top_runs.pkl','wb')) +pkl.dump(top_settings,open(path_integration.rstrip('/')+'_summary/top_settings.pkl','wb')) + +# %% + +# %% diff --git a/notebooks/eval/cleaned/model_runtime.py b/notebooks/eval/cleaned/model_runtime.py new file mode 100644 index 0000000..7d814de --- /dev/null +++ b/notebooks/eval/cleaned/model_runtime.py @@ -0,0 +1,434 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.3 +# kernelspec: +# display_name: csi +# language: python +# name: csi +# --- + +# %% [markdown] +# # Evaluate runtime + +# %% +import scanpy as sc +import pickle as pkl +import pandas as pd +import numpy as np +# Make random number for seed before scvi import sets seed to 0 +seed=np.random.randint(0,1000000) +import argparse +import os +import pathlib +import string +import subprocess +import scvi +import gc +import time +import torch + + +from memory_profiler import memory_usage +from datetime import datetime +from matplotlib.pyplot import rcParams +import matplotlib.pyplot as plt 
+import seaborn as sns + +from cross_system_integration.model._xxjointmodel import XXJointModel +import pytorch_lightning as pl + +# Otherwise the seed remains constant +from scvi._settings import ScviConfig +config=ScviConfig() +config.seed=seed + +# %% +import wandb +from pytorch_lightning.loggers.wandb import WandbLogger + +# %% +path_adata = '/om2/user/khrovati/data/cross_system_integration/pancreas_conditions_MIA_HPAP2/combined_orthologuesHVG.h5ad' +system_key = 'system' +batch_key = 'batch' + +# %% +config.seed=1 + +# %% +details_filename = pathlib.Path('/home/moinfar/io/csi/runtime/details.pkl') +results_filename = pathlib.Path('/home/moinfar/io/csi/runtime/results.csv') + +# %% [markdown] +# ## Integration + +# %% [markdown] +# ### Prepare data + +# %% +# Load data +adata=sc.read(path_adata) + +# %% [markdown] +# ### Training + +# %% +# ! nvidia-smi + +# %% + +# %% + +# %% +if details_filename.exists(): + details = pkl.load(open(details_filename, 'rb')) +else: + details = {} + +# %% +api = wandb.Api() +wandb_project = f"sysvi_measure_runtime_etc" + +# %% +results = [] +max_epochs = 50 + +for n_samples in [10_000, 50_000, 100_000, 200_000, 300_000, 450_000]: + for model_name in ['scVI', 'sysVI']: + run_key = f"{model_name}#{n_samples}#{max_epochs}" + + if run_key in details: + continue + adata_training = sc.pp.subsample(adata, n_obs=n_samples, random_state=0, copy=True) + + wandb.finish() + wb_logger = WandbLogger(project=wandb_project, name=run_key, save_dir='/home/moinfar/tmp/wandb', + reinit=True, settings=wandb.Settings(start_method="fork"), + config={'n_samples': n_samples, 'model_name': model_name, + 'max_epochs': max_epochs, 'path_adata': path_adata}) + + gc.collect() + time.sleep(5) + torch.cuda.empty_cache() + mem_before = torch.cuda.max_memory_allocated() / (1024 * 1024) # in MB + train_start = datetime.now() + def run_training(adata_training, system_key, batch_key): + if model_name == 'sysVI': + # Setup adata + adata_training = XXJointModel.setup_anndata( + adata=adata_training, + system_key=system_key, + group_key=None, + categorical_covariate_keys=[batch_key], + ) + + # Define pseudoinputs + n_prior_components=5 + pdi=None + + # Train + model = XXJointModel( + adata=adata_training, + out_var_mode='feature', + mixup_alpha=None, + system_decoders=False, + prior='vamp', + n_prior_components=n_prior_components, + pseudoinputs_data_init=True, + pseudoinputs_data_indices=pdi, + trainable_priors=True, + encode_pseudoinputs_on_eval_mode=True, + z_dist_metric = 'MSE_standard', + n_layers=2, + n_hidden=256, + ) + model.train(max_epochs=max_epochs, + log_every_n_steps=1, + check_val_every_n_epoch=1, + val_check_interval=1.0 if True else 1, # Assuming log_on_epoch is True by default + train_size=0.9, + callbacks=[pl.callbacks.LearningRateMonitor(logging_interval='epoch')] +\ + [pl.callbacks.StochasticWeightAveraging( + swa_lrs=0.001, + swa_epoch_start=10, + annealing_epochs=10)] + if False else [], # Assuming swa is False by default + plan_kwargs={ + 'optimizer': "Adam", + 'lr': 0.001, + 'reduce_lr_on_plateau': False, + 'lr_scheduler_metric': 'loss_train', + 'lr_patience': 5., + 'lr_factor': 0.1, + 'lr_min': 1e-7, + 'lr_threshold_mode': 'rel', + 'lr_threshold': 0.1, + 'log_on_epoch': True, + 'log_on_step': False, + 'loss_weights':{ + 'kl_weight': 1., + 'kl_cycle_weight': 0., + 'reconstruction_weight': 1., + 'reconstruction_mixup_weight': 0., + 'reconstruction_cycle_weight': 0., + 'z_distance_cycle_weight': 0., + 'translation_corr_weight': 0., + 'z_contrastive_weight': 0., + + }}, 
logger=[wb_logger]) + elif model_name == 'scVI': + scvi.model.SCVI.setup_anndata( + adata_training, + layer="counts", + batch_key=system_key, + categorical_covariate_keys=[batch_key]) + model = scvi.model.SCVI(adata_training, + n_layers=2, n_hidden=256, n_latent=15, + gene_likelihood="nb") + + model.train( + max_epochs = max_epochs, + # Uses n_steps_kl_warmup (128*5000/400) if n_epochs_kl_warmup is not set + plan_kwargs=dict( + n_epochs_kl_warmup = None, + n_steps_kl_warmup = 1600, + max_kl_weight=1., + ), + log_every_n_steps=1, + check_val_every_n_epoch=1, + callbacks=[pl.callbacks.LearningRateMonitor(logging_interval='epoch')], + logger=[wb_logger], + ) + mem_usage = memory_usage((run_training, (adata_training, system_key, batch_key), {}), interval=1, timeout=None, max_usage=True) + runtime = (datetime.now() - train_start).total_seconds() + mem_after = torch.cuda.max_memory_allocated() / (1024 * 1024) # in MB + cuda_mem_usage = mem_after - mem_before + results.append((model_name, n_samples, runtime, mem_usage, cuda_mem_usage)) + wb_logger.log_hyperparams({'runtime': runtime}) + wb_logger.log_hyperparams({'mem_usage': mem_usage}) + wb_logger.log_hyperparams({'cuda_mem_usage': cuda_mem_usage}) + wandb.finish() + details[run_key] = results[-1] + with open(details_filename, 'wb') as f: + pkl.dump(details, f) + print(results[-1]) + +# %% + +# %% +result_df = pd.DataFrame(results, columns=['model', 'n_samples', 'runtime', 'mem_usage', 'cuda_mem_usage']) +result_df.to_csv(results_filename) + +# %% +result_df + +# %% + +# %% +result_df = pd.read_csv(results_filename, index_col=0) +result_df['model'].replace({'sysVI': 'VAMP+CYC'}, inplace=True) +result_df + +# %% +import pandas as pd +import wandb +api = wandb.Api() +api.flush() + +# Project is specified by +runs = api.runs("moinfar_proj/sysvi_measure_runtime_etc") + +result_list = [] +for run in runs: + system_metrics = run.history(stream='systemMetrics').max() + system_metrics.index = system_metrics.index.str.replace('\\.', '_') + result_list.append({ + 'name': run.name, + 'summary': run.summary._json_dict, + 'config': {k: v for k,v in run.config.items() if not k.startswith('_')}, + 'system_metrics': system_metrics.to_dict(), + }) + +runs_df = pd.json_normalize(result_list, sep='_') +runs_df['config_model_name'].replace({'sysVI': 'VAMP+CYC'}, inplace=True) +runs_df + +# %% +runs_df.columns + +# %% [markdown] +# ## Plotting + +# %% +# Define the order for the x-axis (n_samples) +x_order = list(sorted(runs_df.drop_duplicates(subset=['config_n_samples'])['config_n_samples'])) + +# Convert runtime to minutes +runs_df['n_samples'] = runs_df['config_n_samples'] +runs_df['runtime_minutes'] = runs_df['config_runtime'] / 60 + +lineplot = sns.lineplot( + data=runs_df, + x='n_samples', + y='runtime_minutes', + hue='config_model_name', + hue_order=sorted(runs_df['config_model_name'].unique()), + palette='tab10', + alpha=1.0, # Transparency of the lines + marker='o', # Add markers at the data points + markersize=8, # Size of the markers + linewidth=2, # Thickness of the lines +) + +# Overlay scatter plot for better visibility of points +# scatterplot = sns.scatterplot( +# data=result_df, +# x='n_samples', +# y='runtime_minutes', +# hue='model', +# hue_order=sorted(result_df['model'].unique()), +# palette='tab10', +# s=100, # Size of the points +# alpha=0.8, # Transparency of the points +# legend=False, # Avoid duplicate legend entries +# ) + +# Customize the plot +plt.xlabel('Number of samples', fontsize=12) +plt.ylabel('Runtime (Minutes)', fontsize=12) 
+plt.title(f'Runtime for VAMP+CYC versus scVI on different\n subsamples of the pancreas data (NVIDIA A100 GPU)', fontsize=14)
+plt.xticks(fontsize=10, rotation=90, ha='center')
+plt.yticks(fontsize=10)
+plt.legend(title='Model', fontsize=10)
+plt.grid(axis='y', linestyle='--', alpha=0.3)
+
+# Show the plot
+plt.tight_layout()
+plt.savefig("/home/moinfar/io/csi/figures/runtime_of_sysvi_vs_scvi_pancreas_scatter.pdf", bbox_inches='tight')
+plt.savefig("/home/moinfar/io/csi/figures/runtime_of_sysvi_vs_scvi_pancreas_scatter.png", bbox_inches='tight')
+plt.show()
+
+# %%
+runs_df = runs_df.sort_values("config_runtime", ascending=False)
+if x_order is None:
+    x_order = list(sorted(runs_df.drop_duplicates(subset=['config_n_samples'])['config_n_samples']))
+
+runs_df['n_samples'] = runs_df['config_n_samples']
+# Runtime of one iteration per sample, in milliseconds
+runs_df['runtime_per_sample_ms'] = runs_df['config_runtime'] * 1000 / runs_df['n_samples'] / runs_df['config_max_epochs']
+
+plt.figure(figsize=(7, 5))
+barplot = sns.barplot(
+    data=runs_df,
+    x='n_samples',
+    y='runtime_per_sample_ms',
+    order=x_order,
+    hue='config_model_name',
+    hue_order=sorted(runs_df['config_model_name'].unique()),
+    dodge=True,  # Avoid gaps by overlapping bars slightly
+    palette='tab10'
+)
+
+# Customize the plot
+plt.xlabel('Number of samples', fontsize=12)
+plt.ylabel('Runtime of one iteration\nper sample (milliseconds)', fontsize=12)
+plt.title(f'Runtime for VAMP+CYC versus scVI on different\n subsamples of the pancreas data (NVIDIA A100 GPU)', fontsize=14)
+plt.xticks(fontsize=10, rotation=90, ha='center')
+plt.yticks(fontsize=10)
+plt.legend(title='Model', fontsize=10)
+plt.grid(axis='y', linestyle='--', alpha=0.3)
+
+# Show the plot
+plt.tight_layout()
+
+plt.savefig("/home/moinfar/io/csi/figures/runtime_per_sample_of_sysvi_vs_scvi_pancreas.pdf", bbox_inches='tight')
+plt.savefig("/home/moinfar/io/csi/figures/runtime_per_sample_of_sysvi_vs_scvi_pancreas.png", bbox_inches='tight')
+plt.show()
+
+# %%
+
+# %%
+
+# %%
+# Define the order for the x-axis (n_samples)
+if x_order is None:
+    x_order = list(sorted(runs_df.drop_duplicates(subset=['config_n_samples'])['config_n_samples']))
+
+
+# Convert memory usage to GB
+runs_df['n_samples'] = runs_df['config_n_samples']
+runs_df['mem_usage_gb'] = runs_df['config_mem_usage'] / 1024
+
+lineplot = sns.lineplot(
+    data=runs_df,
+    x='n_samples',
+    y='mem_usage_gb',
+    hue='config_model_name',
+    hue_order=sorted(runs_df['config_model_name'].unique()),
+    palette='tab10',
+    alpha=1.0,  # Transparency of the lines
+    marker='o',  # Add markers at the data points
+    markersize=8,  # Size of the markers
+    linewidth=2,  # Thickness of the lines
+)
+
+# Customize the plot
+plt.xlabel('Number of samples', fontsize=12)
+plt.ylabel('Memory usage (GB)', fontsize=12)
+plt.title(f'Memory requirement of VAMP+CYC versus scVI on different\n subsamples of the pancreas data', fontsize=14)
+plt.xticks(fontsize=10, rotation=90, ha='center')
+plt.yticks(fontsize=10)
+plt.legend(title='Model', fontsize=10)
+plt.grid(axis='y', linestyle='--', alpha=0.3)
+plt.ylim((0, runs_df['mem_usage_gb'].max() * 1.1))
+
+# Show the plot
+plt.tight_layout()
+plt.savefig("/home/moinfar/io/csi/figures/mem_usage_of_sysvi_vs_scvi_pancreas_scatter.pdf", bbox_inches='tight')
+plt.savefig("/home/moinfar/io/csi/figures/mem_usage_of_sysvi_vs_scvi_pancreas_scatter.png", bbox_inches='tight')
+plt.show()
+
+# %%
+# Define the order for the x-axis (n_samples)
+if x_order is None:
+    x_order = list(sorted(runs_df.drop_duplicates(subset=['config_n_samples'])['config_n_samples']))
+
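The `mem_usage` and `cuda_mem_usage` values plotted in this notebook come from wrapping the training call in `memory_profiler.memory_usage` and bracketing it with CUDA peak-memory readings, as in `model_runtime.py` above. A self-contained sketch of that measurement pattern with a stand-in workload; note the optional `torch.cuda.reset_peak_memory_stats()`, which makes repeated peak readings independent (the original loop instead takes differences of the monotonic peak counter):

```python
import torch
from memory_profiler import memory_usage

def workload(n: int) -> None:
    # Stand-in for model.train(): allocate and multiply a matrix
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    x = torch.randn(n, n, device=device)
    (x @ x).sum().item()

if torch.cuda.is_available():
    torch.cuda.reset_peak_memory_stats()  # start peak tracking fresh

# max_usage=True returns the peak RSS (in MiB) of this process while workload runs
peak_ram_mib = memory_usage((workload, (1024,), {}), interval=1, max_usage=True)
peak_vram_mib = (torch.cuda.max_memory_allocated() / 1024**2
                 if torch.cuda.is_available() else 0.0)
print(f"peak RAM: {peak_ram_mib:.0f} MiB, peak VRAM: {peak_vram_mib:.0f} MiB")
```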
+# Convert GPU memory usage to GB
+runs_df['n_samples'] = runs_df['config_n_samples']
+runs_df['gpu_mem_usage_gb'] = runs_df['system_metrics_system_gpu_0_memoryAllocatedBytes'] / 1024 / 1024 / 1024
+
+lineplot = sns.lineplot(
+    data=runs_df,
+    x='n_samples',
+    y='gpu_mem_usage_gb',
+    hue='config_model_name',
+    hue_order=sorted(runs_df['config_model_name'].unique()),
+    palette='tab10',
+    alpha=1.0,  # Transparency of the lines
+    marker='o',  # Add markers at the data points
+    markersize=8,  # Size of the markers
+    linewidth=2,  # Thickness of the lines
+)
+
+# Customize the plot
+plt.xlabel('Number of samples', fontsize=12)
+plt.ylabel('GPU memory usage (GB)', fontsize=12)
+plt.title(f'GPU memory requirement of VAMP+CYC versus scVI on different\n subsamples of the pancreas data (NVIDIA A100 GPU)', fontsize=14)
+plt.xticks(fontsize=10, rotation=90, ha='center')
+plt.yticks(fontsize=10)
+plt.legend(title='Model', fontsize=10)
+plt.grid(axis='y', linestyle='--', alpha=0.3)
+plt.ylim((0, runs_df['gpu_mem_usage_gb'].max() * 1.1))
+
+# Show the plot
+plt.tight_layout()
+plt.savefig("/home/moinfar/io/csi/figures/gpu_mem_usage_of_sysvi_vs_scvi_pancreas_scatter.pdf", bbox_inches='tight')
+plt.savefig("/home/moinfar/io/csi/figures/gpu_mem_usage_of_sysvi_vs_scvi_pancreas_scatter.png", bbox_inches='tight')
+plt.show()
+
+# %%
diff --git a/notebooks/eval/cleaned/params_opt_maps.py b/notebooks/eval/cleaned/params_opt_maps.py
index f6675a4..9ca1806 100644
--- a/notebooks/eval/cleaned/params_opt_maps.py
+++ b/notebooks/eval/cleaned/params_opt_maps.py
@@ -36,6 +36,12 @@
     'prior_group':'prior_components_group',
     'scgen_kl':'kl_weight',
     'scgen_sample_kl':'kl_weight',
+    'seurat_rpca_k_anchor':'k_anchor',
+    'seurat_cca_k_anchor':'k_anchor',
+    'harmony_theta':'harmony_theta',
+    'harmonypy_theta':'harmonypy_theta',
+    'sysvi_vamp_cyc_z_distance_cycle_weight_2': 'z_distance_cycle_weight',
+    'sysvi_vamp_cyc_z_distance_cycle_weight_2_stable': 'z_distance_cycle_weight',
 }
diff --git a/notebooks/eval/cleaned/run_integration.py b/notebooks/eval/cleaned/run_integration.py
index e17551f..f0bc71c 100644
--- a/notebooks/eval/cleaned/run_integration.py
+++ b/notebooks/eval/cleaned/run_integration.py
@@ -6,7 +6,7 @@
 #       extension: .py
 #       format_name: percent
 #       format_version: '1.3'
-#       jupytext_version: 1.14.5
+#       jupytext_version: 1.16.3
 #   kernelspec:
 #     display_name: csi
 #     language: python
@@ -25,6 +25,7 @@
 seed=np.random.randint(0,1000000)
 import argparse
 import os
+import pathlib
 import string
 import subprocess
@@ -278,7 +279,7 @@ def weight_to_str(x):
     ('-TEST' if TESTING else '')+\
     os.sep
-os.mkdir(path_save)
+pathlib.Path(path_save).mkdir(parents=True, exist_ok=False)
 print("PATH_SAVE=",path_save)
 # %%
@@ -367,7 +368,7 @@ def weight_to_str(x):
 else:
     pdi=None
-if len(pdi)!=n_prior_components:
+if pdi is not None and len(pdi) != n_prior_components:
     raise ValueError('Not sufficient number of prior components could be sampled')
 # %%
diff --git a/notebooks/eval/cleaned/run_integration_harmonypy.py b/notebooks/eval/cleaned/run_integration_harmonypy.py
new file mode 100644
index 0000000..1fb346d
--- /dev/null
+++ b/notebooks/eval/cleaned/run_integration_harmonypy.py
@@ -0,0 +1,191 @@
+# ---
+# jupyter:
+#   jupytext:
+#     formats: ipynb,py:percent
+#     text_representation:
+#       extension: .py
+#       format_name: percent
+#       format_version: '1.3'
+#     jupytext_version: 1.16.3
+#   kernelspec:
+#     display_name: csi
+#     language: python
+#     name: csi
+# ---
+
+# %%
+import sys
+import scanpy as sc
+import scanpy.external as sce
+import pickle as pkl
+import pandas as pd
+import numpy as np
+import random
+seed=np.random.randint(0,1000000)
+import argparse
+import os
+import pathlib
+import string
+import subprocess
+
+from matplotlib.pyplot import rcParams
+import matplotlib.pyplot as plt
+import seaborn as sb
+
+# %%
+parser = argparse.ArgumentParser()
+def intstr_to_bool(x):
+    return bool(int(x))
+def str_to_float_zeronone(x):
+    if x is None or x=="0":
+        return None
+    else:
+        return float(x)
+parser.add_argument('-n', '--name', required=False, type=str, default=None,
+                    help='name of replicate, if unspecified set to rSEED if seed is given '+\
+                    'and otherwise to blank string')
+parser.add_argument('-s', '--seed', required=False, type=int, default=None,
+                    help='random seed, if none it is randomly generated')
+parser.add_argument('-po', '--params_opt', required=False, type=str, default='',
+                    help='name of optimized params/test purpose')
+parser.add_argument('-pa', '--path_adata', required=True, type=str,
+                    help='full path to adata obj')
+parser.add_argument('-ps', '--path_save', required=True, type=str,
+                    help='directory path for saving, creates subdir within it')
+parser.add_argument('-sk', '--system_key', required=True, type=str,
+                    help='obs col with system info')
+parser.add_argument('-gk', '--group_key', required=True, type=str,
+                    help='obs col with group info')
+parser.add_argument('-bk', '--batch_key', required=True, type=str,
+                    help='obs col with batch info')
+parser.add_argument('-nce', '--n_cells_eval', required=False, type=int, default=-1,
+                    help='Max cells to be used for eval, if -1 use all cells. '+\
+                    'For cell subsetting seed 0 is always used to be reproducible across '+\
+                    'runs with different seeds.')
+parser.add_argument('-ht', '--harmony_theta', required=False, type=float, default=1.0,
+                    help='Controls the strength of batch effect correction in Harmony (default is 1.0)')
+
+parser.add_argument('-t', '--testing', required=False, type=intstr_to_bool,default='0',
+                    help='Testing mode')
+# %%
+# Set args for manual testing
+if hasattr(sys, 'ps1'):
+    args= parser.parse_args(args=[
+        '-pa','/om2/user/khrovati/data/cross_system_integration/adipose_sc_sn_updated/adiposeHsSAT_sc_sn.h5ad',
+        '-ps','/home/moinfar/tmp/',
+        '-sk','system',
+        '-gk','cell_type',
+        '-bk','donor_id',
+
+        '-s','1',
+
+        '-nce','1000',
+
+        '-t','1'
+        ])
+# Read command line args
+else:
+    args, args_unknown = parser.parse_known_args()
+
+print(args)
+
+TESTING=args.testing
+
+if args.name is None:
+    if args.seed is not None:
+        args.name='r'+str(args.seed)
+
+# %%
+# Make folder for saving
+path_save=args.path_save+'harmonypy'+\
+    '_'+''.join(np.random.permutation(list(string.ascii_letters)+list(string.digits))[:8])+\
+    ('-TEST' if TESTING else '')+\
+    os.sep
+
+pathlib.Path(path_save).mkdir(parents=True, exist_ok=False)
+print("PATH_SAVE=",path_save)
+
+# %%
+# Set seed for eval
+# Set only here, below, since randomness is needed above to generate the out directory name
+if args.seed is not None:
+    np.random.seed(args.seed)
+    random.seed(args.seed)
+
+# %%
+# Save args
+pkl.dump(args,open(path_save+'args.pkl','wb'))
+
+# %% [markdown]
+# ## Integration
+
+# %% [markdown]
+# ### Prepare data
+
+# %%
+# Load data
+adata=sc.read(args.path_adata)
+
+# %%
+if TESTING:
+    # Make data smaller if testing the script
+    random_idx=np.random.permutation(adata.obs_names)[:5000]
+    adata=adata[random_idx,:].copy()
+    print(adata.shape)
+
+# %% [markdown]
+# ### Training
+
+# %%
+print('Train')
+
+# %%
+# PCA calculation
+sc.pp.pca(adata)
+
+# %%
+# Run Harmony; pass the tuned theta through to harmonypy
+sce.pp.harmony_integrate(adata, args.batch_key, theta=args.harmony_theta)
+
+# 
%% +adata + +# %% [markdown] +# ### Eval + +# %% [markdown] +# #### Embedding + +# %% +print('Get embedding') + +# %% +embed_full = adata.obsm['X_pca_harmony'] +embed_full=sc.AnnData(embed_full,obs=adata.obs) +# Make system categorical for eval as below +embed_full.obs[args.system_key]=embed_full.obs[args.system_key].astype(str) +# Save full embed +embed_full.write(path_save+'embed_full.h5ad') + +# %% +# Compute embedding +cells_eval=adata.obs_names if args.n_cells_eval==-1 else \ + np.random.RandomState(seed=0).permutation(adata.obs_names)[:args.n_cells_eval] +print('N cells for eval:',cells_eval.shape[0]) +embed = embed_full[cells_eval,:].copy() +# Make system categorical for metrics and plotting +embed.obs[args.system_key]=embed.obs[args.system_key].astype(str) +# Save embed +embed.write(path_save+'embed.h5ad') + +# %% +del adata +del embed +del embed_full + +# %% [markdown] +# # End + +# %% +print('Finished integration!') + +# %% diff --git a/notebooks/eval/cleaned/run_integration_saturn.py b/notebooks/eval/cleaned/run_integration_saturn.py index 5646267..5919945 100644 --- a/notebooks/eval/cleaned/run_integration_saturn.py +++ b/notebooks/eval/cleaned/run_integration_saturn.py @@ -6,11 +6,11 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: -# display_name: saturn +# display_name: sysvi # language: python -# name: saturn +# name: sysvi # --- # %% @@ -34,6 +34,16 @@ import torch import random +# %% + +# %% +adata=sc.read("/home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad") + +# %% +adata.var + +# %% + # %% parser = argparse.ArgumentParser() def intstr_to_bool(x): diff --git a/notebooks/eval/cleaned/run_integration_scgen.py b/notebooks/eval/cleaned/run_integration_scgen.py index 962f511..f74103c 100644 --- a/notebooks/eval/cleaned/run_integration_scgen.py +++ b/notebooks/eval/cleaned/run_integration_scgen.py @@ -22,6 +22,7 @@ seed=np.random.randint(0,1000000) import argparse import os +import pathlib import string import subprocess @@ -114,7 +115,7 @@ def str_to_float_zeronone(x): ('-TEST' if TESTING else '')+\ os.sep -os.mkdir(path_save) +pathlib.Path(path_save).mkdir(parents=True, exist_ok=False) print("PATH_SAVE=",path_save) # %% diff --git a/notebooks/eval/cleaned/run_integration_scglue.py b/notebooks/eval/cleaned/run_integration_scglue.py index 323de11..67a2e7a 100644 --- a/notebooks/eval/cleaned/run_integration_scglue.py +++ b/notebooks/eval/cleaned/run_integration_scglue.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: scglue # language: python @@ -20,7 +20,9 @@ import pandas as pd import numpy as np import argparse +import gc import os +import pathlib import string import subprocess @@ -170,7 +172,7 @@ def str_to_float_zeronone(x): ('-TEST' if TESTING else '')+\ os.sep -os.mkdir(path_save) +pathlib.Path(path_save).mkdir(parents=True, exist_ok=False) print("PATH_SAVE=",path_save) # %% @@ -232,20 +234,24 @@ def str_to_float_zeronone(x): # N systems if SINGLE_ADATA: total_mods = list(adata.obs[args.system_key].unique()) + all_obs_names = list(adata.obs_names) else: total_mods = list(sorted( list(adata.obs[args.system_key].unique()) + list(adata_2.obs[args.system_key].unique()))) + all_obs_names = list(adata.obs_names) + list(adata_2.obs_names) # %% # Prepare adatas mods_adata = {} if SINGLE_ADATA: for mod in total_mods: - mods_adata[mod] = 
adata[adata.obs[args.system_key] == mod] + mods_adata[mod] = adata[adata.obs[args.system_key] == mod].copy() mods_adata[mod].var['original_index'] = mods_adata[mod].var.index mods_adata[mod].var.index = mods_adata[mod].var.index + f'-{mod}' print(f"mod: {mod}\n", mods_adata[mod]) + del adata + gc.collect() else: for adata_current in [adata, adata_2]: mod = adata_current.obs[args.system_key].unique()[0] @@ -378,10 +384,6 @@ def str_to_float_zeronone(x): # %% # Prepare embedding adata -if SINGLE_ADATA: - all_obs_names = list(adata.obs_names) -else: - all_obs_names = list(adata.obs_names) + list(adata_2.obs_names) embed_full = sc.AnnData(combined[all_obs_names].obsm['X_glue'], obs=combined[all_obs_names,:].obs.copy()) cells_eval = all_obs_names if args.n_cells_eval==-1 else \ diff --git a/notebooks/eval/cleaned/run_integration_scvi.py b/notebooks/eval/cleaned/run_integration_scvi.py index ef395d7..b96975e 100644 --- a/notebooks/eval/cleaned/run_integration_scvi.py +++ b/notebooks/eval/cleaned/run_integration_scvi.py @@ -22,6 +22,7 @@ seed=np.random.randint(0,1000000) import argparse import os +import pathlib import string import subprocess @@ -118,7 +119,7 @@ def str_to_float_zeronone(x): ('-TEST' if TESTING else '')+\ os.sep -os.mkdir(path_save) +pathlib.Path(path_save).mkdir(parents=True, exist_ok=False) print("PATH_SAVE=",path_save) # %% diff --git a/notebooks/eval/cleaned/run_integration_seml.py b/notebooks/eval/cleaned/run_integration_seml.py index 4f26eec..cfa801e 100644 --- a/notebooks/eval/cleaned/run_integration_seml.py +++ b/notebooks/eval/cleaned/run_integration_seml.py @@ -1,5 +1,6 @@ import logging from sacred import Experiment +import pathlib import seml import subprocess @@ -102,19 +103,27 @@ def run(eval_type:str, hv_genes:str=None, num_macrogenes:str=None, pe_sim_penalty:str=None, + language:str=None, + reduction:str=None, + k_anchor:str=None, + k_weight:str=None, + harmony_theta:str=None, ): params_info=locals() params_info['seed']=params_info['seed_num'] del params_info['seed_num'] + lang = params_info.pop('language') or 'python' + logging.info('Received the following configuration:') logging.info(str(params_info)) # Prepare args for running script + ignore_args = ['eval_type', 'model'] args=[] for k,v in params_info.items(): - if k!='eval_type' and k!='model' and v is not None: + if k not in ignore_args and v is not None: # Set path save based on eval type # Expects that dirs were created before if k=='path_save': @@ -128,20 +137,33 @@ def run(eval_type:str, # Script to run - based on eval_type model_fn_part='_'+model if model is not None else '' - script=f'run_{eval_type}{model_fn_part}.py' + script=f'run_{eval_type}{model_fn_part}.py' + if model in ['seurat', 'harmony']: + assert lang == 'R' + script = 'run_integration_seurat.R' logging.info('Running integration script') logging.info(script) + # Send stderr to stdout and stdout pipe to output to save in log # (print statements from the child script would else not be saved) - process_integration = subprocess.Popen('conda run -n'.split()+[conda_env, 'python',script]+args, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + if lang == 'python': + process_cmd = 'conda run -n'.split() + [conda_env, 'python', script] + args + elif lang == 'R': + process_cmd = 'bash -x run_r.sh'.split() + [script] + args + else: + raise NotImplementedError() + + logging.info(" ".join(process_cmd)) + process_integration = subprocess.Popen(process_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + # Make sure that process has finished 
res=process_integration.communicate() # Save stdout from the child script for line in res[0].decode(encoding='utf-8').split('\n'): if line.startswith('PATH_SAVE='): - path_save=line.replace('PATH_SAVE=','').strip(' ') + path_save = line.replace('PATH_SAVE=','').strip(' ') + path_save = str(pathlib.Path(path_save)) logging.info(line) # Check that child process did not fail - if this was not checked then # the status of the whole job would be succesfull @@ -159,8 +181,10 @@ def run(eval_type:str, '--batch_key',batch_key, '--testing',str(testing) if testing is not None else '0', ] - process_neigh = subprocess.Popen(['python','run_neighbors.py']+args_neigh, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + process_cmd = ['python', 'run_neighbors.py'] + args_neigh + + logging.info(" ".join(process_cmd)) + process_neigh = subprocess.Popen(process_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Make sure that process has finished res=process_neigh.communicate() # Save stdout from the child script @@ -190,8 +214,11 @@ def run(eval_type:str, logging.info('Computing metrics with param scaled='+scaled) args_metrics_sub=args_metrics.copy() args_metrics_sub.extend(['--scaled',scaled]) - process_metrics = subprocess.Popen(['python','run_metrics.py']+args_metrics_sub, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + + process_cmd = ['python', 'run_metrics.py'] + args_metrics_sub + logging.info(" ".join(process_cmd)) + + process_metrics = subprocess.Popen(process_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Make sure that process has finished res=process_metrics.communicate() # Save stdout from the child script diff --git a/notebooks/eval/cleaned/run_integration_seurat.R b/notebooks/eval/cleaned/run_integration_seurat.R new file mode 100644 index 0000000..22b68b5 --- /dev/null +++ b/notebooks/eval/cleaned/run_integration_seurat.R @@ -0,0 +1,232 @@ +# Load required libraries +library(Seurat) +library(argparse) +library(Matrix) +library(anndata) +library(future) + +# Use all available CPUs for parallel processing +plan("multicore", workers = availableCores()) +options(future.globals.maxSize = 150 * 1024^3) # 150GB + +# Define argument parser +parser <- ArgumentParser() + +parser$add_argument('-n', '--name', required=FALSE, type="character", default=NULL, + help='name of replicate, if unspecified set to rSEED if seed is given, else to blank string') +parser$add_argument('-s', '--seed', required=FALSE, type="integer", default=NULL, + help='random seed, if none it is randomly generated') +parser$add_argument('-po', '--params_opt', required=FALSE, type="character", default='', + help='name of optimized params/test purpose') +parser$add_argument('-pa', '--path_adata', required=TRUE, type="character", + help='full path to rds object containing Seurat object') +parser$add_argument('-ps', '--path_save', required=TRUE, type="character", + help='directory path for saving, creates subdir within it') +parser$add_argument('-sk', '--system_key', required=TRUE, type="character", + help='metadata column with system info') +parser$add_argument('-bk', '--batch_key', required=TRUE, type="character", + help='metadata column with batch info') +parser$add_argument('-nce', '--n_cells_eval', required=FALSE, type="integer", default=-1, + help='Max cells to be used for eval, if -1 use all cells') +parser$add_argument("-rd", "--reduction", help="Seurat reduction", default='cca', type="character") +parser$add_argument('-nl', '--n_latent', required=FALSE, type="integer", default=15, + help='number of 
latent variables') + +# Optimization params +parser$add_argument("--k_anchor", required=FALSE, default=5, type="integer", help="Seurat integration k.anchor (default is 5)") +parser$add_argument("--k_weight", required=FALSE, default=100, type="integer", help="Seurat integration k.weight (default is 100)") +parser$add_argument('--harmony_theta', required = FALSE, default = 1.0, type = "numeric", + help = 'Controls the strength of batch effect correction in Harmony (default is 1.0)') + + +# For testing +parser$add_argument("-t", "--testing", help="Testing mode", default='0', type="character") + + + +# Ignored params +parser$add_argument('-gk', '--group_key', required=TRUE, type="character", + help='metadata column with group info') +parser$add_argument('-me', '--max_epochs', required=FALSE, type="integer", default=50, + help='max_epochs for training') +parser$add_argument('-edp', '--epochs_detail_plot', required=FALSE, type="integer", default=20, + help='Loss subplot from this epoch on') + +# args <- parser$parse_args() # There are excess args +all_args <- parser$parse_known_args() + +args <- all_args[[1]] +unknown_args <- all_args[[2]] + +print(args) +cat("Unrecognized", unknown_args, "\n") + +args$testing <- as.logical(as.integer(args$testing)) + + +################## +# ## For testing in R interactive shell +# args <- list( +# path_adata = "/om2/user/khrovati/data/cross_system_integration/adipose_sc_sn_updated/adiposeHsSAT_sc_sn.h5ad", +# path_save = "/home/moinfar/tmp/", +# testing = FALSE, +# system_key = "system", +# batch_key = "sample_id", +# reduction = "cca", +# n_cells_eval = 50000, +# n_latent = 15, +# harmony_theta = 1., +# k_anchor = 5, +# k_weight=50 +# ) +################## + + + +# Set name if not provided +if (is.null(args$name)) { + if (!is.null(args$seed)) { + args$name <- paste0("r", args$seed) + } else { + args$name <- "" + } +} + +# Create saving directory +path_save <- file.path(args$path_save, paste0("seurat_", args$name, "_", + paste0(sample(c(letters, LETTERS, 0:9), 8, replace=TRUE), collapse=""), + ifelse(args$testing, "-TEST", ""))) +cat("PATH_SAVE=", path_save, "\n") +dir.create(path_save, recursive=TRUE) + +# Set seed +if (!is.null(args$seed)) { + set.seed(args$seed) +} + +yaml::write_yaml(args, file.path(path_save, "args.yml")) + + +# Load data +sdata <- schard::h5ad2seurat(args$path_adata) + +# remove _index column present in obs of one data! 
+if("_index" %in% colnames(sdata@meta.data)) +{ + sdata@meta.data$`_index` <- NULL +} + + +n_hvg <- nrow(sdata) +n_latent = args$n_latent + + +# If testing, reduce dataset size +if (args$testing) { + random_idx <- sample(ncol(sdata), 5000) + sdata <- sdata[, random_idx] + + # remove samples with few cells + # min 200 as k.filter = 200 in FindIntegrationAnchors + cell_counts <- table(sdata@meta.data[[args$batch_key]]) + samples_to_keep <- names(cell_counts[cell_counts > 200]) + sdata <- subset(sdata, cells = Cells(sdata)[sdata@meta.data[[args$batch_key]] %in% samples_to_keep]) + + cat("Dataset shape after reduction:", nrow(sdata), ncol(sdata), "\n") +} + +flush.console() + +# Make final batch variable by concating system and batch +sdata@meta.data[["_batch"]] <- paste(sdata@meta.data[[args$system_key]], sdata@meta.data[[args$batch_key]], sep = "_") + + +### Seurat v5 API ### + +all_features = rownames(sdata) +VariableFeatures(sdata) <- all_features + +# Splitting +sdata[["RNA"]] <- split(sdata[["RNA"]], f = sdata@meta.data[["_batch"]]) + +# Pre-processing + +#### sdata <- NormalizeData(sdata) # Our data is always normalized +sdata <- ScaleData(sdata) +sdata <- RunPCA(sdata, npcs = n_latent) + + +# Integration + +if (args$reduction == "cca") { + min_sample_in_a_batch = min(table(sdata@meta.data[[args$batch_key]])) + sdata <- IntegrateLayers( + object = sdata, + method = CCAIntegration, + features=all_features, + orig.reduction = "pca", + new.reduction = "integrated", + k.anchor = args$k_anchor, + k.weight = args$k_weight, + verbose = TRUE + ) +} +if (args$reduction == "rpca") { + min_sample_in_a_batch = min(table(sdata@meta.data[[args$batch_key]])) + sdata <- IntegrateLayers( + object = sdata, + method = RPCAIntegration, + features=all_features, + orig.reduction = "pca", + new.reduction = "integrated", + k.anchor = args$k_anchor, + k.weight = args$k_weight, + verbose = TRUE + ) +} +if (args$reduction == "harmony") { + sdata <- IntegrateLayers( + object = sdata, + method = HarmonyIntegration, + features=all_features, + orig.reduction = "pca", + new.reduction = "integrated", + npcs=n_latent, + theta = args$harmony_theta, + verbose = TRUE + ) +} + +embed_vactord <- Embeddings(sdata, reduction = "integrated") + + +# Save seurat object + +embed_full_seurat <- CreateSeuratObject(counts = t(embed_vactord), meta.data = sdata@meta.data) +saveRDS(embed_full_seurat, file.path(path_save, "embed_full.rds")) + + +# Convert to anndata and save +embed_full <- AnnData(X = embed_vactord, obs = sdata@meta.data) + +if ((args$n_cells_eval != -1) & (embed_full$n_obs > args$n_cells_eval)) { + set.seed(0) + cells_eval <- sample(Cells(sdata), size = args$n_cells_eval) + embed <- embed_full[embed_full$obs_names %in% cells_eval]$copy() + write_h5ad(embed_full, file.path(path_save, "embed_full.h5ad")) +} else { + embed <- embed_full +} + +write_h5ad(embed, file.path(path_save, "embed.h5ad")) + +cat("Finished integration!") + + + + + + + + + diff --git a/notebooks/eval/cleaned/run_integration_seurat.old.R b/notebooks/eval/cleaned/run_integration_seurat.old.R new file mode 100644 index 0000000..01d70cb --- /dev/null +++ b/notebooks/eval/cleaned/run_integration_seurat.old.R @@ -0,0 +1,249 @@ +# Load required libraries +library(Seurat) +library(argparse) +library(Matrix) +library(anndata) + + +# Define argument parser +parser <- ArgumentParser() + +parser$add_argument('-n', '--name', required=FALSE, type="character", default=NULL, + help='name of replicate, if unspecified set to rSEED if seed is given, else to blank 
string') +parser$add_argument('-s', '--seed', required=FALSE, type="integer", default=NULL, + help='random seed, if none it is randomly generated') +parser$add_argument('-po', '--params_opt', required=FALSE, type="character", default='', + help='name of optimized params/test purpose') +parser$add_argument('-pa', '--path_adata', required=TRUE, type="character", + help='full path to rds object containing Seurat object') +parser$add_argument('-ps', '--path_save', required=TRUE, type="character", + help='directory path for saving, creates subdir within it') +parser$add_argument('-sk', '--system_key', required=TRUE, type="character", + help='metadata column with system info') +parser$add_argument('-bk', '--batch_key', required=TRUE, type="character", + help='metadata column with batch info') +parser$add_argument('-nce', '--n_cells_eval', required=FALSE, type="integer", default=-1, + help='Max cells to be used for eval, if -1 use all cells') +parser$add_argument("-rd", "--reduction", help="Seurat reduction", default='cca', type="character") +parser$add_argument('-nl', '--n_latent', required=FALSE, type="integer", default=15, + help='number of latent variables') + +# Optimization params +parser$add_argument("--k_anchor", required=FALSE, default=5, type="integer", help="Seurat integration k.anchor (default is 5)") +parser$add_argument("--k_weight", required=FALSE, default=100, type="integer", help="Seurat integration k.weight (default is 100)") +parser$add_argument('--harmony_theta', required = FALSE, default = 1.0, type = "numeric", + help = 'Controls the strength of batch effect correction in Harmony (default is 1.0)') + + +# For testing +parser$add_argument("-t", "--testing", help="Testing mode", default='0', type="character") + + + +# Ignored params +parser$add_argument('-gk', '--group_key', required=TRUE, type="character", + help='metadata column with group info') +parser$add_argument('-me', '--max_epochs', required=FALSE, type="integer", default=50, + help='max_epochs for training') +parser$add_argument('-edp', '--epochs_detail_plot', required=FALSE, type="integer", default=20, + help='Loss subplot from this epoch on') + +# args <- parser$parse_args() # There are excess args +all_args <- parser$parse_known_args() + +args <- all_args[[1]] +unknown_args <- all_args[[2]] + +print(args) +cat("Unrecognized", unknown_args, "\n") + +args$testing <- as.logical(as.integer(args$testing)) + + +################## +# ## For testing in R interactive shell +# args <- list( +# path_adata = "/om2/user/khrovati/data/cross_system_integration/adipose_sc_sn_updated/adiposeHsSAT_sc_sn.h5ad", +# path_save = "/home/moinfar/tmp/", +# testing = FALSE, +# system_key = "system", +# batch_key = "sample_id", +# reduction = "cca", +# n_cells_eval = 50000, +# n_latent = 15, +# harmony_theta = 1., +# k_anchor = 5, +# k_weight=50 +# ) +################## + + + +# Set name if not provided +if (is.null(args$name)) { + if (!is.null(args$seed)) { + args$name <- paste0("r", args$seed) + } else { + args$name <- "" + } +} + +# Create saving directory +path_save <- file.path(args$path_save, paste0("seurat_", args$name, "_", + paste0(sample(c(letters, LETTERS, 0:9), 8, replace=TRUE), collapse=""), + ifelse(args$testing, "-TEST", ""))) +cat("PATH_SAVE=", path_save, "\n") +dir.create(path_save, recursive=TRUE) + +# Set seed +if (!is.null(args$seed)) { + set.seed(args$seed) +} + +yaml::write_yaml(args, file.path(path_save, "args.yml")) + + +# Load data +sdata <- schard::h5ad2seurat(args$path_adata) + +# remove _index column present in 
obs of one data! +if("_index" %in% colnames(sdata@meta.data)) +{ + sdata@meta.data$`_index` <- NULL +} + + +n_hvg <- nrow(sdata) +n_latent = args$n_latent + + +# If testing, reduce dataset size +if (args$testing) { + random_idx <- sample(ncol(sdata), 5000) + sdata <- sdata[, random_idx] + + # remove samples with few cells + # min 200 as k.filter = 200 in FindIntegrationAnchors + cell_counts <- table(sdata@meta.data[[args$batch_key]]) + samples_to_keep <- names(cell_counts[cell_counts > 200]) + sdata <- subset(sdata, cells = Cells(sdata)[sdata@meta.data[[args$batch_key]] %in% samples_to_keep]) + + cat("Dataset shape after reduction:", nrow(sdata), ncol(sdata), "\n") +} + +flush.console() + +# Make final batch variable by concating system and batch +sdata@meta.data[["_batch"]] <- paste(sdata@meta.data[[args$system_key]], sdata@meta.data[[args$batch_key]], sep = "_") + + +# ### Seurat v4 API ### +# sdata <- FindVariableFeatures(sdata, nfeatures = n_hvg) + +# # Splitting based on batch (batch covers system) +# seurat_list <- SplitObject(sdata, split.by = args$batch_key) +# seurat_list <- lapply(X = seurat_list, FUN = function(x) { +# x <- ScaleData(x) +# x <- RunPCA(x, npcs = n_latent) +# return(x) +# }) + + +# # Integration using CCA +# anchors <- FindIntegrationAnchors( +# object.list = seurat_list, +# anchor.features = n_hvg, +# dims = 1:n_latent, +# reduction = args$reduction, +# ) +# integrated = IntegrateData( +# anchorset = anchors, +# new.assay.name = "integrated", +# dims = 1:n_latent, +# ) + + +### Seurat v5 API ### + +# Splitting +sdata[["RNA"]] <- split(sdata[["RNA"]], f = sdata@meta.data[["_batch"]]) + +# Pre-processing + +#### sdata <- NormalizeData(sdata) # Our data is always normalized +sdata <- FindVariableFeatures(sdata, nfeatures = n_hvg) # Make all variables as hvg since we already did hvg selection +sdata <- ScaleData(sdata) +sdata <- RunPCA(sdata, npcs = n_latent) + + +# Integration + +if (args$reduction == "cca") { + min_sample_in_a_batch = min(table(sdata@meta.data[[args$batch_key]])) + sdata <- IntegrateLayers( + object = sdata, + method = CCAIntegration, + orig.reduction = "pca", + new.reduction = "integrated", + k.anchor = args$k_anchor, + k.weight = args$k_weight, + verbose = TRUE + ) +} +if (args$reduction == "rpca") { + min_sample_in_a_batch = min(table(sdata@meta.data[[args$batch_key]])) + sdata <- IntegrateLayers( + object = sdata, + method = RPCAIntegration, + orig.reduction = "pca", + new.reduction = "integrated", + k.anchor = args$k_anchor, + k.weight = args$k_weight, + verbose = TRUE + ) +} +if (args$reduction == "harmony") { + sdata <- IntegrateLayers( + object = sdata, + method = HarmonyIntegration, + orig.reduction = "pca", + new.reduction = "integrated", + npcs=n_latent, + theta = args$harmony_theta, + verbose = TRUE + ) +} + +embed_vactord <- Embeddings(sdata, reduction = "integrated") + + +# Save seurat object + +embed_full_seurat <- CreateSeuratObject(counts = t(embed_vactord), meta.data = sdata@meta.data) +saveRDS(embed_full_seurat, file.path(path_save, "embed_full.rds")) + + +# Convert to anndata and save +embed_full <- AnnData(X = embed_vactord, obs = sdata@meta.data) + +if ((args$n_cells_eval != -1) & (embed_full$n_obs > args$n_cells_eval)) { + set.seed(0) + cells_eval <- sample(Cells(sdata), size = args$n_cells_eval) + embed <- embed_full[embed_full$obs_names %in% cells_eval]$copy() + write_h5ad(embed_full, file.path(path_save, "embed_full.h5ad")) +} else { + embed <- embed_full +} + +write_h5ad(embed, file.path(path_save, "embed.h5ad")) + 
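Both Seurat runners end with the same evaluation contract as the Python scripts: write the full embedding, then subsample at most `n_cells_eval` cells with a fixed seed so the evaluation cells match across replicates. A Python sketch of that contract on a toy embedding (names mirror the scripts; the data is made up):

```python
import numpy as np
import scanpy as sc

n_cells_eval = 100
embed_full = sc.AnnData(np.random.rand(500, 15))  # toy 15-dimensional embedding

# Seed 0 is always used for the subset, independent of the run's training seed
cells_eval = (embed_full.obs_names if n_cells_eval == -1 else
              np.random.RandomState(seed=0).permutation(embed_full.obs_names)[:n_cells_eval])
embed = embed_full[cells_eval, :].copy()

embed_full.write('embed_full.h5ad')
embed.write('embed.h5ad')
```

Note that in the R scripts `embed_full.h5ad` is written inside the subsampling branch only, so runs where no subsetting happens produce just `embed.h5ad` (plus `embed_full.rds`); downstream code expecting `embed_full.h5ad` for every run would need that write moved out of the conditional.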
+cat("Finished integration!") + + + + + + + + + diff --git a/notebooks/eval/cleaned/run_integration_sysvi.py b/notebooks/eval/cleaned/run_integration_sysvi.py new file mode 100644 index 0000000..33d0927 --- /dev/null +++ b/notebooks/eval/cleaned/run_integration_sysvi.py @@ -0,0 +1,454 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.3 +# kernelspec: +# display_name: sysvi +# language: python +# name: sysvi +# --- + +# %% [markdown] +# # Evaluate integration accross species + +# %% +import scanpy as sc +import pickle as pkl +import pandas as pd +import numpy as np +# Make random number for seed before scvi import sets seed to 0 +seed=np.random.randint(0,1000000) +import argparse +import os +import sys +import pathlib +import string +import subprocess + +from matplotlib.pyplot import rcParams +import matplotlib.pyplot as plt +import seaborn as sb + +import scvi +from scvi.external import SysVI + +# Otherwise the seed remains constant +from scvi._settings import ScviConfig +config=ScviConfig() +config.seed=seed + +# %% +parser = argparse.ArgumentParser() +def intstr_to_bool(x): + return bool(int(x)) +def str_to_float_zeronone(x): + if x is None or x=="0": + return None + else: + return float(x) +def str_to_weight(x): + # Format: wMIN_MAX_START_END (starts with w and separated by _ ) + # Quick seml fix to pass str and not int/list - add w at start and change sep + x=[float(i) for i in x.replace('w','').split('_')] + if len(x)==1: + x=x[0] + else: + x={'weight_start':x[0], 'weight_end':x[1], + 'point_start':x[2], 'point_end':x[3], + 'update_on':'step'} + return x + +parser.add_argument('-n', '--name', required=False, type=str, default=None, + help='name of replicate, if unspecified set to rSEED if seed is given '+\ + 'and else to blank string') +parser.add_argument('-s', '--seed', required=False, type=int, default=None, + help='random seed, if none it is randomly generated') +parser.add_argument('-loe', '--log_on_epoch', required=False, + type=intstr_to_bool,default='1', + help='if true log on epoch and if false on step. 
Converts 0/1 to bool')
+parser.add_argument('-po', '--params_opt', required=False, type=str, default='',
+                    help='name of optimized params/test purpose')
+parser.add_argument('-pa', '--path_adata', required=True, type=str,
+                    help='full path to adata obj')
+parser.add_argument('-ps', '--path_save', required=True, type=str,
+                    help='directory path for saving, creates subdir within it')
+parser.add_argument('-sk', '--system_key', required=True, type=str,
+                    help='obs col with system info')
+parser.add_argument('-gk', '--group_key', required=True, type=str,
+                    help='obs col with group info - used only for eval, not integration')
+parser.add_argument('-bk', '--batch_key', required=True, type=str,
+                    help='obs col with batch info')
+parser.add_argument('-ts', '--train_size', required=False, type=float,default=0.9,
+                    help='train_size for training')
+parser.add_argument('-p', '--prior', required=False, type=str, default='standard_normal',
+                    help='VAE prior')
+parser.add_argument('-npc', '--n_prior_components', required=False, type=int, default=100,
+                    help='n_prior_components used for vamp prior.'+
+                    'if -1 use as many prior components as there are cell groups'+
+                    '(ignoring nan groups)')
+# N prior components system is int as system itself is 0/1
+parser.add_argument('-pcs', '--prior_components_system', required=False, type=int, default=None,
+                    help='system to sample prior components from.'+
+                    'If -1 samples balanced from both systems'+
+                    'If unspecified samples randomly'+
+                    'Either this or prior_components_group must be None')
+parser.add_argument('-pcg', '--prior_components_group', required=False, type=str, default=None,
+                    help='group to sample prior components from.'+
+                    'If BALANCED sample balanced across groups (ignores nan groups)'+
+                    'If unspecified samples randomly'+
+                    'Either this or prior_components_system must be None')
+parser.add_argument('-tp', '--trainable_priors', required=False,
+                    type=intstr_to_bool,default='1',
+                    help='trainable_priors for module. Converts 0/1 to bool')
+parser.add_argument('-nl', '--n_layers', required=False, type=int, default=2,
+                    help='n_layers of module')
+parser.add_argument('-nh', '--n_hidden', required=False, type=int, default=256,
+                    help='n_hidden of module')
+parser.add_argument('-me', '--max_epochs', required=False, type=int,default=50,
+                    help='max_epochs for training')
+parser.add_argument('-edp', '--epochs_detail_plot', required=False, type=int, default=20,
+                    help='Loss subplot from this epoch on')
+
+parser.add_argument('-kw', '--kl_weight', required=False,
+                    type=str_to_weight,default=1,
+                    help='kl_weight for training')
+parser.add_argument('-rw', '--reconstruction_weight', required=False,
+                    type=str_to_weight,default=1,
+                    help='reconstruction_weight for training')
+parser.add_argument('-zdcw', '--z_distance_cycle_weight', required=False,
+                    type=str_to_weight,default=0,
+                    help='z_distance_cycle_weight for training')
+
+parser.add_argument('-o', '--optimizer', required=False, type=str,default="Adam",
+                    help='optimizer for training plan')
+parser.add_argument('-lr', '--lr', required=False, type=float,default=0.001,
+                    help='learning rate for training plan')
+
+parser.add_argument('-swa', '--swa', required=False,
+                    type=intstr_to_bool, default="0", help='use SWA')
+parser.add_argument('-swalr', '--swa_lr', required=False, type=float, default=0.001,
+                    help='final SWA lr')
+parser.add_argument('-swaes', '--swa_epoch_start', required=False, type=int, default=10,
+                    help='start SWA on epoch')
+parser.add_argument('-swaae', '--swa_annealing_epochs', required=False, type=int,
+                    default=10, help='SWA annealing epochs')
+
+parser.add_argument('-nce', '--n_cells_eval', required=False, type=int, default=-1,
+                    help='Max cells to be used for eval, if -1 use all cells. '+\
+                    'For cell subsetting seed 0 is always used to be reproducible across '+\
+                    'runs with different seeds.')
+
+parser.add_argument('-t', '--testing', required=False, type=intstr_to_bool,default='0',
+                    help='Testing mode')
+# %%
+# Set args for manual testing
+if "ipykernel" in sys.modules and "IPython" in sys.modules:
+    # Note: parse_args (unlike parse_known_args) fails on flags this parser does not define
+    args= parser.parse_args(args=[
+        #'-pa','/lustre/groups/ml01/workspace/karin.hrovatin/data/cross_species_prediction/pancreas_example/combined_tabula_orthologues.h5ad',
+        '-pa','/om2/user/khrovati/data/cross_species_prediction/pancreas_healthy/combined_orthologuesHVG2000.h5ad',
+        #'-pa','/om2/user/khrovati/data/cross_system_integration/pancreas_conditions_MIA_HPAP2/combined_orthologuesHVG.h5ad',
+        #'-fmi','/om2/user/khrovati/data/cross_system_integration/eval/test/integration/example/moransiGenes_mock.pkl',
+        '-ps','/home/moinfar/io/csi/eval/test',
+        '-sk','system',
+        '-gk','cell_type',
+        #'-gk','cell_type_eval',
+        '-bk','sample',
+        #'-bk','batch',
+        '-me','2',
+        '-edp','0',
+        '-p','vamp',
+
+        '-npc','2',
+        '-pcs','-1',
+
+        '-tp','0',
+
+        '-kw','w0_1_1_2',
+        '-rw','1',
+
+        '-s','1',
+
+        # Lr testing
+        '-o','AdamW',
+        '-lr','0.001',
+        # SWA testing
+        '-swa','1',
+
+        '-nce','1000',
+
+        '-t','1'
+        ])
+# Read command line args
+else:
+    args, args_unknown = parser.parse_known_args()
+
+print(args)
+
+TESTING=args.testing
+
+if args.name is None:
+    if args.seed is not None:
+        args.name='r'+str(args.seed)
+
+# %%
+if args.prior_components_group is not None and args.prior_components_system is not None:
+    raise ValueError('Either prior_components_group or prior_components_system must be None')
+
+
+# %%
+# Make folder for saving
+def weight_to_str(x):
+    if isinstance(x,dict):
+        x='-'.join([str(x) for x in x.values() if not isinstance(x,str)])
+# %%
+# Make folder for saving
+def weight_to_str(x):
+    if isinstance(x,dict):
+        x='-'.join([str(v) for v in x.values() if not isinstance(v,str)])
+    else:
+        x=str(x)
+    return x
+
+path_save=args.path_save+\
+    'KLW'+weight_to_str(args.kl_weight)+\
+    'RW'+weight_to_str(args.reconstruction_weight)+\
+    'ZDCW'+weight_to_str(args.z_distance_cycle_weight)+\
+    'P'+str(''.join([i[0] for i in args.prior.split('_')]))+\
+    'NPC'+str(args.n_prior_components)+\
+    'NL'+str(args.n_layers)+\
+    'NH'+str(args.n_hidden)+\
+    '_'+''.join(np.random.permutation(list(string.ascii_letters)+list(string.digits))[:8])+\
+    ('-TEST' if TESTING else '')+\
+    os.sep
+
+pathlib.Path(path_save).mkdir(parents=True, exist_ok=False)
+print("PATH_SAVE=",path_save)
+
+# %%
+# Set seed for eval
+# The seed is set only here (and not earlier) since randomness is needed
+# above to generate the output directory name
+if args.seed is not None:
+    config.seed=args.seed
+
+# %%
+# Save args
+pkl.dump(args,open(path_save+'args.pkl','wb'))
+
+# %% [markdown]
+# ## Integration
+
+# %% [markdown]
+# ### Prepare data
+
+# %%
+# Load data
+adata=sc.read(args.path_adata)
+
+# %%
+if TESTING:
+    # Make data smaller if testing the script
+    random_idx=np.random.permutation(adata.obs_names)[:5000]
+    adata=adata[random_idx,:].copy()
+    print(adata.shape)
+    # Set some groups to nan for testing if this works
+    adata.obs[args.group_key]=[np.nan]*10+list(adata.obs[args.group_key].iloc[10:])
+
+# %% [markdown]
+# ### Training
+
+# %%
+print('Train')
+
+# %%
+# Setup adata
+adata_training = adata.copy()
+SysVI.setup_anndata(
+    adata=adata_training,
+    batch_key=args.system_key,
+    categorical_covariate_keys=[args.batch_key],
+)
+
+# %%
+# Define pseudoinputs
+if args.n_prior_components==-1:
+    n_prior_components=adata_training.obs[args.group_key].dropna().nunique()
+else:
+    n_prior_components=args.n_prior_components
+
+if args.prior_components_group is not None:
+    if args.prior_components_group!='BALANCED':
+        prior_locations=[(args.prior_components_group,n_prior_components)]
+    else:
+        groups=list(np.random.permutation(adata_training.obs[args.group_key].dropna().unique()))
+        multiple, remainder = divmod(n_prior_components, len(groups))
+        prior_locations=[(k,v) for k,v in
+                         pd.Series(groups * multiple + groups[:remainder]).value_counts().items()]
+
+    pdi=[]
+    for (group,npc) in prior_locations:
+        pdi.extend(np.random.permutation(np.argwhere((
+            adata_training.obs[args.group_key]==group
+        ).ravel()).ravel())[:npc])
+    print('Prior components group:')
+    print(adata_training.obs.iloc[pdi,:][args.group_key].value_counts())
+
+elif args.prior_components_system is not None:
+    if args.prior_components_system==-1:
+        npc_half=int(n_prior_components/2)
+        npc_half2=n_prior_components-npc_half
+        prior_locations=[(0,npc_half),(1,npc_half2)]
+    else:
+        prior_locations=[(args.prior_components_system,n_prior_components)]
+    pdi=[]
+    for (system,npc) in prior_locations:
+        pdi.extend(np.random.permutation(np.argwhere((
+            adata_training.obs[args.system_key]==system
+        ).ravel()).ravel())[:npc])
+    print('Prior components system:')
+    print(adata_training.obs.iloc[pdi,:][args.system_key].value_counts())
+
+else:
+    pdi=None
+
+if pdi is not None and len(pdi) != n_prior_components:
+    raise ValueError('Insufficient number of prior components could be sampled')
+
+if pdi is not None:
+    pdi = np.asarray(pdi)
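For reference, the `'BALANCED'` branch above splits the pseudoinputs as evenly as possible across groups via `divmod`; a small worked example (toy group names, not from the data):

```python
# Hedged illustration of the 'BALANCED' allocation: n_prior_components
# pseudoinputs are distributed as evenly as possible across the groups.
import pandas as pd

groups = ['alpha', 'beta', 'delta']   # hypothetical group labels
n_prior_components = 7

multiple, remainder = divmod(n_prior_components, len(groups))  # 2, 1
allocation = pd.Series(groups * multiple + groups[:remainder]).value_counts()
print(allocation.to_dict())  # {'alpha': 3, 'beta': 2, 'delta': 2}
```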
+# %%
+# Train
+model = SysVI(
+    adata=adata_training,
+    prior=args.prior,
+    n_prior_components=n_prior_components,
+    pseudoinputs_data_indices=pdi,
+    trainable_priors=args.trainable_priors,
+    n_layers=args.n_layers,
+    n_hidden=args.n_hidden,
+)
+max_epochs=args.max_epochs if not TESTING else 3
+model.train(max_epochs=max_epochs,
+            log_every_n_steps=1,
+            check_val_every_n_epoch=1,
+            val_check_interval=1.0 if args.log_on_epoch else 1,
+            train_size=args.train_size,
+            plan_kwargs={
+                'optimizer':args.optimizer,
+                'lr':args.lr,
+                'kl_weight':args.kl_weight,
+                'reconstruction_weight':args.reconstruction_weight,
+                'z_distance_cycle_weight':args.z_distance_cycle_weight,
+            })
+
+# %% [markdown]
+# ### Eval
+
+# %% [markdown]
+# #### Losses
+
+# %%
+print('Plot losses')
+
+# %%
+# Save losses
+pkl.dump(model.trainer.logger.history,open(path_save+'losses.pkl','wb'))
+
+# %%
+# Plot all losses
+steps_detail_plot = args.epochs_detail_plot*int(
+    model.trainer.logger.history['validation_loss'].shape[0]/max_epochs)
+detail_plot=args.epochs_detail_plot if args.log_on_epoch else steps_detail_plot
+losses=[k for k in model.trainer.logger.history.keys()
+        if #'_step' not in k and '_epoch' not in k and
+        ('validation' not in k or 'eval' in k)]
+fig,axs=plt.subplots(2,len(losses),figsize=(len(losses)*3,4))
+for ax_i,l_train in enumerate(losses):
+    if 'lr-' not in l_train and '_eval' not in l_train:
+        l_val=l_train.replace('_train','_validation')
+        l_name=l_train.replace('_train','')
+        # Change idx of epochs to start with 1 so that the adjustment below,
+        # used when logging on step (which currently works only for the val
+        # loss), leads to the appropriate multiplication
+        l_val_values=model.trainer.logger.history[l_val].copy()
+        l_val_values.index=l_val_values.index+1
+        l_train_values=model.trainer.logger.history[l_train].copy()
+        l_train_values.index=l_train_values.index+1
+        # This happens when logging on step, as that currently works only for the val loss
+        if l_train_values.shape[0] < l_val_values.shape[0]:
@@ -257,7 +254,7 @@ def intstr_to_bool(x):
     if ILISI_BATCH_SYSTEM or TESTING:
         print('ilisi_batch_system')
         for system in sorted(embed_group.obs[args.system_key].unique()):
-            embed_sub=embed_group[embed_group.obs[args.system_key]==system,:].copy()
+            embed_sub=embed_group[embed_group.obs[args.system_key].astype(str)==str(system),:].copy()
             sc.pp.neighbors(embed_sub, use_rep='X', n_neighbors=90)
             metrics['ilisi_batch_system-'+system], metrics[
                 'ilisi_batch_macro_system-'+system], metrics_data[
@@ -279,5 +276,3 @@ def intstr_to_bool(x):
 
 # %%
 print('Finished metrics!')
-
-# %%
diff --git a/notebooks/eval/cleaned/run_neighbors.py b/notebooks/eval/cleaned/run_neighbors.py
index df95117..510eda3 100644
--- a/notebooks/eval/cleaned/run_neighbors.py
+++ b/notebooks/eval/cleaned/run_neighbors.py
@@ -6,7 +6,7 @@
 #       extension: .py
 #       format_name: percent
 #       format_version: '1.3'
-#       jupytext_version: 1.14.5
+#       jupytext_version: 1.16.3
 #     kernelspec:
 #       display_name: csi
 #       language: python
@@ -14,8 +14,10 @@
 # ---
 
 # %%
+import numpy as np
 import scanpy as sc
 import argparse
+import pathlib
 
 from matplotlib.pyplot import rcParams
 import matplotlib.pyplot as plt
@@ -38,13 +40,7 @@ def intstr_to_bool(x):
 # %%
 # Set args for manual testing
 if False:
-    args= parser.parse_args(args=[
-        '-p','/om2/user/khrovati/data/cross_system_integration/eval/test/integration/example/',
-        '-sk','system',
-        '-gk','cell_type',
-        '-bk','sample',
-        '-t','1',
-    ])
+    args = parser.parse_args(args="--path /home/moinfar/io/csi/eval/retina_adult_organoid/integration/seurat_r3_hPWuSmEE --system_key system --group_key cell_type --batch_key sample_id --testing 0".split(" "))
 # Read command line args
 else:
     args = parser.parse_args()
@@ -54,19 +50,45 @@ def intstr_to_bool(x):
 # %%
 # only save if something was changed
 save=False
+exp_path = pathlib.Path(args.path)
 
 # %%
 # Load embed
-embed=sc.read(args.path+'embed.h5ad')
+embed=sc.read(exp_path / 'embed.h5ad')
+
+# %%
 
 # %%
 # Compute neighbors
 if 'neighbors' not in embed.uns:
     save=True
     print('Computing embedding')
+    # Copy data
+    embed_cp=embed.copy()
     # Use 90 neighbours so that this can be also used for lisi metrics
-    sc.pp.neighbors(embed, use_rep='X', n_neighbors=90)
-    sc.tl.umap(embed)
+    sc.pp.neighbors(embed_cp, use_rep='X', n_neighbors=90)
+
+    ################# Correct neighborhood if overlapping cells exist ########################
+    X = embed_cp.obsp['distances']
+    n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
+    if len(np.unique(n_neighbors)) > 1:
+        print("There are zero distances. Recomputing with some noise ...")
+        embed_cp.X = embed_cp.X + np.random.randn(*embed_cp.X.shape).clip(-1, 1) * 1e-3
+        sc.pp.neighbors(embed_cp, use_rep='X', n_neighbors=90)
+        X = embed_cp.obsp['distances']
+        n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
+        if len(np.unique(n_neighbors)) > 1:
+            raise ValueError("Failed to correct overlapping cells")
+    ################# End correction ########################
+
+    sc.tl.umap(embed_cp)
+
+    # Add back to embed
+    embed.uns['neighbors']=embed_cp.uns['neighbors']
+    embed.obsp['connectivities']=embed_cp.obsp['connectivities']
+    embed.obsp['distances']=embed_cp.obsp['distances']
+    embed.obsm['X_umap']=embed_cp.obsm['X_umap']
+    del embed_cp
 
     # Plot embedding
     rcParams['figure.figsize']=(8,8)
@@ -74,7 +96,7 @@ def intstr_to_bool(x):
     fig,axs=plt.subplots(len(cols),1,figsize=(8,8*len(cols)))
     for col,ax in zip(cols,axs):
         sc.pl.embedding(embed,'X_umap',color=col,s=10,ax=ax,show=False,sort_order=False)
-    plt.savefig(args.path+'umap.png',dpi=300,bbox_inches='tight')
+    plt.savefig(exp_path / 'umap.png',dpi=300,bbox_inches='tight')
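The correction above exploits the fact that scanpy's sparse kNN graph cannot store zero distances, so duplicate (overlapping) cells end up with unequal per-cell neighbor counts; adding tiny clipped Gaussian noise (on the order of 1e-3) breaks the ties. A self-contained illustration of the detection step, with a toy distance matrix:

```python
# Hedged sketch of the duplicate-cell check used above; the matrix below is
# a toy stand-in for adata.obsp['distances'].
import numpy as np
from scipy.sparse import csr_matrix

distances = csr_matrix(np.array([
    [0.0, 0.0, 0.7],   # the zero distance to cell 1 is not stored sparsely,
    [0.0, 0.0, 0.2],   # so cells 0 and 1 appear to have fewer neighbors
    [0.7, 0.2, 0.0],
]))
n_neighbors = np.unique(distances.nonzero()[0], return_counts=True)[1]
if len(np.unique(n_neighbors)) > 1:
    print('Unequal neighbor counts; jitter the embedding and recompute')
```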
@@ -88,6 +110,20 @@ def intstr_to_bool(x):
     sc.pp.scale(embed_scl)
     # Use 90 neighbours so that this can be also used for lisi metrics
     sc.pp.neighbors(embed_scl, use_rep='X', n_neighbors=90,key_added='scaled')
+
+    ################# Correct neighborhood if overlapping cells exist ########################
+    X = embed_scl.obsp['scaled_distances']
+    n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
+    if len(np.unique(n_neighbors)) > 1:
+        print("There are zero distances. Recomputing with some noise ...")
+        embed_scl.X = embed_scl.X + np.random.randn(*embed_scl.X.shape).clip(-1, 1) * 1e-3
+        sc.pp.neighbors(embed_scl, use_rep='X', n_neighbors=90,key_added='scaled')
+        X = embed_scl.obsp['scaled_distances']
+        n_neighbors = np.unique(X.nonzero()[0], return_counts=True)[1]
+        if len(np.unique(n_neighbors)) > 1:
+            raise ValueError("Failed to correct overlapping cells")
+    ################# End correction ########################
+
     sc.tl.umap(embed_scl,neighbors_key='scaled')
     # Add back to embed
     embed.uns['scaled_neighbors']=embed_scl.uns['scaled']
@@ -102,8 +138,7 @@ def intstr_to_bool(x):
     fig,axs=plt.subplots(len(cols),1,figsize=(8,8*len(cols)))
     for col,ax in zip(cols,axs):
         sc.pl.embedding(embed,'X_umap_scaled',color=col,s=10,ax=ax,show=False,sort_order=False)
-    plt.savefig(args.path+'umap_scaled.png',dpi=300,bbox_inches='tight')
-
+    plt.savefig(exp_path / 'umap_scaled.png',dpi=300,bbox_inches='tight')
 
 # %%
 # Compute clusters
@@ -120,7 +155,7 @@ def intstr_to_bool(x):
 # Save embed
 if save:
     print('Saving')
-    embed.write(args.path+'embed.h5ad')
+    embed.write(exp_path / 'embed.h5ad')
 
 # %%
 print('Finished!')
diff --git a/notebooks/eval/cleaned/run_r.sh b/notebooks/eval/cleaned/run_r.sh
new file mode 100644
index 0000000..93f5e5e
--- /dev/null
+++ b/notebooks/eval/cleaned/run_r.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Check if at least one argument (the program.R file) is provided
+if [ "$#" -lt 1 ]; then
+    echo "Usage: $0 program.R [arguments...]"
+    exit 1
+fi
+
+# Extract the program.R file (first argument) and all additional arguments
+PROGRAM_FILE="$1"
+shift # Remove the first argument so $@ contains the remaining arguments
+
+# Load the OpenMind8 Apptainer module
+# Next line does not work. Find the app by loading the module and running `which apptainer`
+# module load openmind8/apptainer
+
+# Run the command inside an Apptainer container
+/cm/shared/openmind8/apptainer/1.1.7/bin/apptainer exec -B /om2/user/moinfar -B /om2/user/khrovati ~/containers/my_ml_verse Rscript "$PROGRAM_FILE" "$@"
diff --git a/notebooks/eval/cleaned/seml_adipose_sc_sn_updated.yaml b/notebooks/eval/cleaned/seml_adipose_sc_sn_updated.yaml
index 3546c9c..997311a 100644
--- a/notebooks/eval/cleaned/seml_adipose_sc_sn_updated.yaml
+++ b/notebooks/eval/cleaned/seml_adipose_sc_sn_updated.yaml
@@ -1,20 +1,21 @@
 seml:
-  executable: /om2/user/khrovati/code/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py
+  executable: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py
   name: csi_eval
-  output_dir: /om2/user/khrovati/data/cross_system_integration/eval/adipose_sc_sn_updated/logs # CHANGE! OUT!
-  project_root_dir: /om2/user/khrovati/code/cross_system_integration/notebooks/eval/cleaned/
+  output_dir: /home/moinfar/io/csi/eval/adipose_sc_sn_updated/logs # CHANGE! OUT!
+  project_root_dir: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned
   conda_environment: csi
 
 slurm:
+  max_simultaneous_jobs: 10
   sbatch_options_template: GPU
   sbatch_options:
-    mem: 20G
+    mem: 60G
     cpus-per-task: 4
-    time: 0-03:00
+    time: 0-12:00
     nice: 10000
 
 fixed:
-  path_save: /om2/user/khrovati/data/cross_system_integration/eval/adipose_sc_sn_updated/ # CHANGE! OUT!
+  path_save: /home/moinfar/io/csi/eval/adipose_sc_sn_updated/ # CHANGE! OUT!
max_epochs: 110 epochs_detail_plot: 40 # Model params @@ -274,4 +275,107 @@ scgen_sample: kl_weight: type: choice options: - - 0.1 \ No newline at end of file + - 0.1 + +seurat_cca: + fixed: + model: seurat + params_opt: seurat_cca_k_anchor + conda_env: NA + reduction: cca + k_weight: 50 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +seurat_rpca: + fixed: + model: seurat + params_opt: seurat_rpca_k_anchor + conda_env: NA + reduction: rpca + k_weight: 35 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +harmony: + fixed: + model: harmonypy + params_opt: harmony_theta + conda_env: csi + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + +harmonyr: + fixed: + model: harmony + params_opt: harmony_theta + conda_env: NA + reduction: harmony + language: R + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + + +sysvi_vamp_cyc: + fixed: + model: sysvi + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2 + conda_env: sysvi_master + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + + +sysvi_vamp_cyc_stable: + fixed: + model: sysvi_stable + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2_stable + conda_env: sysvi + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + \ No newline at end of file diff --git a/notebooks/eval/cleaned/seml_pancreas_conditions_MIA_HPAP2.yaml b/notebooks/eval/cleaned/seml_pancreas_conditions_MIA_HPAP2.yaml index 9a20017..7b604a0 100644 --- a/notebooks/eval/cleaned/seml_pancreas_conditions_MIA_HPAP2.yaml +++ b/notebooks/eval/cleaned/seml_pancreas_conditions_MIA_HPAP2.yaml @@ -1,20 +1,21 @@ seml: - executable: /om2/user/khrovati/code/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py + executable: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py name: csi_eval - output_dir: /om2/user/khrovati/data/cross_system_integration/eval/pancreas_conditions_MIA_HPAP2/logs # CHANGE! OUT! - project_root_dir: /om2/user/khrovati/code/cross_system_integration/notebooks/eval/cleaned/ + output_dir: /home/moinfar/io/csi/eval/pancreas_conditions_MIA_HPAP2/logs # CHANGE! OUT! + project_root_dir: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned conda_environment: csi slurm: - sbatch_options_template: GPU + max_simultaneous_jobs: 20 +# sbatch_options_template: CPU sbatch_options: - mem: 50G - cpus-per-task: 4 - time: 0-05:00 + mem: 200G + cpus-per-task: 16 + time: 0-30:00 nice: 10000 fixed: - path_save: /om2/user/khrovati/data/cross_system_integration/eval/pancreas_conditions_MIA_HPAP2/ # CHANGE! OUT! + path_save: /home/moinfar/io/csi/eval/pancreas_conditions_MIA_HPAP2/ # CHANGE! OUT! 
max_epochs: 20 epochs_detail_plot: 5 # Model params @@ -405,4 +406,107 @@ scgen_sample: kl_weight: type: choice options: - - 0.1 \ No newline at end of file + - 0.1 + +seurat_cca: + fixed: + model: seurat + params_opt: seurat_cca_k_anchor + conda_env: NA + reduction: cca + k_weight: 50 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +seurat_rpca: + fixed: + model: seurat + params_opt: seurat_rpca_k_anchor + conda_env: NA + reduction: rpca + k_weight: 35 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +harmony: + fixed: + model: harmonypy + params_opt: harmony_theta + conda_env: csi + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + +harmonyr: + fixed: + model: harmony + params_opt: harmony_theta + conda_env: NA + reduction: harmony + language: R + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + + +sysvi_vamp_cyc: + fixed: + model: sysvi + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2 + conda_env: sysvi_master + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + + +sysvi_vamp_cyc_stable: + fixed: + model: sysvi_stable + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2_stable + conda_env: sysvi + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + \ No newline at end of file diff --git a/notebooks/eval/cleaned/seml_retina_adult_organoid.yaml b/notebooks/eval/cleaned/seml_retina_adult_organoid.yaml index 6460bef..ed0516d 100644 --- a/notebooks/eval/cleaned/seml_retina_adult_organoid.yaml +++ b/notebooks/eval/cleaned/seml_retina_adult_organoid.yaml @@ -1,20 +1,21 @@ seml: - executable: /om2/user/khrovati/code/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py + executable: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py name: csi_eval - output_dir: /om2/user/khrovati/data/cross_system_integration/eval/retina_adult_organoid/logs # CHANGE! OUT! - project_root_dir: /om2/user/khrovati/code/cross_system_integration/notebooks/eval/cleaned/ + output_dir: /home/moinfar/io/csi/eval/retina_adult_organoid/logs # CHANGE! OUT! + project_root_dir: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned conda_environment: csi slurm: + max_simultaneous_jobs: 10 sbatch_options_template: GPU sbatch_options: - mem: 20G + mem: 60G cpus-per-task: 4 - time: 0-03:00 + time: 0-10:00 nice: 10000 fixed: - path_save: /om2/user/khrovati/data/cross_system_integration/eval/retina_adult_organoid/ # CHANGE! OUT! + path_save: /home/moinfar/io/csi/eval/retina_adult_organoid/ # CHANGE! OUT! 
max_epochs: 100 epochs_detail_plot: 40 # Model params @@ -275,4 +276,106 @@ scgen_sample: kl_weight: type: choice options: - - 0.1 \ No newline at end of file + - 0.1 + +seurat_cca: + fixed: + model: seurat + params_opt: seurat_cca_k_anchor + conda_env: NA + reduction: cca + k_weight: 50 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +seurat_rpca: + fixed: + model: seurat + params_opt: seurat_rpca_k_anchor + conda_env: NA + reduction: rpca + k_weight: 35 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +harmony: + fixed: + model: harmonypy + params_opt: harmony_theta + conda_env: csi + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + +harmonyr: + fixed: + model: harmony + params_opt: harmony_theta + conda_env: NA + reduction: harmony + language: R + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + +sysvi_vamp_cyc: + fixed: + model: sysvi + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2 + conda_env: sysvi_master + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + + +sysvi_vamp_cyc_stable: + fixed: + model: sysvi_stable + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2_stable + conda_env: sysvi + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + \ No newline at end of file diff --git a/notebooks/eval/cleaned/seml_retina_atlas_sc_sn.yaml b/notebooks/eval/cleaned/seml_retina_atlas_sc_sn.yaml new file mode 100644 index 0000000..8f2b4e2 --- /dev/null +++ b/notebooks/eval/cleaned/seml_retina_atlas_sc_sn.yaml @@ -0,0 +1,309 @@ +seml: + executable: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py + name: csi_eval + output_dir: /home/moinfar/io/csi/eval/retina_atlas_sc_sn/logs # CHANGE! OUT! + project_root_dir: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned + conda_environment: csi + +slurm: + max_simultaneous_jobs: 20 + sbatch_options_template: GPU + sbatch_options: + mem: 200G + cpus-per-task: 10 + time: 0-12:00 + nice: 10000 + +fixed: + path_save: /home/moinfar/io/csi/eval/retina_atlas_sc_sn/ # CHANGE! OUT! 
+ max_epochs: 50 + epochs_detail_plot: 10 + # Model params + system_decoders: 0 + # Task + eval_type: integration + # Data + # TODO: add .h5ad + path_adata: /home/moinfar/data/human_retina_atlas/human_retina_atlas_sc_sn_hvg + fn_moransi: /home/moinfar/data/human_retina_atlas/human_retina_atlas_sc_sn_hvg_moransiGenes.pkl + group_key: cell_type + batch_key: donor_id + system_key: system + n_cells_eval: 100000 + conda_env: csi + pca_key: X_pca_system + + +grid: + seed_num: + type: choice + options: + - 1 + - 2 + - 3 + +# small_test: +# fixed: +# params_opt: kl_weight +# max_epochs: 10 +# epochs_detail_plot: 5 +# testing: 1 +# grid: +# kl_weight: +# type: choice +# options: +# - 1 + +klw_opt: + fixed: + params_opt: kl_weight + grid: + kl_weight: + type: choice + options: + - 1 + - 1.5 + - 2 + - 5 + +# vamp_opt_eval: +# fixed: +# params_opt: vamp_eval +# prior: vamp +# encode_pseudoinputs_on_eval_mode: 1 +# grid: +# n_prior_components: +# type: choice +# options: +# - 1 +# - 2 +# - 5 +# - 10 +# - 100 +# - 5000 + +vamp_opt_eval_fixed: + fixed: + params_opt: vamp_eval_fixed + prior: vamp + encode_pseudoinputs_on_eval_mode: 1 + trainable_priors: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval: + fixed: + params_opt: gmm_eval + prior: gmm + encode_pseudoinputs_on_eval_mode: 1 # not needed + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +# gmm_opt_eval_fixed: +# fixed: +# params_opt: gmm_eval_fixed +# prior: gmm +# trainable_priors: 0 +# grid: +# n_prior_components: +# type: choice +# options: +# - 1 +# - 2 +# - 5 +# - 10 +# - 100 +# - 5000 + +# gmm_opt_eval_ri: +# fixed: +# params_opt: gmm_eval_ri +# prior: gmm +# pseudoinputs_data_init: 0 +# grid: +# n_prior_components: +# type: choice +# options: +# - 1 +# - 2 +# - 5 +# - 10 +# - 100 +# - 5000 + +# gmm_opt_eval_ri_fixed: +# fixed: +# params_opt: gmm_eval_ri_fixed +# prior: gmm +# trainable_priors: 0 +# pseudoinputs_data_init: 0 +# grid: +# n_prior_components: +# type: choice +# options: +# - 1 +# - 2 +# - 5 +# - 10 +# - 100 +# - 5000 + +zdcw_opt_std: + fixed: + z_dist_metric: MSE_standard + params_opt: z_distance_cycle_weight_std + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +vamp_eval_zdcw_opt_std: + fixed: + z_dist_metric: MSE_standard + prior: vamp + n_prior_components: 5 + encode_pseudoinputs_on_eval_mode: 1 + params_opt: vamp_z_distance_cycle_weight_std_eval + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +vamp_klw_opt_eval: + fixed: + prior: vamp + n_prior_components: 5 + encode_pseudoinputs_on_eval_mode: 1 + params_opt: vamp_kl_weight_eval + grid: + kl_weight: + type: choice + options: + - 1 + - 1.5 + - 2 + - 5 + +scvi_kl_anneal: + fixed: + model: scvi + params_opt: scvi_kl_anneal + use_n_steps_kl_warmup: 1 + grid: + kl_weight: + type: choice + options: + - 0.5 + - 1 + - 1.5 + - 2 + +scglue_rgw: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_rel_gene_weight + max_epochs: -1 + grid: + rel_gene_weight: + type: choice + options: + - 0.4 + - 0.6 + - 0.8 + - 1 + +scglue_la: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_lam_align + max_epochs: -1 + grid: + lam_align: + type: choice + options: + - 0.0005 + - 0.005 + - 0.05 + - 0.5 + +scglue_lg: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_lam_graph + max_epochs: -1 + grid: + lam_graph: + type: choice + options: + - 0.005 + - 0.1 + - 0.5 
+ - 2 + +scgen: + fixed: + model: scgen + params_opt: scgen_kl + conda_env: perturb + grid: + kl_weight: + type: choice + options: + - 0.1 + +scgen_sample: + fixed: + model: scgen + params_opt: scgen_sample_kl + conda_env: perturb + integrate_by: batch + grid: + kl_weight: + type: choice + options: + - 0.1 + + +harmony: + fixed: + model: harmonypy + params_opt: harmony_theta + conda_env: csi + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + + diff --git a/notebooks/eval/cleaned/seml_skin_mm_hs.yaml b/notebooks/eval/cleaned/seml_skin_mm_hs.yaml new file mode 100644 index 0000000..3bec598 --- /dev/null +++ b/notebooks/eval/cleaned/seml_skin_mm_hs.yaml @@ -0,0 +1,472 @@ +seml: + executable: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py + name: csi_eval + output_dir: /home/moinfar/io/csi/eval/skin_mm_hs/logs # CHANGE! OUT! + project_root_dir: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned + conda_environment: csi + +slurm: + max_simultaneous_jobs: 20 + sbatch_options_template: GPU + sbatch_options: + mem: 200G + cpus-per-task: 10 + time: 0-12:00 + nice: 10000 + +fixed: + path_save: /home/moinfar/io/csi/eval/skin_mm_hs/ # CHANGE! OUT! + max_epochs: 50 + epochs_detail_plot: 10 + # Model params + system_decoders: 0 + # Task + eval_type: integration + # Data + path_adata: /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg.h5ad + fn_moransi: /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg_moransiGenes.pkl + group_key: cell_type_eval + batch_key: batch + system_key: system + n_cells_eval: 100000 + conda_env: csi + pca_key: X_pca_system + saturn_emb: /om2/user/khrovati/data/saturn/protein_embeddings_export/ESM2/ + saturn_code: /home/moinfar/projects/SATURN_fix/ + system_values: mouse-human + var_name_keys: gs_mm-gs_hs + + +grid: + seed_num: + type: choice + options: + - 1 + - 2 + - 3 + +small_test: + fixed: + params_opt: kl_weight + max_epochs: 10 + epochs_detail_plot: 5 + testing: 1 + grid: + kl_weight: + type: choice + options: + - 1 + +klw_opt: + fixed: + params_opt: kl_weight + grid: + kl_weight: + type: choice + options: + - 1 + - 1.5 + - 2 + - 5 + +vamp_opt_eval: + fixed: + params_opt: vamp_eval + prior: vamp + encode_pseudoinputs_on_eval_mode: 1 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +vamp_opt_eval_fixed: + fixed: + params_opt: vamp_eval_fixed + prior: vamp + encode_pseudoinputs_on_eval_mode: 1 + trainable_priors: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval: + fixed: + params_opt: gmm_eval + prior: gmm + encode_pseudoinputs_on_eval_mode: 1 # not needed + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval_fixed: + fixed: + params_opt: gmm_eval_fixed + prior: gmm + trainable_priors: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval_ri: + fixed: + params_opt: gmm_eval_ri + prior: gmm + pseudoinputs_data_init: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval_ri_fixed: + fixed: + params_opt: gmm_eval_ri_fixed + prior: gmm + trainable_priors: 0 + pseudoinputs_data_init: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +zdcw_opt_std: + fixed: + z_dist_metric: MSE_standard + params_opt: 
z_distance_cycle_weight_std + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +vamp_eval_zdcw_opt_std: + fixed: + z_dist_metric: MSE_standard + prior: vamp + n_prior_components: 5 + encode_pseudoinputs_on_eval_mode: 1 + params_opt: vamp_z_distance_cycle_weight_std_eval + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +vamp_klw_opt_eval: + fixed: + prior: vamp + n_prior_components: 5 + encode_pseudoinputs_on_eval_mode: 1 + params_opt: vamp_kl_weight_eval + grid: + kl_weight: + type: choice + options: + - 1 + - 1.5 + - 2 + - 5 + +scvi_kl_anneal: + fixed: + model: scvi + params_opt: scvi_kl_anneal + use_n_steps_kl_warmup: 1 + grid: + kl_weight: + type: choice + options: + - 0.5 + - 1 + - 1.5 + - 2 + +scglue_rgw: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_rel_gene_weight + max_epochs: -1 + grid: + rel_gene_weight: + type: choice + options: + - 0.4 + - 0.6 + - 0.8 + - 1 + +scglue_la: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_lam_align + max_epochs: -1 + grid: + lam_align: + type: choice + options: + - 0.0005 + - 0.005 + - 0.05 + - 0.5 + +scglue_lg: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_lam_graph + max_epochs: -1 + grid: + lam_graph: + type: choice + options: + - 0.005 + - 0.1 + - 0.5 + - 2 + +scgen: + fixed: + model: scgen + params_opt: scgen_kl + conda_env: perturb + grid: + kl_weight: + type: choice + options: + - 0.1 + +scgen_sample: + fixed: + model: scgen + params_opt: scgen_sample_kl + conda_env: perturb + integrate_by: batch + grid: + kl_weight: + type: choice + options: + - 0.1 + + +harmony: + fixed: + model: harmonypy + params_opt: harmony_theta + conda_env: csi + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + + +saturn_psp: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_pe_sim_penalty + max_epochs: 50 + max_epochs_pretrain: 10 + cluster_key: leiden_system + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + + +saturn_psp_no: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_no_pe_sim_penalty + max_epochs: 50 + max_epochs_pretrain: 10 + path_adata: /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad + path_adata_2: /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad + cluster_key: leiden_system + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + +saturn_psp_super: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_pe_sim_penalty_super + max_epochs: 50 + max_epochs_pretrain: 10 + cluster_key: cell_type_eval + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + +saturn_psp_no_super: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_no_pe_sim_penalty_super + max_epochs: 50 + max_epochs_pretrain: 10 + cluster_key: cell_type_eval + path_adata: /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad + path_adata_2: /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + + +seurat_cca: + fixed: + model: seurat + params_opt: seurat_cca_k_anchor + conda_env: NA + reduction: cca + # k_weight: 50 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 
50
+
+seurat_rpca:
+  fixed:
+    model: seurat
+    params_opt: seurat_rpca_k_anchor
+    conda_env: NA
+    reduction: rpca
+    # k_weight: 35 # We have to enforce this since one batch is small
+    language: R
+  grid:
+    k_anchor:
+      type: choice
+      options:
+        - 2
+        - 5
+        - 10
+        - 20
+        - 50
+
+
+sysvi_vamp_cyc:
+  fixed:
+    model: sysvi
+    prior: vamp
+    n_prior_components: 5
+    params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2
+    conda_env: sysvi_master
+  grid:
+    z_distance_cycle_weight:
+      type: choice
+      options:
+        - 2
+        - 5
+        - 10
+        - 50
+
+
+sysvi_vamp_cyc_stable:
+  fixed:
+    model: sysvi_stable
+    prior: vamp
+    n_prior_components: 5
+    params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2_stable
+    conda_env: sysvi
+  grid:
+    z_distance_cycle_weight:
+      type: choice
+      options:
+        - 2
+        - 5
+        - 10
+        - 50
+
+harmonyr:
+  fixed:
+    model: harmony
+    params_opt: harmony_theta
+    conda_env: NA
+    reduction: harmony
+    language: R
+  grid:
+    harmony_theta:
+      type: choice
+      options:
+        - 0.1
+        - 1.0
+        - 5.0
+        - 10.0
+        - 100.0
\ No newline at end of file
diff --git a/notebooks/eval/cleaned/seml_skin_mm_hs_limited.yaml b/notebooks/eval/cleaned/seml_skin_mm_hs_limited.yaml
new file mode 100644
index 0000000..2fd9ec9
--- /dev/null
+++ b/notebooks/eval/cleaned/seml_skin_mm_hs_limited.yaml
@@ -0,0 +1,472 @@
+seml:
+  executable: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned/run_integration_seml.py
+  name: csi_eval
+  output_dir: /home/moinfar/io/csi/eval/skin_mm_hs_limited/logs # CHANGE! OUT!
+  project_root_dir: /home/moinfar/projects/cross_system_integration/notebooks/eval/cleaned
+  conda_environment: csi
+
+slurm:
+  max_simultaneous_jobs: 20
+  sbatch_options_template: GPU
+  sbatch_options:
+    mem: 200G
+    cpus-per-task: 10
+    time: 0-12:00
+    nice: 10000
+
+fixed:
+  path_save: /home/moinfar/io/csi/eval/skin_mm_hs_limited/ # CHANGE! OUT!
+ max_epochs: 50 + epochs_detail_plot: 10 + # Model params + system_decoders: 0 + # Task + eval_type: integration + # Data + path_adata: /home/moinfar/data/skin_mouse_human/processed/limited_data_skin_mm_hs_hvg.h5ad + fn_moransi: /home/moinfar/data/skin_mouse_human/processed/limited_data_skin_mm_hs_hvg_moransiGenes.pkl + group_key: cell_type_eval + batch_key: batch + system_key: system + n_cells_eval: 100000 + conda_env: csi + pca_key: X_pca_system + saturn_emb: /om2/user/khrovati/data/saturn/protein_embeddings_export/ESM2/ + saturn_code: /home/moinfar/projects/SATURN_fix/ + system_values: mouse-human + var_name_keys: gs_mm-gs_hs + + +grid: + seed_num: + type: choice + options: + - 1 + - 2 + - 3 + +small_test: + fixed: + params_opt: kl_weight + max_epochs: 10 + epochs_detail_plot: 5 + testing: 1 + grid: + kl_weight: + type: choice + options: + - 1 + +klw_opt: + fixed: + params_opt: kl_weight + grid: + kl_weight: + type: choice + options: + - 1 + - 1.5 + - 2 + - 5 + +vamp_opt_eval: + fixed: + params_opt: vamp_eval + prior: vamp + encode_pseudoinputs_on_eval_mode: 1 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +vamp_opt_eval_fixed: + fixed: + params_opt: vamp_eval_fixed + prior: vamp + encode_pseudoinputs_on_eval_mode: 1 + trainable_priors: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval: + fixed: + params_opt: gmm_eval + prior: gmm + encode_pseudoinputs_on_eval_mode: 1 # not needed + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval_fixed: + fixed: + params_opt: gmm_eval_fixed + prior: gmm + trainable_priors: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval_ri: + fixed: + params_opt: gmm_eval_ri + prior: gmm + pseudoinputs_data_init: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +gmm_opt_eval_ri_fixed: + fixed: + params_opt: gmm_eval_ri_fixed + prior: gmm + trainable_priors: 0 + pseudoinputs_data_init: 0 + grid: + n_prior_components: + type: choice + options: + - 1 + - 2 + - 5 + - 10 + - 100 + - 5000 + +zdcw_opt_std: + fixed: + z_dist_metric: MSE_standard + params_opt: z_distance_cycle_weight_std + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +vamp_eval_zdcw_opt_std: + fixed: + z_dist_metric: MSE_standard + prior: vamp + n_prior_components: 5 + encode_pseudoinputs_on_eval_mode: 1 + params_opt: vamp_z_distance_cycle_weight_std_eval + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +vamp_klw_opt_eval: + fixed: + prior: vamp + n_prior_components: 5 + encode_pseudoinputs_on_eval_mode: 1 + params_opt: vamp_kl_weight_eval + grid: + kl_weight: + type: choice + options: + - 1 + - 1.5 + - 2 + - 5 + +scvi_kl_anneal: + fixed: + model: scvi + params_opt: scvi_kl_anneal + use_n_steps_kl_warmup: 1 + grid: + kl_weight: + type: choice + options: + - 0.5 + - 1 + - 1.5 + - 2 + +scglue_rgw: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_rel_gene_weight + max_epochs: -1 + grid: + rel_gene_weight: + type: choice + options: + - 0.4 + - 0.6 + - 0.8 + - 1 + +scglue_la: + fixed: + model: scglue + conda_env: scglue + params_opt: scglue_lam_align + max_epochs: -1 + grid: + lam_align: + type: choice + options: + - 0.0005 + - 0.005 + - 0.05 + - 0.5 + +scglue_lg: + fixed: + model: scglue + conda_env: scglue + 
params_opt: scglue_lam_graph + max_epochs: -1 + grid: + lam_graph: + type: choice + options: + - 0.005 + - 0.1 + - 0.5 + - 2 + +scgen: + fixed: + model: scgen + params_opt: scgen_kl + conda_env: perturb + grid: + kl_weight: + type: choice + options: + - 0.1 + +scgen_sample: + fixed: + model: scgen + params_opt: scgen_sample_kl + conda_env: perturb + integrate_by: batch + grid: + kl_weight: + type: choice + options: + - 0.1 + + +harmony: + fixed: + model: harmonypy + params_opt: harmony_theta + conda_env: csi + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 + + +saturn_psp: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_pe_sim_penalty + max_epochs: 50 + max_epochs_pretrain: 10 + cluster_key: leiden_system + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + + +saturn_psp_no: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_no_pe_sim_penalty + max_epochs: 50 + max_epochs_pretrain: 10 + path_adata: /home/moinfar/data/skin_mouse_human/processed/limited_data_skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad + path_adata_2: /home/moinfar/data/skin_mouse_human/processed/limited_data_skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad + cluster_key: leiden_system + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + +saturn_psp_super: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_pe_sim_penalty_super + max_epochs: 50 + max_epochs_pretrain: 10 + cluster_key: cell_type_eval + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + +saturn_psp_no_super: + fixed: + model: saturn + conda_env: saturn + params_opt: saturn_no_pe_sim_penalty_super + max_epochs: 50 + max_epochs_pretrain: 10 + cluster_key: cell_type_eval + path_adata: /home/moinfar/data/skin_mouse_human/processed/limited_data_skin_mm_hs_hvg-mmPart_nonortholHVG.h5ad + path_adata_2: /home/moinfar/data/skin_mouse_human/processed/limited_data_skin_mm_hs_hvg-hsPart_nonortholHVG.h5ad + grid: + pe_sim_penalty: + type: choice + options: + - 0.01 + - 0.1 + - 1 + - 10 + + +seurat_cca: + fixed: + model: seurat + params_opt: seurat_cca_k_anchor + conda_env: NA + reduction: cca + # k_weight: 50 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + +seurat_rpca: + fixed: + model: seurat + params_opt: seurat_rpca_k_anchor + conda_env: NA + reduction: rpca + # k_weight: 35 # We have to enforce this since one batch is small + language: R + grid: + k_anchor: + type: choice + options: + - 2 + - 5 + - 10 + - 20 + - 50 + + +sysvi_vamp_cyc: + fixed: + model: sysvi + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2 + conda_env: sysvi_master + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + + +sysvi_vamp_cyc_stable: + fixed: + model: sysvi_stable + prior: vamp + n_prior_components: 5 + params_opt: sysvi_vamp_cyc_z_distance_cycle_weight_2_stable + conda_env: sysvi + grid: + z_distance_cycle_weight: + type: choice + options: + - 2 + - 5 + - 10 + - 50 + +harmonyr: + fixed: + model: harmony + params_opt: harmony_theta + conda_env: NA + reduction: harmony + language: R + grid: + harmony_theta: + type: choice + options: + - 0.1 + - 1.0 + - 5.0 + - 10.0 + - 100.0 \ No newline at end of file diff --git a/notebooks/results/batch_strength/batch_strength_across_datasets.py b/notebooks/results/batch_strength/batch_strength_across_datasets.py 
index 3aa2e59..02a68bd 100644 --- a/notebooks/results/batch_strength/batch_strength_across_datasets.py +++ b/notebooks/results/batch_strength/batch_strength_across_datasets.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -26,9 +26,9 @@ import matplotlib.pyplot as plt # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' -path_names=path_data+'names_parsed/' -path_fig=path_data+'figures/' +path_data='/home/moinfar/data/' +path_fig='/home/moinfar/io/csi/figures/' +path_names='/home/moinfar/io/csi/names_parsed/' # %% # Names @@ -56,13 +56,16 @@ # %% # Load results dataset_metric_fns={ - 'pancreas_conditions_MIA_HPAP2':'combined_orthologuesHVG', - 'retina_adult_organoid':'combined_HVG', - 'adipose_sc_sn_updated':'adiposeHsSAT_sc_sn', + 'pancreas_conditions_MIA_HPAP2':('pancreas_conditions_MIA_HPAP2','combined_orthologuesHVG'), + 'retina_adult_organoid':('retina_adult_organoid', 'combined_HVG'), + 'adipose_sc_sn_updated':('adipose_sc_sn_updated', 'adiposeHsSAT_sc_sn'), + 'retina_atlas_sc_sn':('human_retina_atlas', 'human_retina_atlas_sc_sn_hvg'), + 'skin_mm_hs':('skin_mouse_human/processed', 'skin_mm_hs_hvg'), + 'skin_mm_hs_limited':('skin_mouse_human/processed', 'limited_data_skin_mm_hs_hvg'), } res={} -for dataset,fn_part in dataset_metric_fns.items(): - res[dataset]=pkl.load(open(f'{path_data}{dataset}/{fn_part}_embed_integrationMetrics.pkl','rb' +for dataset,(dataset_path,fn_part) in dataset_metric_fns.items(): + res[dataset]=pkl.load(open(f'{path_data}{dataset_path}/{fn_part}_embed_integrationMetrics.pkl','rb' ))['asw_batch']['asw_data_label'].values # %% @@ -74,7 +77,7 @@ # %% # Plot -fig,ax=plt.subplots(figsize=(1,1)) +fig,ax=plt.subplots(figsize=(2,3)) sb.swarmplot(x=score_name,y=ds_name,data=res,s=3,c='k') ax.set(facecolor = (0,0,0,0)) ax.spines['top'].set_visible(False) diff --git a/notebooks/results/batch_strength/batch_strength_plot.py b/notebooks/results/batch_strength/batch_strength_plot.py index 59d6486..6c26307 100644 --- a/notebooks/results/batch_strength/batch_strength_plot.py +++ b/notebooks/results/batch_strength/batch_strength_plot.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -27,26 +27,21 @@ from matplotlib.gridspec import GridSpec # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' -path_fig=path_data+'figures/' -path_names=path_data+'names_parsed/' +path_data='/home/moinfar/data/' +path_fig='/home/moinfar/io/csi/figures/' +path_names='/home/moinfar/io/csi/names_parsed/' # %% # Load distances dataset_map=pkl.load(open(path_names+'datasets.pkl','rb')) system_map=pkl.load(open(path_names+'systems.pkl','rb')) - -# %% -dataset_metric_fns={ - 'pancreas_conditions_MIA_HPAP2':'combined_orthologuesHVG', - 'retina_adult_organoid':'combined_HVG', - 'adipose_sc_sn_updated':'adiposeHsSAT_sc_sn', -} +dataset_path=pkl.load(open(path_names+'dataset_path.pkl','rb')) +dataset_h5ad_path=pkl.load(open(path_names+'dataset_h5ad_path.pkl','rb')) # %% distances={ - dataset:pkl.load(open(path_data+dataset+'/'+fn+'_PcaSysBatchDist.pkl','rb')) - for dataset, fn in dataset_metric_fns.items()} + dataset:pkl.load(open(dataset_path[dataset]+'/'+dataset_h5ad_path[dataset][:-5]+'_PcaSysBatchDist.pkl','rb')) + for dataset in dataset_path.keys()} # %% [markdown] # ## Plot one cell 
type in all sample pair groups (delta cells of pancreas)
@@ -107,7 +102,7 @@
 # Use gridspec as different datasets have different number of categories
 heights=[scores.query('dataset==@dataset').compared.nunique() for dataset in dataset_map]
 nrows=len(heights)
-fig = plt.figure(figsize=(1.4, 4.2))
+fig = plt.figure(figsize=(1.4, 1.4 * len(heights)))
 fig.set(facecolor = (0,0,0,0))
 gs = GridSpec(nrows, 1, height_ratios=heights)
 # Set one ax as ref so that ax can be shared
@@ -125,7 +120,8 @@
                for x in data_plot.compared.unique()}
     data_plot['compared_name']=data_plot.compared.map(compared_map)
     sb.swarmplot(x='score',y='compared_name',data=data_plot,ax=ax, c='k',s=2)
-    ax.set_ylabel(dataset_name.replace('-','-\n'))
+    # ax.set_ylabel(dataset_name.replace('-','-\n'))
+    ax.set_ylabel("\n".join(dataset_name.rsplit(" ", 1)))
     if idx==nrows-1:
         ax.set_xlabel('Distances between samples\n(mean per cell type)')
         ax.xaxis.set_label_coords(-0.11, -0.3)
@@ -142,3 +138,5 @@
 plt.savefig(path_fig+f'batch_strength-overview_absolute-swarm.png',dpi=300,bbox_inches='tight')
 
 # %%
+
+# %%
diff --git a/notebooks/results/names_parsed.py b/notebooks/results/names_parsed.py
index dec0dd9..2dc4755 100644
--- a/notebooks/results/names_parsed.py
+++ b/notebooks/results/names_parsed.py
@@ -6,7 +6,7 @@
 #       extension: .py
 #       format_name: percent
 #       format_version: '1.3'
-#       jupytext_version: 1.14.5
+#       jupytext_version: 1.16.3
 #     kernelspec:
 #       display_name: csi
 #       language: python
@@ -17,13 +17,17 @@
 # # Set names and cmaps for plotting
 
 # %%
+import os
 import pickle as pkl
 
 # %%
-path_data='/om2/user/khrovati/data/cross_system_integration/'
+path_data='/home/moinfar/io/csi/'
 path_save=path_data+'names_parsed/'
 
+# %%
+# ! mkdir /home/moinfar/io/csi/names_parsed/
+
 # %% [markdown]
 # ## Names
 
@@ -36,9 +40,14 @@
     'vamp_cycle':'VAMP+CYC',
     'cVAE': 'cVAE',
     'scvi': 'scVI',
+    'harmony': 'Harmony',
+    'harmonypy': 'Harmony-py',
     'scglue': 'GLUE',
+    'seurat': 'Seurat',
     'saturn': 'SATURN',
-    'saturn_super': 'SATURN-CT'
+    'saturn_super': 'SATURN-CT',
+    'sysvi': 'SysVI',
+    'sysvi_stable': 'SysVI-stable',
 }
 pkl.dump(model_map,open(path_save+'models.pkl','wb'))
@@ -75,9 +84,12 @@
 # %%
 # Datasets
 dataset_map={
-    'pancreas_conditions_MIA_HPAP2':'Mouse-Human',
-    'retina_adult_organoid':'Organoid-Tissue',
-    'adipose_sc_sn_updated':'Cell-Nuclei',
+    'pancreas_conditions_MIA_HPAP2':'Pancreas Mouse-Human',
+    'retina_adult_organoid':'Retina Organoid-Tissue',
+    'adipose_sc_sn_updated':'Adipose Cell-Nuclei',
+    'retina_atlas_sc_sn':'Retina Atlas Cell-Nuclei',
+    'skin_mm_hs':'Skin Mouse-Human',
+    'skin_mm_hs_limited':'Limited Skin Mouse-Human',
 }
 pkl.dump(dataset_map,open(path_save+'datasets.pkl','wb'))
@@ -99,6 +111,18 @@
         ('pe_sim_penalty',[0.01, 0.1, 1.0, 10.0]) ]),
     (['cycle', 'vamp_cycle'],[
         ('z_distance_cycle_weight',[2.0, 5.0, 10.0, 50.0]) ]),
+    (['seurat_cca'],[
+        ('seurat_cca_k_anchor',[2, 5, 10, 20, 50]) ]),
+    (['seurat_rpca'],[
+        ('seurat_rpca_k_anchor',[2, 5, 10, 20, 50]) ]),
+    (['harmony'],[
+        ('harmony_theta',[0.1, 1.0, 5., 10., 100.]) ]),
+    (['harmonypy'],[
+        ('harmonypy_theta',[0.1, 1.0, 5., 10., 100.]) ]),
+    (['sysvi'],[
+        ('z_distance_cycle_weight',[2.0, 5.0, 10.0, 50.0]) ]),
+    (['sysvi_stable'],[
+        ('z_distance_cycle_weight',[2.0, 5.0, 10.0, 50.0]) ]),
 ]
 pkl.dump(param_vals,open(path_save+'optimized_parameter_values.pkl','wb'))
@@ -135,6 +159,12 @@
     'vamp_z_distance_cycle_weight_std_eval':'vamp_cycle',
     'vamp_kl_weight_eval':'vamp',
     'z_distance_cycle_weight_std':'cycle',
+    'seurat_cca_k_anchor':'seurat',
+    'seurat_rpca_k_anchor':'seurat',
+    'harmony_theta':'harmony',
+    'harmonypy_theta':'harmonypy',
+    'sysvi_vamp_cyc_z_distance_cycle_weight_2':'sysvi',
+    'sysvi_vamp_cyc_z_distance_cycle_weight_2_stable':'sysvi_stable',
 }
 pkl.dump(params_opt_map,open(path_save+'params_opt_model.pkl','wb'))
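A hedged usage sketch of the pickles written above: downstream evaluation notebooks can resolve a run's `params_opt` value to a display name by chaining the two maps (the path matches `path_save` in this file; the lookup chain itself is illustrative):

```python
# Hedged sketch: resolve a recorded params_opt value to a model display name.
import pickle as pkl

path_names = '/home/moinfar/io/csi/names_parsed/'
params_opt_map = pkl.load(open(path_names + 'params_opt_model.pkl', 'rb'))
model_map = pkl.load(open(path_names + 'models.pkl', 'rb'))

params_opt = 'seurat_cca_k_anchor'            # as recorded in a run's args
print(model_map[params_opt_map[params_opt]])  # -> 'Seurat'
```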
@@ -161,6 +191,11 @@
     'lam_align':'Alignment LW',
     'rel_gene_weight':'Graph W',
     'pe_sim_penalty':'Protein sim. LW',
+    'k_anchor':'K.Anchor',
+    'theta':'Theta',
+    'harmonypy_theta': 'Theta (Harmony-py)',
+    'harmony_theta': 'Theta (Harmony)',
     None:'None',
 }
 pkl.dump(param_names,open(path_save+'params.pkl','wb'))
@@ -177,6 +212,7 @@
 # Map params_opt to gene relationship type
 params_opt_gene_map={
     'kl_weight':'OTO',
+    'z_distance_cycle_weight_std':'OTO',
     'saturn_pe_sim_penalty':'OTO',
     'saturn_pe_sim_penalty_no':'FO',
     'saturn_pe_sim_penalty_super':'OTO',
@@ -190,7 +226,12 @@
     'scvi_kl_anneal':'OTO',
     'vamp_kl_weight_eval':'OTO',
     'vamp_z_distance_cycle_weight_std_eval':'OTO',
-    'z_distance_cycle_weight_std':'OTO',
+    'seurat_cca_k_anchor':'OTO',
+    'seurat_rpca_k_anchor':'OTO',
+    'harmony_theta':'OTO',
+    'harmonypy_theta':'OTO',
+    'sysvi_vamp_cyc_z_distance_cycle_weight_2': 'OTO',
+    'sysvi_vamp_cyc_z_distance_cycle_weight_2_stable': 'OTO',
 }
 pkl.dump(params_opt_gene_map,open(path_save+'params_opt_genes.pkl','wb'))
@@ -200,6 +241,9 @@
     'pancreas_conditions_MIA_HPAP2':{'0':'Mouse','1':'Human'},
     'retina_adult_organoid':{'0':'Organoid','1':'Tissue'},
     'adipose_sc_sn_updated':{'0':'Cell','1':'Nuclei'},
+    'retina_atlas_sc_sn':{'0':'Cell','1':'Nuclei'},
+    'skin_mm_hs':{'0':'Mouse','1':'Human'},
+    'skin_mm_hs_limited':{'0':'Mouse','1':'Human'},
 }
 pkl.dump(system_map,open(path_save+'systems.pkl','wb'))
@@ -264,7 +308,68 @@
     'nk_cell': 'NK cell',
     'pericyte': 'Pericyte',
     't_cell': 'T cell'
-    }
+    },
+    'retina_atlas_sc_sn':{
+        'retinal rod cell': 'Retinal rod cell',
+        'Mueller cell': 'Mueller cell',
+        'retinal bipolar neuron': 'Retinal bipolar neuron',
+        'rod bipolar cell': 'Rod bipolar cell',
+        'glycinergic amacrine cell': 'Glycinergic amacrine cell',
+        'GABAergic amacrine cell': 'GABAergic amacrine cell',
+        'amacrine cell': 'Amacrine cell',
+        'retinal cone cell': 'Retinal cone cell',
+        'S cone cell': 'S cone cell',
+        'starburst amacrine cell': 'Starburst amacrine cell',
+        'H1 horizontal cell': 'H1 horizontal cell',
+        'H2 horizontal cell': 'H2 horizontal cell',
+        'midget ganglion cell of retina': 'Midget ganglion cell of retina',
+        'astrocyte': 'Astrocyte',
+        'retinal ganglion cell': 'Retinal ganglion cell',
+        'microglial cell': 'Microglial cell',
+        'parasol ganglion cell of retina': 'Parasol ganglion cell of retina',
+        'retinal pigment epithelial cell': 'Retinal pigment epithelial cell',
+        'invaginating midget bipolar cell': 'Invaginating midget bipolar cell',
+        'diffuse bipolar 1 cell': 'Diffuse bipolar 1 cell',
+        'OFFx cell': 'OFFx cell',
+        'flat midget bipolar cell': 'Flat midget bipolar cell',
+        'diffuse bipolar 2 cell': 'Diffuse bipolar 2 cell',
+        'giant bipolar cell': 'Giant bipolar cell',
+        'diffuse bipolar 6 cell': 'Diffuse bipolar 6 cell',
+        'diffuse bipolar 3b cell': 'Diffuse bipolar 3b cell',
+        'diffuse bipolar 4 cell': 'Diffuse bipolar 4 cell',
+        'diffuse bipolar 3a cell': 'Diffuse bipolar 3a cell',
+        'ON-blue cone bipolar cell': 'ON-blue cone bipolar cell',
+    },
+    'skin_mm_hs': {
+        'CD3E & CPA3': 'CD3E+ and CPA3+',
+        'Chondrocyte': 'Chondrocyte',
+        'cytotoxic t cell': 'Cytotoxic T cell',
+        'Endothelial': 'Endothelial',
+        'Epidermal': 'Epidermal',
+        'Fibroblast': 'Fibroblast',
+        'helper t cell': 'Helper T cell',
+        'Immune': 'Immune',
+        'Lymphatic': 'Lymphatic',
+        'Neutrophil': 'Neutrophil',
+        'Pericyte': 'Pericyte',
+        'regulatory t cell': 'Regulatory T cell',
+        'SOX10': 'SOX10+',
+    },
+    'skin_mm_hs_limited': {
+        'CD3E & CPA3': 'CD3E+ and CPA3+',
+        'Chondrocyte': 'Chondrocyte',
+        'cytotoxic t cell': 'Cytotoxic T cell',
+        'Endothelial': 'Endothelial',
+        'Epidermal': 'Epidermal',
+        'Fibroblast': 'Fibroblast',
+        'helper t cell': 'Helper T cell',
+        'Immune': 'Immune',
+        'Lymphatic': 'Lymphatic',
+        'Neutrophil': 'Neutrophil',
+        'Pericyte': 'Pericyte',
+        'regulatory t cell': 'Regulatory T cell',
+        'SOX10': 'SOX10+',
+    },
+}
 pkl.dump(cell_type_map,open(path_save+'cell_types.pkl','wb'))
@@ -276,6 +381,31 @@
 }
 pkl.dump(prior_init_map,open(path_save+'prior_init.pkl','wb'))
 
+# %% [markdown]
+# ## Paths
+
+# %%
+dataset_path = {
+    'pancreas_conditions_MIA_HPAP2': '/om2/user/khrovati/data/cross_system_integration/pancreas_conditions_MIA_HPAP2',
+    'retina_adult_organoid': '/om2/user/khrovati/data/cross_system_integration/retina_adult_organoid',
+    'adipose_sc_sn_updated': '/om2/user/khrovati/data/cross_system_integration/adipose_sc_sn_updated',
+    'retina_atlas_sc_sn': '/home/moinfar/data/human_retina_atlas',
+    'skin_mm_hs': '/home/moinfar/data/skin_mouse_human/processed',
+    'skin_mm_hs_limited': '/home/moinfar/data/skin_mouse_human/processed',
+}
+pkl.dump(dataset_path,open(path_save+'dataset_path.pkl','wb'))
+
+# %%
+dataset_h5ad = {
+    'pancreas_conditions_MIA_HPAP2': 'combined_orthologuesHVG.h5ad',
+    'retina_adult_organoid': 'combined_HVG.h5ad',
+    'adipose_sc_sn_updated': 'adiposeHsSAT_sc_sn.h5ad',
+    'retina_atlas_sc_sn': 'human_retina_atlas_sc_sn_hvg.h5ad',
+    'skin_mm_hs': 'skin_mm_hs_hvg.h5ad',
+    'skin_mm_hs_limited': 'limited_data_skin_mm_hs_hvg.h5ad',
+}
+pkl.dump(dataset_h5ad,open(path_save+'dataset_h5ad_path.pkl','wb'))
+
 # %% [markdown]
 # ## Color maps
@@ -290,23 +420,23 @@
 # %%
 # Model colors
 models=[m for m in model_map.values() if m!='non-integrated']
-colors =[mcolors.to_hex(color) for color in sb.color_palette("colorblind")]
+colors =[mcolors.to_hex(color) for color in sb.color_palette("colorblind")] + [mcolors.to_hex(color) for color in sb.color_palette("dark")]
 palette=dict(zip(models,colors[:len(models)]))
 pkl.dump(palette,open(path_save+'model_cmap.pkl','wb'))
 
 # %%
 # UMAP metadata colloring for individual datasets
 cmaps=defaultdict(dict)
-for fn,cols in [
-    ('adipose_sc_sn_updated/adiposeHsSAT_sc_sn.h5ad',
-     ['cluster','system','donor_id']),
-    ('pancreas_conditions_MIA_HPAP2/combined_orthologuesHVG.h5ad',
-     ['cell_type_eval','system','batch','leiden_system']),
-    ('retina_adult_organoid/combined_HVG.h5ad',
-     ['cell_type','system','sample_id'])
+for dataset,cols in [
+    ('adipose_sc_sn_updated', ['cluster','system','donor_id']),
+    ('pancreas_conditions_MIA_HPAP2', ['cell_type_eval','system','batch','leiden_system']),
+    ('retina_adult_organoid', ['cell_type','system','sample_id']),
+    ('retina_atlas_sc_sn', ['cell_type','system','donor_id']),
+    ('skin_mm_hs', ['cell_type_eval', 'system', 'batch']),
+    ('skin_mm_hs_limited', ['cell_type_eval', 'system', 'batch']),
 ]:
-    dataset=fn.split('/')[0]
-    adata=sc.read(path_data+fn,backed='r')
+    fn = os.path.join(dataset_path[dataset], dataset_h5ad[dataset])
+    adata=sc.read(fn,backed='r')
     for col in cols:
         adata.obs[col]=adata.obs[col].astype(str)
         adata.obs[col]=pd.Categorical(values=adata.obs[col],
@@ -333,5 +463,16 @@
     'pancreas_conditions_MIA_HPAP2':'#8a9e59',
     'retina_adult_organoid':'#c97fac',
     'adipose_sc_sn_updated':'#92C2D0',
+    'retina_atlas_sc_sn':'#97d092',
+    'skin_mm_hs':'#b07c0c',
+    'skin_mm_hs_limited':'#d94214',
 }
 pkl.dump(palette,open(path_save+'dataset_cmap.pkl','wb'))
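These two pickles are consumed together downstream (for instance, performance.py later in this diff joins them to build `dataset_embed_fns`); a small hedged usage sketch:

```python
# Hedged usage sketch: combine the two pickles saved above to locate a
# dataset's expression file.
import os
import pickle as pkl

path_names = '/home/moinfar/io/csi/names_parsed/'
dataset_path = pkl.load(open(path_names + 'dataset_path.pkl', 'rb'))
dataset_h5ad = pkl.load(open(path_names + 'dataset_h5ad_path.pkl', 'rb'))

fn = os.path.join(dataset_path['skin_mm_hs'], dataset_h5ad['skin_mm_hs'])
print(fn)  # /home/moinfar/data/skin_mouse_human/processed/skin_mm_hs_hvg.h5ad
```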
+
+# %%
+
+# %%
+# Don't forget to update this file as well when adding a method:
+# .../eval/cleaned/params_opt_maps.py
+
+# %%
diff --git a/notebooks/results/performance/HP_metric_corr.py b/notebooks/results/performance/HP_metric_corr.py
index f71ca20..f9e449d 100644
--- a/notebooks/results/performance/HP_metric_corr.py
+++ b/notebooks/results/performance/HP_metric_corr.py
@@ -6,7 +6,7 @@
 #       extension: .py
 #       format_name: percent
 #       format_version: '1.3'
-#       jupytext_version: 1.14.5
+#       jupytext_version: 1.16.3
 #     kernelspec:
 #       display_name: csi
 #       language: python
@@ -18,6 +18,7 @@
 import numpy as np
 import scanpy as sc
 import pickle as pkl
+import yaml
 import math
 import glob
 import os
@@ -32,7 +33,7 @@
 from params_opt_maps import *
 
 # %%
-path_data='/om2/user/khrovati/data/cross_system_integration/'
+path_data='/home/moinfar/io/csi/'
 path_names=path_data+'names_parsed/'
 path_fig=path_data+'figures/'
 
@@ -65,9 +66,12 @@
     path_integration=f'{path_data}eval/{dataset}/integration/'
     res=[]
     for run in glob.glob(path_integration+'*/'):
-        if os.path.exists(run+'args.pkl') and \
+        if (os.path.exists(run+'args.pkl') or os.path.exists(run+'args.yml')) and \
             os.path.exists(run+'scib_metrics.pkl'):
-            args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb'))))
+            if os.path.exists(run+'args.pkl'):
+                args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb'))))
+            if os.path.exists(run+'args.yml'):
+                args=pd.Series(yaml.safe_load(open(run+'args.yml','rb')))
             metrics=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb')))
             data=pd.concat([args,metrics])
             name=run.split('/')[-2]
@@ -90,6 +94,14 @@
     res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True)
+    res['params_opt']=np.where(res.index.str.contains('harmonypy'),
+                               res['params_opt'].replace({'harmony_theta': 'harmonypy_theta'}),
+                               res['params_opt'])
+    res['param_opt_col']=np.where(res.index.str.contains('harmonypy'),
+                                  res['param_opt_col'].replace({'harmony_theta': 'harmonypy_theta'}),
+                                  res['param_opt_col'])
+    res['harmonypy_theta'] = res['harmony_theta']
+
     # Keep relevant params and name model
     params_opt_vals=set(params_opt_map.keys())
     res_sub=res.query('params_opt in @params_opt_vals').copy()
@@ -149,6 +161,13 @@
     ]+['none'],
     ordered=True)
 
+# %%
+ress = ress.query('testing == False')
+ress.drop_duplicates(['model_parsed', 'param_parsed', 'genes_parsed', 'dataset_parsed', 'name', 'seed', 'params_opt', 'param_opt_val'], inplace=True)
+
+# %%
+ress = ress[~(ress['model_parsed'].str.contains('SysVI'))].copy()
+
 # %%
 # HP-metric correlations
 corrs=[]
@@ -172,7 +191,7 @@
 # Plot
 palette={dataset_map[dataset]:color for dataset,color in dataset_cmap.items()}
 g=sb.catplot(x='Optimized',y='HP corr.',hue='Dataset',row='metric', data=corrs,
-             height=1.5,aspect=3, kind='swarm',palette=palette)
+             height=2.5,aspect=3, kind='swarm',palette=palette)
 # Make pretty
 g.axes[-1][0].set_xticklabels(g.axes[-1][0].get_xticklabels(),rotation=90)
 for ax in g.axes:
diff --git a/notebooks/results/performance/moransi_example.py b/notebooks/results/performance/moransi_example.py
index 907eae2..54d7be4 100644
--- a/notebooks/results/performance/moransi_example.py
+++ b/notebooks/results/performance/moransi_example.py
@@ -6,7 +6,7 @@
 #       extension: .py
 #       format_name: percent
 #       format_version: '1.3'
-#       jupytext_version: 1.14.5
+#       jupytext_version: 1.16.3
 #     kernelspec:
 #       display_name: csi
 #       language: python
@@ -24,10 +24,10 @@
 import seaborn as sb
 
 # 
%% -path_data='/om2/user/khrovati/data/cross_system_integration/' -path_names=path_data+'names_parsed/' -path_embed=path_data+'eval/pancreas_conditions_MIA_HPAP2/integration_summary/moransi/' -path_fig=path_data+'figures/' +path_data='/home/moinfar/data/csi/' +path_names='/home/moinfar/io/csi/'+'names_parsed/' +path_embed='/home/moinfar/io/csi/'+'eval/pancreas_conditions_MIA_HPAP2/integration_summary/moransi/' +path_fig='/home/moinfar/io/csi/'+'figures/' # %% model_map=pkl.load(open(path_names+'models.pkl','rb')) @@ -35,7 +35,7 @@ # %% # Load embeddings and sort+rename for plotting embeds=pkl.load(open(path_embed+'pancreas_STZG1_healthyvar_topmodels.pkl','rb')) -embeds={parsed:embeds[name] for name,parsed in model_map.items()} +embeds={parsed:embeds[name] for name,parsed in model_map.items() if (name in embeds) and ('sysvi' not in name)} # %% # gene group score columns @@ -84,3 +84,5 @@ dpi=300,bbox_inches='tight') # %% + +# %% diff --git a/notebooks/results/performance/performance.py b/notebooks/results/performance/performance.py index f677d42..e8603d9 100644 --- a/notebooks/results/performance/performance.py +++ b/notebooks/results/performance/performance.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -18,9 +18,12 @@ import numpy as np import scanpy as sc import pickle as pkl +import yaml import math import glob import os +import itertools +from copy import deepcopy from scipy.stats import ttest_ind from statsmodels.stats.multitest import multipletests @@ -30,16 +33,26 @@ import seaborn as sb import matplotlib.colors as mcolors +from pathlib import Path import sys sys.path.append('/'.join(os.getcwd().split('/')[:-2]+['eval','cleaned',''])) from params_opt_maps import * # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' +import warnings +# warnings.filterwarnings(action='once') +warnings.filterwarnings('ignore') + +# %% +path_data='/home/moinfar/io/csi/' path_names=path_data+'names_parsed/' path_fig=path_data+'figures/' path_tab=path_data+'tables/' +# %% +Path(path_fig).mkdir(parents=True, exist_ok=True) +Path(path_tab).mkdir(parents=True, exist_ok=True) + # %% # Names model_map={**pkl.load(open(path_names+'models.pkl','rb')), @@ -61,13 +74,19 @@ obs_col_cmap=pkl.load(open(path_names+'obs_col_cmap.pkl','rb')) metric_background_cmap=pkl.load(open(path_names+'metric_background_cmap.pkl','rb')) +# data +dataset_path=pkl.load(open(path_names+'dataset_path.pkl','rb')) +dataset_h5ad_path=pkl.load(open(path_names+'dataset_h5ad_path.pkl','rb')) + # %% [markdown] # ## Top settings # Best performing parameter setting results +# %% +load_embed = True + # %% # Load metrics and embeddings -load_embed=True metrics={} if load_embed: embeds={} @@ -82,15 +101,21 @@ for model,model_setting in top_settings.items(): model=model_map[model] for run in model_setting['runs']: - args_run=pkl.load(open(path_integration+run+'/args.pkl','rb')) + if os.path.exists(path_integration+run+'/args.pkl'): + args_run=vars(pkl.load(open(path_integration+run+'/args.pkl','rb'))) + else: + args_run=yaml.safe_load(open(path_integration+run+'/args.yml','rb')) metrics_data=pd.Series( pkl.load(open(path_integration+run+'/scib_metrics.pkl','rb')), name=model) - metrics_data['seed']=args_run.seed + metrics_data['seed']=args_run['seed'] metrics[dataset_name].append(metrics_data) if run==model_setting['mid_run']: if load_embed: - 
embeds[dataset_name][model]=sc.read(path_integration+run+'/embed.h5ad') + print(dataset_name, model, path_integration+run+'/embed.h5ad') + embed=sc.read(path_integration+run+'/embed.h5ad') + sc.pp.subsample(embed, fraction=1.) + embeds[dataset_name][model]=embed args[dataset_name][model]=args_run metrics[dataset_name]=pd.DataFrame(metrics[dataset_name]) metrics[dataset_name]['model']=pd.Categorical( @@ -101,17 +126,28 @@ # Seed to str for plotting metrics[dataset_name]['seed']=metrics[dataset_name]['seed'].astype(str) +# %% +with open("/home/moinfar/io/csi/tmp/metrics.pkl", "wb") as f: + pkl.dump(metrics, f) + +# %% +with open("/home/moinfar/io/csi/tmp/metrics.pkl", "rb") as f: + metrics = pkl.load(f) + # %% # Add non-integrated embeds if load_embed: dataset_embed_fns={ - 'pancreas_conditions_MIA_HPAP2':'combined_orthologuesHVG_embed.h5ad', - 'retina_adult_organoid':'combined_HVG_embed.h5ad', - 'adipose_sc_sn_updated':'adiposeHsSAT_sc_sn_embed.h5ad', + dataset: os.path.join(dataset_path[dataset], dataset_h5ad_path[dataset]) + for dataset in dataset_path.keys() } for dataset,dataset_name in dataset_map.items(): - embeds[dataset_name][model_map['non-integrated']]=sc.read( - f'{path_data}{dataset}/{dataset_embed_fns[dataset]}') + adata = sc.read(dataset_embed_fns[dataset][:-len('.h5ad')] + '_embed.h5ad') + if adata.n_obs > 100_000: + sc.pp.subsample(adata, n_obs=100_000) + else: + sc.pp.subsample(adata, fraction=1.) + embeds[dataset_name][model_map['non-integrated']] = adata # %% # Add scGEN example embeds to retina @@ -122,7 +158,10 @@ path_integration=f'{path_data}eval/{dataset}/integration/' for model,run in example_runs.items(): model=model_map[model] - embeds[dataset_name][model]=sc.read(path_integration+run+'/embed.h5ad') + embed=sc.read(path_integration+run+'/embed.h5ad') + sc.pp.subsample(embed, fraction=1.) + embeds[dataset_name][model]=embed + # %% @@ -136,65 +175,267 @@ # ### Metric scores # %% -# Plot metric scores +model_cmap + +# %% +# Remove harmony (R) as it performs worse and does not scale to large datasets. We use harmonypy instead. + +all_models = [ + "SysVI", + "SysVI-stable", + "VAMP+CYC", + "CYC", "VAMP", + "cVAE", "scVI", + "Harmony", + "Seaurat", + "Harmony-py", + "GLUE", + "SATURN", "SATURN-CT" +] +model_order = [ + # "SysVI", + # "SysVI-stable", + "VAMP+CYC", + "CYC", "VAMP", + "cVAE", "scVI", + "Seaurat", + "Harmony", + "Harmony-py", + "GLUE", + "SATURN", "SATURN-CT" +] +drop_models = [model_ for model_ in all_models if model_ not in model_order] -# X range for metrics axes -xlims={} -for metric in metric_map.values(): - mins=[] - maxs=[] # %% +# Remove harmony (R) as it performs worse and does not scale to large datasets. We use harmonypy instead. 
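+# Dropping runs via `query` below is not enough on its own: the Categorical is
+# rebuilt right after, so the removed model names also disappear from the
+# category levels, otherwise they would linger as empty rows in the swarm plots.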
+ +for metrics_sub in metrics.values(): + metrics_sub.query('model not in @drop_models', inplace=True) + metrics_sub["model"] = pd.Categorical(metrics_sub["model"].astype(str), categories=model_order) + + +# %% + +# %% +# total_score_formula = lambda metrics_df: 0.3 * metrics_df["NMI"] + 0.3 * metrics_df["Moran's I"] + 0.4 * metrics_df["iLISI"] +# total_score_formula = lambda metrics_df: (0.3 * metrics_df["NMI"]**2 + 0.3 * metrics_df["Moran's I"]**2 + 0.4 * metrics_df["iLISI"]**2)**0.5 +def add_total_score_formula(metrics_df): + min_nmi, max_nmi = metrics_df["NMI"].min(), metrics_df["NMI"].max() + min_mori, max_mori = metrics_df["Moran's I"].min(), metrics_df["Moran's I"].max() + min_ilisi, max_ilisi = metrics_df["iLISI"].min(), metrics_df["iLISI"].max() + + bio_score = 1 / 2 * ( + (metrics_df["NMI"] - min_nmi) / (max_nmi - min_nmi) + + (metrics_df["Moran's I"] - min_mori) / (max_mori - min_mori) + ) + batch_score = (metrics_df["iLISI"] - min_ilisi) / (max_ilisi - min_ilisi) + metrics_df['bio'] = bio_score + metrics_df['batch'] = batch_score + metrics_df['total_score'] = 0.6 * bio_score + 0.4 * batch_score + + +for metrics_sub in metrics.values(): + # metrics_sub['total_score'] = total_score_formula(metrics_sub) + add_total_score_formula(metrics_sub) + metrics_sub['This paper'] = metrics_sub.index.isin(['CYC', 'VAMP', 'VAMP+CYC']) + +# %% +xlims = {} +for metric in list(metric_map.values()) + ['total_score']: + mins = [] + maxs = [] for data in metrics.values(): mins.append(data[metric].min()) maxs.append(data[metric].max()) - x_min=min(mins) - x_max=max(maxs) - x_buffer=(x_max-x_min)*0.15 - x_min=x_min-x_buffer - x_max=x_max+x_buffer - xlims[metric]=(x_min,x_max) + x_min = min(mins) + x_max = max(maxs) + x_buffer = (x_max - x_min) * 0.15 + x_min = x_min - x_buffer + x_max = x_max + x_buffer + xlims[metric] = (x_min, x_max) # Plots -n_rows=len(metrics) -n_cols=len(metric_map) -fig,axs=plt.subplots(n_rows,n_cols,figsize=(2*n_cols,len(metric_map)/1.5*n_rows), - sharey='row',sharex='col') -for row,(dataset_name,metrics_sub) in enumerate(metrics.items()): - for col,metric in enumerate(metric_map.values()): - ax=axs[row,col] - means=metrics_sub.groupby('model')[metric].mean().reset_index() - sb.swarmplot(y='model',x=metric,data=metrics_sub,ax=ax, - edgecolor='k',linewidth=0.25, - hue='model',palette=model_cmap, s=5, zorder=1) - sb.scatterplot(y='model',x=metric,data=means,ax=ax, - edgecolor='k',linewidth=2.5, +n_rows = len(metrics) +n_cols = len(metric_map)# + 1 # Add one column for the scatter plot +fig, axs = plt.subplots(n_rows, n_cols, figsize=(2 * n_cols, len(metric_map) / 1.5 * n_rows), + sharey=False, sharex='col') + +for row, (dataset_name, metrics_sub) in enumerate(metrics.items()): + for col, metric in enumerate(list(metric_map.values())): + ax = axs[row, col] + means = metrics_sub.groupby('model')[metric].mean().reset_index() + sb.swarmplot(y='model', x=metric, data=metrics_sub, ax=ax, + edgecolor='k', linewidth=0.25, + hue='model', palette=model_cmap, s=5, zorder=1) + sb.scatterplot(y='model', x=metric, data=means, ax=ax, + edgecolor='k', linewidth=2.5, color='k', s=150, marker='|', zorder=2) # Make pretty - ax.set_xlim(xlims[metric][0],xlims[metric][1]) + ax.set_xlim(xlims[metric][0], xlims[metric][1]) ax.get_legend().remove() ax.tick_params(axis='y', which='major', length=0) - ax.set(facecolor = (0,0,0,0)) + ax.set(facecolor=(0, 0, 0, 0)) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) - if row!=(n_rows-1): + if row != (n_rows - 1): ax.set_xlabel('') 
ax.tick_params(axis='x', which='major', length=0) else: ax.locator_params(axis='x', nbins=5) - if row==0: - ax.set_title(metric_meaning_map[metric_map_rev[metric]],fontsize=10) - if col==0: - ax.set_ylabel(dataset_name) + if row == 0: + title = metric_meaning_map[metric_map_rev[metric]] if metric != 'total_score' else 'Total Score' + ax.set_title(title, fontsize=10) + if col == 0: + ax.set_ylabel(dataset_name.replace(" ", "\n")) else: ax.set_ylabel('') - ax.grid(which='major', axis='x',linestyle=(0, (5, 10)),lw=0.5) -fig.set(facecolor = (0,0,0,0)) -plt.subplots_adjust( wspace=0.05,hspace=0.05) + ax.tick_params(axis='y', which='both', labelleft=False) + ax.grid(which='major', axis='x', linestyle=(0, (5, 10)), lw=0.5) + +fig.set(facecolor=(0, 0, 0, 0)) +plt.subplots_adjust(wspace=0.3, hspace=0.05) # Save -plt.savefig(path_fig+'performance-score_topsettings-swarm.pdf', - dpi=300,bbox_inches='tight') -plt.savefig(path_fig+'performance-score_topsettings-swarm.png', - dpi=300,bbox_inches='tight') +plt.savefig(path_fig + 'combined_performance.pdf', dpi=300, bbox_inches='tight') +plt.savefig(path_fig + 'combined_performance.png', dpi=300, bbox_inches='tight') +plt.show() + +# %% +xlims = {} +for metric in list(metric_map.values()) + ['total_score']: + mins = [] + maxs = [] + for data in metrics.values(): + mins.append(data[metric].min()) + maxs.append(data[metric].max()) + x_min = min(mins) + x_max = max(maxs) + x_buffer = (x_max - x_min) * 0.15 + x_min = x_min - x_buffer + x_max = x_max + x_buffer + xlims[metric] = (x_min, x_max) + +# Plots +n_rows = len(metrics) +n_cols = len(metric_map) + 1 # Add one column for the scatter plot +fig, axs = plt.subplots(n_rows, n_cols, figsize=(2 * n_cols, len(metric_map) / 1.5 * n_rows), + sharey=False, sharex='col') + +for row, (dataset_name, metrics_sub) in enumerate(metrics.items()): + for col, metric in enumerate(list(metric_map.values())): + ax = axs[row, col] + means = metrics_sub.groupby('model')[metric].mean().reset_index() + sb.swarmplot(y='model', x=metric, data=metrics_sub, ax=ax, + edgecolor='k', linewidth=0.25, + hue='model', palette=model_cmap, s=5, zorder=1) + sb.scatterplot(y='model', x=metric, data=means, ax=ax, + edgecolor='k', linewidth=2.5, + color='k', s=150, marker='|', zorder=2) + # Make pretty + ax.set_xlim(xlims[metric][0], xlims[metric][1]) + ax.get_legend().remove() + ax.tick_params(axis='y', which='major', length=0) + ax.set(facecolor=(0, 0, 0, 0)) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + if row != (n_rows - 1): + ax.set_xlabel('') + ax.tick_params(axis='x', which='major', length=0) + else: + ax.locator_params(axis='x', nbins=5) + if row == 0: + title = metric_meaning_map[metric_map_rev[metric]] if metric != 'total_score' else 'Total Score' + ax.set_title(title, fontsize=10) + if col == 0: + ax.set_ylabel(dataset_name.replace(" ", "\n")) + else: + ax.set_ylabel('') + ax.tick_params(axis='y', which='both', labelleft=False) + ax.grid(which='major', axis='x', linestyle=(0, (5, 10)), lw=0.5) + + ax = axs[row, -1] + if row == 0: + ax.set_title("Bio vs. 
Batch", fontsize=10) + # Add scatter plot for bio vs batch as the last column + # Make runs in small dots and avg in large dots + sb.scatterplot(data=metrics_sub, x='batch', y='bio', hue='model', ax=ax, style="This paper", + palette=model_cmap, s=10, edgecolor='k', linewidth=0.1) + metrics_sub_avg=metrics_sub.groupby('model')[['bio','batch','This paper']].mean().reset_index().rename({'model':'Model'},axis=1) + metrics_sub_avg['This paper']=metrics_sub_avg['This paper'].map({1:'Yes',0:'No'}) + sb.scatterplot( + data=metrics_sub_avg, + x='batch', y='bio', hue='Model', ax=ax, style="This paper", + palette=model_cmap, s=40, edgecolor='k', linewidth=0.3) + + # Calculate and plot the average for each method + # for model_name in metrics_sub['model'].unique(): + # model_data = metrics_sub[metrics_sub['model'] == model_name] + # avg_batch = model_data['batch'].mean() + # avg_bio = model_data['bio'].mean() + # ax.scatter(avg_batch, avg_bio, color='k', s=20, marker="D", zorder=-3) # Black dot for average + + # # Annotate with the model name + # ax.annotate(model_name, (avg_batch, avg_bio), textcoords="offset points", xytext=(7, 0), + # ha='left', fontsize=8, color='k', zorder=-4) + + + ax.set_ylabel('Bio') + ax.set_xlabel('Batch') + # Remove top and right solid border lines + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + ax.grid(True, which='major', linestyle='--', linewidth=0.5) + if row>0: + ax.get_legend().remove() + else: + # Add plot b legend + handles,labels=ax.get_legend_handles_labels() + keep_legend_part=int(len(handles)/2) + handles=handles[keep_legend_part:] + labels=labels[keep_legend_part:] + handle_run=deepcopy(handles[1]) + handle_mean=deepcopy(handles[1]) + # handle_run.set_markerfacecolor('k') + # handle_run.set_markersize(3) + # handle_mean.set_markerfacecolor('k') + handles=handles+[ + deepcopy(handles[0]), + handle_run, + handle_mean, + + ] + labels=labels+[ + 'Measured', + 'Run', + 'Average' + ] + ax.legend(handles=handles,labels=labels, + bbox_to_anchor=(1.05,0.95)) + + +fig.set(facecolor=(0, 0, 0, 0)) +fig.subplots_adjust(wspace=0.1, hspace=0.05) + +# Move the last column away from the rest of the plot +for ax in axs[:,-1]: + ax.set_position([ax.get_position().x0+0.07,ax.get_position().y0,ax.get_position().width,ax.get_position().height]) + +# Save +fig.savefig(path_fig + 'combined_performance_and_scatter.pdf', dpi=300, bbox_inches='tight') +fig.savefig(path_fig + 'combined_performance_and_scatter.png', dpi=300, bbox_inches='tight') +fig.show() + +# %% + +# %% + +# %% +metric_meaning_map={'nmi_opt': 'Bio (coarse)', 'moransi': 'Bio (fine)', 'ilisi_system': 'Batch'} +metric_map_rev={'NMI': 'nmi_opt', "Moran's I": 'moransi', 'iLISI': 'ilisi_system'} +metric_map={'nmi_opt': 'NMI', 'moransi': "Moran's I", 'ilisi_system': 'iLISI'} + +# %% # %% [markdown] # Significance of difference between pairs of models per metric and datatset. Pvalue adjustment is performed per metric and dataset across model pairs. @@ -211,9 +452,14 @@ for m2 in range(m1+1,n_models): model1=models[m1] model2=models[m2] - t,p=ttest_ind( - metrics_sub.loc[model1,metric], metrics_sub.loc[model2,metric], - equal_var=False, alternative='two-sided') + try: + t,p=ttest_ind( + metrics_sub.loc[model1,metric], metrics_sub.loc[model2,metric], + equal_var=False, alternative='two-sided') + except Exception as e: + print(e) + print(f"{dataset_name}: Could not find metrics for {metric} for at least {model1} or {model2}") + t,p=0., 1. 
pvals_sub.append(dict( dataset=dataset_name,metric=metric,p=p,t=t, model_cond=model1,model_ctrl=model2, @@ -238,7 +484,7 @@ sys_col_name='System' sample_col_name='Sample' for dataset,dataset_name in dataset_map.items(): - embeds_ds=embeds[dataset_name] + embeds_ds={k:v for k,v in embeds[dataset_name].items() if k not in drop_models} ncols=3 nrows=len(embeds_ds) fig,axs=plt.subplots(nrows,ncols,figsize=(2*ncols,2*nrows)) @@ -247,9 +493,13 @@ # Plot every model for irow,model_name in enumerate([m for m in model_map.values() if m in embeds_ds]): embed=embeds_ds[model_name] + if 'X_umap' not in embed.obsm: + print(f"'X_umap' not in embed.obsm for {model_name}") + continue + for icol,(col_name,col) in enumerate(zip( [ct_col_name,sys_col_name,sample_col_name], - [args_sub.group_key,args_sub.system_key,args_sub.batch_key])): + [args_sub['group_key'],args_sub['system_key'],args_sub['batch_key']])): # Set cmap and col val names cmap=obs_col_cmap[dataset_map_rev[dataset_name]][col] @@ -266,6 +516,7 @@ # Plot ax=axs[irow,icol] + sc.pp.subsample(embed, fraction=1.) sc.pl.umap(embed,color=col+'_parsed',ax=ax,show=False, palette=cmap, frameon=False,title='') @@ -305,14 +556,23 @@ ress=[] for dataset,dataset_name in dataset_map.items(): print(dataset_name) + + top_settings=pkl.load(open(f'{path_data}eval/{dataset}/integration_summary/top_settings.pkl','rb')) + top_runs = sum([v['runs'] for k, v in top_settings.items()], []) + path_integration=f'{path_data}eval/{dataset}/integration/' res=[] for run in glob.glob(path_integration+'*/'): - if os.path.exists(run+'args.pkl') and \ + if (os.path.exists(run+'args.pkl') or os.path.exists(run+'args.yml')) and \ os.path.exists(run+'scib_metrics.pkl'): - args=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) - metrics=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) - data=pd.concat([args,metrics]) + if os.path.exists(run+'args.pkl'): + args_=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) + if os.path.exists(run+'args.yml'): + args_=pd.Series(yaml.safe_load(open(run+'args.yml','rb'))) + metrics_=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) + run_id = run.strip('/').rsplit('/', 1)[-1] + run_stats_ = pd.Series({'is_top': run_id in top_runs}) + data=pd.concat([args_,metrics_, run_stats_]) name=run.split('/')[-2] data.name=name res.append(data) @@ -330,6 +590,16 @@ # Param opt val for plotting - converted to str categ below res['param_opt_val_str']=res.apply( lambda x: x[x['param_opt_col']] if x['param_opt_col'] is not None else np.nan,axis=1) + + #### + res['params_opt']=np.where(res.index.str.contains('harmonypy'), + res['params_opt'].replace({'harmony_theta': 'harmonypy_theta'}), + res['params_opt']) + res['param_opt_col']=np.where(res.index.str.contains('harmonypy'), + res['param_opt_col'].replace({'harmony_theta': 'harmonypy_theta'}), + res['param_opt_col']) + res['harmonypy_theta'] = res['harmony_theta'] + #### res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True) @@ -395,14 +665,28 @@ ]+['none'], ordered=True) +# %% +ress = ress.query('testing == False') +ress.drop_duplicates(['model_parsed', 'param_parsed', 'genes_parsed', 'dataset_parsed', 'name', 'seed', 'params_opt', 'param_opt_val'], inplace=True) + # %% [markdown] # ### Metric scores # Overview of all metric results +# %% +( + ress.query('model_parsed not in @drop_models') + .groupby(['genes_parsed', 'model_parsed','param_parsed'],observed=True,sort=True + ).size().index.to_frame().reset_index(drop=True) +) + # %% # Plot model+opt_param * 
metrics+dataset -params=ress.groupby(['model_parsed','param_parsed','genes_parsed'],observed=True,sort=True +params=( + ress.query('model_parsed not in @drop_models') + .groupby(['genes_parsed', 'model_parsed','param_parsed'],observed=True,sort=True ).size().index.to_frame().reset_index(drop=True) +) nrow=params.shape[0] n_metrics=len(metric_map) ncol=ress['dataset_parsed'].nunique()*n_metrics @@ -413,10 +697,10 @@ models_parsed_ds=set(res_ds.model_parsed) params_parsed_ds=set(res_ds.param_parsed) genes_parsed_ds=set(res_ds.genes_parsed) - irow_max_ds=max([irow for irow,(model_parsed,param_parsed,genes_parsed) in params.iterrows() if - model_parsed in models_parsed_ds and - param_parsed in params_parsed_ds and - genes_parsed in genes_parsed_ds]) + irow_max_ds=max([irow for irow,row in params.iterrows() if + row.model_parsed in models_parsed_ds and + row.param_parsed in params_parsed_ds and + row.genes_parsed in genes_parsed_ds]) # Plot metric + opt param settings for icol_metric,(metric,metric_name) in enumerate(metric_map.items()): @@ -432,8 +716,11 @@ res_sub['param_opt_val_str']=\ res_sub['param_opt_val_str'].cat.remove_unused_categories() # Plot - sb.swarmplot(x=metric,y='param_opt_val_str',data=res_sub,ax=ax, - hue='param_opt_val_str',palette='cividis') + sb.swarmplot(x=metric,y='param_opt_val_str', + hue="is_top", + # hue='param_opt_val_str', + data=res_sub,ax=ax, + palette='tab10') # Make pretty ax.set(facecolor = metric_background_cmap[metric]) @@ -478,3 +765,7 @@ # %% + +# %% + +# %% diff --git a/notebooks/results/performance/performance_for_disease.py b/notebooks/results/performance/performance_for_disease.py new file mode 100644 index 0000000..0f8a656 --- /dev/null +++ b/notebooks/results/performance/performance_for_disease.py @@ -0,0 +1,769 @@ +# --- +# jupyter: +# jupytext: +# formats: ipynb,py:percent +# text_representation: +# extension: .py +# format_name: percent +# format_version: '1.3' +# jupytext_version: 1.16.3 +# kernelspec: +# display_name: csi +# language: python +# name: csi +# --- + +# %% +import pandas as pd +import numpy as np +import scanpy as sc +import pickle as pkl +import yaml +import math +import glob +import os +import itertools +from datetime import datetime + +from scipy.stats import ttest_ind +from statsmodels.stats.multitest import multipletests + +from matplotlib import rcParams +import matplotlib.pyplot as plt +import seaborn as sb +import matplotlib.colors as mcolors + +from pathlib import Path +import sys +sys.path.append('/'.join(os.getcwd().split('/')[:-2]+['eval','cleaned',''])) +from params_opt_maps import * + +# %% +import warnings +# warnings.filterwarnings(action='once') +warnings.filterwarnings('ignore') + +# %% +path_data='/home/moinfar/io/csi/' +path_names=path_data+'names_parsed/' +path_fig=path_data+'figures/' +path_tab=path_data+'tables/' + +# %% +Path(path_fig).mkdir(parents=True, exist_ok=True) +Path(path_tab).mkdir(parents=True, exist_ok=True) + +# %% +# Names +model_map={**pkl.load(open(path_names+'models.pkl','rb')), + **pkl.load(open(path_names+'models_additional.pkl','rb'))} +param_map=pkl.load(open(path_names+'params.pkl','rb')) +metric_map=pkl.load(open(path_names+'metrics.pkl','rb')) +dataset_map=pkl.load(open(path_names+'datasets.pkl','rb')) +metric_meaning_map=pkl.load(open(path_names+'metric_meanings.pkl','rb')) +metric_map_rev=dict(zip(metric_map.values(),metric_map.keys())) +dataset_map_rev=dict(zip(dataset_map.values(),dataset_map.keys())) +system_map=pkl.load(open(path_names+'systems.pkl','rb')) 
+params_opt_map=pkl.load(open(path_names+'params_opt_model.pkl','rb')) +params_opt_gene_map=pkl.load(open(path_names+'params_opt_genes.pkl','rb')) +param_opt_vals=pkl.load(open(path_names+'optimized_parameter_values.pkl','rb')) +cell_type_map=pkl.load(open(path_names+'cell_types.pkl','rb')) + +# cmap +model_cmap=pkl.load(open(path_names+'model_cmap.pkl','rb')) +obs_col_cmap=pkl.load(open(path_names+'obs_col_cmap.pkl','rb')) +metric_background_cmap=pkl.load(open(path_names+'metric_background_cmap.pkl','rb')) + +# data +dataset_path=pkl.load(open(path_names+'dataset_path.pkl','rb')) +dataset_h5ad_path=pkl.load(open(path_names+'dataset_h5ad_path.pkl','rb')) + +# %% [markdown] +# ## All Runs + +# %% +# Load data and keep relevant runs +ress=[] +for dataset,dataset_name in dataset_map.items(): + print(dataset_name) + + top_settings=pkl.load(open(f'{path_data}eval/{dataset}/integration_summary/top_settings.pkl','rb')) + top_runs = sum([v['runs'] for k, v in top_settings.items()], []) + + path_integration=f'{path_data}eval/{dataset}/integration/' + res=[] + for run in glob.glob(path_integration+'*/'): + if (os.path.exists(run+'args.pkl') or os.path.exists(run+'args.yml')) and \ + os.path.exists(run+'scib_metrics.pkl'): + if os.path.exists(run+'args.pkl'): + args_=pd.Series(vars(pkl.load(open(run+'args.pkl','rb')))) + if os.path.exists(run+'args.yml'): + args_=pd.Series(yaml.safe_load(open(run+'args.yml','rb'))) + metrics_=pd.Series(pkl.load(open(run+'scib_metrics.pkl','rb'))) + run_id = run.strip('/').rsplit('/', 1)[-1] + run_stats_ = pd.Series({ + 'is_top': run_id in top_runs, + 'run_id': run_id, + 'run_path': run, + }) + data=pd.concat([args_,metrics_, run_stats_]) + name=run.split('/')[-2] + data.name=name + res.append(data) + res=pd.concat(res,axis=1).T + + # Parse res table + + # Parse params + res['params_opt']=res.params_opt.replace(params_opt_correct_map) + res['param_opt_col']=res.params_opt.replace(param_opt_col_map) + res['param_opt_val']=res.apply( + lambda x: (x[x['param_opt_col']] if not isinstance(x[x['param_opt_col']],dict) + else x[x['param_opt_col']]['weight_end']) + if x['param_opt_col'] is not None else 0,axis=1) + # Param opt val for plotting - converted to str categ below + res['param_opt_val_str']=res.apply( + lambda x: x[x['param_opt_col']] if x['param_opt_col'] is not None else np.nan,axis=1) + + #### + res['params_opt']=np.where(res.index.str.contains('harmonypy'), + res['params_opt'].replace({'harmony_theta': 'harmonypy_theta'}), + res['params_opt']) + res['param_opt_col']=np.where(res.index.str.contains('harmonypy'), + res['param_opt_col'].replace({'harmony_theta': 'harmonypy_theta'}), + res['param_opt_col']) + res['harmonypy_theta'] = res['harmony_theta'] + #### + + res['params_opt']=pd.Categorical(res['params_opt'],sorted(res['params_opt'].unique()), True) + + # Keep relevant runs + params_opt_vals=set(params_opt_map.keys()) + res_sub=res.query('params_opt in @params_opt_vals').copy() + # Name models + res_sub['model']=res_sub.params_opt.replace(params_opt_map).astype(str) + # Models present in data but have no params opt + nonopt_models=list( + (set(params_opt_map.values()) & set(res_sub['model'].unique()))-set( + [model for models,params_vals in param_opt_vals for model in models])) + # Query: a.) model not optimized OR b.) 
model belongs to one of the models that have + # optimized params and the optimized param is within list of param values + res_query=[f'model in {nonopt_models}'] + # Models with opt params + for models,params_vals in param_opt_vals: + res_query_sub=[] + # Param value in vals to keep if the param was optimised + for param,vals in params_vals: + # For param check if it was opt in data setting as else there will be no col for it + if param in res_sub.columns: + res_query_sub.append(f'({param} in {vals} & "{param}"==param_opt_col)') + # Only add to the query the models for which any param was opt + if len(res_query_sub)>0: + res_query_sub='(('+' | '.join(res_query_sub)+f') & model in {models})' + res_query.append(res_query_sub) + res_query=' | '.join(res_query) + res_sub=res_sub.query(res_query).copy() + + # Add pretty model names + res_sub['model_parsed']=pd.Categorical( + values=res_sub['model'].map(model_map), + categories=model_map.values(), ordered=True) + # Add pretty param names + res_sub['param_parsed']=pd.Categorical( + values=res_sub['param_opt_col'].map(param_map), + categories=param_map.values(), ordered=True) + # Add gene setting names + res_sub['genes_parsed']=pd.Categorical( + values=res_sub['params_opt'].map(params_opt_gene_map), + categories=list(dict.fromkeys(params_opt_gene_map.values())), ordered=True) + + # display(res_sub.groupby(['model_parsed','param_parsed','genes_parsed'],observed=True).size()) + + # Store + res_sub['dataset_parsed']=dataset_name + ress.append(res_sub) + +# Combine results of all datasets +ress=pd.concat(ress) + +# Order datasets +ress['dataset_parsed']=pd.Categorical( + values=ress['dataset_parsed'], + categories=list(dataset_map.values()), ordered=True) + +# Parse param values for plotting +ress['param_opt_val_str']=pd.Categorical( + values=ress['param_opt_val_str'].fillna('none').astype(str), + categories=[str(i) for i in + sorted([i for i in ress['param_opt_val_str'].unique() if not np.isnan(i)]) + ]+['none'], + ordered=True) + +# %% [markdown] +# ### Metric scores + +# %% +model_cmap + +# %% +model_order = [ + "VAMP+CYC", "CYC", "VAMP", + "cVAE", "scVI", + "Harmony", + "Seaurat", "Harmony-py", "GLUE", + "SATURN", "SATURN-CT" +] +drop_models = [ + "SysVI", + "SysVI-stable", +] + +# %% +ress.query('model_parsed not in @drop_models', inplace=True) + +# %% + +# %% [markdown] +# ## Add disease annotations for pancreas data + +# %% +mm_pancreas_adata = sc.read(os.path.expanduser("~/data/cxg/f6044f53-41de-4654-8437-0d22b17dfd31.h5ad"), backed='r') +hs_pancreas_adata = sc.read("/om2/user/khrovati/data/cross_system_integration/pancreas_conditions_MIA_HPAP2/combined_orthologuesHVG.h5ad", backed='r') +mm_pancreas_adata, hs_pancreas_adata + +# %% + +# %% [markdown] +# ## Calculate metrics for different setups + +# %% +setup_dicts = { + 'test_1': { + 'dataset_name': 'Retina Organoid-Tissue', + 'description': 'Compare organoid with periphery (similar) and fovea (dissimilar).', + 'description_short': 'Mueller cell\nOrganoid vs \nperiphery (+) and fovea (-).', + 'embed_subset': lambda embed: embed[embed.obs['cell_type'].astype(str) == 'Mueller cell'], + 'group_similar': lambda embed_subset: embed_subset[(embed_subset.obs['system'].astype(str) == '0') | (embed_subset.obs['region'].astype(str) == 'periphery')], + 'group_dissimilar': lambda embed_subset: embed_subset[(embed_subset.obs['system'].astype(str) == '0') | (embed_subset.obs['region'].astype(str) == 'fovea')], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'region'], + }, + 'test_2': 
{ + 'dataset_name': 'Pancreas Mouse-Human', + 'description': 'Compare mouse T2D with T2D human (similar) and normal human (dissimilar).', + 'description_short': 'Beta cells\nMouse T2D vs \nhuman T2D (+) and healthy (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 'beta'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_Diabetes Status'].astype(str) == 'type 2 diabetes mellitus')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_Diabetes Status'].astype(str) == 'T2D')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_Diabetes Status'].astype(str) == 'type 2 diabetes mellitus')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_Diabetes Status'].astype(str) == 'ND')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_Diabetes Status', 'hs_Diabetes Status'], + }, + 'test_3': { + 'dataset_name': 'Pancreas Mouse-Human', + 'description': 'Compare T2D human with T2D mouse (similar) and normal mouse (dissimilar).', + 'description_short': 'Beta cells\nHuman T2D vs \nmouse T2D (+) and healthy (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 'beta'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_Diabetes Status'].astype(str) == 'type 2 diabetes mellitus')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_Diabetes Status'].astype(str) == 'T2D')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_Diabetes Status'].astype(str) == 'normal')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_Diabetes Status'].astype(str) == 'T2D')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_Diabetes Status', 'hs_Diabetes Status'], + }, + 'test_4': { + 'dataset_name': 'Skin Mouse-Human', + 'description': 'Compare mouse epidermolysis bullosa with human psoriasis (similar) and normal human (dissimilar).', + 'description_short': 'Fibroblasts\nMouse disease vs \nhuman disease (+) and healthy (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 'Fibroblast'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'epidermolysis bullosa')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'psoriasis')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'epidermolysis bullosa')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'normal')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_disease', 'hs_disease'], + }, + 'test_5': { + 'dataset_name': 'Skin Mouse-Human', + 'description': 'Compare human psoriasis with mouse epidermolysis bullosa (similar) and normal mouse (dissimilar).', + 'description_short': 'Fibroblasts\nHuman disease vs \nmouse disease (+) and healthy (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 
'Fibroblast'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'epidermolysis bullosa')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'psoriasis')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'normal')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'psoriasis')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_disease', 'hs_disease'], + }, + 'test_6': { + 'dataset_name': 'Limited Skin Mouse-Human', + 'description': 'Compare normal human with normal mouse (similar) and mouse epidermolysis bullosa (dissimilar).', + 'description_short': 'Fibroblasts\nHuman healthy vs \nmouse healthy (+) and disease (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 'Fibroblast'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'normal')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'normal')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'epidermolysis bullosa')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'normal')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_disease', 'hs_disease'], + }, + 'test_7': { + 'dataset_name': 'Skin Mouse-Human', + 'description': 'Compare normal human with normal mouse (similar) and mouse epidermolysis bullosa (dissimilar).', + 'description_short': 'Fibroblasts\nHuman healthy vs \nmouse healthy (+) and disease (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 'Fibroblast'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'normal')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'normal')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'epidermolysis bullosa')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'normal')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_disease', 'hs_disease'], + }, + 'test_8': None, # here there was a bug + 'test_9': { + 'dataset_name': 'Skin Mouse-Human', + 'description': 'Compare human psoriasis with mouse epidermolysis bullosa (similar) and normal mouse (dissimilar).', + 'description_short': 'Fibroblasts\nHuman disease vs \nmouse disease (+) and healthy (-)', + 'embed_subset': lambda embed: embed[embed.obs['cell_type_eval'].astype(str) == 'Fibroblast'], + 'group_similar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'epidermolysis bullosa')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'psoriasis')) + ], + 'group_dissimilar': lambda embed_subset: embed_subset[ + ((embed_subset.obs['system'].astype(str) == '0') & (embed_subset.obs['mm_disease'].astype(str) == 'normal')) | + ((embed_subset.obs['system'].astype(str) == '1') & (embed_subset.obs['hs_disease'].astype(str) == 'psoriasis')) + ], + 'ignore_models': ['Non-integrated', 'Harmony'], + 'plot_cols': ['system', 'mm_disease', 'hs_disease'], + }, +}
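+
+# %%
+# Illustrative check on a toy AnnData (hypothetical values, not real data) of
+# how one setup entry is consumed below: subset to the evaluated cell type,
+# then build the similar and dissimilar groups that are each scored with iLISI.
+_demo = sc.AnnData(np.zeros((4, 2)))
+_demo.obs['cell_type_eval'] = ['beta', 'beta', 'alpha', 'beta']
+_demo.obs['system'] = ['0', '1', '0', '1']
+_demo.obs['mm_Diabetes Status'] = ['type 2 diabetes mellitus', 'none', 'normal', 'none']
+_demo.obs['hs_Diabetes Status'] = ['none', 'T2D', 'none', 'ND']
+_sub = setup_dicts['test_2']['embed_subset'](_demo)
+print(setup_dicts['test_2']['group_similar'](_sub).n_obs,     # 2: mouse T2D + human T2D
+      setup_dicts['test_2']['group_dissimilar'](_sub).n_obs)  # 2: mouse T2D + human ND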
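+
+# %%
+# Hedged sanity check (random toy data, not a benchmark run) of the iLISI
+# pipeline used above: embedding neighbors are turned into a kNN distance
+# graph that feeds ilisi_knn; a fully mixed 'system' label should score near
+# the maximum.
+_toy = sc.AnnData(np.random.default_rng(0).normal(size=(200, 10)).astype(np.float32))
+_toy.obs['system'] = ['0', '1'] * 100
+_toy.obsm['method'] = _toy.X.copy()
+_ng = calculate_neighbors(_toy, obsm_emb_key='method', k=30)
+_dist, _conn = sc.neighbors._compute_connectivities_umap(
+    _ng.indices, _ng.distances, _ng.indices.shape[0], n_neighbors=_ng.indices.shape[1])
+print(ilisi_knn(_dist, _toy.obs['system'].values))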
 + +# %% +results_list = [] + +# %% +for setup_name, setup_dict in setup_dicts.items(): + print(setup_name, setup_dict) + if setup_dict is None: + continue + + dataset_name = setup_dict['dataset_name'] + fn_embed_subset = setup_dict['embed_subset'] + fn_group_similar = setup_dict['group_similar'] + fn_group_dissimilar = setup_dict['group_dissimilar'] + ignore_models = setup_dict['ignore_models'] + plot_cols = setup_dict['plot_cols'] + + ress_subset = ress[ress['dataset_parsed'] == setup_dict['dataset_name']] + for i, run_info in ress_subset.sample(frac=1).iterrows(): + print(run_info['run_path']) + print(datetime.now()) + + (Path(path_data) / "performance_disease" / run_info['dataset_parsed']).mkdir(parents=True, exist_ok=True) + result_filename = Path(path_data) / "performance_disease" / run_info['dataset_parsed'] / f"results_{run_info['run_id']}_{setup_name}.pkl" + if not result_filename.exists(): + print(f"File does not exist: {result_filename}") + continue + + print(">>", result_filename) + with open(result_filename, "rb") as f: + result_dict = pkl.load(f) + print(result_dict) + ress_row = ress.loc[result_dict['run_id']] + result_dict = { + **result_dict, + **ress_row.to_dict(), + } + + results_list.append(result_dict) + +# %% + +# %% +results_df = pd.DataFrame(results_list) +results_df.to_csv(os.path.expanduser("~/tmp/sysvi_paired_comparison_lisi_scores__results_df.csv")) + + +# %% +results_df + +# %% + +# %% + +# %% + +# %% + +# %% [markdown] +# # Plot results + +# %% +import pandas as pd +results_df = pd.read_csv(os.path.expanduser("~/tmp/sysvi_paired_comparison_lisi_scores__results_df.csv")) +results_df['model_parsed'] = pd.Categorical(values=results_df['model_parsed'], categories=model_map.values(), ordered=True) +results_df + +# %% +results_df['setup_name'].unique() + +# %% +results_df = results_df.query('testing == False') +results_df.drop_duplicates(['setup_name', 'model_parsed', 'param_parsed', 'genes_parsed', 'dataset_parsed', 'name', 'seed', 'params_opt', 'param_opt_val'], inplace=True) + +# %% +keep_setups = ['test_1', 'test_2', 'test_3', 'test_7', 'test_9', 'test_6'] +results_df = results_df[results_df['setup_name'].astype(str).isin(keep_setups)].copy() +results_df['setup_name'] = pd.Categorical(values=results_df['setup_name'], categories=keep_setups, ordered=True) +results_df['setup_name'].unique() + +# %% + +# %% +( + results_df.query('model_parsed not in @drop_models') + .query('genes_parsed == "OTO"') + .groupby(['model_parsed','param_parsed','genes_parsed'],observed=True,sort=True + ).size().index.to_frame().reset_index(drop=True) +) + +# %% +metric_meaning_map_ = { + 'ilisi_similar': 'ilisi Similar', + 'ilisi_dissimilar': 'ilisi Dissimilar', +} + +# %% +# Plot model+opt_param * metrics+dataset +params=( + results_df + .query('model_parsed not in @drop_models') + .query('genes_parsed == "OTO"') + .groupby(['model_parsed','param_parsed','genes_parsed'],observed=True,sort=True + ).size().index.to_frame().reset_index(drop=True) +) +nrow=params.shape[0] +n_metrics=2 +ncol=results_df['setup_name'].nunique()*n_metrics +fig,axs=plt.subplots(nrow,ncol,figsize=(ncol*1.9,nrow*2),sharex='col',sharey='row') +for icol_ds, (setup_name,res_ds) in enumerate(results_df.groupby('setup_name')): + # Max row for ds - some models not in all ds + models_parsed_ds=set(res_ds.model_parsed) + params_parsed_ds=set(res_ds.param_parsed) + genes_parsed_ds=set(res_ds.genes_parsed) + irow_max_ds=max([irow for irow,(model_parsed,param_parsed,genes_parsed) in params.iterrows() if + model_parsed in models_parsed_ds and + param_parsed in 
params_parsed_ds and + genes_parsed in genes_parsed_ds]) + + # Plot metric + opt param settings + for icol_metric,(metric,metric_name) in enumerate([('ilisi_similar', 'ilisi Similar'), + ('ilisi_dissimilar', 'ilisi Dissimilar'),]): + icol=icol_ds*n_metrics+icol_metric + for irow,(_,param_data) in enumerate(params.iterrows()): + dataset_name = setup_dicts[setup_name]['dataset_name'] + ax=axs[irow,icol] + res_sub=res_ds.query( + f'model_parsed=="{param_data.model_parsed}" & '+\ + f'param_parsed=="{param_data.param_parsed}" & '+\ + f'genes_parsed=="{param_data.genes_parsed}"') + if res_sub.shape[0]>0: + res_sub=res_sub.copy() + res_sub['param_opt_val_str']=\ + res_sub['param_opt_val_str'].astype('category').cat.remove_unused_categories() + # Plot + sb.swarmplot(x=metric,y='param_opt_val_str', + hue="is_top", + # hue='param_opt_val_str', + data=res_sub,ax=ax, + palette='tab10') + + # Make pretty + ax.set(facecolor = '#F9F3F3') + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + ax.grid(axis='x', linestyle='--', color='gray') + ax.get_legend().remove() + if irow!=irow_max_ds: + ax.set_xlabel('') + else: + # Add xaxis + # Must turn label to visible as sb will set it off if sharex + # Must reset ticks as will be of due to sharex + ax.set_xlabel(metric_name,visible=True) + ax.xaxis.set_ticks_position('bottom') + if irow==0: + title='' + if icol%2==0: + title=title+dataset_name+'\n'+setup_dicts[setup_name]['description_short']+"\n\n" + ax.set_title(title+metric_meaning_map_[metric]+'\n',fontsize=10) + if icol==0: + ax.set_ylabel( + param_data.model_parsed+' '+param_data.genes_parsed+'\n'+\ + param_data.param_parsed+'\n') + else: + ax.set_ylabel('') + else: + ax.remove() + + +plt.subplots_adjust(wspace=0.2,hspace=0.2) +fig.set(facecolor = (0,0,0,0)) + +# Turn off tight layout as it messes up spacing if adding xlabels on intermediate plots +#fig.tight_layout() + +# Save +plt.savefig(path_fig+'performance-disease-score_all-swarm.pdf', + dpi=300,bbox_inches='tight') +plt.savefig(path_fig+'performance-disease-score_all-swarm.png', + dpi=300,bbox_inches='tight') + +# %% + +# %% + +# %% + +# %% +top_results_df = ( + results_df + .query('is_top == True') + .query('model_parsed not in @drop_models') +).copy() +top_results_df['model_parsed'] = pd.Categorical(values=top_results_df['model_parsed'], categories=[c for c in model_map.values() if c in top_results_df['model_parsed'].unique().tolist()], ordered=True) + +# %% +import matplotlib.pyplot as plt +import seaborn as sb +import matplotlib.cm as cm +from matplotlib.lines import Line2D + + +# Define xlims dynamically for uniform axis limits +xlims = {} +metrics_list = [("ilisi_similar", "iLISI similar"), ("ilisi_dissimilar", "iLISI dissimilar")] + +for metric, _ in metrics_list: + mins = [] + maxs = [] + for _, subset_df in top_results_df.groupby('setup_name'): + mins.append(subset_df[metric].min()) + maxs.append(subset_df[metric].max()) + x_min = min(mins) + x_max = max(maxs) + x_buffer = (x_max - x_min) * 0.15 + xlims[metric] = (x_min - x_buffer, x_max + x_buffer) + +# Setup figure and axes +n_rows = len(top_results_df["setup_name"].unique()) +n_cols = len(metrics_list) + 1 # +1 for the combined scatter plot +fig, axs = plt.subplots(n_rows, n_cols, figsize=(3.4 * n_cols, n_rows * 3.4), + sharey=False, sharex=False) + +# Define colormap for this setup +unique_models = top_results_df["model_parsed"].cat.categories +cmap = cm.get_cmap("tab20", len(unique_models)) +model_cmap = {model: cmap(i) for i, model in 
enumerate(unique_models)} +top_results_df['This paper'] = top_results_df['model_parsed'].isin(['CYC', 'VAMP', 'VAMP+CYC']) + +# Generate plots +for row, (setup_name, subset_df) in enumerate(top_results_df.groupby('setup_name')): + x_min_ = min(list(subset_df['ilisi_similar']) + list(subset_df['ilisi_dissimilar'])) + x_max_ = max(list(subset_df['ilisi_similar']) + list(subset_df['ilisi_dissimilar'])) + x_buffer_ = (x_max_ - x_min_) * 0.1 + xlims_ = (x_min_ - x_buffer_, x_max_ + x_buffer_) + + # Metric-specific plots + for col, (metric, metric_title) in enumerate(metrics_list): + ax = axs[row, col] + means = subset_df.groupby('model_parsed')[metric].mean().reset_index() + + sb.swarmplot(y='model_parsed', x=metric, data=subset_df, ax=ax, + hue='model_parsed', palette=model_cmap, s=5, zorder=1, edgecolor='k', linewidth=0.25) + sb.scatterplot(y='model_parsed', x=metric, data=means, ax=ax, + color='k', s=150, marker='|', zorder=2, edgecolor='k', linewidth=2.5) + + # Formatting + ax.set_xlim(xlims_) + ax.grid(True, which='major', axis='x', linestyle='--', linewidth=0.5) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + if row == 0: + ax.set_title(f"{metric_title}", fontsize=10) + if col == 0: + ax.set_ylabel( + setup_dicts[setup_name]["dataset_name"].replace(" ", "\n") + "\n" + + setup_dicts[setup_name]['description_short']+"\n" + ) + ax.set_xlabel('') + else: + ax.set_ylabel('') + ax.set_xlabel('') + ax.tick_params(axis='y', which='both', labelleft=False) + ax.get_legend().remove() + + # Combined scatter plot + ax = axs[row, -1] + scatter = sb.scatterplot(data=subset_df, x="ilisi_similar", y="ilisi_dissimilar", style="This paper", style_order=[0, 1], + hue="model_parsed", palette=model_cmap, s=10, ax=ax, edgecolor='k', linewidth=0.3, legend=False) + + subset_df_avg=subset_df.groupby('model_parsed')[['ilisi_similar','ilisi_dissimilar','This paper']].mean().reset_index().rename({'model_parsed':'Model'},axis=1) + subset_df_avg['This paper']=subset_df_avg['This paper'].map({1:'Yes',0:'No'}) + scatter = sb.scatterplot(data=subset_df_avg, x="ilisi_similar", y="ilisi_dissimilar", style="This paper", style_order=['No', 'Yes'], + hue="Model", palette=model_cmap, s=30, ax=ax, edgecolor='k', linewidth=0.3) + + ax.plot([xlims_[0], xlims_[1]], + [xlims_[0], xlims_[1]], 'k--', linewidth=0.8) + ax.set_xlim(xlims_) + ax.set_ylim(xlims_) + ax.set_title("", fontsize=10) + ax.grid(True, linestyle="--", alpha=0.3) + ax.set_xlabel("iLISI similar") + ax.set_ylabel("iLISI Dissimilar") + ax.get_legend().remove() + +# Add legend outside the plots +handles, labels = scatter.get_legend_handles_labels() +# fig.legend(handles, labels, loc='center left', bbox_to_anchor=(1.02, 0.5), title="Models", fontsize=8) +handles, labels = plt.gca().get_legend_handles_labels() + +points = [ + Line2D([0], [0], label='Style guide', marker='s', markersize=0, markeredgecolor='black', markerfacecolor='k', linestyle=''), + Line2D([0], [0], label='Run', marker='s', markersize=3, markeredgecolor='black', markerfacecolor='k', linestyle=''), + Line2D([0], [0], label='Average', marker='s', markersize=7, markeredgecolor='black', markerfacecolor='k', linestyle=''), +] +handles.extend(points) + +plt.legend(handles=handles, loc='center left', bbox_to_anchor=(1.03, 4.5), fontsize=8) + +# Final adjustments +plt.subplots_adjust(wspace=0.3, hspace=0.3) +plt.savefig(path_fig + "performance_disease_metrics_combined_scatter.pdf", dpi=300, bbox_inches="tight") +plt.savefig(path_fig + 
"performance_disease_metrics_combined_scatter.png", dpi=300, bbox_inches="tight") +plt.show() + + +# %% + +# %% + +# %% + +# %% + +# %% + +# %% diff --git a/notebooks/results/performance/performance_vamp.py b/notebooks/results/performance/performance_vamp.py index ab9d5f8..f9d5ecd 100644 --- a/notebooks/results/performance/performance_vamp.py +++ b/notebooks/results/performance/performance_vamp.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -40,7 +40,7 @@ from params_opt_maps import * # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' +path_data='/home/moinfar/io/csi/' path_names=path_data+'names_parsed/' path_fig=path_data+'figures/' path_tab=path_data+'tables/' diff --git a/notebooks/results/performance/retina_bio_analysis.py b/notebooks/results/performance/retina_bio_analysis.py index 092c39c..868c70f 100644 --- a/notebooks/results/performance/retina_bio_analysis.py +++ b/notebooks/results/performance/retina_bio_analysis.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -22,13 +22,18 @@ import glob import os +import itertools from matplotlib import rcParams import matplotlib.pyplot as plt import seaborn as sb import matplotlib.colors as mcolors # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' +import warnings +warnings.filterwarnings("ignore") + +# %% +path_data='/home/moinfar/io/csi/' path_names=path_data+'names_parsed/' path_fig=path_data+'figures/' path_integration=path_data+'/eval/retina_adult_organoid/' @@ -71,6 +76,9 @@ **pkl.load(open(path_summaries_mueller+'density_scgen_example.pkl','rb'))} adata=sc.read(path_summaries_mueller+'adata_markers.h5ad') +# %% +embeds = {k:v for k,v in embeds.items() if 'sysvi' not in k} + # %% # Density & gene expression UMAPs across models genes=['RHCG'] @@ -134,6 +142,85 @@ plt.savefig(path_fig+'retina_bio_analysis-density_expr-umap.png', dpi=300,bbox_inches='tight') +# %% +conditions_map + +# %% +# Density & gene expression UMAPs across models +genes = ['RHCG'] +cols = list(conditions_map) + [f"{c}-{g}" for c,g in itertools.product(conditions_map.keys(), genes)] +ncol = len(cols) +nrow = len(embeds) +fig, axs = plt.subplots(nrow, ncol, figsize=(2 * ncol, 2 * nrow)) +cmaps_keep = [] + +for i, (model, model_name) in enumerate([(k, v) for k, v in model_map.items() if k in embeds]): + embed = embeds[model] + for j, col in enumerate(cols): + ax = axs[i, j] + adata.obsm['X_umap'] = embed[adata.obs_names, :].obsm['X_umap'] + + if col in list(conditions_map.keys()): + sc.pl.umap(embed, ax=ax, show=False, frameon=False) + sc.pl.embedding_density( + embed[embed.obs.system_region == col, :], fg_dotsize=10, + basis='umap', key='umap_density_system_region', title='', + ax=ax, show=False, frameon=False + ) + else: + condition, gene=col.split("-") + subset = adata[adata.obs["system_region"] == condition] + sc.pl.umap( + subset, color=gene, gene_symbols='feature_name', + title=f'{condition} - RHCG', ax=ax, show=False, frameon=False, + vmin=adata[:, adata.var.feature_name == gene].X.A.flatten().min(), + vmax=adata[:, adata.var.feature_name == gene].X.A.flatten().max(), + ) + + # Adjust axis labels for these plots + if i == 0: + ax.set_title(f'{condition}', fontsize=10) + + # Make pretty + if j == 0: + ax.axis('on') + ax.tick_params( + 
top='off', bottom='off', left='off', right='off', + labelleft='on', labelbottom='off') + ax.set_ylabel(model_name, fontsize=10) + ax.set_xlabel('') + ax.set(frame_on=False) + if i == 0: + ax.set_title( + conditions_map[col] if col in conditions_map else col, + fontsize=10 + ) + + # Edit cmaps + cmaps = [a for a in fig.axes if a.get_label() == ''] + if i == 0 and j >= len(conditions_map) - 1: + cmap = cmaps[-1] + cmaps_keep.append(cmap) + cmap.set_title('Expression\n' if '-' in col else 'Cell\ndensity\n', fontsize=10) + else: + for cmap in [cmap for cmap in cmaps if cmap not in cmaps_keep]: + cmap.remove() + +# Adjust colorbars and layout +plt.subplots_adjust(wspace=0.01, hspace=0.01) +for idx, cmap in enumerate(cmaps_keep): + adjust = 0.25 if idx == 0 else 0.15 + pos = cmap.get_position() + pos.x0 = pos.x0 + adjust + pos.x1 = pos.x1 + adjust + pos.y1 = pos.y1 - 0.03 + pos.y0 = pos.y0 - 0.03 + cmap.set_position(pos) + +plt.show() + +# %% + # %% # Plot for subset of models genes=['RHCG'] diff --git a/notebooks/results/performance/scaling_embed.py b/notebooks/results/performance/scaling_embed.py index ec99bab..ff1952c 100644 --- a/notebooks/results/performance/scaling_embed.py +++ b/notebooks/results/performance/scaling_embed.py @@ -6,7 +6,7 @@ # extension: .py # format_name: percent # format_version: '1.3' -# jupytext_version: 1.14.5 +# jupytext_version: 1.16.3 # kernelspec: # display_name: csi # language: python @@ -31,7 +31,7 @@ from matplotlib.lines import Line2D # %% -path_data='/om2/user/khrovati/data/cross_system_integration/' +path_data='/home/moinfar/io/csi/' path_names=path_data+'names_parsed/' path_fig=path_data+'figures/' @@ -164,7 +164,8 @@ ordered=True) # %% -ress.groupby(['dataset_parsed','model_parsed','param_parsed','genes_parsed'],observed=True).size() +ress = ress.query('testing == False') +ress.drop_duplicates(['model_parsed', 'param_parsed', 'genes_parsed', 'dataset_parsed', 'name', 'seed', 'params_opt', 'param_opt_val'], inplace=True) # %% params=ress.groupby(['model_parsed','param_parsed','genes_parsed'],observed=True,sort=True @@ -325,57 +326,74 @@ # Dimension std swarm params=ress_sub.groupby(['model_parsed','param_parsed','genes_parsed'],observed=True,sort=True ).size().index.to_frame().reset_index(drop=True) -nrow=params.shape[0] +nrow=params.shape[0]*2 ncol=ress_sub['dataset_parsed'].nunique() -fig,axs=plt.subplots(nrow,ncol,figsize=(ncol*3,nrow*1.5),sharex=False,sharey=True) +fig,axs=plt.subplots(ncol,nrow,figsize=(nrow*3, ncol*1.5),sharex=False,sharey=False) for icol, (dataset_name,res_ds) in enumerate(ress_sub.groupby('dataset_parsed')): for irow,(_,param_data) in enumerate(params.iterrows()): - res_sub=res_ds.query( - f'model_parsed=="{param_data.model_parsed}" & '+\ - f'param_parsed=="{param_data.param_parsed}" & '+\ - f'genes_parsed=="{param_data.genes_parsed}"') - res_sub=res_sub.copy() - res_sub['param_opt_val_str']=\ - res_sub['param_opt_val_str'].cat.remove_unused_categories() - - # Data for plotting - # Dimensions ordered by std - plot=[] - for run,param_val in res_sub['param_opt_val_str'].items(): - embed=embeds[run] - stds=embed.X.std(axis=0) - dims=np.argsort(stds)[::-1] - for idim,dim in enumerate(dims): - plot.append({ - 'feature':idim+1, - param_data.param_parsed:param_val, - 'std':stds[dim] - }) - plot=pd.DataFrame(plot) - plot['feature']=pd.Categorical( - values=plot['feature'].astype(str), - categories=[str(i) for i in sorted(plot['feature'].unique())], - ordered=True) - plot[param_data.param_parsed]=pd.Categorical( - 
values=plot[param_data.param_parsed], - categories=res_sub['param_opt_val_str'].cat.categories, - ordered=True) - - # Plot - ax=axs[irow,icol] - sb.swarmplot(x=param_data.param_parsed,y='std',data=plot, ax=ax,s=2, - c='k',linewidth=0) - - # Make pretty - ax.set(facecolor = (0,0,0,0)) - ax.spines['top'].set_visible(False) - ax.spines['right'].set_visible(False) - if icol==0: - ax.set_ylabel(param_data.model_parsed+'\nFeature std') - else: - ax.set_ylabel('',visible=False) - if irow==0: - ax.set_title(dataset_name+'\n',fontsize=10) + for iagg, agg in enumerate(['std', 'max-abs']): + res_sub=res_ds.query( + f'model_parsed=="{param_data.model_parsed}" & '+\ + f'param_parsed=="{param_data.param_parsed}" & '+\ + f'genes_parsed=="{param_data.genes_parsed}"') + res_sub=res_sub.copy() + res_sub['param_opt_val_str']=\ + res_sub['param_opt_val_str'].cat.remove_unused_categories() + + # Data for plotting + # Dimensions ordered by std + plot=[] + for run,param_val in res_sub['param_opt_val_str'].items(): + embed=embeds[run] + agg_std=embed.X.std(axis=0) + agg_mean=embed.X.mean(axis=0) + agg_abs_mean=np.abs(embed.X).mean(axis=0) + agg_max=np.abs(embed.X).max(axis=0) + dims=np.argsort(agg_std)[::-1] + for idim,dim in enumerate(dims): + plot.append({ + 'feature':idim+1, + param_data.param_parsed:param_val, + 'std':agg_std[dim], + 'mean':agg_mean[dim], + 'mean<0.1':agg_abs_mean[dim] < 0.1, + 'max-abs':agg_max[dim], + }) + plot=pd.DataFrame(plot) + plot['feature']=pd.Categorical( + values=plot['feature'].astype(str), + categories=[str(i) for i in sorted(plot['feature'].unique())], + ordered=True) + plot[param_data.param_parsed]=pd.Categorical( + values=plot[param_data.param_parsed], + categories=res_sub['param_opt_val_str'].cat.categories, + ordered=True) + plot['mean'] = plot['mean'].astype(float) + + # Plot + ax=axs[icol, irow * 2 + iagg] + # ,hue='mean', palette='vlag' -> visually ugly + sb.swarmplot(x=param_data.param_parsed,y=agg, hue='mean<0.1', data=plot, ax=ax,s=2, + c='k',linewidth=0) + + def rreplace(s, old, new, occurrence=1): + li = s.rsplit(old, occurrence) + return new.join(li) + + # Make pretty + ax.set(facecolor = (0,0,0,0)) + ax.spines['top'].set_visible(False) + ax.spines['right'].set_visible(False) + if icol==0: + ax.set_title(param_data.model_parsed+f'\nFeature {agg}') + else: + ax.set_title('',visible=False) + if irow==0 and iagg==0: + ax.set_ylabel(rreplace(dataset_name, ' ', '\n')+'\n',fontsize=10) + if icol!=0 or irow * 2 + iagg<3: + ax.get_legend().remove() + else: + ax.legend(title='mean(abs(dim))<0.1', frameon=False, bbox_to_anchor=(1.05,0.95)) fig.set(facecolor = (0,0,0,0)) fig.tight_layout()
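+
+# %%
+# Minimal standalone sketch (toy data, hypothetical) of the per-dimension
+# summaries plotted above: latent dimensions are ranked by their std, and
+# dimensions with mean(abs(dim)) < 0.1 are flagged as near-collapsed.
+rng = np.random.default_rng(0)
+X_toy = rng.normal(size=(100, 4)) * np.array([1.0, 0.5, 0.1, 0.01])
+stds_toy = X_toy.std(axis=0)
+order_toy = np.argsort(stds_toy)[::-1]            # most variable dimension first
+collapsed_toy = np.abs(X_toy).mean(axis=0) < 0.1  # True for near-collapsed dims
+print(order_toy, collapsed_toy)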