diff --git a/docs/instructions.rst b/docs/instructions.rst
index cf9eeb11..3e0b1408 100644
--- a/docs/instructions.rst
+++ b/docs/instructions.rst
@@ -20,9 +20,9 @@ Installation
- The whole pipeline was designed and tested on Linux systems
Before you can set up SyConn, ensure that the
-`conda `__
-package manager is installed on your system. Then you can install SyConn
-and all of its dependencies into a new conda
+`mamba `__
+package manager is installed on your system. Then you can install SyConn and
+all of its dependencies into a new conda
`environment `__
named “syconn2” by running:
@@ -30,8 +30,8 @@ named “syconn2” by running:
git clone https://github.com/StructuralNeurobiologyLab/SyConn
cd SyConn
- conda env create -f environment.yml -n syconn2 python=3.7
- conda activate syconn2
+ mamba env create -n syconn2 -f environment.yml
+ mamba activate syconn2
pip install -e .
The last command will install SyConn in
@@ -43,12 +43,6 @@ command with:
pip install .
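+
+To verify the installation, you can run a quick import check from the
+activated environment, e.g.:
+::
+
+ python -c "import syconn; print(syconn.__file__)"
+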
-To update the environment, e.g. if the environment file changed, use:
-
-::
-
- conda env update --name syco --file environment.yml --prune
-
If you encounter
::
@@ -207,6 +201,10 @@ After initialization of the SDs (cell and sub-cellular structures, step
SyConn KNOSSOS viewer
---------------------
+This setup assumes that you are running Linux (or WSL on Windows). If the
+required packages are not installed, you will need ``sudo`` rights or have
+to ask your system administrator to install them for you.
+
The following packages have to be available in the system’s python2
interpreter (will differ from the conda environment):
@@ -214,25 +212,54 @@ interpreter (will differ from the conda environment):
- lz4
- requests
+One approach is to install them via ``pip``:
+::
+
+ wget -P ~/.local/lib https://bootstrap.pypa.io/pip/2.7/get-pip.py
+ python2 ~/.local/lib/get-pip.py --user
+ python2 -m pip install numpy requests lz4
+
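+You can check that the packages import correctly from the system
+interpreter with, e.g.:
+::
+
+ python2 -c "import numpy, lz4, requests"
+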
In order to inspect the resulting data via the SyConnViewer
KNOSSOS-plugin follow these steps:
- Wait until ``start.py`` finished. For starting the server manually
- run ``syconn.server --working_dir=`` which executes
- ``syconn/kplugin/server.py`` and allows to visualize the analysis
+ run ``syconn.server --working_dir=`` in the syconn conda environment
+ which executes ``syconn/analysis/server.py`` and allows you to visualize the analysis
results of the working directory at (````) in KNOSSOS. The
server address and port will be printed.
-- Download and run the nightly build of KNOSSOS
- (https://github.com/knossos-project/knossos/releases/tag/nightly)
+- Download and run version 5.1 of KNOSSOS
+ (https://github.com/knossos-project/knossos/releases/tag/v5.1)
+ ::
+
+ wget https://github.com/knossos-project/knossos/releases/download/v5.1/linux.KNOSSOS-5.1.AppImage
+ chmod u+x linux.KNOSSOS-5.1.AppImage
+ ./linux.KNOSSOS-5.1.AppImage
+
+ Possible pitfalls:
+ If KNOSSOS fails with ``libpython2.7.so.1.0: cannot open shared object file: No such file or directory``,
+ you need to install the ``libpython2.7`` package on your system:
+ ::
+
+ sudo apt install libpython2.7
+
+ If the AppImage complains about missing ``fusermount``, you need to install it (e.g. on Ubuntu 22.04):
+ ::
+
+ sudo apt install libfuse2
+
+ If the AppImage complains about ``error while loading shared libraries: libGL.so.1: cannot open shared object file: No such file or directory``, you need to `install <https://stackoverflow.com/a/68666500>`__ it:
+ ::
+
+ sudo apt install libgl1
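+
+ If FUSE cannot be installed at all, the AppImage can usually still be
+ extracted and run directly (a workaround that assumes a standard type-2
+ AppImage):
+ ::
+
+ ./linux.KNOSSOS-5.1.AppImage --appimage-extract
+ ./squashfs-root/AppRun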
- In KNOSSOS -> File -> Choose Dataset -> browse to your working
directory and open ``knossosdatasets/seg/mag1/knossos.conf`` with
enabled ‘load_segmentation_overlay’ (at the bottom of the dialog).
- Then go to Scripting (top row) -> Run file -> browse to
- ``syconn/kplugin/syconn_knossos_viewer.py``, open it and enter the
- port and address of the syconn server.
+ ``syconn/analysis/syconn_knossos_viewer.py``, open it and enter the
+ port and address of the syconn server as printed in the terminal.
- After the SyConnViewer window has opened, the selection of
segmentation fragments in the slice-viewports (exploration mode) or
diff --git a/environment.yml b/environment.yml
index 4bc7f889..c958cc35 100755
--- a/environment.yml
+++ b/environment.yml
@@ -18,8 +18,9 @@ dependencies:
# - pytorch-sparse
# From conda-forge and defaults
- - python >= 3.9 # (3.6 should also work)
+ - python=3.7 # (3.6 should also work)
- pip
+ #- gxx_linux-64
- lemon
- vigra
- freeglut
@@ -41,7 +42,7 @@ dependencies:
- scipy < 1.9 # <1.9 because n_jobs parameter in every tree query was removed. Can be >=1.9 if refactored in the whole project
- termcolor
# fix version due to stored RFC models.
- - scikit-learn >= 0.24.1
+ - scikit-learn = 0.21.3
- scikit-image
- opencv
- numba >0.48 # =0.48 led to freeze when importing elektronn3.data
@@ -76,16 +77,13 @@ dependencies:
# Required dependencies that are not yet available via conda:
- pip:
- - open3d
+ - open3d<=0.9
- zmesh
- plyfile
- - torch_geometric == 2.0.2 # 2.0.3 is incompatible with lcp.knn.quantized_sampling
- - --find-links https://data.pyg.org/whl/torch-1.12.0+cu116.html
- - torch-sparse
- - torch-scatter
+ - torch_geometric == 2.3.1
# Pre-release packages
- - git+https://github.com/ELEKTRONN/elektronn3.git@syconn2#egg=elektronn3
- - git+https://github.com/knossos-project/knossos_utils.git@syconn2#egg=knossos_utils
+ - git+https://github.com/mpinb/elektronn3.git@syconn2-mod#egg=elektronn3
+ - git+https://github.com/mpinb/knossos_utils.git@syconn2-mod#egg=knossos_utils
- git+https://github.com/StructuralNeurobiologyLab/MorphX.git@v0.1#egg=MorphX
# cloud-volume >=4 throws an error in simple_merge during np.concatenate if any skeleton has no vertices
diff --git a/syconn/analysis/syconn_knossos_viewer.py b/syconn/analysis/syconn_knossos_viewer.py
index 22c56232..35a0ede6 100755
--- a/syconn/analysis/syconn_knossos_viewer.py
+++ b/syconn/analysis/syconn_knossos_viewer.py
@@ -4,7 +4,6 @@
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
-from typing import Dict, Any
from PythonQt import QtGui, Qt, QtCore
from PythonQt.QtGui import QTableWidget, QTableWidgetItem
@@ -27,7 +26,7 @@ class SyConnGateInteraction(object):
"""
Query the SyConn backend server.
"""
- ct_from_cache: Dict[Any, Any]
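+ # use a type comment instead of a variable annotation: this plugin runs in
+ # KNOSSOS' Python 2 interpreter, which rejects annotation syntax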
+ ct_from_cache = {}  # type: dict
def __init__(self, server, synthresh=0.5, axodend_only=True):
self.server = server
diff --git a/syconn/extraction/cs_processing_steps.py b/syconn/extraction/cs_processing_steps.py
index 84ce6085..f6c668aa 100755
--- a/syconn/extraction/cs_processing_steps.py
+++ b/syconn/extraction/cs_processing_steps.py
@@ -1411,16 +1411,17 @@ def synssv_o_features(synssv_o: segmentation.SegmentationObject) -> list:
Returns:
list
"""
- features = [synssv_o.size, synssv_o.mesh_area]
+ # feature vector: synapse size, mesh area and the symmetry-type ratio;
+ # the min_dst_* distance features below are omitted
+ features = [synssv_o.size, synssv_o.mesh_area, synssv_o.attr_dict["syn_type_sym_ratio"]]
partner_ids = synssv_o.attr_dict["neuron_partners"]
for i_partner_id, partner_id in enumerate(partner_ids):
features.append(synssv_o.attr_dict["n_mi_objs_%d" % i_partner_id])
features.append(synssv_o.attr_dict["n_mi_vxs_%d" % i_partner_id])
- features.append(synssv_o.attr_dict["min_dst_mi_nm_%d" % i_partner_id])
features.append(synssv_o.attr_dict["n_vc_objs_%d" % i_partner_id])
features.append(synssv_o.attr_dict["n_vc_vxs_%d" % i_partner_id])
- features.append(synssv_o.attr_dict["min_dst_vc_nm_%d" % i_partner_id])
return features
diff --git a/syconn/extraction/find_object_properties_C.pyx b/syconn/extraction/find_object_properties_C.pyx
index 6bc6d3e7..16935efe 100755
--- a/syconn/extraction/find_object_properties_C.pyx
+++ b/syconn/extraction/find_object_properties_C.pyx
@@ -13,7 +13,7 @@ ctypedef fused n_type:
ctypedef vector[int] int_vec
ctypedef vector[int_vec] int_vec_vec
-ctypedef vector[n_type[:, :, :]] uintarr_vec
+
ctypedef vector[unordered_map[uint64_t, int_vec]] umvec_rc
ctypedef vector[unordered_map[uint64_t, int_vec_vec]] umvec_bb
ctypedef vector[unordered_map[uint64_t, int]] umvec_size
diff --git a/syconn/handler/prediction_pts.py b/syconn/handler/prediction_pts.py
index b48fd469..da29de92 100755
--- a/syconn/handler/prediction_pts.py
+++ b/syconn/handler/prediction_pts.py
@@ -1926,7 +1926,8 @@ def predict_cmpt_ssd(ssd_kwargs, mpath: Optional[str] = None, ssv_ids: Optional[
mpath = os.path.expanduser(mpath)
if os.path.isdir(mpath):
# multiple models
- mpaths = glob.glob(mpath + '*/state_dict.pth')
+ mpaths = glob.glob(mpath + '*.pth')
else:
# single model
mpaths = [mpath]
@@ -1967,7 +1968,7 @@ def predict_cmpt_ssd(ssd_kwargs, mpath: Optional[str] = None, ssv_ids: Optional[
batchsizes[ctx] = int(batchsizes[ctx]*default_kwargs['bs'])
default_kwargs['bs'] = batchsizes
out_dc = predict_pts_plain(ssd_kwargs,
- model_loader=get_cmpt_model_pts,
+ model_loader=get_cpmt_model_pts_OLD,
loader_func=pts_loader_cpmt,
pred_func=pts_pred_cmpt,
postproc_func=pts_postproc_cpmt,
@@ -1997,7 +1998,7 @@ def get_cpmt_model_pts_OLD(mpath: Optional[str] = None, device='cuda', pred_type
mpath = os.path.expanduser(mpath)
if os.path.isdir(mpath):
# multiple models
- mpaths = glob.glob(mpath + '*/*.pth')
+ mpaths = glob.glob(mpath + '*.pth')
else:
# single model, must contain 'cmpt' in its name
mpaths = [mpath]
@@ -2223,9 +2224,9 @@ def pts_pred_cmpt(m, inp, q_out, d_out, q_cnt, device, bs):
high = bs * (ii + 1)
with torch.no_grad():
- # transpose is required for lcp architectures
- g_inp = [torch.from_numpy(i[low:high]).to(device).float().transpose(1, 2) for i in model_inp]
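+ # inputs are fed in the loader's native layout; no channel transpose
+ # is applied for this model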
+ g_inp = [torch.from_numpy(i[low:high]).to(device).float() for i in model_inp]
out = m[batch_progress[2]](*g_inp)
- out = out.transpose(1, 2).cpu().numpy()
+ out = out.cpu().numpy()
masks = batch_mask[low:high]
# filter vertices which belong to sv (discard predictions for cell organelles)
out = out[masks]
@@ -2435,7 +2436,7 @@ def get_cmpt_kwargs(mdir: str) -> Tuple[dict, dict]:
ctx = int(re.findall(r'_ctx(\d+)_', mdir)[-1])
feat_dim = int(re.findall(r'_fdim(\d+)', mdir)[-1])
class_num = int(re.findall(r'_cnum(\d+)', mdir)[-1])
- pred_type = re.findall(r'_types([^_]+)_', mdir)[-1]
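+ # assumes the prediction type is encoded as '_t<type>_' in the model directory name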
+ pred_type = re.findall(r'_t([^_]+)_', mdir)[-1]
batchsize = int(re.findall(r'_bs(\d+)_', mdir)[-1])
# TODO: Fix neighbor_nums or create extra model
mkwargs = dict(input_channels=feat_dim, output_channels=class_num, use_norm=use_norm, use_bias=use_bias,