
Commit 6947227

upgrade bokeh to v3

1 parent 9ab9cec · commit 6947227

19 files changed: +468, -498 lines

README.md

Lines changed: 13 additions & 6 deletions
@@ -462,7 +462,8 @@ Workstation](#another-workstation) to specify the path to the SongExplorer
 executable. If modifying your .bashrc file to export a modified PATH is not an
 option, then specify the `-env` flag in your configuration.py:
 
-cluster_cmd="bsub -env 'all, PATH=<path-to-unzipped-executable>/songexplorer/bin:<paths-to-everything-else>'"
+cluster_cmd="bsub -env 'all, \
+PATH=<path-to-unzipped-executable>/songexplorer/bin:<paths-to-everything-else>'"
 
 
 # Tutorial #
@@ -1714,7 +1715,10 @@ documentation showing how to call it. Here, for example, is the interface for
 # e.g.
 # time-freq-threshold.py \
 # --filename=`pwd`/groundtruth-data/round2/20161207T102314_ch1_p1.wav \
-# --parameters={"time_sigma":"9,4", "time_smooth_ms":"6.4", "frequency_n_ms":"25.6", "frequency_nw":"4", "frequency_p":"0.1,1.0", "frequency_smooth_ms":"25.6", "time_sigma_robust":"median"} \
+# --parameters={"time_sigma":"9,4", "time_smooth_ms":"6.4", \
+"frequency_n_ms":"25.6", "frequency_nw":"4", \
+"frequency_p":"0.1,1.0", "frequency_smooth_ms":"25.6", \
+"time_sigma_robust":"median"} \
 # --audio_tic_rate=2500 \
 # --audio_nchannels=1 \
 # --audio_read_plugin=load-wav \
@@ -2000,7 +2004,8 @@ library, set XLA_FLAGS in your environment, and install cuda-nvcc:
 $ conda activate songexplorer
 $ mkdir -p $CONDA_PREFIX/lib/nvvm/libdevice/
 $ cp -p $CONDA_PREFIX/lib/libdevice.10.bc $CONDA_PREFIX/lib/nvvm/libdevice/
-$ echo 'export XLA_FLAGS=--xla_gpu_cuda_data_dir=$CONDA_PREFIX/lib' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
+$ echo 'export XLA_FLAGS=--xla_gpu_cuda_data_dir=$CONDA_PREFIX/lib' \
+>> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
 $ conda install -c nvidia cuda-nvcc
 
 To upload to the [Janelia forge](https://anaconda.org/janelia):
@@ -2035,9 +2040,10 @@ asset:
 To upload a tarball to Github, compress the conda environment and drag and drop
 it into the assets section of the releases page:
 
-$ cd $CONDA_PREFIX/envs
+$ cd $CONDA_PREFIX/..
 $ tar czf songexplorer-<version>-<architecture>.tar.gz songexplorer
-$ cat songexplorer-<version>-<architecture>.tar.gz | split --bytes=2GB - songexplorer-<version>-<architecture>.tar.gz.
+$ cat songexplorer-<version>-<architecture>.tar.gz | \
+split --bytes=2GB - songexplorer-<version>-<architecture>.tar.gz.
 
 After downloading, some users will need to re-install some pip dependencies
 (e.g. tensorflow-metal on MacOS) as they are not in general relocatable:
@@ -2075,7 +2081,8 @@ To build an image, change to a local (i.e. not NFS mounted; e.g.
 
 $ git clone https://github.com/JaneliaSciComp/SongExplorer.git
 $ rm -rf songexplorer/.git
-$ sudo singularity build -s [-B /opt:/mnt] [--no-cleanup] songexplorer.img songexplorer/install/singularity.def
+$ sudo singularity build -s [-B /opt:/mnt] [--no-cleanup] \
+songexplorer.img songexplorer/install/singularity.def
 
 To confirm that the image works:
 
configuration.py

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
 
 # GUI
 gui_nlabels=7
-gui_gui_width_pix=1250
+gui_gui_width_pix=1350
 #gui_label_palette="Category10_10"
 # https://graphicdesign.stackexchange.com/questions/3682/where-can-i-find-a-large-palette-set-of-contrasting-colors-for-coloring-many-d
 gui_label_palette="('#0075dc','#993f00','#4c005c','#191919','#005c31','#2bce48','#ffcc99','#808080','#94ffb5','#8f7c00','#9dcc00','#c20088','#003380','#ffa405','#ffa8bb','#426600','#ff0010','#5ef1f2','#00998f','#e0ff66','#740aff','#990000','#ffff80','#ffff00','#ff5005')"

install/conda/songexplorer/meta.yaml

Lines changed: 2 additions & 2 deletions
@@ -19,7 +19,7 @@ requirements:
 - cudatoolkit=11 # [win]
 - cudnn=8 # [win]
 - cuda-nvcc=11 # [win]
-- bokeh >=2,<3
+- bokeh
 - matplotlib-base
 - natsort
 - scikit-image
@@ -30,7 +30,7 @@ requirements:
 - portion
 - matplotlib-venn
 - psutil
-- nodejs >=10.13.10
+- nodejs >=18
 - aitch >=0.2.2
 - opentsne
 - libssh2 [win]
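
Since the recipe now leaves bokeh unpinned, a quick post-install check can confirm that the solver actually resolved a 3.x release before launching the GUI. A minimal sketch, assuming it is run inside the rebuilt songexplorer conda environment:

    # sanity check: the stylesheets-based plugins below require bokeh >= 3
    import bokeh

    major = int(bokeh.__version__.split(".")[0])
    assert major >= 3, "expected bokeh 3.x, got " + bokeh.__version__
    print("bokeh", bokeh.__version__)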

src/UMAP.py

Lines changed: 3 additions & 3 deletions
@@ -25,14 +25,14 @@
 
 def _callback(p,M,V,C):
     C.time.sleep(0.5)
-    V.cluster_parameters[p].css_classes = []
+    V.cluster_parameters[p].stylesheets = [""]
     M.save_state_callback()
     V.buttons_update()
 
 def pca_fraction_callback(n,M,V,C):
     pca_fraction = float(V.cluster_parameters['pca-fraction'].value)
     if not 0 < pca_fraction <= 1:
-        V.cluster_parameters['pca-fraction'].css_classes = ['changed']
+        V.cluster_parameters['pca-fraction'].stylesheets = M.changed_style
         V.cluster_parameters['pca-fraction'].value = str(min(1, max(0, pca_fraction)))
         bokehlog.info("WARNING: `PCA fraction` must be between 0 and 1")
         if V.bokeh_document:
@@ -43,7 +43,7 @@ def pca_fraction_callback(n,M,V,C):
 def positive_callback(key,n,M,V,C):
     value = float(V.cluster_parameters[key].value)
     if value <= 0:
-        V.cluster_parameters[key].css_classes = ['changed']
+        V.cluster_parameters[key].stylesheets = M.changed_style
         V.cluster_parameters[key].value = "1"
         bokehlog.info("WARNING: `"+key+"` must be positive")
         if V.bokeh_document:
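
The substance of this commit is the swap from toggling css_classes to assigning Bokeh 3's stylesheets property, which injects CSS into each widget's shadow DOM and styles its internal .bk-input element directly. A minimal self-contained sketch of the new idiom (the widget and values here are made up; M.changed_style is presumably the same kind of one-element list used literally in the plugin files):

    from bokeh.models import TextInput

    changed_style = [".bk-input { background-color: #FFA500; }"]  # orange = needs attention

    widget = TextInput(title="pca-fraction", value="1.5")
    widget.stylesheets = changed_style   # highlight the out-of-range value
    widget.value = "1.0"                 # clamp it, as pca_fraction_callback does
    widget.stylesheets = [""]            # later, restore the default appearance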

src/architecture-plugin.py

Lines changed: 3 additions & 3 deletions
@@ -11,17 +11,17 @@
 # optional callbacks can be used to validate user input
 def _callback(p,M,V,C):
     C.time.sleep(0.5)
-    V.model_parameters[p].css_classes = []
+    V.model_parameters[p].stylesheets = []
     M.save_state_callback()
     V.buttons_update()
 
 def callback(n,M,V,C):
     # M, V, C are the model, view, and controller in src/gui
     # access the hyperparameters below with the V.model_parameters dictionary
-    # the value is stored in .value, and the appearance can be controlled with .css_classes
+    # the value is stored in .value, and the appearance can be controlled with .stylesheets
     if int(V.model_parameters['a-bounded-value'].value) < 0:
         #bokehlog.info("a-bounded-value = "+str(V.model_parameters['a-bounded-value'].value)) # uncomment to debug
-        V.model_parameters['a-bounded-value'].css_classes = ['changed']
+        V.model_parameters['a-bounded-value'].stylesheets = [".bk-input { background-color: #FFA500; }"]
         V.model_parameters['a-bounded-value'].value = "0"
         if V.bokeh_document: # if interactive
             V.bokeh_document.add_next_tick_callback(lambda: _callback('a-bounded-value',M,V,C))
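
The plugin comments describe the full validation flow: callback() checks .value, flags the widget via .stylesheets, clamps the value, and schedules _callback on the next tick so the highlight is cleared once the document has updated. A condensed, hypothetical sketch of that flow outside SongExplorer (doc stands in for V.bokeh_document, and the sleep mirrors the 0.5 s pause in _callback):

    import time
    from bokeh.models import TextInput

    CHANGED = [".bk-input { background-color: #FFA500; }"]

    def validate(widget, doc):
        # flag and clamp an out-of-range value, as callback() does
        if int(widget.value) < 0:
            widget.stylesheets = CHANGED
            widget.value = "0"
            if doc:  # if interactive, clear the highlight on the next tick
                doc.add_next_tick_callback(lambda: _reset(widget))

    def _reset(widget):
        time.sleep(0.5)            # let the highlight register before clearing it
        widget.stylesheets = [""]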

src/autoencoder.py

Lines changed: 9 additions & 9 deletions
@@ -13,7 +13,7 @@
 def _callback(ps,M,V,C):
     C.time.sleep(0.5)
     for p in ps:
-        V.model_parameters[p].css_classes = []
+        V.model_parameters[p].stylesheets = [""]
     M.save_state_callback()
     V.buttons_update()
 
@@ -22,15 +22,15 @@ def window_callback(n,M,V,C):
     changed, window_sec2 = M.next_pow2_sec(float(V.model_parameters['window'].value) * M.time_scale)
     if changed:
         bokehlog.info("WARNING: adjusting `window ("+M.time_units+")` to be a power of two in tics")
-        V.model_parameters['window'].css_classes = ['changed']
+        V.model_parameters['window'].stylesheets = M.changed_style
         V.model_parameters['window'].value = str(window_sec2 / M.time_scale)
         ps.append('window')
     mel, _ = V.model_parameters['mel_dct'].value.split(',')
     nfreqs = round(window_sec2*M.audio_tic_rate/2+1)
     if int(mel) != nfreqs:
         changed=True
         bokehlog.info("WARNING: adjusting `mel & DCT` to both be equal to the number of frequencies")
-        V.model_parameters['mel_dct'].css_classes = ['changed']
+        V.model_parameters['mel_dct'].stylesheets = M.changed_style
         V.model_parameters['mel_dct'].value = str(nfreqs)+','+str(nfreqs)
         ps.append('mel_dct')
     if changed:
@@ -70,7 +70,7 @@ def stride_callback(n,M,V,C):
     else:
         stride_sec2 = "-1"
         bokehlog.info("ERROR: downsampling achieved by `stride after` is prime")
-        V.model_parameters['stride'].css_classes = ['changed']
+        V.model_parameters['stride'].stylesheets = M.changed_style
         V.model_parameters['stride'].value = str(stride_sec2 * M.time_scale)
     if V.bokeh_document:
         V.bokeh_document.add_next_tick_callback(lambda: _callback(['stride_sec'],M,V,C))
@@ -84,7 +84,7 @@ def mel_dct_callback(n,M,V,C):
     mel, dct = V.model_parameters['mel_dct'].value.split(',')
     if int(dct) > int(mel):
         bokehlog.info("WARNING: adjusting `mel & DCT` such that DCT is less than or equal to mel")
-        V.model_parameters['mel_dct'].css_classes = ['changed']
+        V.model_parameters['mel_dct'].stylesheets = M.changed_style
         V.model_parameters['mel_dct'].value = mel+','+mel
         if V.bokeh_document:
             V.bokeh_document.add_next_tick_callback(lambda: _callback(['mel_dct'],M,V,C))
@@ -103,7 +103,7 @@ def range_callback(n,M,V,C):
     if lo > hi: lo=0;
     if V.model_parameters['range'].value != str(lo/M.freq_scale)+'-'+str(hi/M.freq_scale):
         bokehlog.info("WARNING: adjusting `range ("+M.freq_units+")` such that lower bound is not negative and the higher bound less than the Nyquist frequency.")
-        V.model_parameters['range'].css_classes = ['changed']
+        V.model_parameters['range'].stylesheets = M.changed_style
         V.model_parameters['range'].value = str(lo/M.freq_scale)+'-'+str(hi/M.freq_scale)
         if V.bokeh_document:
             V.bokeh_document.add_next_tick_callback(lambda: _callback(['range'],M,V,C))
@@ -124,7 +124,7 @@ def dilate_stride_callback(key,n,M,V,C):
     dilate = set(dilate_time + dilate_freq)
     if stride & dilate:
         bokehlog.info("WARNING: adjusting `"+key+"` so that the convolutional layers with strides do not overlap with those that dilate")
-        V.model_parameters[key].css_classes = ['changed']
+        V.model_parameters[key].stylesheets = M.changed_style
         tmp = set(parse_layers(V.model_parameters[key].value, nconvlayers))
         V.model_parameters[key].value = esrap_layers(list(tmp - (stride & dilate)), nconvlayers)
         if V.bokeh_document:
@@ -141,7 +141,7 @@ def dilate_stride_callback(key,n,M,V,C):
 # while downsampled_rate != round(downsampled_rate):
 #     stride_time.pop()
 #     downsampled_rate = M.audio_tic_rate / 2 ** min(nconvlayers, len(stride_time))
-# V.model_parameters['stride_time'].css_classes = ['changed']
+# V.model_parameters['stride_time'].stylesheets = M.changed_style
 # V.model_parameters['stride_time'].value = esrap_layers(stride_time, nconvlayers)
 # if V.bokeh_document:
 #     V.bokeh_document.add_next_tick_callback(lambda: _callback(['stride_time'],M,V,C))
@@ -159,7 +159,7 @@ def dilate_stride_callback(key,n,M,V,C):
 #     downsampled_rate = M.audio_tic_rate / 2 ** min(this_nconvlayers, len(stride_time))
 #     if downsampled_rate == round(downsampled_rate):
 #         break
-# V.model_parameters['nconvlayers'].css_classes = ['changed']
+# V.model_parameters['nconvlayers'].stylesheets = M.changed_style
 # V.model_parameters['nconvlayers'].value = str(this_nconvlayers)
 # if V.bokeh_document:
 #     V.bokeh_document.add_next_tick_callback(lambda: _callback(['nconvlayers'],M,V,C))
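
Note that the built-in models (autoencoder, convolutional) now reference a shared M.changed_style instead of repeating a CSS literal in every callback. That attribute is not defined anywhere in this diff; presumably it is set once in the src/gui model so all widgets are flagged the same way, along the lines of this assumed definition:

    # assumed, not shown in this commit: one stylesheet list shared by all callbacks
    changed_style = [".bk-input { background-color: #FFA500; }"]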

src/cluster-plugin.py

Lines changed: 3 additions & 3 deletions
@@ -18,17 +18,17 @@
 # optional callbacks can be used to validate user input
 def _callback(p,M,V,C):
     C.time.sleep(0.5)
-    V.cluster_parameters[p].css_classes = []
+    V.cluster_parameters[p].stylesheets = [""]
     M.save_state_callback()
     V.buttons_update()
 
 def callback(n,M,V,C):
     # M, V, C are the model, view, and controller in src/gui
     # access the hyperparameters below with the V.detect_parameters dictionary
-    # the value is stored in .value, and the appearance can be controlled with .css_classes
+    # the value is stored in .value, and the appearance can be controlled with .stylesheets
     if int(V.detect_parameters['a-bounded-value'].value) < 0:
         #bokehlog.info("a-bounded-value = "+str(V.detect_parameters['a-bounded-value'].value)) # uncomment to debug
-        V.detect_parameters['a-bounded-value'].css_classes = ['changed']
+        V.detect_parameters['a-bounded-value'].stylesheets = [".bk-input { background-color: #FFA500; }"]
         V.detect_parameters['a-bounded-value'].value = "0"
         if V.bokeh_document: # if interactive
             V.bokeh_document.add_next_tick_callback(lambda: _callback('a-bounded-value',M,V,C))

src/convolutional.py

Lines changed: 9 additions & 9 deletions
@@ -13,7 +13,7 @@
 def _callback(ps,M,V,C):
     C.time.sleep(0.5)
     for p in ps:
-        V.model_parameters[p].css_classes = []
+        V.model_parameters[p].stylesheets = [""]
     M.save_state_callback()
     V.buttons_update()
 
@@ -22,15 +22,15 @@ def window_callback(n,M,V,C):
     changed, window_sec2 = M.next_pow2_sec(float(V.model_parameters['window'].value) * M.time_scale)
     if changed:
         bokehlog.info("WARNING: adjusting `window ("+M.time_units+")` to be a power of two in tics")
-        V.model_parameters['window'].css_classes = ['changed']
+        V.model_parameters['window'].stylesheets = M.changed_style
         V.model_parameters['window'].value = str(window_sec2 / M.time_scale)
         ps.append('window')
     mel, _ = V.model_parameters['mel_dct'].value.split(',')
     nfreqs = round(window_sec2*M.audio_tic_rate/2+1)
     if int(mel) != nfreqs:
         changed=True
         bokehlog.info("WARNING: adjusting `mel & DCT` to both be equal to the number of frequencies")
-        V.model_parameters['mel_dct'].css_classes = ['changed']
+        V.model_parameters['mel_dct'].stylesheets = M.changed_style
         V.model_parameters['mel_dct'].value = str(nfreqs)+','+str(nfreqs)
         ps.append('mel_dct')
     if changed:
@@ -70,7 +70,7 @@ def stride_callback(n,M,V,C):
     else:
         stride_sec2 = "-1"
         bokehlog.info("ERROR: downsampling achieved by `stride after` is prime")
-        V.model_parameters['stride'].css_classes = ['changed']
+        V.model_parameters['stride'].stylesheets = M.changed_style
         V.model_parameters['stride'].value = str(stride_sec2 * M.time_scale)
     if V.bokeh_document:
         V.bokeh_document.add_next_tick_callback(lambda: _callback(['stride_sec'],M,V,C))
@@ -84,7 +84,7 @@ def mel_dct_callback(n,M,V,C):
     mel, dct = V.model_parameters['mel_dct'].value.split(',')
     if int(dct) > int(mel):
         bokehlog.info("WARNING: adjusting `mel & DCT` such that DCT is less than or equal to mel")
-        V.model_parameters['mel_dct'].css_classes = ['changed']
+        V.model_parameters['mel_dct'].stylesheets = M.changed_style
         V.model_parameters['mel_dct'].value = mel+','+mel
         if V.bokeh_document:
             V.bokeh_document.add_next_tick_callback(lambda: _callback(['mel_dct'],M,V,C))
@@ -103,7 +103,7 @@ def range_callback(n,M,V,C):
     if lo > hi: lo=0;
     if V.model_parameters['range'].value != str(lo/M.freq_scale)+'-'+str(hi/M.freq_scale):
         bokehlog.info("WARNING: adjusting `range ("+M.freq_units+")` such that lower bound is not negative and the higher bound less than the Nyquist frequency.")
-        V.model_parameters['range'].css_classes = ['changed']
+        V.model_parameters['range'].stylesheets = M.changed_style
         V.model_parameters['range'].value = str(lo/M.freq_scale)+'-'+str(hi/M.freq_scale)
         if V.bokeh_document:
             V.bokeh_document.add_next_tick_callback(lambda: _callback(['range'],M,V,C))
@@ -124,7 +124,7 @@ def dilate_stride_callback(key,n,M,V,C):
     dilate = set(dilate_time + dilate_freq)
     if stride & dilate:
         bokehlog.info("WARNING: adjusting `"+key+"` so that the convolutional layers with strides do not overlap with those that dilate")
-        V.model_parameters[key].css_classes = ['changed']
+        V.model_parameters[key].stylesheets = M.changed_style
         tmp = set(parse_layers(V.model_parameters[key].value, nconvlayers))
         V.model_parameters[key].value = esrap_layers(list(tmp - (stride & dilate)), nconvlayers)
         if V.bokeh_document:
@@ -141,7 +141,7 @@ def stride_time_callback(n,M,V,C):
     while downsampled_rate != round(downsampled_rate):
         stride_time.pop()
         downsampled_rate = M.audio_tic_rate / 2 ** min(nconvlayers, len(stride_time))
-    V.model_parameters['stride_time'].css_classes = ['changed']
+    V.model_parameters['stride_time'].stylesheets = M.changed_style
     V.model_parameters['stride_time'].value = esrap_layers(stride_time, nconvlayers)
     if V.bokeh_document:
         V.bokeh_document.add_next_tick_callback(lambda: _callback(['stride_time'],M,V,C))
@@ -159,7 +159,7 @@ def nlayers_callback(n,M,V,C):
         downsampled_rate = M.audio_tic_rate / 2 ** min(this_nconvlayers, len(stride_time))
         if downsampled_rate == round(downsampled_rate):
             break
-    V.model_parameters['nconvlayers'].css_classes = ['changed']
+    V.model_parameters['nconvlayers'].stylesheets = M.changed_style
     V.model_parameters['nconvlayers'].value = str(this_nconvlayers)
     if V.bokeh_document:
         V.bokeh_document.add_next_tick_callback(lambda: _callback(['nconvlayers'],M,V,C))
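
As context for window_callback above, M.next_pow2_sec snaps the requested window to a power of two in tics and reports whether anything changed; the helper itself is not part of this diff. A hypothetical reconstruction, for illustration only (the real method presumably reads the tic rate from M rather than taking it as an argument):

    import math

    def next_pow2_sec(window_sec, audio_tic_rate):
        # hypothetical sketch: convert seconds to tics, round up to the next
        # power of two, and report whether the value had to change
        tics = max(1, window_sec * audio_tic_rate)
        tics2 = 2 ** math.ceil(math.log2(tics))
        return tics2 != tics, tics2 / audio_tic_rate

    # e.g. a 6.4 ms window at 2500 tics/sec is exactly 16 tics, already a power of two:
    # next_pow2_sec(0.0064, 2500) -> (False, 0.0064)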
