Skip to content

Commit ac8eb8a

Browse files
authored
Merge pull request #26 from Ekhao/json-config
removed constants python file and added json config
2 parents 737c149 + 2431bf7 commit ac8eb8a

10 files changed

+183
-135
lines changed

config.json

Lines changed: 91 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,91 @@
1+
{
2+
"datanas-config": {
3+
"general-config": {
4+
"num-models": 10,
5+
"seed": null
6+
},
7+
"joblib-config": {
8+
"num-cores-to-use": 1
9+
},
10+
"search-space-config": {
11+
"data-search-space": [
12+
[
13+
48000,
14+
24000,
15+
12000,
16+
6000,
17+
3000,
18+
1500,
19+
750,
20+
375
21+
],
22+
[
23+
"spectrogram",
24+
"mel-spectrogram",
25+
"mfcc"
26+
]
27+
],
28+
"model-layer-search-space": [
29+
[
30+
2,
31+
4,
32+
8,
33+
16,
34+
32,
35+
64,
36+
128
37+
],
38+
[
39+
3,
40+
5
41+
],
42+
[
43+
"relu",
44+
"sigmoid"
45+
]
46+
]
47+
},
48+
"model-config": {
49+
"optimizer": "adam",
50+
"loss": "sparse_categorical_crossentropy",
51+
"metrics": [
52+
"accuracy"
53+
],
54+
"num-output-classes": 2,
55+
"width-dense-layer": 10
56+
},
57+
"dataset-config": {
58+
"path-normal-files": "/Users/emjn/Documents/DTU/Datasets/ToyConveyor/case1/NormalSound_IND/",
59+
"path-anomalous-files": "/Users/emjn/Documents/DTU/Datasets/ToyConveyor/case1/AnomalousSound_IND/",
60+
"path-noise-files": "/Users/emjn/Documents/DTU/Datasets/ToyConveyor/EnvironmentalNoise_CNT/",
61+
"case-noise-files": "case1",
62+
"num-normal-files": 900,
63+
"num-anomalous-files": 200,
64+
"dataset-channel": 1,
65+
"sound-gain": 1,
66+
"noise-gain": 1
67+
},
68+
"preprocessing-config": {
69+
"frame-size": 2048,
70+
"hop-length": 512,
71+
"num-mel-filters": 80,
72+
"num-mfccs": 13
73+
},
74+
"controller-config": {
75+
"controller": "evolution",
76+
"initialization": "trivial",
77+
"max-num-layers": 5
78+
},
79+
"evaluation-config": {
80+
"num-epochs": 20,
81+
"batch-size": 32,
82+
"approximate-model-size": 100000
83+
},
84+
"evolutionary-config": {
85+
"population-size": 10,
86+
"population-update-ratio": 0.5,
87+
"crossover-ratio": 0.2
88+
},
89+
"audio-seconds-to-load": 10
90+
}
91+
}

constants.py

Lines changed: 0 additions & 64 deletions
This file was deleted.

datanas.py

Lines changed: 64 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
# Standard Library Imports
44
import argparse
5+
import json
56

67
# Third Party Imports
78
import tensorflow as tf
@@ -10,7 +11,6 @@
1011
import searchspace
1112
import datamodelgenerator
1213
import datasetloader
13-
import constants
1414
import randomcontroller
1515
import evolutionarycontroller
1616

@@ -39,13 +39,13 @@ def main():
3939
# Model Parameters
4040
argparser.add_argument("-o", "--optimizer",
4141
help="The optimizer to use for training the models. Give as a string corresponding to the alias of a TensorFlow optimizer.")
42-
argparser.add_argument("-lf", "--loss_function",
42+
argparser.add_argument("-l", "--loss",
4343
help="The loss function to use for training the models. Give as a string corresponding to the alias of a TensorFlow loss function.")
4444
argparser.add_argument("-m", "--metrics",
4545
help="The metrics to use for training the models. Give as a [\"...\"] formatted list of TensorFlow metric aliases.")
4646
argparser.add_argument("-no", "--num_output_classes",
4747
help="The number of outputs classes that the created models should have.", type=int)
48-
argparser.add_argument("-wd", "--width_of_dense_layer",
48+
argparser.add_argument("-wd", "--width_dense_layer",
4949
help="The width of the dense layer after the convolutional layers in the model. Be aware that this argument can cause an explosion of model parameters.")
5050

5151
# Dataset Parameters
@@ -57,11 +57,11 @@ def main():
5757
help="The filepath to the directory containing noise files.")
5858
argparser.add_argument("-cns", "--case_noise_files",
5959
help="The case number to use for noise files.")
60-
argparser.add_argument("-nn", "--num_normal_files_to_use",
60+
argparser.add_argument("-nn", "--num_normal_files",
6161
help="The number of normal files to use for training.", type=int)
62-
argparser.add_argument("-na", "--num_anomalous_files_to_use",
62+
argparser.add_argument("-na", "--num_anomalous_files",
6363
help="The number of anomalous files to use for training.", type=int)
64-
argparser.add_argument("-ch", "--dataset_channel_to_use",
64+
argparser.add_argument("-ch", "--dataset_channel",
6565
help="The dataset channel to use for training.", type=int)
6666
argparser.add_argument("-sg", "--sound_gain",
6767
help="The gain to apply to the sound files.", type=float)
@@ -92,7 +92,7 @@ def main():
9292
argparser.add_argument("-bs", "--batch_size",
9393
help="The batch size to use for training.", type=int)
9494
argparser.add_argument(
95-
"-msa", "--model_size_approximate_range", type=int)
95+
"-ams", "--approximate_model_size", help="An approximate size of the models to be generated. Is used to decide whether a generated model is scored well or poor on its model size.", type=int)
9696

9797
# Evolutionary Parameters
9898
argparser.add_argument("-ps", "--population_size",
@@ -104,71 +104,86 @@ def main():
104104

105105
args = argparser.parse_args()
106106

107+
# Parse config file
108+
config_file = open("config.json", "r")
109+
config = json.load(config_file)
110+
111+
config = config["datanas-config"]
112+
general_config = config["general-config"]
113+
joblib_config = config["joblib-config"]
114+
search_space_config = config["search-space-config"]
115+
model_config = config["model-config"]
116+
dataset_config = config["dataset-config"]
117+
preprocessing_config = config["preprocessing-config"]
118+
controller_config = config["controller-config"]
119+
evaluation_config = config["evaluation-config"]
120+
evolutionary_config = config["evolutionary-config"]
121+
107122
# Set options according to command line arguments and config file
108123
if not args.num_models:
109-
args.num_models = constants.NUM_MODELS
124+
args.num_models = general_config["num-models"]
110125
if not args.seed:
111-
args.seed = constants.SEED
126+
args.seed = general_config["seed"]
112127
if not args.num_cores_to_use:
113-
args.num_cores_to_use = constants.NUM_CORES_TO_USE
128+
args.num_cores_to_use = joblib_config["num-cores-to-use"]
114129
if not args.data_search_space:
115-
args.data_search_space = constants.DATA_SEARCH_SPACE
130+
args.data_search_space = search_space_config["data-search-space"]
116131
if not args.model_layer_search_space:
117-
args.model_layer_search_space = constants.MODEL_LAYER_SEARCH_SPACE
132+
args.model_layer_search_space = search_space_config["model-layer-search-space"]
118133
if not args.optimizer:
119-
args.optimizer = constants.OPTIMIZER
120-
if not args.loss_function:
121-
args.loss_function = constants.LOSS_FUNCTION
134+
args.optimizer = model_config["optimizer"]
135+
if not args.loss:
136+
args.loss = model_config["loss"]
122137
if not args.metrics:
123-
args.metrics = constants.METRICS
138+
args.metrics = model_config["metrics"]
124139
if not args.num_output_classes:
125-
args.num_output_classes = constants.NUM_OUTPUT_CLASSES
126-
if not args.width_of_dense_layer:
127-
args.width_of_dense_layer = constants.WIDTH_OF_DENSE_LAYER
140+
args.num_output_classes = model_config["num-output-classes"]
141+
if not args.width_dense_layer:
142+
args.width_dense_layer = model_config["width-dense-layer"]
128143
if not args.path_normal_files:
129-
args.path_normal_files = constants.PATH_NORMAL_FILES
144+
args.path_normal_files = dataset_config["path-normal-files"]
130145
if not args.path_anomalous_files:
131-
args.path_anomalous_files = constants.PATH_ANOMALOUS_FILES
146+
args.path_anomalous_files = dataset_config["path-anomalous-files"]
132147
if not args.path_noise_files:
133-
args.path_noise_files = constants.PATH_NOISE_FILES
148+
args.path_noise_files = dataset_config["path-noise-files"]
134149
if not args.case_noise_files:
135-
args.case_noise_files = constants.CASE_NOISE_FILES
136-
if not args.num_normal_files_to_use:
137-
args.num_normal_files_to_use = constants.NUM_NORMAL_FILES_TO_USE
138-
if not args.num_anomalous_files_to_use:
139-
args.num_anomalous_files_to_use = constants.NUM_ANOMALOUS_FILES_TO_USE
140-
if not args.dataset_channel_to_use:
141-
args.dataset_channel_to_use = constants.DATASET_CHANNEL_TO_USE
150+
args.case_noise_files = dataset_config["case-noise-files"]
151+
if not args.num_normal_files:
152+
args.num_normal_files = dataset_config["num-normal-files"]
153+
if not args.num_anomalous_files:
154+
args.num_anomalous_files = dataset_config["num-anomalous-files"]
155+
if not args.dataset_channel:
156+
args.dataset_channel = dataset_config["dataset-channel"]
142157
if not args.sound_gain:
143-
args.sound_gain = constants.SOUND_GAIN
158+
args.sound_gain = dataset_config["sound-gain"]
144159
if not args.noise_gain:
145-
args.noise_gain = constants.NOISE_GAIN
160+
args.noise_gain = dataset_config["noise-gain"]
146161
if not args.frame_size:
147-
args.frame_size = constants.FRAME_SIZE
162+
args.frame_size = preprocessing_config["frame-size"]
148163
if not args.hop_length:
149-
args.hop_length = constants.HOP_LENGTH
164+
args.hop_length = preprocessing_config["hop-length"]
150165
if not args.num_mel_filters:
151-
args.num_mel_filters = constants.NUM_MEL_FILTERS
166+
args.num_mel_filters = preprocessing_config["num-mel-filters"]
152167
if not args.num_mfccs:
153-
args.num_mfccs = constants.NUM_MFCCS
168+
args.num_mfccs = preprocessing_config["num-mfccs"]
154169
if not args.controller:
155-
args.controller = constants.CONTROLLER
170+
args.controller = controller_config["controller"]
156171
if not args.initialization:
157-
args.initialization = constants.INITIALIZATION
172+
args.initialization = controller_config["initialization"]
158173
if not args.max_num_layers:
159-
args.max_num_layers = constants.MAX_NUM_LAYERS
174+
args.max_num_layers = controller_config["max-num-layers"]
160175
if not args.num_epochs:
161-
args.num_epochs = constants.NUM_EPOCHS
176+
args.num_epochs = evaluation_config["num-epochs"]
162177
if not args.batch_size:
163-
args.batch_size = constants.BATCH_SIZE
164-
if not args.model_size_approximate_range:
165-
args.model_size_approximate_range = constants.MODEL_SIZE_APPROXIMATE_RANGE
178+
args.batch_size = evaluation_config["batch-size"]
179+
if not args.approximate_model_size:
180+
args.approximate_model_size = evaluation_config["approximate-model-size"]
166181
if not args.population_size:
167-
args.population_size = constants.POPULATION_SIZE
182+
args.population_size = evolutionary_config["population-size"]
168183
if not args.population_update_ratio:
169-
args.population_update_ratio = constants.POPULATION_UPDATE_RATIO
184+
args.population_update_ratio = evolutionary_config["population-update-ratio"]
170185
if not args.crossover_ratio:
171-
args.crossover_ratio = constants.CROSSOVER_RATIO
186+
args.crossover_ratio = evolutionary_config["crossover-ratio"]
172187

173188
# The following block of code enables memory growth for the GPU during runtime.
174189
# It is suspected that this helps avoiding out of memory errors.
@@ -192,20 +207,20 @@ def main():
192207

193208
print("Loading dataset files from persistent storage...")
194209
dataset_loader = datasetloader.DatasetLoader(args.path_normal_files, args.path_anomalous_files, args.path_noise_files, args.case_noise_files,
195-
args.num_normal_files_to_use, args.num_anomalous_files_to_use, args.dataset_channel_to_use, args.num_cores_to_use, args.sound_gain, args.noise_gain, constants.AUDIO_SECONDS_TO_LOAD)
210+
args.num_normal_files, args.num_anomalous_files, args.dataset_channel, args.num_cores_to_use, args.sound_gain, args.noise_gain, config["audio-seconds-to-load"])
196211

197212
print("Initializing controller...")
198213
if args.controller == "evolution":
199214
controller = evolutionarycontroller.EvolutionaryController(
200-
search_space, args.population_size, args.max_num_layers, args.population_update_ratio, args.crossover_ratio, args.model_size_approximate_range, args.seed)
215+
search_space, args.population_size, args.max_num_layers, args.population_update_ratio, args.crossover_ratio, args.approximate_model_size, args.seed)
201216
controller.initialize_controller(args.initialization == "trivial")
202217
else:
203218
controller = randomcontroller.RandomController(
204-
search_space, args.seed, args.max_num_layers)
219+
search_space, args.max_num_layers, args.seed)
205220

206221
# Run the Data Aware NAS
207222
data_model_generator = datamodelgenerator.DataModelGenerator(
208-
args.num_output_classes, args.loss_function, controller, dataset_loader, args.optimizer, args.metrics, args.width_of_dense_layer, args.num_epochs, args.batch_size, args.num_normal_files_to_use, args.num_anomalous_files_to_use, args.path_normal_files, args.path_anomalous_files, args.frame_size, args.hop_length, args.num_mel_filters, args.num_mfccs)
223+
args.num_output_classes, args.loss, controller, dataset_loader, args.optimizer, args.metrics, args.width_dense_layer, args.num_epochs, args.batch_size, args.num_normal_files, args.num_anomalous_files, args.path_normal_files, args.path_anomalous_files, args.frame_size, args.hop_length, args.num_mel_filters, args.num_mfccs)
209224
pareto_front = data_model_generator.run_data_nas(args.num_models)
210225

211226
# Print out results

randomcontroller.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,11 +5,10 @@
55

66
# Local Imports
77
import controller
8-
import constants
98

109

1110
class RandomController (controller.Controller):
12-
def __init__(self, search_space, seed=None, max_num_layers=constants.MAX_NUM_LAYERS):
11+
def __init__(self, search_space, max_num_layers, seed=None):
1312
super().__init__(search_space)
1413
random.seed(seed)
1514
self.seed = seed

tests/datamodel_test.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,14 @@
1+
# Standard Library Imports
12
import unittest
23
import unittest.mock
4+
import copy
35

4-
import datamodel
6+
# Third Party Imports
7+
import tensorflow as tf
58

9+
# Local Imports
10+
import datamodel
611
import searchspace
7-
import datasetloader
8-
9-
import tensorflow as tf
10-
import copy
1112

1213

1314
class DataModelTestCase(unittest.TestCase):

0 commit comments

Comments (0)