Skip to content

Commit e558848

Browse files
committed
new version with minor bug fixes
1 parent 74fa45e commit e558848

File tree

6 files changed

+12
-7
lines changed

6 files changed

+12
-7
lines changed

deep_autoviml/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@
4545
model_options=model_options, save_model_flag=True, use_my_model='',
4646
model_use_case='', verbose=0)
4747
48-
predictions = deepauto.predict(model, project_name, test_dataset=test,
48+
predictions = deepauto.predict(model, project_name="deep_autoviml", test_dataset=test,
4949
keras_model_type=keras_model_type,
5050
cat_vocab_dict=cat_vocab_dict)
5151
""" %(module_type, version_number))

deep_autoviml/__version__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,6 @@
2020
__author__ = "Ram Seshadri"
2121
__description__ = "deep_autoviml - build and test multiple Tensorflow 2.0 models and pipelines"
2222
__url__ = "https://github.com/Auto_ViML/deep_autoviml.git"
23-
__version__ = "0.0.82"
23+
__version__ = "0.0.84"
2424
__license__ = "Apache License 2.0"
2525
__copyright__ = "2020-21 Google"

deep_autoviml/deep_autoviml.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ def fit(train_data_or_file, target, keras_model_type="basic", project_name="deep
212212
"compression": None => you can set it to zip or other file compression formats if your data is compressed
213213
"csv_encoding": default 'utf-8'. But you can set it to any other csv encoding format your data is in
214214
"label_encode_flag": False. But you can set it to True if you want it encoded.
215-
"max_trials": default = 30 ## number of Storm Tuner trials ### Lower this for faster processing.
215+
"max_trials": default = 5 ## number of Storm Tuner trials ### Lower this for faster processing.
216216
"tuner": default = 'storm' ## Storm Tuner is the default tuner. Optuna is the other option.
217217
"embedding_size": default = 50 ## this is the NLP embedding size minimum
218218
"tf_hub_model": default "" (empty string). If you want to supply TF hub model, provide URL here.
@@ -361,7 +361,7 @@ def fit(train_data_or_file, target, keras_model_type="basic", project_name="deep
361361
"patience", "epochs", "steps_per_epoch", "optimizer",
362362
"kernel_initializer", "num_layers", "class_weight",
363363
"loss", "metrics", "monitor","mode", "lr_scheduler","early_stopping",
364-
"class_weight"]
364+
]
365365

366366
keras_options = copy.deepcopy(keras_options_defaults)
367367
if len(keras_options_copy) > 0:
@@ -389,7 +389,7 @@ def fit(train_data_or_file, target, keras_model_type="basic", project_name="deep
389389
model_options_defaults['compression'] = None ## this is needed in case you need to read Zip files
390390
model_options_defaults["label_encode_flag"] = '' ## User can set it to True or False depending on their need.
391391
model_options_defaults["header"] = 0 ### this is the header row for pandas to read
392-
model_options_defaults["max_trials"] = 30 ## number of Storm Tuner trials ###
392+
model_options_defaults["max_trials"] = 5 ## The number of Storm Tuner trials - make it small ###
393393
model_options_defaults['tuner'] = 'storm' ## Storm Tuner is the default tuner. Optuna is the other option.
394394
model_options_defaults["embedding_size"] = "" ## this is the NLP embedding size minimum
395395
model_options_defaults["tf_hub_model"] = "" ## If you want to use a pretrained Hub model, provide URL here.

deep_autoviml/modeling/train_model.py

+1
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,7 @@ def train_model(deep_model, full_ds, target, keras_model_type, keras_options,
157157
callbacks_dict, tb_logpath = get_callbacks(val_mode, val_monitor, patience, learning_rate,
158158
save_weights_only, onecycle_steps, save_model_path)
159159

160+
early_stopping = check_keras_options(keras_options, "early_stopping", False)
160161
if keras_options['lr_scheduler'] in ['expo', 'ExponentialDecay', 'exponentialdecay']:
161162
if early_stopping:
162163
callbacks_list = [callbacks_dict['early_stop'], callbacks_dict['print']]

deep_autoviml/utilities/utilities.py

+5-1
Original file line numberDiff line numberDiff line change
@@ -820,7 +820,11 @@ def add_outputs_to_model_body(model_body, meta_outputs):
820820
##### This is the simplest way to convert a sequential model to functional!
821821
for num, each_layer in enumerate(model_body.layers):
822822
if num == 0:
823-
final_outputs = each_layer(meta_outputs)
823+
if isinstance(meta_outputs,list):
824+
combined_input = layers.concatenate(meta_outputs, name='auto_combined_layer')
825+
final_outputs = each_layer(combined_input)
826+
else:
827+
final_outputs = each_layer(meta_outputs)
824828
else:
825829
final_outputs = each_layer(final_outputs)
826830
return final_outputs

setup.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020

2121
setuptools.setup(
2222
name="deep_autoviml",
23-
version="0.0.82",
23+
version="0.0.84",
2424
author="Ram Seshadri",
2525
# author_email="[email protected]",
2626
description="Automatically Build Deep Learning Models and Pipelines fast!",

0 commit comments

Comments
 (0)