Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 16 additions & 13 deletions flaml/automl/automl.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import time
from functools import partial
from typing import Callable, List, Optional, Union

from flaml.helper_functions import format_integers
import numpy as np

from flaml import tune
Expand Down Expand Up @@ -1879,9 +1879,9 @@ def is_to_reverse_metric(metric, task):
this_estimator_kwargs.update(
self._state.fit_kwargs
) # update the shallow copy of fit_kwargs to fit_kwargs_by_estimator
self._state.fit_kwargs_by_estimator[
estimator_name
] = this_estimator_kwargs # set self._state.fit_kwargs_by_estimator[estimator_name] to the update, so only self._state.fit_kwargs_by_estimator will be updated
self._state.fit_kwargs_by_estimator[estimator_name] = (
this_estimator_kwargs # set self._state.fit_kwargs_by_estimator[estimator_name] to the update, so only self._state.fit_kwargs_by_estimator will be updated
)
else:
self._state.fit_kwargs_by_estimator[estimator_name] = self._state.fit_kwargs

Expand Down Expand Up @@ -2164,11 +2164,15 @@ def _log_trial(self, search_state, estimator):
mlflow.log_param("best_learner", self._best_estimator)
mlflow.log_metric(
self._state.metric if isinstance(self._state.metric, str) else self._state.error_metric,
1 - search_state.val_loss
if self._state.error_metric.startswith("1-")
else -search_state.val_loss
if self._state.error_metric.startswith("-")
else search_state.val_loss,
(
1 - search_state.val_loss
if self._state.error_metric.startswith("1-")
else (
-search_state.val_loss
if self._state.error_metric.startswith("-")
else search_state.val_loss
)
),
)

def _search_sequential(self):
Expand Down Expand Up @@ -2388,14 +2392,13 @@ def _search_sequential(self):
search_state.trained_estimator.cleanup()
if better or self._log_type == "all":
self._log_trial(search_state, estimator)

logger.info(
" at {:.1f}s,\testimator {}'s best error={:.4f},\tbest estimator {}'s best error={:.4f}".format(
" at {:.1f}s,\testimator {}'s best error={},\tbest estimator {}'s best error={}".format(
self._state.time_from_start,
estimator,
search_state.best_loss,
format_integers(search_state.best_loss),
self._best_estimator,
self._state.best_loss,
format_integers(self._state.best_loss),
)
)
if (
Expand Down
28 changes: 28 additions & 0 deletions flaml/helper_functions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
import math

import numpy as np


def format_integers(integer: float) -> str:
    """
    Format a number to a string with a dynamic number of significant figures.

    The number of significant figures is the decimal position of the first
    non-zero digit (for |integer| < 1), with a minimum of 4 significant
    figures. Values >= 1 always use 4 significant figures.

    Parameters:
    - integer (float): The number to format. Despite the parameter name, any
      real number is accepted (ints, negatives, and non-finite values).

    Returns:
    - str: The formatted number as a string.

    Example:
    - format_integers(0.0003123) returns '0.0003123'
    - format_integers(0) returns '0.0000'
    """
    if integer == 0:
        return "0.0000"  # Standard format for zero to keep four decimal places.
    if not math.isfinite(integer):
        # inf/-inf/nan have no digits to count; use the default precision.
        return "{:.4g}".format(integer)

    # Decimal position of the first significant digit: for |x| < 1,
    # floor(log10(|x|)) == -k means the first non-zero digit is the k-th
    # decimal place. This is robust against str() switching to scientific
    # notation (|x| < 1e-4), which broke the previous string-scanning logic.
    exponent = math.floor(math.log10(abs(integer)))
    first_digit_position = -exponent if exponent < 0 else 0

    # Use at least 4 significant figures; use more only when the first
    # significant digit appears beyond the 4th decimal place.
    return "{:.{}g}".format(integer, max(first_digit_position, 4))
5 changes: 3 additions & 2 deletions test/nni/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import argparse
import logging
import os
from flaml.helper_functions import format_integers

import nni
import torch
Expand Down Expand Up @@ -84,8 +85,8 @@ def test(args, model, device, test_loader):
accuracy = 100.0 * correct / len(test_loader.dataset)

logger.info(
"\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss, correct, len(test_loader.dataset), accuracy
"\nTest set: Average loss: {}, Accuracy: {}/{} ({:.0f}%)\n".format(
format_integers(test_loss), correct, len(test_loader.dataset), accuracy
)
)

Expand Down
3 changes: 2 additions & 1 deletion test/pipeline_tuning_example/tuner/tuner_func.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import submit_train_pipeline
from ray import tune
from flaml.helper_functions import format_integers

import flaml

Expand Down Expand Up @@ -88,7 +89,7 @@ def tune_pipeline(concurrent_run=1):
metric = best_trial.metric_analysis[hp_metric][mode]
print(f"n_trials={len(analysis.trials)}")
print(f"time={time.time()-start_time}")
print(f"Best {hp_metric}: {metric:.4f}")
print(f"Best {hp_metric}: {format_integers(metric)}")
    print(f"Best configuration: {best_trial.config}")


Expand Down
6 changes: 4 additions & 2 deletions test/tune/test_tune.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Require: pip install flaml[test,ray]
"""

import logging
import math
import os
Expand All @@ -9,6 +10,7 @@
import sklearn.metrics
import xgboost as xgb
from sklearn.model_selection import train_test_split
from flaml.helper_functions import format_integers

from flaml import CFO, BlendSearch

Expand Down Expand Up @@ -236,8 +238,8 @@ def _test_xgboost(method="BlendSearch"):
logger.info(f"method={method}")
logger.info(f"n_samples={num_samples*n_cpu}")
logger.info(f"time={time.time()-start_time}")
logger.info(f"Best model eval loss: {logloss:.4f}")
logger.info(f"Best model total accuracy: {accuracy:.4f}")
logger.info(f"Best model eval loss: {format_integers(logloss)}")
logger.info(f"Best model total accuracy: {format_integers(accuracy)}")
logger.info(f"Best model parameters: {best_trial.config}")


Expand Down