@@ -9,6 +9,12 @@
 import plotly.graph_objects as go
 from plotly.subplots import make_subplots

+import warnings
+warnings.filterwarnings(
+    "ignore",
+    message="JULIA_SYSIMAGE_DIFFEQTORCH not set"
+)
+
 BASE = Path(__file__).resolve().parent


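A note on the new filter: `warnings.filterwarnings` treats `message` as a regular expression that must match the start of the warning text, so this silences only the `JULIA_SYSIMAGE_DIFFEQTORCH` notice (emitted when no prebuilt Julia sysimage is configured for the diffeqtorch-backed sbibm tasks) and leaves other warnings visible. A minimal self-contained sketch of that behaviour; the second warning string is made up for illustration:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")  # record every warning inside this context
        # "message" is a regex matched against the beginning of the warning text
        warnings.filterwarnings("ignore", message="JULIA_SYSIMAGE_DIFFEQTORCH not set")
        warnings.warn("JULIA_SYSIMAGE_DIFFEQTORCH not set; using slower path")  # suppressed
        warnings.warn("unrelated warning")                                      # still recorded
        print([str(w.message) for w in caught])  # -> ['unrelated warning']
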
@@ -415,7 +421,6 @@ def plot_benchmark_results_plotly(df):

 problem_names = sbibm.get_available_tasks()
 problem_names_nice = np.array([sbibm.get_task(p).name_display for p in problem_names])
-task_name_nice = problem_names_nice.copy()
 problem_dim = [sbibm.get_task(p).dim_parameters for p in problem_names]
 data_dim = [sbibm.get_task(p).dim_data for p in problem_names]

@@ -457,8 +462,8 @@ def plot_benchmark_results_plotly(df):
 shown_labels = set()

 for plot_idx, problem_idx in enumerate(problem_order):
-    col = plot_idx // (n_problems // 2) + 1
-    row = plot_idx % (n_problems // 2) + 1
+    row = plot_idx // 2 + 1
+    col = plot_idx % 2 + 1

     task_name = problem_names[problem_idx]
     subset = df[df['problem'] == task_name]
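
The change above switches the subplot placement from a column-major fill (the first half of the problems ran down column 1) to a row-major fill of a fixed two-column grid, which no longer depends on `n_problems`. A small sketch of the new 1-based indexing that Plotly's `add_trace(..., row=..., col=...)` expects; the problem count of 6 is illustrative only:

    # Row-major placement in a 2-column subplot grid (1-based indices).
    n_problems = 6  # illustrative count, not taken from the benchmark
    for plot_idx in range(n_problems):
        row = plot_idx // 2 + 1
        col = plot_idx % 2 + 1
        print(plot_idx, (row, col))
    # 0 -> (1, 1), 1 -> (1, 2), 2 -> (2, 1), 3 -> (2, 2), 4 -> (3, 1), 5 -> (3, 2)
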
@@ -505,7 +510,7 @@ def plot_benchmark_results_plotly(df):
     '<extra></extra>'
 ),
 customdata=[[
-    task_name_nice[problem_idx],
+    problem_names_nice[problem_idx],
     get_model_name_plotly(model_key),
     get_sampler_name(sampler),
     model_data['std'].iloc[0]
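
With the redundant `task_name_nice` alias removed in the earlier hunk, the hover data now reads from `problem_names_nice` directly. For context, Plotly looks these per-point `customdata` entries up from the trace's `hovertemplate` via `%{customdata[i]}`, and the `<extra></extra>` tag hides the secondary trace-name box. A minimal standalone sketch of that pattern; all labels and values below are invented for illustration:

    import plotly.graph_objects as go

    fig = go.Figure(
        go.Scatter(
            x=[1, 2],
            y=[0.52, 0.71],
            customdata=[["Task A", "Model X"], ["Task B", "Model Y"]],  # one row per point
            hovertemplate=(
                "Task: %{customdata[0]}<br>"
                "Model: %{customdata[1]}<br>"
                "Metric: %{y:.2f}"
                "<extra></extra>"  # hide the default trace-name box
            ),
        )
    )
    fig.show()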