Make dashboard nicer

Author: Tobias Hölzer, 2025-11-09 01:58:47 +01:00
Parent: b2cfddfead
Commit: 553b54bb32
2 changed files with 135 additions and 113 deletions

File 1 of 2:

@@ -90,6 +90,6 @@ def get_train_dataset_file(grid: Literal["hex", "healpix"], level: int) -> Path:
def get_cv_results_dir(name: str, grid: Literal["hex", "healpix"], level: int) -> Path:
gridname = _get_gridname(grid, level)
now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
results_dir = RESULTS_DIR / f"{gridname}_{name}_cv{now}"
results_dir = RESULTS_DIR / f"{gridname}_{name}_cv{now}_binary"
results_dir.mkdir(parents=True, exist_ok=True)
return results_dir
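
For context, a minimal sketch of the directory name the updated helper now produces with the added "_binary" suffix; the results root, grid name, and model name below are hypothetical stand-ins, not values taken from this repository.

import datetime
from pathlib import Path

RESULTS_DIR = Path("results")                      # assumed results root
gridname, name = "healpix5", "kmeans"              # hypothetical grid/model names
now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
print(RESULTS_DIR / f"{gridname}_{name}_cv{now}_binary")
# e.g. results/healpix5_kmeans_cv20251109-015847_binary
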

File 2 of 2:

@@ -862,28 +862,37 @@ def main():
st.metric("Best Model Index (by F1)", f"#{best_f1_idx}")
# Show best parameters for the best model
with st.expander("Best Model Parameters (by F1)", expanded=True):
best_params = results.loc[best_f1_idx, ["initial_K", "eps_cl", "eps_e", "mean_test_f1", "std_test_f1"]]
col1, col2, col3, col4, col5 = st.columns(5)
with st.container(border=True):
st.subheader(":abacus: Best Model Parameters")
st.caption("Parameters of retrained best model (selected by F1 score)")
col1, col2, col3 = st.columns(3)
with col1:
st.metric("initial_K", f"{best_params['initial_K']:.0f}")
with col2:
st.metric("eps_cl", f"{best_params['eps_cl']:.2e}")
with col3:
st.metric("eps_e", f"{best_params['eps_e']:.2e}")
with col4:
st.metric("F1 Score", f"{best_params['mean_test_f1']:.4f}")
with col5:
st.metric("F1 Std", f"{best_params['std_test_f1']:.4f}")
# Show all metrics
# Show all metrics for the best model in a container
st.subheader(":bar_chart: Performance Across All Metrics")
st.caption("Complete performance profile of the best model (selected by F1 score)")
available_metrics = ["accuracy", "recall", "precision", "f1", "jaccard"]
cols = st.columns(len(available_metrics))
for idx, metric in enumerate(available_metrics):
with cols[idx]:
best_score = results.loc[best_f1_idx, f"mean_test_{metric}"]
st.metric(f"{metric.capitalize()}", f"{best_score:.4f}")
best_std = results.loc[best_f1_idx, f"std_test_{metric}"]
# Highlight F1 since that's what we optimized for
st.metric(
f"{metric.capitalize()}",
f"{best_score:.4f}",
delta=f"±{best_std:.4f}",
help="Mean ± std across cross-validation folds",
)
# Create tabs for different visualizations
tab1, tab2, tab3 = st.tabs(["Search Results", "Model State", "Inference Analysis"])
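
The metrics block above now surfaces the cross-validation spread through st.metric's delta and help arguments rather than a separate "F1 Std" column. A minimal standalone sketch of that pattern, with made-up fold statistics:

import streamlit as st

mean_f1, std_f1 = 0.8123, 0.0217   # hypothetical CV fold statistics for illustration
st.metric(
    "F1",
    f"{mean_f1:.4f}",
    delta=f"±{std_f1:.4f}",         # CV spread rendered as the metric's delta
    help="Mean ± std across cross-validation folds",
)
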
@@ -905,6 +914,8 @@ def main():
st.header(f"Visualization for {selected_metric.capitalize()}")
# K-binned plot configuration
@st.fragment
def render_k_binned_plots():
col_toggle, col_slider = st.columns([1, 1])
with col_toggle:
@@ -935,9 +946,10 @@ def main():
st.caption(f"Creating approximately {estimated_bins} bins for initial_K")
# Reload data if bin width changed from default
results_binned = results
if k_bin_width != 40:
with st.spinner("Re-binning data..."):
results = load_and_prepare_results(
results_binned = load_and_prepare_results(
results_dir / "search_results.parquet", settings, k_bin_width=k_bin_width
)
@@ -948,18 +960,18 @@ def main():
st.subheader("K-Binned Parameter Space (Mean)")
with st.spinner("Generating mean plot..."):
if use_percentile:
chart1 = _plot_k_binned(results, f"mean_test_{selected_metric}", vmin_percentile=50)
chart1 = _plot_k_binned(results_binned, f"mean_test_{selected_metric}", vmin_percentile=50)
else:
chart1 = _plot_k_binned(results, f"mean_test_{selected_metric}")
chart1 = _plot_k_binned(results_binned, f"mean_test_{selected_metric}")
st.altair_chart(chart1, use_container_width=True)
with col2:
st.subheader("K-Binned Parameter Space (Std)")
with st.spinner("Generating std plot..."):
if use_percentile:
chart2 = _plot_k_binned(results, f"std_test_{selected_metric}", vmax_percentile=50)
chart2 = _plot_k_binned(results_binned, f"std_test_{selected_metric}", vmax_percentile=50)
else:
chart2 = _plot_k_binned(results, f"std_test_{selected_metric}")
chart2 = _plot_k_binned(results_binned, f"std_test_{selected_metric}")
st.altair_chart(chart2, use_container_width=True)
# Epsilon-binned plots
@@ -968,18 +980,22 @@ def main():
with col1:
st.subheader("K vs eps_cl")
with st.spinner("Generating eps_cl plot..."):
chart3 = _plot_eps_binned(results, "eps_cl", f"mean_test_{selected_metric}")
chart3 = _plot_eps_binned(results_binned, "eps_cl", f"mean_test_{selected_metric}")
st.altair_chart(chart3, use_container_width=True)
with col2:
st.subheader("K vs eps_e")
with st.spinner("Generating eps_e plot..."):
chart4 = _plot_eps_binned(results, "eps_e", f"mean_test_{selected_metric}")
chart4 = _plot_eps_binned(results_binned, "eps_e", f"mean_test_{selected_metric}")
st.altair_chart(chart4, use_container_width=True)
render_k_binned_plots()
# Metric comparison plots
st.header("Metric Comparisons")
@st.fragment
def render_metric_comparisons():
# Color parameter selection
color_param = st.selectbox(
"Select Color Parameter",
@@ -1001,6 +1017,8 @@ def main():
accuracy_jaccard_chart = _plot_metric_comparison(results, "accuracy", "jaccard", color_param)
st.altair_chart(accuracy_jaccard_chart, use_container_width=True)
render_metric_comparisons()
# Optional: Raw data table
with st.expander("View Raw Results Data"):
st.dataframe(results, width="stretch")
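
The render_k_binned_plots and render_metric_comparisons helpers introduced above are decorated with @st.fragment, so interacting with a widget inside one of them reruns only that plot group instead of the whole dashboard script. A minimal sketch of the pattern, assuming a Streamlit version that provides st.fragment and using a hypothetical slider and output:

import streamlit as st

@st.fragment
def render_demo_section():
    # Moving this slider reruns only this fragment, not the whole script.
    bins = st.slider("Bins", min_value=10, max_value=100, value=40)
    st.write(f"Rendering plots with {bins} bins")

render_demo_section()
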
@@ -1030,6 +1048,8 @@ def main():
st.subheader("Feature Importance")
st.markdown("The most important features based on learned feature weights from the best estimator.")
@st.fragment
def render_feature_importance():
# Slider to control number of features to display
top_n = st.slider(
"Number of top features to display",
@@ -1051,6 +1071,8 @@ def main():
"""
)
render_feature_importance()
# Box-to-Label Assignment Visualization
st.subheader("Box-to-Label Assignments")
st.markdown(