Make dashboard nicer

This commit is contained in:
Tobias Hölzer 2025-11-09 01:58:47 +01:00
parent b2cfddfead
commit 553b54bb32
2 changed files with 135 additions and 113 deletions

View file

@@ -90,6 +90,6 @@ def get_train_dataset_file(grid: Literal["hex", "healpix"], level: int) -> Path:
def get_cv_results_dir(name: str, grid: Literal["hex", "healpix"], level: int) -> Path:
    """Create and return a timestamped results directory for a CV run.

    Args:
        name: Experiment name embedded in the directory name.
        grid: Grid type, either ``"hex"`` or ``"healpix"``.
        level: Grid resolution level, combined with ``grid`` by ``_get_gridname``.

    Returns:
        Path to the created (or pre-existing) results directory under RESULTS_DIR.
    """
    gridname = _get_gridname(grid, level)
    # Timestamp keeps repeated runs from clobbering each other's results.
    now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    # Removed the dead duplicate assignment (non-"_binary" path was immediately
    # overwritten). NOTE(review): "_binary" presumably marks binary-classification
    # runs — confirm against the training pipeline.
    results_dir = RESULTS_DIR / f"{gridname}_{name}_cv{now}_binary"
    results_dir.mkdir(parents=True, exist_ok=True)
    return results_dir

View file

@@ -862,28 +862,37 @@ def main():
st.metric("Best Model Index (by F1)", f"#{best_f1_idx}")
# Show best parameters for the best model
with st.expander("Best Model Parameters (by F1)", expanded=True):
best_params = results.loc[best_f1_idx, ["initial_K", "eps_cl", "eps_e", "mean_test_f1", "std_test_f1"]]
col1, col2, col3, col4, col5 = st.columns(5)
best_params = results.loc[best_f1_idx, ["initial_K", "eps_cl", "eps_e", "mean_test_f1", "std_test_f1"]]
with st.container(border=True):
st.subheader(":abacus: Best Model Parameters")
st.caption("Parameters of retrained best model (selected by F1 score)")
col1, col2, col3 = st.columns(3)
with col1:
st.metric("initial_K", f"{best_params['initial_K']:.0f}")
with col2:
st.metric("eps_cl", f"{best_params['eps_cl']:.2e}")
with col3:
st.metric("eps_e", f"{best_params['eps_e']:.2e}")
with col4:
st.metric("F1 Score", f"{best_params['mean_test_f1']:.4f}")
with col5:
st.metric("F1 Std", f"{best_params['std_test_f1']:.4f}")
# Show all metrics
available_metrics = ["accuracy", "recall", "precision", "f1", "jaccard"]
cols = st.columns(len(available_metrics))
# Show all metrics for the best model in a container
st.subheader(":bar_chart: Performance Across All Metrics")
st.caption("Complete performance profile of the best model (selected by F1 score)")
for idx, metric in enumerate(available_metrics):
with cols[idx]:
best_score = results.loc[best_f1_idx, f"mean_test_{metric}"]
st.metric(f"{metric.capitalize()}", f"{best_score:.4f}")
available_metrics = ["accuracy", "recall", "precision", "f1", "jaccard"]
cols = st.columns(len(available_metrics))
for idx, metric in enumerate(available_metrics):
with cols[idx]:
best_score = results.loc[best_f1_idx, f"mean_test_{metric}"]
best_std = results.loc[best_f1_idx, f"std_test_{metric}"]
# Highlight F1 since that's what we optimized for
st.metric(
f"{metric.capitalize()}",
f"{best_score:.4f}",
delta=f"±{best_std:.4f}",
help="Mean ± std across cross-validation folds",
)
# Create tabs for different visualizations
tab1, tab2, tab3 = st.tabs(["Search Results", "Model State", "Inference Analysis"])
@@ -905,101 +914,110 @@ def main():
st.header(f"Visualization for {selected_metric.capitalize()}")
# K-binned plot configuration
col_toggle, col_slider = st.columns([1, 1])
@st.fragment
def render_k_binned_plots():
col_toggle, col_slider = st.columns([1, 1])
with col_toggle:
# Percentile normalization toggle for K-binned plots
use_percentile = st.toggle(
"Use Percentile Normalization",
value=True,
help="Apply percentile-based color normalization to K-binned parameter space plots",
)
with col_slider:
# Bin width slider for K-binned plots
k_min = int(results["initial_K"].min())
k_max = int(results["initial_K"].max())
k_range = k_max - k_min
k_bin_width = st.slider(
"Initial K Bin Width",
min_value=10,
max_value=max(100, k_range // 2),
value=40,
step=10,
help=f"Width of bins for initial_K facets (range: {k_min}-{k_max})",
)
# Show estimated number of bins
estimated_bins = int(np.ceil(k_range / k_bin_width))
st.caption(f"Creating approximately {estimated_bins} bins for initial_K")
# Reload data if bin width changed from default
if k_bin_width != 40:
with st.spinner("Re-binning data..."):
results = load_and_prepare_results(
results_dir / "search_results.parquet", settings, k_bin_width=k_bin_width
with col_toggle:
# Percentile normalization toggle for K-binned plots
use_percentile = st.toggle(
"Use Percentile Normalization",
value=True,
help="Apply percentile-based color normalization to K-binned parameter space plots",
)
# K-binned plots
col1, col2 = st.columns(2)
with col_slider:
# Bin width slider for K-binned plots
k_min = int(results["initial_K"].min())
k_max = int(results["initial_K"].max())
k_range = k_max - k_min
with col1:
st.subheader("K-Binned Parameter Space (Mean)")
with st.spinner("Generating mean plot..."):
if use_percentile:
chart1 = _plot_k_binned(results, f"mean_test_{selected_metric}", vmin_percentile=50)
else:
chart1 = _plot_k_binned(results, f"mean_test_{selected_metric}")
st.altair_chart(chart1, use_container_width=True)
k_bin_width = st.slider(
"Initial K Bin Width",
min_value=10,
max_value=max(100, k_range // 2),
value=40,
step=10,
help=f"Width of bins for initial_K facets (range: {k_min}-{k_max})",
)
with col2:
st.subheader("K-Binned Parameter Space (Std)")
with st.spinner("Generating std plot..."):
if use_percentile:
chart2 = _plot_k_binned(results, f"std_test_{selected_metric}", vmax_percentile=50)
else:
chart2 = _plot_k_binned(results, f"std_test_{selected_metric}")
st.altair_chart(chart2, use_container_width=True)
# Show estimated number of bins
estimated_bins = int(np.ceil(k_range / k_bin_width))
st.caption(f"Creating approximately {estimated_bins} bins for initial_K")
# Epsilon-binned plots
col1, col2 = st.columns(2)
# Reload data if bin width changed from default
results_binned = results
if k_bin_width != 40:
with st.spinner("Re-binning data..."):
results_binned = load_and_prepare_results(
results_dir / "search_results.parquet", settings, k_bin_width=k_bin_width
)
with col1:
st.subheader("K vs eps_cl")
with st.spinner("Generating eps_cl plot..."):
chart3 = _plot_eps_binned(results, "eps_cl", f"mean_test_{selected_metric}")
st.altair_chart(chart3, use_container_width=True)
# K-binned plots
col1, col2 = st.columns(2)
with col2:
st.subheader("K vs eps_e")
with st.spinner("Generating eps_e plot..."):
chart4 = _plot_eps_binned(results, "eps_e", f"mean_test_{selected_metric}")
st.altair_chart(chart4, use_container_width=True)
with col1:
st.subheader("K-Binned Parameter Space (Mean)")
with st.spinner("Generating mean plot..."):
if use_percentile:
chart1 = _plot_k_binned(results_binned, f"mean_test_{selected_metric}", vmin_percentile=50)
else:
chart1 = _plot_k_binned(results_binned, f"mean_test_{selected_metric}")
st.altair_chart(chart1, use_container_width=True)
with col2:
st.subheader("K-Binned Parameter Space (Std)")
with st.spinner("Generating std plot..."):
if use_percentile:
chart2 = _plot_k_binned(results_binned, f"std_test_{selected_metric}", vmax_percentile=50)
else:
chart2 = _plot_k_binned(results_binned, f"std_test_{selected_metric}")
st.altair_chart(chart2, use_container_width=True)
# Epsilon-binned plots
col1, col2 = st.columns(2)
with col1:
st.subheader("K vs eps_cl")
with st.spinner("Generating eps_cl plot..."):
chart3 = _plot_eps_binned(results_binned, "eps_cl", f"mean_test_{selected_metric}")
st.altair_chart(chart3, use_container_width=True)
with col2:
st.subheader("K vs eps_e")
with st.spinner("Generating eps_e plot..."):
chart4 = _plot_eps_binned(results_binned, "eps_e", f"mean_test_{selected_metric}")
st.altair_chart(chart4, use_container_width=True)
render_k_binned_plots()
# Metric comparison plots
st.header("Metric Comparisons")
# Color parameter selection
color_param = st.selectbox(
"Select Color Parameter",
options=["initial_K", "eps_cl", "eps_e"],
help="Choose which parameter to use for coloring the scatter plots",
)
@st.fragment
def render_metric_comparisons():
# Color parameter selection
color_param = st.selectbox(
"Select Color Parameter",
options=["initial_K", "eps_cl", "eps_e"],
help="Choose which parameter to use for coloring the scatter plots",
)
col1, col2 = st.columns(2)
col1, col2 = st.columns(2)
with col1:
st.subheader("Recall vs Precision")
with st.spinner("Generating Recall vs Precision plot..."):
recall_precision_chart = _plot_metric_comparison(results, "precision", "recall", color_param)
st.altair_chart(recall_precision_chart, use_container_width=True)
with col1:
st.subheader("Recall vs Precision")
with st.spinner("Generating Recall vs Precision plot..."):
recall_precision_chart = _plot_metric_comparison(results, "precision", "recall", color_param)
st.altair_chart(recall_precision_chart, use_container_width=True)
with col2:
st.subheader("Accuracy vs Jaccard")
with st.spinner("Generating Accuracy vs Jaccard plot..."):
accuracy_jaccard_chart = _plot_metric_comparison(results, "accuracy", "jaccard", color_param)
st.altair_chart(accuracy_jaccard_chart, use_container_width=True)
with col2:
st.subheader("Accuracy vs Jaccard")
with st.spinner("Generating Accuracy vs Jaccard plot..."):
accuracy_jaccard_chart = _plot_metric_comparison(results, "accuracy", "jaccard", color_param)
st.altair_chart(accuracy_jaccard_chart, use_container_width=True)
render_metric_comparisons()
# Optional: Raw data table
with st.expander("View Raw Results Data"):
@@ -1030,26 +1048,30 @@ def main():
st.subheader("Feature Importance")
st.markdown("The most important features based on learned feature weights from the best estimator.")
# Slider to control number of features to display
top_n = st.slider(
"Number of top features to display",
min_value=5,
max_value=50,
value=10,
step=5,
help="Select how many of the most important features to visualize",
)
@st.fragment
def render_feature_importance():
# Slider to control number of features to display
top_n = st.slider(
"Number of top features to display",
min_value=5,
max_value=50,
value=10,
step=5,
help="Select how many of the most important features to visualize",
)
with st.spinner("Generating feature importance plot..."):
feature_chart = _plot_top_features(model_state, top_n=top_n)
st.altair_chart(feature_chart, use_container_width=True)
with st.spinner("Generating feature importance plot..."):
feature_chart = _plot_top_features(model_state, top_n=top_n)
st.altair_chart(feature_chart, use_container_width=True)
st.markdown(
"""
**Interpretation:**
- **Magnitude**: Larger absolute values indicate more important features
"""
)
st.markdown(
"""
**Interpretation:**
- **Magnitude**: Larger absolute values indicate more important features
"""
)
render_feature_importance()
# Box-to-Label Assignment Visualization
st.subheader("Box-to-Label Assignments")