Skip to content

Commit

Permalink
Update gui.py
Browse files Browse the repository at this point in the history
Added processing of multiple metrics to the UI > Evaluation page
  • Loading branch information
fabriziosalmi authored Jun 29, 2024
1 parent 38b3b2e commit 4d168c9
Showing 1 changed file with 48 additions and 2 deletions.
50 changes: 48 additions & 2 deletions gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import subprocess
import json
import pandas as pd

import glob
import yaml
from config import load_config, save_configuration
from logging_setup import setup_logging
Expand Down Expand Up @@ -88,6 +88,44 @@ def display_report():
else:
st.error("Evaluation report not found.")

# Function to execute the process_multiple_metrics.py script
def process_multiple_metrics():
    """Run the sibling process_multiple_metrics.py script and report the outcome.

    Looks for the script next to this file, runs it as a subprocess, and
    surfaces its stdout (on success) or stderr (on failure) in the UI.
    """
    import sys  # local import: only needed here to locate the running interpreter

    script_path = os.path.join(os.path.dirname(__file__), "process_multiple_metrics.py")
    if os.path.exists(script_path):
        # Use sys.executable rather than the bare 'python' name so the script
        # runs under the same interpreter/virtualenv as this GUI process.
        result = subprocess.run([sys.executable, script_path], capture_output=True, text=True)
        if result.returncode == 0:
            st.success("Script executed successfully!")
            st.text(result.stdout)
        else:
            st.error("Script execution failed.")
            st.text(result.stderr)
    else:
        st.error("process_multiple_metrics.py script not found.")

# Function to display the process multiple metrics reports from JSON
def display_multiple_metrics():
    """Render each merged-metrics JSON report from the 'rewritten' directory.

    Every '*_rewritten_metrics_merged.json' file is loaded and displayed as a
    two-column (Metric, Value) table under its own subheader.
    """
    rewritten_dir = os.path.join(os.path.dirname(__file__), "rewritten")
    # sorted() makes the report order deterministic; raw glob order is
    # filesystem-dependent and can differ between platforms/runs.
    json_files = sorted(glob.glob(os.path.join(rewritten_dir, "*_rewritten_metrics_merged.json")))

    if not json_files:
        st.error("No merged metrics JSON files found in the rewritten directory.")
        return

    for json_file in json_files:
        # Explicit encoding avoids platform-default decoding surprises.
        with open(json_file, "r", encoding="utf-8") as file:
            data = json.load(file)
        st.subheader(f"Metrics from {os.path.basename(json_file)}")

        # Flatten the top-level mapping into rows; nested dicts are rendered
        # as their JSON text so the table stays two-dimensional.
        metrics_data = []
        for key, value in data.items():
            if isinstance(value, dict):
                value = json.dumps(value)  # Convert dictionaries to strings
            metrics_data.append({'Metric': key, 'Value': value})

        df = pd.DataFrame(metrics_data)
        st.dataframe(df)  # Display the DataFrame as a table

# Initialize session state: seed the editable config on the first run only,
# so user edits in the UI survive Streamlit reruns.
if 'config_data' not in st.session_state:
    st.session_state.config_data = config
Expand Down Expand Up @@ -563,7 +601,15 @@ def display_system_info():
# Evaluate Page to execute the evaluate_against_reference.py script
# (reconstructed: the pasted diff interleaved the removed "Run Evaluation
# Script" button line with the new code, and indentation was lost)
if selected == "Evaluate":
    st.title("Evaluate")
    st.markdown("Evaluation metrics: You can generate metrics for comparison against reference and metrics for the generated content only.")
    st.divider()
    # Reference-based evaluation: run the script, then show its report.
    if st.button("Evaluate Against Reference"):
        evaluate_script()
    # NOTE(review): report display sits outside the button branch so it
    # persists across Streamlit reruns — confirm against the full file.
    st.write("Evaluation report:")
    display_report()
    st.divider()
    # Standalone metrics for the generated content only.
    if st.button("Process Multiple Metrics"):
        process_multiple_metrics()

    st.write("Multiple Metrics report:")
    display_multiple_metrics()

0 comments on commit 4d168c9

Please sign in to comment.