Render the Response page of the AI model.
This page displays completed analyses and allows users to review responses,
view related metadata, and submit evaluations.
Source code in view/streamlit_app/page_responses.py
(Source listing follows.)
def app():
    """
    Render the Response page of the AI model.

    This page displays completed analyses and allows users to review responses,
    view related metadata, and submit evaluations.

    Side effects:
        Renders Streamlit widgets; POSTs an evaluation to the backend API
        when the user submits one.
    """
    st.title("AI Model Responses")
    st.write("⚠️ Only completed analyses are displayed here. Track the status of your analysis in the status menu.")

    # Fetch the status dataframe and keep only successfully completed analyses.
    df = check_status()
    if df.empty:
        st.info("No completed analyses available.")
        return

    df = df[df['Result'] == '✅']
    # Status keys map 1:1 onto response keys by swapping the key prefix.
    total_response = [
        identifier.replace('status', 'response')
        for identifier in df["Identifier"].to_list()
    ]
    if not total_response:
        st.warning(f"No keys found for the user: {st.session_state.get('user', 'unknown')}")
        return

    rag_redis_key = st.selectbox(
        label='Select a key or enter the number to view the response',
        options=total_response
    )

    base_url = f'http://{API_HOST}:{API_PORT}/response'

    # Main model answer is rendered as plain text/markdown.
    st.markdown("## Model Response")
    response_data = fetch_api_data(f'{base_url}/{rag_redis_key}')
    st.write(response_data)

    # Metadata sections share one rendering pattern (title + collapsed JSON),
    # so drive them from a (title, endpoint-segment) table instead of
    # repeating the fetch/display code five times.
    metadata_sections = [
        ("Tokens", "use"),
        ("Context", "context"),
        ("Files Used", "files"),
        ("Detailed Prompt", "messages"),
        ("Detailed Response", "detail"),
    ]
    for title, endpoint in metadata_sections:
        st.markdown(f"## {title}")
        section_data = fetch_api_data(f'{base_url}/{endpoint}/{rag_redis_key}')
        st.json(section_data, expanded=False)

    st.markdown("## Response Evaluation")
    evaluation_data = fetch_api_data(f'{base_url}/evaluation/{rag_redis_key}')
    st.json(evaluation_data, expanded=False)

    # NOTE(review): assumes fetch_api_data returns a dict here — if the API
    # can return None/non-dict, this .get() would raise; verify upstream.
    # An evaluation of 0 means "not yet evaluated", so the form is shown.
    if evaluation_data.get("evaluation") == 0:
        evaluation_score = st.slider(
            label="Evaluate the Model Response:",
            min_value=0,
            max_value=5,
            step=1,
            help="0 - Not Evaluated, 1 - Nonsensical, 2 - Very Incomplete, 3 - Incomplete, 4 - Acceptable, 5 - Complete",
            key="evaluation_slider"
        )
        observation = st.text_area(
            label="What would be the expected response? (Required for ratings 1 to 3)",
            key="evaluation_observation",
        )
        if st.button("Submit Evaluation"):
            # Low ratings (1-3) must be justified with an observation.
            if evaluation_score in (1, 2, 3) and not observation.strip():
                st.error("Observation is required for evaluations rated 1 to 3.")
            else:
                url_post = f'{base_url}/evaluation/{rag_redis_key}'
                params = {
                    "evaluation": int(evaluation_score),
                    "observation": observation
                }
                try:
                    # Explicit timeout so a stalled API cannot hang the
                    # Streamlit session indefinitely (requests has no
                    # default timeout).
                    evaluation_post = requests.post(url_post, json=params, timeout=30)
                    evaluation_post.raise_for_status()
                    st.success("Evaluation Submitted Successfully!")
                except requests.RequestException as e:
                    st.error(f"Failed to submit evaluation: {e}")