import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
from second import double_main
from multiple import multiple_main
from multiple import display_story_points_stats
from jira_integration import render_jira_login, JIRA_SERVER
from weekly import generate_weekly_report
from pre import preprocess_uploaded_file, add_app_description
from multi_env_compare import multi_env_compare_main
import multiple_env_loader
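
# Streamlit dashboard for automated test scenario results: single-run analysis,
# two-run comparison, weekly reports, and multi-environment comparison, with an
# optional Jira login and sprint-progress panel in the sidebar.
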
def single_main(uploaded_file):
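    """Render the single-file report view.

    Expects the uploaded CSV/XLSX (after preprocess_uploaded_file) to contain at
    least the columns used below: 'Status', 'Functional area', 'Scenario Name',
    'Time spent', 'Time spent(m:s)', 'Start datetime', and, for failures,
    'Error Message' and optionally 'Failed Step'.
    """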
    if uploaded_file is not None:
        # Process the file with header
        data = preprocess_uploaded_file(uploaded_file)
        # Display debugging information
        st.write("Data shape:", data.shape)
        st.write("Unique functional areas:", data['Functional area'].nunique())
        st.write("Sample of data:", data.head())
        # Display scenarios with status "failed" grouped by functional area
        failed_scenarios = data[data['Status'] == 'FAILED']
        passed_scenarios = data[data['Status'] == 'PASSED']
        # Display total count of failures
        fail_count = len(failed_scenarios)
        st.markdown(f"Failing scenarios Count: {fail_count}")
        # Display total count of passes
        pass_count = len(passed_scenarios)
        st.markdown(f"Passing scenarios Count: {pass_count}")
        # Use radio buttons for selecting status
        selected_status = st.radio("Select a status", ['Failed', 'Passed'])
        # Determine which scenarios to display based on selected status
        if selected_status == 'Failed':
            unique_areas = np.append(failed_scenarios['Functional area'].unique(), "All")
            selected_scenarios = failed_scenarios
        elif selected_status == 'Passed':
            unique_areas = np.append(passed_scenarios['Functional area'].unique(), "All")
            selected_scenarios = passed_scenarios
        else:
            selected_scenarios = None
        if selected_scenarios is not None:
            # st.write(f"Scenarios with status '{selected_status}' grouped by functional area:")
            st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")
            # Display count of unique functional areas
            # st.write(f"Number of unique functional areas: {len(unique_areas) - 1}")  # Subtract 1 for "All"
            # Select a range of functional areas to filter scenarios
            selected_functional_areas = st.multiselect("Select functional areas", unique_areas, ["All"])
            if "All" in selected_functional_areas:
                filtered_scenarios = selected_scenarios
            else:
                filtered_scenarios = selected_scenarios[selected_scenarios['Functional area'].isin(selected_functional_areas)]
            if not selected_functional_areas:  # Check if the list is empty
                st.error("Please select at least one functional area.")
            else:
                # Display count of filtered scenarios
                st.write(f"Number of filtered scenarios: {len(filtered_scenarios)}")
                # Calculate the average time spent for each functional area
                average_time_spent_seconds = filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()
                # Convert average time spent from seconds to minutes and seconds format
                average_time_spent_seconds['Time spent'] = pd.to_datetime(average_time_spent_seconds['Time spent'], unit='s').dt.strftime('%M:%S')
                # Group by functional area and get the start datetime for sorting
                start_datetime_group = filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()
                # Merge average_time_spent_seconds and start_datetime_group
                average_time_spent_seconds = average_time_spent_seconds.merge(start_datetime_group, on='Functional area')
                # Build the per-area scenario table for the selected status
                if selected_status == 'Failed':
                    # Check if the Failed Step column exists
                    if 'Failed Step' in filtered_scenarios.columns:
                        grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[['Scenario Name', 'Error Message', 'Failed Step', 'Time spent(m:s)']].apply(lambda x: x.reset_index(drop=True))
                    else:
                        grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[['Scenario Name', 'Error Message', 'Time spent(m:s)']].apply(lambda x: x.reset_index(drop=True))
                elif selected_status == 'Passed':
                    grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[['Scenario Name', 'Time spent(m:s)']].apply(lambda x: x.reset_index(drop=True))
                else:
                    grouped_filtered_scenarios = None
                grouped_filtered_scenarios.reset_index(inplace=True)
                # Only drop 'level_1' if it exists in the DataFrame
                if 'level_1' in grouped_filtered_scenarios.columns:
                    grouped_filtered_scenarios.drop(columns=['level_1'], inplace=True)
                grouped_filtered_scenarios.index = grouped_filtered_scenarios.index + 1
                st.dataframe(grouped_filtered_scenarios)
                # Sort the average time spent table by start datetime
                average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')
                # Display average time spent on each functional area in a table
                st.markdown("### Average Time Spent on Each Functional Area")
                average_time_spent_seconds.index = average_time_spent_seconds.index + 1
                st.dataframe(average_time_spent_seconds)
                # Only draw the bar graph for failed scenarios, and only when the table is small enough (<= 400 rows) to plot
                if selected_status != 'Passed' and len(grouped_filtered_scenarios) <= 400:
                    # Create and display a bar graph of errors by functional area
                    st.write(f"### Bar graph showing number of '{selected_status}' scenarios in each functional area:")
                    error_counts = grouped_filtered_scenarios['Functional area'].value_counts()
                    # Only create the graph if there are errors to display
                    if not error_counts.empty:
                        plt.figure(figsize=(10, 6))
                        plt.bar(error_counts.index, error_counts.values)
                        plt.xlabel('Functional Area')
                        plt.ylabel('Number of Failures')
                        plt.title(f"Number of '{selected_status}' scenarios by Functional Area")
                        plt.xticks(rotation=45, ha='right')
                        # Set y-axis limits and ticks for a consistent interval of 1
                        y_max = max(error_counts.values) + 1
                        plt.ylim(0, y_max)
                        plt.yticks(range(0, y_max, 1))
                        # Display the individual count above each bar
                        for i, count in enumerate(error_counts.values):
                            plt.text(i, count, str(count), ha='center', va='bottom')
                        plt.tight_layout()  # Adjust layout so rotated labels fit
                        st.pyplot(plt)
                    else:
                        st.info(f"No '{selected_status}' scenarios found to display in the graph.")
        else:
            st.write("### No scenarios with status 'failed' found.")
def main():
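    """App entry point: initialize session state, render the sidebar (mode
    selection, optional Jira login, sprint progress), then dispatch to the view
    for the selected mode.
    """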
    add_app_description()
    # --- Centralized Sidebar Initialization ---
    # Initialize session state for Jira and sprint data if it doesn't exist
    if 'jira_server' not in st.session_state:
        st.session_state.jira_server = JIRA_SERVER
    if 'is_authenticated' not in st.session_state:
        st.session_state.is_authenticated = False  # Start as not authenticated
    if 'jira_client' not in st.session_state:
        st.session_state.jira_client = None
    if 'sprint_data_initialized' not in st.session_state:
        st.session_state.sprint_data_initialized = False
    if 'force_sprint_refresh' not in st.session_state:
        st.session_state.force_sprint_refresh = False
    if 'sprint_data_cache' not in st.session_state:
        st.session_state.sprint_data_cache = None
    if 'last_sprint_fetch' not in st.session_state:
        st.session_state.last_sprint_fetch = None
    # Initialize session state for mode if it doesn't exist
    if "mode" not in st.session_state:
        st.session_state["mode"] = "multi"
    # --- Sidebar Rendering ---
    with st.sidebar:
        # Mode selection (kept in sidebar)
        selected_mode = st.selectbox(
            "Select Mode",
            ["Multi", "Compare", "Weekly", "Multi-Env Compare", "Auto Environment Loader"],
            index=["Multi", "Compare", "Weekly", "Multi-Env Compare", "Auto Environment Loader"].index(st.session_state.get("selected_mode", "Multi"))
        )
        # Update the session state with the new selection
        st.session_state["selected_mode"] = selected_mode
        st.session_state["mode"] = selected_mode.lower()
        mode_display = f'## Current mode: {st.session_state["mode"].title()} mode'
        st.markdown(mode_display)
        st.markdown("---")  # Separator
        # Jira login expander (always shown)
        with st.expander("Jira Integration (Optional)", expanded=True):
            # Render login - the function handles checking whether already authenticated.
            # It updates st.session_state.is_authenticated and st.session_state.jira_client
            st.session_state.is_authenticated = render_jira_login()
        # Sprint Progress expander (shown only if authenticated)
        if st.session_state.is_authenticated and st.session_state.jira_client:
            st.markdown("---")  # Separator
            with st.expander("Sprint Progress", expanded=True):
                # Refresh button
                if st.button("🔄 Refresh Sprint Data", key="refresh_sprint_sidebar_app"):
                    st.session_state.force_sprint_refresh = True
                # Always call display (it handles caching), passing the manual refresh flag
                display_story_points_stats(force_refresh=st.session_state.force_sprint_refresh)
                # Reset the manual refresh flag after use
                st.session_state.force_sprint_refresh = False
    # Initialize session state for the selectbox widget
    if "selected_mode" not in st.session_state:
        st.session_state["selected_mode"] = "Multi"
    # --- Main Page Content based on Mode ---
    if st.session_state["mode"] == "multi":
        multiple_main()
    elif st.session_state["mode"] == "compare":
        st.sidebar.markdown("### Upload Files for Comparison")
        # Move file uploaders to the main page area if needed, or keep them in the sidebar below Jira?
        # For now, keeping them in the sidebar as before.
        upload_option = st.sidebar.radio("Upload method", ["Single uploader", "Two separate uploaders"], key="compare_upload_method")
        if upload_option == "Single uploader":
            uploaded_files = st.sidebar.file_uploader("Upload CSV or XLSX files for comparison", type=["csv", "xlsx"], accept_multiple_files=True)
            if uploaded_files:
                if len(uploaded_files) < 2:
                    st.warning("Please upload at least two files for comparison.")
                elif len(uploaded_files) > 2:
                    st.warning("More than two files uploaded. Only the first two will be used for comparison.")
                else:
                    with st.spinner('Processing...'):
                        double_main(uploaded_files[0], uploaded_files[1])
                    st.success('Comparison Complete!')
        else:
            col1, col2 = st.sidebar.columns(2)
            with col1:
                uploaded_file1 = st.file_uploader("Upload older CSV/XLSX file", type=["csv", "xlsx"], key="file1")
            with col2:
                uploaded_file2 = st.file_uploader("Upload newer CSV/XLSX file", type=["csv", "xlsx"], key="file2")
            if uploaded_file1 is not None and uploaded_file2 is not None:
                with st.spinner('Processing...'):
                    double_main(uploaded_file1, uploaded_file2)
                st.success('Comparison Complete!')
            elif uploaded_file1 is not None or uploaded_file2 is not None:
                st.warning("Please upload both files for comparison.")
    elif st.session_state["mode"] == "weekly":
        uploaded_files = st.sidebar.file_uploader("Upload CSV or XLSX files for Weekly Report", type=["csv", "xlsx"], accept_multiple_files=True)
        if uploaded_files:
            generate_weekly_report(uploaded_files)
    elif st.session_state["mode"] == "multi-env compare":
        multi_env_compare_main()
    elif st.session_state["mode"] == "auto environment loader":
        # Launch the auto environment loader workflow
        multiple_env_loader.main()
if __name__ == "__main__":
    main()
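
# To run locally (assuming this module is saved as app.py, which is not confirmed
# by this file alone):
#   streamlit run app.py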