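"""Streamlit app for analysing batch run results exported as CSV.

Single mode summarises one uploaded CSV (failed/passed scenarios per
functional area, average time spent, and a failure bar chart); compare
mode passes two uploaded CSVs to ``double_main`` from ``second.py``.
"""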
import pandas as pd
import streamlit as st
import io
import matplotlib.pyplot as plt
import numpy as np
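# `double_main` (compare mode) and the CSV preprocessing helpers are local project modules.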
from second import double_main
from pre import preprocess_csv, load_data, fill_missing_data


def single_main(uploaded_file):
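    """Render the single-file analysis view for one uploaded CSV report."""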
    # st.title('Single CSV Analyzer')
    # uploaded_file = st.file_uploader("Upload CSV file", type="csv")
    if uploaded_file is not None:
        # Read the upload and run it through the shared CSV preprocessing helpers
        file_contents = uploaded_file.read()
        processed_output = preprocess_csv(file_contents)
        processed_file = io.StringIO(processed_output.getvalue())
        data = load_data(processed_file)
        data = fill_missing_data(data, 4, 0)

        # Parse the timestamps and derive the duration of each scenario in seconds
        data['Start datetime'] = pd.to_datetime(data['Start datetime'], errors='coerce')
        data['End datetime'] = pd.to_datetime(data['End datetime'], errors='coerce')
        data['Time spent'] = (data['End datetime'] - data['Start datetime']).dt.total_seconds()
        # st.write(data)

        # Split scenarios by status so they can be grouped by functional area below
        failed_scenarios = data[data['Status'] == 'FAILED']
        passed_scenarios = data[data['Status'] == 'PASSED']

        # selected_status = st.selectbox("Select a status", ['Failed', 'Passed'])
        # Use radio buttons for selecting status
        selected_status = st.radio("Select a status", ['Failed', 'Passed'])

        # Determine which scenarios to display based on the selected status;
        # "All" is appended last and selected by default via index=len(unique_areas) - 1
        if selected_status == 'Failed':
            unique_areas = np.append(failed_scenarios['Functional area'].unique(), "All")
            selected_scenarios = failed_scenarios
            selected_functional_area = st.selectbox("Select a functional area", unique_areas, index=len(unique_areas) - 1)
        elif selected_status == 'Passed':
            unique_areas = np.append(passed_scenarios['Functional area'].unique(), "All")
            selected_scenarios = passed_scenarios
            selected_functional_area = st.selectbox("Select a functional area", unique_areas, index=len(unique_areas) - 1)
        else:
            # Defensive default; the radio widget only offers the two options above
            selected_scenarios = None

        if selected_scenarios is not None:
            # st.write(f"Scenarios with status '{selected_status}' grouped by functional area:")
            st.markdown(f"### Scenarios with status '{selected_status}' grouped by functional area:")
            # Filter scenarios based on the selected functional area ("All" keeps everything)
            if selected_functional_area != "All":
                filtered_scenarios = selected_scenarios[selected_scenarios['Functional area'] == selected_functional_area]
            else:
                filtered_scenarios = selected_scenarios

            # Calculate the average time spent for each functional area
            average_time_spent_seconds = filtered_scenarios.groupby('Functional area')['Time spent'].mean().reset_index()

            # Convert the average time spent from seconds to an MM:SS string
            # (durations of an hour or more would wrap, since only minutes and seconds are shown)
            average_time_spent_seconds['Time spent'] = pd.to_datetime(average_time_spent_seconds['Time spent'], unit='s').dt.strftime('%M:%S')

            # Group by functional area and take the earliest start datetime for sorting
            start_datetime_group = filtered_scenarios.groupby('Functional area')['Start datetime'].min().reset_index()

            # Merge average_time_spent_seconds and start_datetime_group
            average_time_spent_seconds = average_time_spent_seconds.merge(start_datetime_group, on='Functional area')

            # Show scenario names and error messages grouped by functional area
            grouped_filtered_scenarios = filtered_scenarios.groupby('Functional area')[['Scenario name', 'Error message']].apply(lambda x: x.reset_index(drop=True))
            st.dataframe(grouped_filtered_scenarios)
            # Display total counts of failing and passing scenarios
            fail_count = len(failed_scenarios)
            st.write(f"Failing scenarios count: {fail_count}")
            pass_count = len(passed_scenarios)
            st.write(f"Passing scenarios count: {pass_count}")

            # Sort the average time spent table by start datetime
            average_time_spent_seconds = average_time_spent_seconds.sort_values(by='Start datetime')

            # Display average time spent on each functional area in a table
            st.markdown("### Average Time Spent on Each Functional Area")
            st.dataframe(average_time_spent_seconds)

            # Create and display a bar graph of failures by functional area
            st.write("### Bar graph showing number of failures in each functional area:")
            error_counts = failed_scenarios['Functional area'].value_counts()
            plt.figure(figsize=(10, 6))
            plt.bar(error_counts.index, error_counts.values)
            plt.xlabel('Functional Area')
            plt.ylabel('Number of Errors')
            plt.title('Number of Errors by Functional Area')
            plt.xticks(rotation=45, ha='right')
            plt.tight_layout()  # Adjust the layout so the rotated labels are not clipped
            st.pyplot(plt)
        else:
            st.write("### No scenarios with status 'failed' found.")


def main():
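    """Entry point: switch between single-file and compare modes and dispatch uploads."""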
    st.title('Batch Run CSV Analyser')

    # Default to single-file processing mode on first load
    if "mode" not in st.session_state:
        st.session_state["mode"] = "single"

    mode_display = f'## Current mode: {st.session_state["mode"].title()} mode'
    st.sidebar.markdown(mode_display)

    # Add a button to switch between single and compare modes
    btn_label = "Switch to Compare mode" if st.session_state["mode"] == "single" else "Switch to Single mode"
    if st.sidebar.button(btn_label):
        if st.session_state["mode"] == "single":
            st.session_state["mode"] = "compare"
        else:
            st.session_state["mode"] = "single"

    # Single mode needs one uploader; compare mode shows a second uploader as well
    if st.session_state["mode"] == "single":
        uploaded_file_1 = st.sidebar.file_uploader("Upload CSV file", type="csv")
        if uploaded_file_1 is not None:
            single_main(uploaded_file_1)
    else:
        uploaded_file_1 = st.sidebar.file_uploader("Upload CSV file 1", type="csv")
        uploaded_file_2 = st.sidebar.file_uploader("Upload CSV file 2", type="csv")
        if uploaded_file_1 is not None and uploaded_file_2 is not None:
            double_main(uploaded_file_1, uploaded_file_2)


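# To launch the app locally (assuming this module is saved as, e.g., app.py):
#   streamlit run app.py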
if __name__ == "__main__":
    main()