BananaSauce committed on
Commit
a95b240
·
1 Parent(s): 69508f6

Upload 2 files

Browse files
Files changed (2) hide show
  1. pre.py +33 -0
  2. second.py +87 -0
pre.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import streamlit as st
3
+ import csv
4
+ import io
5
+
6
def preprocess_csv(input_bytes, max_fields=5):
    """Normalize a raw CSV payload so every row has at most ``max_fields`` + 1 columns.

    Rows longer than ``max_fields`` cells have the overflow cells re-joined
    (comma-separated) into one trailing field. This repairs rows where an
    unquoted final column (e.g. an error message containing commas) was
    split into several cells by the CSV writer that produced the file.

    Args:
        input_bytes: Raw CSV file content as bytes (decoded with the default
            codec, matching the original behavior — assumed UTF-8).
        max_fields: Number of leading columns to keep intact before folding
            the remainder into a single field. Defaults to 5, i.e. at most
            6 output columns, preserving the original hard-coded behavior.

    Returns:
        io.StringIO positioned at offset 0, containing the rewritten CSV.
    """
    text = input_bytes.decode()  # decode bytes to text
    output = io.StringIO()
    writer = csv.writer(output)

    for row in csv.reader(io.StringIO(text)):
        if len(row) > max_fields:
            # Fold the overflow cells back into one field so downstream
            # readers see a fixed-width table.
            row = row[:max_fields] + [','.join(row[max_fields:])]
        writer.writerow(row)

    output.seek(0)  # rewind so callers can read from the start
    return output
18
+
19
def load_data(file):
    """Read a headerless scenario-results CSV into a DataFrame.

    Args:
        file: Any file-like object or path accepted by ``pd.read_csv``.

    Returns:
        DataFrame with the six canonical result columns assigned in order.
    """
    columns = (
        'Functional area',
        'Scenario name',
        'Start datetime',
        'End datetime',
        'Status',
        'Error message',
    )
    return pd.read_csv(file, header=None, names=list(columns))
30
+
31
def fill_missing_data(data, column_index, value):
    """Replace NaNs in the column at position *column_index* with *value*.

    Mutates *data* in place and also returns it for call-chaining.
    """
    target_column = data.iloc[:, column_index]
    data.iloc[:, column_index] = target_column.fillna(value)
    return data
second.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import streamlit as st
3
+ import csv
4
+ import io
5
+ import matplotlib.pyplot as plt
6
+ import numpy as np
7
+ from pre import preprocess_csv, load_data, fill_missing_data
8
+
9
+
10
def _prepare_frame(uploaded_file):
    """Read, repair, and parse one uploaded CSV into an analysis-ready DataFrame.

    Applies the shared pipeline: byte read -> preprocess_csv field repair ->
    load_data column naming -> Status NaN fill -> datetime parsing ->
    'Time spent' duration in seconds.
    """
    raw = uploaded_file.read()
    repaired = preprocess_csv(raw)
    data = load_data(io.StringIO(repaired.getvalue()))
    # Missing Status (column 4) is treated as 0 rather than NaN.
    data = fill_missing_data(data, 4, 0)
    data['Start datetime'] = pd.to_datetime(data['Start datetime'], errors='coerce')
    data['End datetime'] = pd.to_datetime(data['End datetime'], errors='coerce')
    data['Time spent'] = (data['End datetime'] - data['Start datetime']).dt.total_seconds()
    return data


def double_main(uploaded_file1, uploaded_file2):
    """Compare two scenario-result CSV uploads and render status transitions.

    The upload with the earlier minimum 'Start datetime' is treated as the
    baseline ("older") run. Three Streamlit sections are rendered: scenarios
    failing in both runs, newly failing scenarios, and fixed scenarios.

    Args:
        uploaded_file1: First uploaded CSV (file-like with .read()) or None.
        uploaded_file2: Second uploaded CSV or None.

    Returns:
        None. Output is rendered via streamlit; does nothing unless both
        files are provided.
    """
    if uploaded_file1 is None or uploaded_file2 is None:
        return

    data_1 = _prepare_frame(uploaded_file1)
    data_2 = _prepare_frame(uploaded_file2)

    # The run whose earliest start time is older is the baseline.
    if data_1['Start datetime'].min() < data_2['Start datetime'].min():
        older_df, newer_df = data_1, data_2
    else:
        older_df, newer_df = data_2, data_1

    # Reformat durations as MM:SS strings for display.
    older_df['Time spent'] = pd.to_datetime(older_df['Time spent'], unit='s').dt.strftime('%M:%S')
    newer_df['Time spent'] = pd.to_datetime(newer_df['Time spent'], unit='s').dt.strftime('%M:%S')

    older_datetime = older_df['Start datetime'].min()
    newer_datetime = newer_df['Start datetime'].min()
    st.write(f"The older csv started on {older_datetime}")
    st.write(f"The newer csv started on {newer_datetime}")

    # Pair up runs of the same scenario across the two files.
    merged_df = pd.merge(older_df, newer_df, on=['Functional area', 'Scenario name'], suffixes=('_old', '_new'))

    # Scenarios that were failing and are still failing.
    fail_to_fail_scenarios = merged_df[(merged_df['Status_old'] == 'FAILED') & (merged_df['Status_new'] == 'FAILED')]
    st.markdown("### Consistent Failures(previously failing, now failing)")
    st.write(f"Failing scenarios Count: {len(fail_to_fail_scenarios)}")
    st.write(fail_to_fail_scenarios[['Functional area', 'Scenario name', 'Error message_old', 'Error message_new']])

    # Scenarios that were passing and are now failing.
    pass_to_fail_scenarios = merged_df[(merged_df['Status_old'] == 'PASSED') & (merged_df['Status_new'] == 'FAILED')]
    st.markdown("### New Failures(previously passing, now failing)")
    st.write(f"Failing scenarios Count: {len(pass_to_fail_scenarios)}")
    st.write(pass_to_fail_scenarios[['Functional area', 'Scenario name', 'Error message_new', 'Time spent_old', 'Time spent_new']])

    # Scenarios that were failing and are now passing.
    fail_to_pass_scenarios = merged_df[(merged_df['Status_old'] == 'FAILED') & (merged_df['Status_new'] == 'PASSED')]
    # Fix: this section lists recoveries; the old heading wrongly said "New Failures".
    st.markdown("### Fixed Scenarios(previously failing, now passing)")
    st.write(f"Passing scenarios Count: {len(fail_to_pass_scenarios)}")
    st.write(fail_to_pass_scenarios[['Functional area', 'Scenario name', 'Error message_old', 'Time spent_old', 'Time spent_new']])