Commit 611c02e
Parent(s): f556076

updated for task
Files changed:
- jira_integration.py +364 -95
- multiple.py +101 -70
- pre.py +181 -153
jira_integration.py CHANGED
@@ -21,23 +21,19 @@ try:
         if not os.path.exists(log_dir):
             os.makedirs(log_dir)
         log_file = os.path.join(log_dir, f"jira_debug_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log")
-
+
     # Configure root logger with file handler
     logging.basicConfig(
         level=logging.DEBUG,
         format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-        handlers=[
-            logging.FileHandler(log_file)
-        ]
+        handlers=[logging.FileHandler(log_file)]
     )
 except (OSError, IOError):
     # If file logging fails (e.g., in Hugging Face Spaces), configure logging without file handler
     logging.basicConfig(
         level=logging.DEBUG,
         format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
-        handlers=[
-            logging.NullHandler()
-        ]
+        handlers=[logging.NullHandler()]
     )

 logger = logging.getLogger("jira_integration")
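The hunk above keeps the existing try/except pattern: attempt file logging, and fall back to a no-op handler when the filesystem is read-only (as on Hugging Face Spaces). A minimal standalone sketch of that pattern, with log_dir as an assumed stand-in for the value the module computes earlier:

    import logging
    import os
    from datetime import datetime

    log_dir = "logs"  # assumption: the module derives this path before the try block
    try:
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, f"jira_debug_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log")
        handler = logging.FileHandler(log_file)  # raises OSError/IOError on a read-only filesystem
    except (OSError, IOError):
        handler = logging.NullHandler()  # degrade gracefully instead of crashing at import time

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[handler],
    )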
@@ -839,76 +835,228 @@ def map_functional_area(functional_area, metadata):
     """Map a functional area to its closest Jira allowed parent and child values using structured mapping."""
     if not metadata or not functional_area:
         logger.error("No metadata or functional area provided")
-        …
+        # Return default values instead of raising an exception
+        return "R&I", "Data Exchange"

     # Get the functional area field from metadata
     func_field = metadata['all_fields'].get('customfield_13100', {})
     if not func_field or 'allowedValues' not in func_field:
         logger.error("Could not find functional area field in metadata")
-        …
+        # Return default values instead of raising an exception
+        return "R&I", "Data Exchange"
+
+    # Define all allowed child values for R&I parent
+    allowed_child_values = [
+        "Data Exchange", "FIN-Cash Book", "FIN-Creditors", "FIN-Debtors", "FIN-Fixed Assets",
+        "FIN-General Ledger", "FIN-Parameters", "Services-Calendar", "Services-Call Center",
+        "Services-Communications", "Services-Decsion Services", "Services-Entity Relations",
+        "Services-Gamification", "Services-Job Manager", "Services-Measurements",
+        "Services-Multimedia", "Services-Platform", "Services-Questionnaire", "Services-Workflow",
+        "WARPSPEED-Access", "WARPSPEED-Applications", "WARPSPEED-Calendar",
+        "WARPSPEED-Entity Configuration", "WARPSPEED-Environments", "WARPSPEED-Event Types",
+        "WARPSPEED-External Systems", "WARPSPEED-Linked Programmes", "WARPSPEED-Messages",
+        "WARPSPEED-Parameters", "WARPSPEED-Preferences", "WARPSPEED-RefMaster",
+        "WARPSPEED-Relations", "WARPSPEED-Security Tokens", "WARPSPEED-Sequences",
+        "WARPSPEED-Setup - Systems", "WARPSPEED-Statuses", "WARPSPEED-System Index Search",
+        "WARPSPEED-Template Relations", "WARPSPEED-Users", "WARPSPEED-Utilites - My Profile",
+        "WARPSPEED-Utilities - Matrix Headers", "WARPSPEED-Web Objects"
+    ]
+
+    # Define ILR child values
+    ilr_child_values = [
+        "Products", "New Business", "Policy Maintenance", "Collections", "Payments",
+        "Claims", "Commission", "Month End", "Integration", "Campaigns",
+        "Claims (Non Legal)", "Claims (Legal) (Including WF)", "Claim Invoicing (Legal)",
+        "Bulk Processing", "Manual Transactions", "Entity Maintenance", "General Queries"
+    ]
+
+    # Log input functional area
+    logger.info(f"\nInput functional area: {functional_area}")
+
+    # Check if this is an ILR-related functional area
+    ilr_keywords = ["ilr", "legal", "policy", "claim", "invoice", "commission", "collection", "payment"]
+    is_ilr_related = any(keyword in functional_area.lower() for keyword in ilr_keywords)
+
+    # Also check if the environment is Legalwise, Lifewise, Scorpion, or Talksure
+    environment = metadata.get('environment', '')
+    is_ilr_environment = any(env in environment.upper() if environment else False
+                             for env in ["LEGAL_WISE_NR", "LIFE_WISE_NR", "SCORPION_NR", "TALKSURE"])
+
+    if is_ilr_related or is_ilr_environment or functional_area.startswith("ILR"):
+        logger.info(f"Detected ILR-related functional area or environment: {functional_area}, {environment}")
+
+        # Try to find the best matching ILR child value
+        best_match = None
+        best_score = 0
+
+        for child_value in ilr_child_values:
+            # Calculate similarity score
+            score = 0
+            child_lower = child_value.lower()
+            func_lower = functional_area.lower()
+
+            # Check for exact matches or contains
+            if child_lower in func_lower or func_lower in child_lower:
+                score = 100
+            else:
+                # Check for partial matches with specific keywords
+                for keyword in child_lower.split():
+                    if len(keyword) > 3 and keyword in func_lower:  # Only consider meaningful keywords
+                        score += 20
+
+            if score > best_score:
+                best_score = score
+                best_match = child_value
+
+        # If we found a good match
+        if best_match and best_score > 30:
+            logger.info(f"Mapped ILR functional area to: {best_match} with score {best_score}")
+            return "ILR", best_match
+
+        # If no good match found, return None as the child value
+        logger.info("No good match found for ILR functional area, using None")
+        return "ILR", None
+
+    # Enhanced direct mapping for common functional areas with more specific patterns
+    direct_mappings = {
+        "Financials - Creditors": "FIN-Creditors",
+        "Financials - Debtors": "FIN-Debtors",
+        "Financials - Cash Book": "FIN-Cash Book",
+        "Financials - Fixed Assets": "FIN-Fixed Assets",
+        "Financials - FA": "FIN-Fixed Assets",  # Added specific mapping for FA
+        "Financials - General Ledger": "FIN-General Ledger",
+        "Financials - Parameters": "FIN-Parameters",
+        "Services - Calendar": "Services-Calendar",
+        "Services - Call Center": "Services-Call Center",
+        "Services - Communications": "Services-Communications",
+        "Services - Entity Relations": "Services-Entity Relations",
+        "Services - Gamification": "Services-Gamification",
+        "Services - Job Manager": "Services-Job Manager",
+        "Services - Platform": "Services-Platform",
+        "Services - Workflow": "Services-Workflow",
+        "Data Exchange": "Data Exchange"
+    }
+
+    # Check for direct matches first
+    for pattern, value in direct_mappings.items():
+        if pattern in functional_area:
+            logger.info(f"Found direct mapping match: {pattern} -> {value}")
+            return "R&I", value

     # Split the functional area into parts
     parts = [p.strip() for p in functional_area.split(' - ')]
     logger.info(f"Split into parts: {parts}")

-    # …
-    …
+    # Check first part for category matching
+    first_part = parts[0].upper() if parts else ""
+
+    # Map based on first part
+    if "FINANCIALS" in first_part or "FIN" in first_part:
+        # For financial areas, check if we can be more specific based on second part
+        if len(parts) > 1:
+            second_part = parts[1].upper()

+            # Enhanced FA/Fixed Assets detection
+            if "FA" in second_part or "FIXED" in second_part or "ASSETS" in second_part or "ASSET" in second_part:
+                logger.info("Mapping to FIN-Fixed Assets based on FA detection")
+                return "R&I", "FIN-Fixed Assets"
+            elif "CREDITORS" in second_part or "AP" in second_part or "ACCOUNTS PAYABLE" in second_part:
+                logger.info("Mapping to FIN-Creditors based on second part")
+                return "R&I", "FIN-Creditors"
+            elif "DEBTORS" in second_part or "AR" in second_part or "ACCOUNTS RECEIVABLE" in second_part:
+                logger.info("Mapping to FIN-Debtors based on second part")
+                return "R&I", "FIN-Debtors"
+            elif "CASH" in second_part or "CASHBOOK" in second_part:
+                logger.info("Mapping to FIN-Cash Book based on second part")
+                return "R&I", "FIN-Cash Book"
+            elif "GENERAL" in second_part or "GL" in second_part or "LEDGER" in second_part:
+                logger.info("Mapping to FIN-General Ledger based on second part")
+                return "R&I", "FIN-General Ledger"
+
+        # Default financial mapping
+        logger.info("Defaulting to FIN-Parameters")
+        return "R&I", "FIN-Parameters"
+
-    # …
-    if …
-        logger.info(…
-        return …
-    elif test_value_no_spaces in allowed_values:
-        logger.info(f"Found match without spaces: {test_value_no_spaces}")
-        return allowed_values[test_value_no_spaces], test_value_no_spaces
-
-    # …
-    …
-    if 'SERVICE' in first_part or 'SERVICES' in first_part:
-        logger.info("No exact match found, defaulting to Services-Platform")
+    elif "SERVICE" in first_part or "SERVICES" in first_part:
+        # For services areas, check if we can be more specific based on second part
+        if len(parts) > 1:
+            second_part = parts[1].upper()

+            # Add specific check for "Work Flow" vs "Workflow"
+            if "WORK" in second_part and ("FLOW" in second_part or "FLOWS" in second_part):
+                logger.info("Mapping to Services-Workflow based on Work Flow detection")
+                return "R&I", "Services-Workflow"

+            # Continue with existing logic for other service types
+            for child_value in allowed_child_values:
+                if child_value.startswith("Services-"):
+                    # Extract the part after "Services-"
+                    service_type = child_value.split("Services-")[1].upper()
+                    # Normalize by removing spaces for comparison
+                    second_part_normalized = second_part.replace(" ", "")
+                    service_type_normalized = service_type.replace(" ", "")
+
+                    if service_type_normalized in second_part_normalized or second_part_normalized in service_type_normalized:
+                        logger.info(f"Mapping to {child_value} based on second part")
+                        return "R&I", child_value
+
+        # Default services mapping
+        logger.info("Defaulting to Services-Platform")
         return "R&I", "Services-Platform"
-    …
+
+    elif "WARPSPEED" in first_part:
+        # For warpspeed areas, check if we can be more specific based on second part
+        if len(parts) > 1:
+            second_part = parts[1].upper()
+            for child_value in allowed_child_values:
+                if child_value.startswith("WARPSPEED-"):
+                    # Extract the part after "WARPSPEED-"
+                    warpspeed_type = child_value.split("WARPSPEED-")[1].upper()
+                    if warpspeed_type in second_part or second_part in warpspeed_type:
+                        logger.info(f"Mapping to {child_value} based on second part")
+                        return "R&I", child_value
+
+        # Default warpspeed mapping
+        logger.info("Defaulting to WARPSPEED-Parameters")
         return "R&I", "WARPSPEED-Parameters"

-    …
+    elif "DATA" in first_part or "EXCHANGE" in first_part:
+        logger.info("Mapping to Data Exchange")
         return "R&I", "Data Exchange"
+
+    # If no category match, try fuzzy matching with all allowed values
+    best_match = None
+    best_score = 0
+
+    # Try to match the full functional area
+    for child_value in allowed_child_values:
+        # Calculate similarity score
+        score = 0
+        child_upper = child_value.upper()
+        func_upper = functional_area.upper()
+
+        # Check if child value appears in functional area
+        if child_upper.replace("-", " ") in func_upper or child_upper in func_upper:
+            score = 100  # Perfect match
+        else:
+            # Calculate partial match score
+            for part in parts:
+                part_upper = part.upper()
+                if part_upper in child_upper or child_upper in part_upper:
+                    score += 50 / len(parts)  # Partial match
+
+        if score > best_score:
+            best_score = score
+            best_match = child_value
+
+    # If we found a good match
+    if best_match and best_score > 30:  # Threshold for accepting a match
+        logger.info(f"Found fuzzy match: {best_match} with score {best_score}")
+        return "R&I", best_match
+
+    # If no good match found, return None as the child value
+    logger.warning(f"No good match found for '{functional_area}', using None")
+    return "R&I", None

 def get_customer_field_values(metadata):
     """Extract all available customer field values and their child options from metadata"""
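A hedged usage sketch of the new mapping paths (the metadata dict here is a stub carrying only the keys map_functional_area reads; real metadata comes from the project metadata call used elsewhere in this module):

    metadata = {
        'all_fields': {'customfield_13100': {'allowedValues': []}},
        'environment': 'R&I 2211',
    }

    map_functional_area("Financials - FA - Depreciation", metadata)
    # -> ("R&I", "FIN-Fixed Assets"), via the "Financials - FA" direct mapping

    map_functional_area("Claims (Legal)", metadata)
    # -> ("ILR", "Claims"): "claim" and "legal" are in ilr_keywords, and
    #    "Claims" is the first ILR child value to score 100 as a contained match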
@@ -937,24 +1085,100 @@ def map_customer_value(environment_value, customer_values):

     # Clean up environment value
     env_value = environment_value.strip()
+    logger.info(f"Mapping customer value for environment: {env_value}")

-    # …
-    if …
-        parent_value = "ILR"
-        child_value = …
-        logger.info(f"Mapped {env_value} to …
+    # Handle Legalwise environments
+    if 'LEGAL_WISE_NR' in env_value:
+        parent_value = "MIP ILR"
+        child_value = "LEZA - LegalWise"
+        logger.info(f"Mapped Legalwise environment {env_value} to {parent_value} parent with child {child_value}")
         return parent_value, child_value

-    # Handle …
-    if …
-    …
+    # Handle Lifewise environments
+    if 'LIFE_WISE_NR' in env_value:
+        parent_value = "MIP ILR"
+        child_value = "LEZA - LifeWise"
+        logger.info(f"Mapped Lifewise environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle Scorpion environments
+    if 'SCORPION_NR' in env_value:
+        parent_value = "MIP ILR"
+        child_value = "LEZA - Scorpion"
+        logger.info(f"Mapped Scorpion environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle Talksure environments
+    if 'TALKSURE' in env_value:
+        parent_value = "MIP ILR"
+        child_value = "Talksure"
+        logger.info(f"Mapped Talksure environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Special case for 2001 fin
+    if '2001' in env_value and 'FIN' in env_value.lower():
         parent_value = "MIP Research and Innovation"
+        child_value = "R&I 2001 Fin"
+        logger.info(f"Mapped 2001 fin environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle R&I environments with FIN
+    if 'R&I' in env_value and 'FIN' in env_value:
+        parent_value = "MIP Research and Innovation"
+        # Extract the number between R&I and FIN
+        try:
+            number = env_value.split('R&I')[1].split('FIN')[0].strip()
+            child_value = f"R&I {number} Fin"  # Changed from FIN to Fin
+            logger.info(f"Mapped R&I FIN environment {env_value} to {parent_value} parent with child {child_value}")
+            return parent_value, child_value
+        except:
+            logger.warning(f"Could not parse R&I FIN format from {env_value}")
+
+    # Handle R&I environments without FIN
+    if 'R&I' in env_value and 'FIN' not in env_value:
+        parent_value = "MIP Research and Innovation"
+        # Extract the number after R&I
+        try:
+            number = env_value.split('R&I')[1].strip()
+            child_value = f"R&I {number}"
+            logger.info(f"Mapped R&I environment {env_value} to {parent_value} parent with child {child_value}")
+            return parent_value, child_value
+        except:
+            logger.warning(f"Could not parse R&I format from {env_value}")
+
+    # Handle Task Manager environments
+    if 'Task Manager' in env_value:
+        parent_value = "MIP Research and Innovation"
+        child_value = env_value
+        logger.info(f"Mapped Task Manager environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle App Manager environments
+    if 'App Manager' in env_value:
+        parent_value = "MIP Research and Innovation"
+        child_value = env_value
+        logger.info(f"Mapped App Manager environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle Auth Gateway environments
+    if 'Auth Gateway' in env_value:
+        parent_value = "MIP Research and Innovation"
+        child_value = env_value
+        logger.info(f"Mapped Auth Gateway environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle MIP Intranet
+    if 'MIP Intranet' in env_value:
+        parent_value = "MIP Research and Innovation"
+        child_value = "MIP Intranet"
+        logger.info(f"Mapped MIP Intranet environment {env_value} to {parent_value} parent with child {child_value}")
+        return parent_value, child_value
+
+    # Handle Pilotfish
+    if 'Pilotfish' in env_value:
+        parent_value = "MIP Research and Innovation"
+        child_value = "Pilotfish"
+        logger.info(f"Mapped Pilotfish environment {env_value} to {parent_value} parent with child {child_value}")
         return parent_value, child_value

     # Default case - try to find matching values
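These branches key off substrings of the environment name, so the behaviour can be sketched with bare strings (customer_values is only consulted by the fall-through default case, so an empty dict suffices for illustration):

    map_customer_value("LEGAL_WISE_NR", {})  # -> ("MIP ILR", "LEZA - LegalWise")
    map_customer_value("R&I 2211 FIN", {})   # -> ("MIP Research and Innovation", "R&I 2211 Fin")
    map_customer_value("R&I 2211", {})       # -> ("MIP Research and Innovation", "R&I 2211")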
@@ -973,10 +1197,8 @@ def map_customer_value(environment_value, customer_values):
     return "MIP Research and Innovation", "R&I General"

 def create_regression_task(project_key, summary, description, environment, filtered_scenarios_df):
-    logger.debug(f"Entering create_regression_task with project_key={project_key}, summary={summary}, environment={environment}, DF_shape={filtered_scenarios_df.shape}")
     logger.info("=== Starting create_regression_task function ===")
     logger.info(f"Project: {project_key}, Summary: {summary}, Environment: {environment}")
-    logger.info(f"Filtered DF shape: {filtered_scenarios_df.shape if filtered_scenarios_df is not None else 'None'}")

     try:
         # Get metadata first to access field values
@@ -987,9 +1209,74 @@ def create_regression_task(project_key, summary, description, environment, filte
             st.error(error_msg)
             return None

+        # Add environment to metadata for use in mapping functions
+        metadata['environment'] = environment
+
+        # Check if environment is ILR-related
+        is_ilr_environment = any(env in environment.upper() if environment else False
+                                 for env in ["LEGAL_WISE_NR", "LIFE_WISE_NR", "SCORPION_NR", "TALKSURE"])
+        logger.info(f"Environment check - Is ILR: {is_ilr_environment}")
+
+        # Extract functional area from filtered scenarios
+        functional_areas = []
+        if "Functional area" in filtered_scenarios_df.columns:
+            functional_areas = filtered_scenarios_df["Functional area"].unique().tolist()
+            logger.info(f"Extracted functional areas: {functional_areas}")
+
+        # Handle ILR environments
+        if is_ilr_environment:
+            functional_area_parent = "ILR"
+            # Define ILR child values
+            ilr_child_values = [
+                "Products", "New Business", "Policy Maintenance", "Collections", "Payments",
+                "Claims", "Commission", "Month End", "Integration", "Campaigns",
+                "Claims (Non Legal)", "Claims (Legal) (Including WF)", "Claim Invoicing (Legal)",
+                "Bulk Processing", "Manual Transactions", "Entity Maintenance", "General Queries"
+            ]
+
+            # Default to General Queries
+            functional_area_child = "General Queries"
+
+            # Try to find a better match based on the functional area
+            if functional_areas:
+                func_area = functional_areas[0].lower()
+                for child in ilr_child_values:
+                    if child.lower() in func_area:
+                        functional_area_child = child
+                        break
+
+            logger.info(f"Forced functional area to ILR parent with child: {functional_area_child}")
+        else:
+            # Map functional area using metadata (now with environment info)
+            logger.info("Not an ILR environment, using standard mapping")
+            functional_area_parent, functional_area_child = map_functional_area(
+                functional_areas[0] if functional_areas else "Data Exchange",
+                metadata
+            )
+
+        logger.info(f"Final functional area mapping - Parent: {functional_area_parent}, Child: {functional_area_child}")
+
         # Get customer field values and map environment
         customer_values = get_customer_field_values(metadata)
-        …
+
+        # If functional area is ILR or environment is ILR-related, set customer to MIP ILR
+        if functional_area_parent == "ILR" or is_ilr_environment:
+            parent_value = "MIP ILR"
+            # Set child value based on environment
+            if "LEGAL_WISE_NR" in environment.upper():
+                child_value = "LEZA - LegalWise"
+            elif "LIFE_WISE_NR" in environment.upper():
+                child_value = "LEZA - LifeWise"
+            elif "SCORPION_NR" in environment.upper():
+                child_value = "LEZA - Scorpion"
+            elif "TALKSURE" in environment.upper():
+                child_value = "Talksure"
+            else:
+                child_value = "General Queries"
+        else:
+            # Use the normal mapping for non-ILR functional areas
+            parent_value, child_value = map_customer_value(environment, customer_values)
+
         logger.info(f"Mapped customer values - Parent: {parent_value}, Child: {child_value}")

         # Get Jira client
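The ILR check this commit adds in several places is a single generator expression; written out, it is equivalent to "the environment string is non-empty and contains one of the ILR markers". A reading aid, not part of the commit:

    def is_ilr_environment(environment: str) -> bool:
        markers = ["LEGAL_WISE_NR", "LIFE_WISE_NR", "SCORPION_NR", "TALKSURE"]
        # any(env in environment.upper() if environment else False for env in markers)
        # evaluates the conditional expression per marker, which reduces to:
        return bool(environment) and any(m in environment.upper() for m in markers)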
@@ -1010,28 +1297,10 @@ def create_regression_task(project_key, summary, description, environment, filte

         logger.info(f"Found active sprint: {active_sprint.name} (ID: {active_sprint.id})")

-        # Extract functional area from filtered scenarios
-        functional_areas = []
-        try:
-            if "Functional area" in filtered_scenarios_df.columns:
-                functional_areas = filtered_scenarios_df["Functional area"].unique().tolist()
-                logger.info(f"Extracted functional areas: {functional_areas}")
-        except Exception as e:
-            logger.exception(f"Error extracting functional areas: {str(e)}")
-            st.error(f"Error extracting functional areas: {str(e)}")
-            return None
-
         # Calculate story points based on number of scenarios
         story_points = calculate_story_points(len(filtered_scenarios_df))
         logger.info(f"Calculated story points: {story_points}")

-        # Map functional area using metadata
-        functional_area_parent, functional_area_child = map_functional_area(
-            functional_areas[0] if functional_areas else "Data Exchange",
-            metadata
-        )
-        logger.info(f"Mapped functional area to parent: {functional_area_parent}, child: {functional_area_child}")
-
         # Prepare issue dictionary with all required fields
         issue_dict = {
             "project": {"key": project_key},
multiple.py CHANGED
@@ -17,7 +17,9 @@ from jira_integration import (
     get_dependent_field_value,
     get_boards,
     get_functional_area_values,
-    map_functional_area
+    map_functional_area,
+    get_customer_field_values,
+    map_customer_value
 )
 from datetime import datetime, timedelta
 import plotly.express as px
@@ -55,85 +57,113 @@ logger = logging.getLogger("multiple")

 # Function to capture button clicks with manual callback
 def handle_task_button_click(summary, description, formatted_env, filtered_df):
-    logger.info("=== Task button clicked - Starting …
+    logger.info("=== Task button clicked - Starting debug logging ===")
     try:
         logger.info(f"Summary: {summary}")
         logger.info(f"Description length: {len(description)}")
         logger.info(f"Environment: {formatted_env}")
         logger.info(f"DataFrame shape: {filtered_df.shape}")

-        # …
-        …
-        # Import here to avoid circular imports
-        from jira_integration import create_regression_task
-        …
+        # Get metadata for field values
+        metadata = get_project_metadata("RS")
+        if not metadata:
+            logger.error("Could not get project metadata")
+            return False
+
+        # Check if this is an ILR environment
+        is_ilr_environment = any(env in formatted_env.upper() if formatted_env else False
+                                 for env in ["LEGAL_WISE_NR", "LIFE_WISE_NR", "SCORPION_NR", "TALKSURE"])

-        # …
-        …
+        # Extract functional area from filtered scenarios
+        functional_areas = []
+        if "Functional area" in filtered_df.columns:
+            functional_areas = filtered_df["Functional area"].unique().tolist()
+            logger.info(f"Extracted functional areas: {functional_areas}")

+        # Map functional area using metadata
+        functional_area_parent = "ILR" if is_ilr_environment else "R&I"
+        functional_area_child = None
+
+        # Set child value based on environment for ILR
+        if is_ilr_environment:
+            if "LEGAL_WISE_NR" in formatted_env.upper():
+                functional_area_child = "LEZA - LegalWise"
+            elif "LIFE_WISE_NR" in formatted_env.upper():
+                functional_area_child = "LEZA - LifeWise"
+            elif "SCORPION_NR" in formatted_env.upper():
+                functional_area_child = "LEZA - Scorpion"
+            elif "TALKSURE" in formatted_env.upper():
+                functional_area_child = "Talksure"
+        else:
+            # Use standard R&I mapping
+            _, functional_area_child = map_functional_area(
+                functional_areas[0] if functional_areas else "Data Exchange",
+                metadata
         )
-        …
+
+        logger.info(f"Mapped functional area to parent: {functional_area_parent}, child: {functional_area_child}")
+
+        # Get customer field values and map environment
+        customer_values = get_customer_field_values(metadata)
+        parent_value, child_value = map_customer_value(formatted_env, customer_values)
+        logger.info(f"Mapped customer values - Parent: {parent_value}, Child: {child_value}")
+
+        # Calculate story points based on number of scenarios
+        story_points = calculate_story_points(len(filtered_df))
+        logger.info(f"Calculated story points: {story_points}")
+
+        # Prepare issue dictionary with all required fields
+        issue_dict = {
+            "project": {"key": "RS"},
+            "summary": summary,
+            "description": description,
+            "issuetype": {"name": "Story"},
+            "components": [{"name": "Maintenance (Regression)"}],
+            "customfield_10427": {
+                "value": parent_value,
+                "child": {
+                    "value": child_value
+                }
+            },
+            "customfield_12730": {"value": "Non-Business Critical"},  # Regression Type field
+            "customfield_13430": {"value": str(len(filtered_df))},  # Number of Scenarios
+            "customfield_13100": {
+                "value": functional_area_parent,
+                "child": {
+                    "value": functional_area_child
+                }
+            },
+            "assignee": {"name": st.session_state.jira_username},
+            "customfield_10002": story_points  # Story Points field
+        }
+
+        # Log the complete issue dictionary
+        logger.info("=== Task Creation Values ===")
+        logger.info(f"Complete issue dictionary: {json.dumps(issue_dict, indent=2)}")
+
+        # Create the actual Jira task
+        task_key = create_regression_task(
+            summary=summary,
+            description=description,
+            environment=formatted_env,
+            filtered_scenarios_df=filtered_df,
+            project_key="RS"
+        )
+
+        if task_key:
+            # Set session state variables for success message
+            st.session_state.last_task_key = task_key
+            st.session_state.last_task_url = f"{JIRA_SERVER}/browse/{task_key}"
+            st.session_state.show_success = True
+            logger.info(f"Successfully created task: {task_key}")
+            return True
+        else:
+            st.error("❌ Failed to create Jira task. Check logs for details.")
+            return False

     except Exception as e:
         logger.exception(f"Error in handle_task_button_click: {str(e)}")
-        st.error(f"❌ Error …
+        st.error(f"❌ Error preparing task: {str(e)}")
         import traceback
         error_trace = traceback.format_exc()
         logger.error(f"Full traceback: {error_trace}")
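For context on the issue_dict shape built above: Jira cascading-select custom fields take a parent "value" plus a nested "child" dict. A minimal sketch using the jira package's create_issue (field ID as in the diff; the summary text is illustrative only):

    from jira import JIRA

    def create_minimal_task(client: JIRA, parent: str, child: str) -> str:
        fields = {
            "project": {"key": "RS"},
            "summary": "Regression run",
            "issuetype": {"name": "Story"},
            # Cascading select: parent value with nested child value
            "customfield_13100": {"value": parent, "child": {"value": child}},
        }
        return client.create_issue(fields=fields).key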
@@ -292,7 +322,6 @@ def perform_analysis(uploaded_dataframes):
     col1, col2, col3 = st.columns([1, 2, 1])
     with col2:
         if st.button("📝 Log Jira Task", use_container_width=True):
-            st.write("Debug: Button clicked")  # Debug line
             # Use the properly structured DataFrame for task creation
             task_df = grouped_filtered_scenarios.copy()
             expected_columns = [
@@ -314,7 +343,9 @@ def perform_analysis(uploaded_dataframes):
             summary, description = generate_task_content(task_df)
             if summary and description:
                 # Call the task creation function
-                handle_task_button_click(summary, description, environment, task_df)
+                success = handle_task_button_click(summary, description, environment, task_df)
+                if success:
+                    st.rerun()  # Refresh the page to show success message

     # Check if selected_status is 'Failed' and show bar graph
     if selected_status != 'Passed':
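The new return-value-plus-st.rerun() flow relies on Streamlit session state surviving the rerun. A sketch of the companion rendering code such a flow assumes (key names match the session keys set in handle_task_button_click):

    import streamlit as st

    if st.session_state.get("show_success"):
        st.success(f"Created {st.session_state.last_task_key}: {st.session_state.last_task_url}")
        st.session_state.show_success = False  # show the flash only once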
pre.py CHANGED
@@ -43,138 +43,143 @@ def preprocess_xlsx(uploaded_file):
         'Failed Scenario': 'string'
     }

-    …
-    error_messages = failed_steps.groupby('Scenario Name').agg({
-        'Error Message': 'first',
-        'Step': 'first'  # Capture the step where it failed
-    }).reset_index()
+    try:
+        # Read both the first sheet for error messages and "Time Taken" sheet
+        excel_file = pd.ExcelFile(uploaded_file, engine='openpyxl')
+
+        # Read detailed step data from first sheet (contains error messages)
+        error_df = pd.read_excel(excel_file, sheet_name=0)
+
+        # Read time taken data from the "Time Taken" sheet
+        df = pd.read_excel(
+            excel_file,
+            sheet_name='Time Taken',
+            dtype=dtype_dict
+        )
+
+        # Convert Failed Scenario column to boolean after reading
+        # Handle different possible values (TRUE/FALSE, True/False, etc.)
+        df['Failed Scenario'] = df['Failed Scenario'].astype(str).str.upper()
+        # Replace 'NAN' string with empty string to avoid conversion issues
+        df['Failed Scenario'] = df['Failed Scenario'].replace('NAN', '')
+        df['Status'] = df['Failed Scenario'].map(
+            lambda x: 'FAILED' if x in ['TRUE', 'YES', 'Y', '1'] else 'PASSED'
+        )
+
+        # Count failed and passed scenarios
+        failed_count = (df['Status'] == 'FAILED').sum()
+        passed_count = (df['Status'] == 'PASSED').sum()
+
+        # Extract error messages from the first sheet
+        # Find rows with FAILED result and group by Scenario Name to get the error message
+        if 'Result' in error_df.columns:
+            failed_steps = error_df[error_df['Result'] == 'FAILED'].copy()
+
+            # If there are failed steps, get the error messages
+            if not failed_steps.empty:
+                # Group by Scenario Name and get the first error message and step for each scenario
+                error_messages = failed_steps.groupby('Scenario Name').agg({
+                    'Error Message': 'first',
+                    'Step': 'first'  # Capture the step where it failed
+                }).reset_index()
+            else:
+                # Create empty DataFrame with required columns
+                error_messages = pd.DataFrame(columns=['Scenario Name', 'Error Message', 'Step'])
         else:
-            # …
+            # If Result column doesn't exist, create empty DataFrame
             error_messages = pd.DataFrame(columns=['Scenario Name', 'Error Message', 'Step'])
-    …
-    # …
-    …
+
+        # Extract date from filename (e.g., RI2211_batch_20250225_27031.xlsx)
+        filename = uploaded_file.name
+        date_match = re.search(r'_(\d{8})_', filename)
+        if date_match:
+            date_str = date_match.group(1)
+            file_date = datetime.strptime(date_str, '%Y%m%d').date()
+        else:
+            st.warning(f"Could not extract date from filename: {filename}. Using current date.")
+            file_date = datetime.now().date()
+
+        # Extract environment from filename
+        if any(pattern in filename for pattern in ['_batch_', '_fin_', '_priority_', '_Puppeteer_']):
+            # Get everything before _batch, _fin, or _priority
+            if '_batch_' in filename:
+                environment = filename.split('_batch_')[0]
+            elif '_fin_' in filename:
+                environment = filename.split('_fin_')[0]
+            elif '_priority_' in filename:
+                environment = filename.split('_priority_')[0]
+            elif '_Puppeteer_' in filename:
+                environment = filename.split('_Puppeteer_')[0]
+        else:
+            environment = filename.split('.')[0]
+
+        # Create result dataframe
+        result_df = pd.DataFrame({
+            'Functional area': df['Feature Name'],
+            'Scenario Name': df['Scenario Name'],
+            'Status': df['Status'],
+            'Time spent': df['Total Time Taken (ms)'] / 1000  # Convert ms to seconds
+        })
+
+        # Fill any NaN values in Functional area
+        result_df['Functional area'] = result_df['Functional area'].fillna('Unknown')
+
+        # Ensure Time spent is a numeric value and handle NaN
+        result_df['Time spent'] = pd.to_numeric(result_df['Time spent'], errors='coerce')
+        result_df['Time spent'] = result_df['Time spent'].fillna(0)
+
+        # Merge error messages with result dataframe
+        if not error_messages.empty:
+            result_df = result_df.merge(error_messages[['Scenario Name', 'Error Message', 'Step']],
+                                        on='Scenario Name', how='left')
+
+        # Add environment column
+        result_df['Environment'] = environment
+
+        # Calculate formatted time spent
+        result_df['Time spent(m:s)'] = pd.to_datetime(result_df['Time spent'], unit='s').dt.strftime('%M:%S')
+
+        result_df['Start datetime'] = pd.to_datetime(file_date)
+        result_df['End datetime'] = result_df['Start datetime'] + pd.to_timedelta(result_df['Time spent'], unit='s')
+
+        # Add failed step information if available
+        if 'Step' in result_df.columns:
+            result_df['Failed Step'] = result_df['Step']
+            result_df.drop('Step', axis=1, inplace=True)
+
+        # Extract start time from the first sheet
+        before_steps = error_df[error_df['Step'].str.contains('before', case=False, na=False)].copy()
+        if not before_steps.empty:
+            # Get the first 'before' step for each scenario
+            before_steps.loc[:, 'Time Stamp'] = pd.to_datetime(before_steps['Time Stamp'], format='%H:%M:%S', errors='coerce')
+            start_times = before_steps.groupby('Scenario Name').agg({'Time Stamp': 'first'}).reset_index()
+            # Store the timestamps in a variable for efficient reuse
+            result_df = result_df.merge(start_times, on='Scenario Name', how='left')
+            result_df.rename(columns={'Time Stamp': 'Scenario Start Time'}, inplace=True)
+
+            # Convert Scenario Start Time to datetime if it's not already
+            result_df['Scenario Start Time'] = pd.to_datetime(result_df['Scenario Start Time'], errors='coerce')
+
+            # Combine the date from the filename with the time stamp
+            result_df['Start datetime'] = pd.to_datetime(
+                result_df['Scenario Start Time'].dt.strftime('%H:%M:%S') + ' ' + file_date.strftime('%Y-%m-%d'),
+                errors='coerce'
+            )
+
+        return result_df
+
+    except Exception as e:
+        st.error(f"Error processing Excel file: {str(e)}")
+        # Log more detailed error information
+        import traceback
+        st.error(f"Detailed error: {traceback.format_exc()}")
+        # Return empty DataFrame with expected columns to avoid further errors
+        return pd.DataFrame(columns=[
+            'Functional area', 'Scenario Name', 'Status', 'Time spent',
+            'Time spent(m:s)', 'Environment', 'Start datetime', 'End datetime'
+        ])

 def fill_missing_data(data, column_index, value):
     data.iloc[:, column_index] = data.iloc[:, column_index].fillna(value)
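The date and environment extraction added above can be exercised standalone on the example filename from the code comment:

    import re
    from datetime import datetime

    filename = "RI2211_batch_20250225_27031.xlsx"
    date_match = re.search(r'_(\d{8})_', filename)
    file_date = datetime.strptime(date_match.group(1), '%Y%m%d').date()  # 2025-02-25
    environment = filename.split('_batch_')[0]                           # "RI2211"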
@@ -187,30 +192,53 @@ def to_camel_case(s):

 # Define the function to preprocess a file (CSV or XLSX)
 def preprocess_uploaded_file(uploaded_file):
-    …
+    try:
+        # Determine file type based on extension
+        if uploaded_file.name.lower().endswith('.xlsx'):
+            data = preprocess_xlsx(uploaded_file)
+        else:
+            # Original CSV processing
+            file_content = uploaded_file.read()
+            processed_output = preprocess_csv(file_content)
+            processed_file = io.StringIO(processed_output.getvalue())
+            data = load_data(processed_file)
+            data = fill_missing_data(data, 4, 0)
+            data['Start datetime'] = pd.to_datetime(data['Start datetime'], dayfirst=True, errors='coerce')
+            data['End datetime'] = pd.to_datetime(data['End datetime'], dayfirst=True, errors='coerce')
+            data['Time spent'] = (data['End datetime'] - data['Start datetime']).dt.total_seconds()
+            data['Time spent(m:s)'] = pd.to_datetime(data['Time spent'], unit='s').dt.strftime('%M:%S')
+
+            # Extract environment name from filename
+            filename = uploaded_file.name
+            environment = filename.split('_Puppeteer')[0]
+
+            # Add environment column to the dataframe
+            data['Environment'] = environment

-    # …
-    data …
-    …
+        # Make sure all required columns exist and have proper values
+        if data is not None and not data.empty:
+            # Ensure Time spent is numeric
+            if 'Time spent' in data.columns:
+                data['Time spent'] = pd.to_numeric(data['Time spent'], errors='coerce')
+                data['Time spent'] = data['Time spent'].fillna(0)
+
+            # Replace any NaN string values
+            for col in data.columns:
+                if data[col].dtype == 'object':
+                    data[col] = data[col].replace('NaN', '').replace('nan', '')
+
+        return data
+
+    except Exception as e:
+        st.error(f"Error processing {uploaded_file.name}: {str(e)}")
+        # Provide more detailed error information
+        import traceback
+        st.error(f"Detailed error: {traceback.format_exc()}")
+        # Return empty DataFrame with expected columns to avoid cascading errors
+        return pd.DataFrame(columns=[
+            'Functional area', 'Scenario Name', 'Status', 'Time spent',
+            'Time spent(m:s)', 'Environment', 'Start datetime', 'End datetime'
+        ])

 def add_app_description():
     app_title = '<p style="font-family:Roboto, sans-serif; color:#004E7C; font-size: 42px;">DataLink Compare</p>'
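Both preprocessors now fail soft: any exception yields an empty frame with the expected columns, so downstream grouping and filtering keeps working instead of raising a KeyError. A quick check of that property:

    import pandas as pd

    EMPTY = pd.DataFrame(columns=[
        'Functional area', 'Scenario Name', 'Status', 'Time spent',
        'Time spent(m:s)', 'Environment', 'Start datetime', 'End datetime'
    ])
    combined = pd.concat([EMPTY, EMPTY], ignore_index=True)
    assert list(combined.columns) == list(EMPTY.columns) and combined.empty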