karths committed
Commit 90c062f · verified · 1 Parent(s): e41d5bb

Create app.py

Files changed (1)
  1. app.py +110 -0
app.py ADDED
@@ -0,0 +1,110 @@
+ import json
+ import gradio as gr
+ import os
+ import requests
+ from huggingface_hub import AsyncInferenceClient
+
+ HF_TOKEN = os.getenv('HF_TOKEN')
+ api_url = os.getenv('API_URL')
+ headers = {"Authorization": f"Bearer {HF_TOKEN}"}
+ client = AsyncInferenceClient(api_url, headers=headers)
+
+
+ system_message = """
+ Refactor the provided Python code to improve its maintainability and efficiency and to reduce its complexity. Include the refactored code along with comments explaining the changes made to improve these metrics.
+ """
+ title = "Python Refactoring"
+ description = """
+ Please allow 3 to 4 minutes for the model to load and run. Consider submitting Python code with fewer than 120 lines due to GPU constraints.
+ """
+ css = """.toast-wrap { display: none !important } """
+ examples = [["""
+ import pandas as pd
+ import re
+ import ast
+ from code_bert_score import score
+ import numpy as np
+ def preprocess_code(source_text):
+
+     def remove_comments_and_docstrings(source_code):
+         source_code = re.sub(r'#.*', '', source_code)
+         source_code = re.sub(r'(\'\'\'(.*?)\'\'\'|\"\"\"(.*?)\"\"\")', '', source_code, flags=re.DOTALL)
+         return source_code
+     pattern = r"```python\s+(.+?)\s+```"
+     matches = re.findall(pattern, source_text, re.DOTALL)
+     code_to_process = '\n'.join(matches) if matches else source_text
+     cleaned_code = remove_comments_and_docstrings(code_to_process)
+     return cleaned_code
+ def evaluate_dataframe(df):
+
+     results = {'P': [], 'R': [], 'F1': [], 'F3': []}
+     for index, row in df.iterrows():
+         try:
+             cands = [preprocess_code(row['generated_text'])]
+             refs = [preprocess_code(row['output'])]
+             P, R, F1, F3 = score(cands, refs, lang='python')
+             results['P'].append(P[0])
+             results['R'].append(R[0])
+             results['F1'].append(F1[0])
+             results['F3'].append(F3[0])
+         except Exception as e:
+             print(f"Error processing row {index}: {e}")
+             for key in results.keys():
+                 results[key].append(None)
+     df_metrics = pd.DataFrame(results)
+     return df_metrics
+ def evaluate_dataframe_multiple_runs(df, runs=3):
+
+     all_results = []
+     for run in range(runs):
+         df_metrics = evaluate_dataframe(df)
+         all_results.append(df_metrics)
+     # Calculate mean and std deviation of metrics across runs
+     df_metrics_mean = pd.concat(all_results).groupby(level=0).mean()
+     df_metrics_std = pd.concat(all_results).groupby(level=0).std()
+     return df_metrics_mean, df_metrics_std
+ """],
+ ["""
+ def analyze_sales_data(sales_records):
+     active_sales = filter(lambda record: record['status'] == 'active', sales_records)
+     sales_by_category = {}
+     for record in active_sales:
+         category = record['category']
+         total_sales = record['units_sold'] * record['price_per_unit']
+         if category not in sales_by_category:
+             sales_by_category[category] = {'total_sales': 0, 'total_units': 0}
+         sales_by_category[category]['total_sales'] += total_sales
+         sales_by_category[category]['total_units'] += record['units_sold']
+     average_sales_data = []
+     for category, data in sales_by_category.items():
+         average_sales = data['total_sales'] / data['total_units']
+         sales_by_category[category]['average_sales'] = average_sales
+         average_sales_data.append((category, average_sales))
+     average_sales_data.sort(key=lambda x: x[1], reverse=True)
+     for rank, (category, _) in enumerate(average_sales_data, start=1):
+         sales_by_category[category]['rank'] = rank
+     return sales_by_category
+ """]]
+
+
+ # Query the endpoint in streaming mode, prepending the refactoring instructions, and yield partial replies to the chat UI
+ async def inference(message, history):
+     partial_message = ""
+     async for token in await client.text_generation(f"{system_message}\n{message}", max_new_tokens=4096, stream=True):
+         partial_message += token
+         yield partial_message
+
+ gr.ChatInterface(
+     inference,
+     chatbot=gr.Chatbot(height=500),
+     textbox=gr.Textbox(placeholder="Chat with me!", container=False, scale=7),
+     title=title,
+     description=description,
+     theme="abidlabs/Lime",
+     examples=examples,
+     cache_examples=True,
+     retry_btn="Retry",
+     undo_btn="Undo",
+     clear_btn="Clear",
+ ).queue().launch()
+
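
The app reads HF_TOKEN and API_URL from the environment at launch. As a rough sketch that is not part of this commit, the streaming call that inference relies on could be exercised outside the Gradio UI roughly as follows; the prompt string and token budget below are placeholders, and it assumes API_URL points to a text-generation endpoint reachable with the token.

# Standalone sketch (not part of app.py): stream tokens from the endpoint directly.
# Assumes HF_TOKEN and API_URL are set and point to a text-generation endpoint.
import asyncio
import os

from huggingface_hub import AsyncInferenceClient


async def main():
    client = AsyncInferenceClient(os.getenv("API_URL"), token=os.getenv("HF_TOKEN"))
    prompt = "def add(a, b):\n    return a + b"  # placeholder input
    # stream=True yields generated tokens one at a time as the endpoint produces them
    async for token in await client.text_generation(prompt, max_new_tokens=128, stream=True):
        print(token, end="", flush=True)


asyncio.run(main())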