Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -11,40 +11,79 @@ import logging
 import re
 from typing import Dict, List, Optional, Tuple
 import subprocess
 import plotly.graph_objects as go
-from transformers import pipeline
 import threading

 # ========== Configuration ==========
 WORKSPACE = Path("/tmp/issue_workspace")
 WORKSPACE.mkdir(exist_ok=True)
 GITHUB_API = "https://api.github.com/repos"
 HF_INFERENCE_API = "https://api-inference.huggingface.co/models"
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)

-# Free Hugging Face models for selection
 HF_MODELS = {
-    "Mistral-8x7B… [entry truncated in the diff capture]
-    "… [entry truncated]
-    "… [entry truncated]
 }
 DEFAULT_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"

-# ========== Theme ==========
-theme = gr.themes.… [truncated]
-    primary_hue="… [truncated]
-    secondary_hue="… [truncated]
-    radius_size="… [truncated]
 ).set(
-    button_primary_background_fill="… [truncated]
     button_primary_text_color="white",
-    … [three removed lines truncated in the diff capture]
 )

-# ==========… [section header truncated]
 class IssueManager:
     def __init__(self):
         self.issues: Dict[int, dict] = {}
@@ -53,284 +92,314 @@ class IssueManager:
         self.current_issue: Optional[int] = None
         self.github_token: Optional[str] = None
         self.hf_token: Optional[str] = None
-        self.collaborators: Dict[str,… [truncated]
-        self.… [truncated]
-        self.… [truncated]
-        … [truncated]

     async def crawl_issues(self, repo_url: str, github_token: str, hf_token: str) -> Tuple[bool, str]:
-        self.repo_url = repo_url
-        self.github_token = github_token
-        self.hf_token = hf_token
-        match = re.match(r"https://github.com/([^/]+)/([^/]+)", repo_url)
-        if not match:
-            return False, "Invalid GitHub URL format"
-
-        owner, repo_name = match.groups()
-        all_issues = []
-        page = 1
-        headers = {"Authorization": f"Bearer {github_token}", "Accept": "application/vnd.github+json"}
-
         try:
-            … [pagination loop truncated in the diff capture]
-            page += 1
-
-            self.issues = {
-                i['number']: {**i, 'severity': self._determine_severity(i)}
-                for i in all_issues if not i.get('assignee')
-            }
-            return True, f"Found {len(self.issues)} unresolved/unassigned issues"
         except Exception as e:
             logger.error(f"Crawl error: {e}")
             return False, str(e)

-    def … [_determine_severity signature truncated]
-        … [body truncated]
-        return … [truncated]

-    async def clone_and_work(self, issue_number: int) -> Tuple[Dict, str, str]:
-        try:
-            repo_path = WORKSPACE / f"repo-{issue_number}"
-            if repo_path.exists():
-                shutil.rmtree(repo_path)
-
-            self.repo = Repo.clone_from(self.repo_url, repo_path)
-            branch = f"issue-{issue_number}"
-            self.repo.git.checkout('-b', branch)
-            self.current_issue = issue_number
-
-            issue = self.issues[issue_number]
-            diff = ""
-            return {"success": f"Working on #{issue_number}"}, issue['body'], diff
-        except GitCommandError as e:
-            return {"error": str(e)}, "", ""

-    async def suggest_resolution(self, issue_number: int, model: str) -> str:
-        """Use HF Inference API or local fallback"""
         issue = self.issues[issue_number]
-        … [prompt, headers and payload construction truncated]
-        try:
-            async with aiohttp.ClientSession() as session:
-                async with session.post(f"{HF_INFERENCE_API}/{model}", headers=headers, json=payload) as resp:
-                    if resp.status == 200:
-                        result = await resp.json()
-                        return result[0].get("generated_text", "No suggestion").strip()
-                    elif resp.status == 429:
-                        return await self._local_suggestion(prompt)
-                    return f"HF API error: {resp.status}"
-        except Exception as e:
-            logger.error(f"HF suggestion error: {e}")
-            return await self._local_suggestion(prompt)
-        return await self._local_suggestion(prompt)

-    async def _local_suggestion(self, prompt: str) -> str:
-        """Fallback to local model"""
-        if not self.local_nlp:
-            self.local_nlp = pipeline("text-generation", model="distilgpt2", device=-1)  # CPU-only, lightweight
-        try:
-            result = self.local_nlp(prompt, max_length=200, num_return_sequences=1)
-            return result[0]["generated_text"].strip()
-        except Exception as e:
-            logger.error(f"Local fallback error: {e}")
-            return "Local suggestion unavailable"

-    … [resolve_issue signature truncated]
-        repo_path = self.repo.working_dir
         try:
-            … [resolution directory setup truncated]
-            resolution_file = resolution_dir / f"resolution_{timestamp}.txt"
-            resolution_file.write_text(resolution)
-
-            self.repo.git.add(all=True)
-            self.repo.index.commit(f"Resolve #{issue_number}")
-            diff = self.repo.git.diff('HEAD^', 'HEAD')
-
-            v1 = await self._verify_resolution(issue_number, resolution)
-            await asyncio.sleep(1)
-            v2 = await self._verify_resolution(issue_number, resolution)
-
-            if v1["status"] and v2["status"]:
-                self.repo.git.push('origin', f"issue-{issue_number}")
-                pr_url = await self._create_pr(issue_number)
-                return {
-                    "success": f"Issue #{issue_number} resolved and verified",
-                    "pr_url": pr_url,
-                    "verification": [v1, v2]
-                }, diff
-            return {"error": "Verification failed", "details": [v1, v2]}, diff
-
-        except Exception as e:
-            logger.error(f"Resolve error: {e}")
-            return {"error": str(e)}, ""

-    async def … [_verify_resolution signature and test-run setup truncated]
-            stdout, stderr = await proc.communicate()
-            if proc.returncode != 0:
-                return {"status": False, "message": f"Tests failed: {stderr.decode()}"}
-
-            return {"status": True, "message": "Resolution verified"}
-        except Exception as e:
-            return {"status": False, "message": str(e)}

-    async def _create_pr(self, issue_number: int) -> str:
-        owner, repo_name = re.match(r"https://github.com/([^/]+)/([^/]+)", self.repo_url).groups()
-        headers = {"Authorization": f"Bearer {self.github_token}", "Accept": "application/vnd.github+json"}
-        pr_data = {
-            "title": f"Resolve #{issue_number}: {self.issues[issue_number]['title']}",
-            "body": f"Resolution for #{issue_number}\nVerification: Passed twice",
-            "head": f"issue-{issue_number}",
-            "base": "main"
-        }
         async with aiohttp.ClientSession() as session:
-            … [PR POST request truncated]

-        severity_counts = {s: sum(1 for i in self.issues.values() if i['severity'] == s) for s in ["Critical", "High", "Medium", "Low"]}
-        return go.Figure(
-            data=[go.Bar(x=list(severity_counts.keys()), y=list(severity_counts.values()))],
-            layout={"title": "Issue Severity Distribution"}
-        )

-    async def … [remaining method truncated]

-# ========== UI ==========
 def create_ui():
-    with gr.Blocks(theme=theme, title="… [truncated]
-        gr.Markdown("… [truncated]
-        … [row/column layout truncated]
-            repo_url = gr.Textbox(label="GitHub Repository URL", placeholder="https://github.com/username/repo")
-            github_token = gr.Textbox(label="GitHub Token", type="password")
-            hf_token = gr.Textbox(label="Hugging Face Token", type="password")
-            model_select = gr.Dropdown(choices=list(HF_MODELS.keys()), value="Mistral-8x7B (Powerful)", label="AI Model")
-            crawl_btn = gr.Button("Crawl Issues", variant="primary")

-        with gr.… [truncated]
-            with gr.… [truncated]
-                … [dataframe setup truncated]
-                    datatype=["number", "str", "str"],
-                    interactive=True
-                )
-                progress = gr.Progress()

-        with gr.… [truncated]
         with gr.Row():
-            … [truncated]
         with gr.Row():
-            … [event wiring and remaining UI truncated]
-        return … [truncated]

     return app

 # ========== Execution ==========
 if __name__ == "__main__":
     app = create_ui()
-    app.launch(… [truncated]
The updated file, with added lines marked +:

 import re
 from typing import Dict, List, Optional, Tuple
 import subprocess
+import plotly.express as px
 import plotly.graph_objects as go
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import threading
+from http.server import HTTPServer, BaseHTTPRequestHandler
+import speech_recognition as sr
+from code_editor import code_editor
+from functools import lru_cache
+import hashlib
+import markdown2
+from concurrent.futures import ThreadPoolExecutor
+from hdbscan import HDBSCAN  # PyPI package is "hdbscan"; there is no "hdb_scan" module
+import websockets
+from websockets.exceptions import ConnectionClosed
+from ot_code_editor import OTCodeEditor  # assuming an OTCodeEditor package is installed

 # ========== Configuration ==========
 WORKSPACE = Path("/tmp/issue_workspace")
 WORKSPACE.mkdir(exist_ok=True)
 GITHUB_API = "https://api.github.com/repos"
 HF_INFERENCE_API = "https://api-inference.huggingface.co/models"
+WEBHOOK_PORT = 8000
+WS_PORT = 8001
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
+executor = ThreadPoolExecutor(max_workers=4)

 HF_MODELS = {
+    "Mistral-8x7B": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "Llama-3-8B": "meta-llama/Meta-Llama-3-8B",
+    "CodeLlama-34B": "codellama/CodeLlama-34b-Instruct-hf",
+    "StarCoder2": "bigcode/starcoder2-15b"
 }
 DEFAULT_MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"

+# ========== Modern Theme ==========
+theme = gr.themes.Soft(
+    primary_hue="violet",
+    secondary_hue="emerald",
+    radius_size="xl",
+    font=[gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui"]
 ).set(
+    button_primary_background_fill="linear-gradient(90deg, #8B5CF6 0%, #EC4899 100%)",
     button_primary_text_color="white",
+    button_primary_border_radius="12px",
+    block_label_text_size="lg",
+    block_label_text_weight="600",
+    block_title_text_size="xl",
+    block_title_text_weight="800",
+    panel_background_fill="white",
+    panel_border_radius="16px",
+    block_shadow="*shadow_drop_lg",
 )

+# ========== Enhanced Webhook Handler ==========
+class WebhookHandler(BaseHTTPRequestHandler):
+    def do_POST(self):
+        content_length = int(self.headers['Content-Length'])
+        payload = json.loads(self.rfile.read(content_length).decode())
+        event = self.headers.get('X-GitHub-Event')
+
+        if event == 'issues':
+            action = payload.get('action')
+            if action in ['opened', 'reopened', 'closed', 'assigned']:
+                # note: this runs in the HTTP server thread, so
+                # asyncio.get_event_loop() must resolve to a loop that is
+                # actually running elsewhere for the hand-off to succeed
+                asyncio.run_coroutine_threadsafe(
+                    manager.handle_webhook_event(event, action, payload),
+                    asyncio.get_event_loop()
+                )
+
+        self.send_response(200)
+        self.end_headers()
+
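A minimal local smoke test for the handler above (a sketch; it assumes the webhook server is already running on WEBHOOK_PORT, and the payload shape mirrors GitHub's "issues" event):

    import json
    import urllib.request

    payload = json.dumps({"action": "opened", "issue": {"number": 1}}).encode()
    req = urllib.request.Request(
        "http://localhost:8000",
        data=payload,
        headers={"Content-Type": "application/json", "X-GitHub-Event": "issues"},
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.status)  # the handler always replies 200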
+# ========== AI-Powered Issue Manager ==========
 class IssueManager:
     def __init__(self):
         self.issues: Dict[int, dict] = {}
         … [two context lines lost in the diff capture]
         self.current_issue: Optional[int] = None
         self.github_token: Optional[str] = None
         self.hf_token: Optional[str] = None
+        self.collaborators: Dict[str, dict] = {}
+        self.points: int = 0
+        self.severity_rules: Dict[str, List[str]] = {
+            "Critical": ["critical", "urgent", "security", "crash"],
+            "High": ["high", "important", "error", "regression"],
+            "Medium": ["medium", "bug", "performance"],
+            "Low": ["low", "documentation", "enhancement"]
+        }
+        self.issue_clusters: Dict[int, List[int]] = {}  # cluster id -> issue indices
+        self._init_local_models()
+        self.ws_clients: List[websockets.WebSocketClientProtocol] = []
+        self.code_editors: Dict[int, OTCodeEditor] = {}  # one collaborative editor per issue
+
+    def _init_local_models(self):
+        self.code_model = pipeline(
+            "text-generation",
+            model="codellama/CodeLlama-7b-Instruct-hf",
+            device_map="auto",
+            torch_dtype="auto"
+        )
+        self.summarizer = pipeline(
+            "summarization",
+            model="philschmid/bart-large-cnn-samsum",
+            device_map="auto"
+        )
+
+    @lru_cache(maxsize=100)  # caveat: on a coroutine this caches the coroutine object, not its result
+    async def cached_suggestion(self, issue_hash: str, model: str):
+        return await self.suggest_resolution(issue_hash, model)
+
+    async def handle_webhook_event(self, event: str, action: str, payload: dict):
+        logger.info(f"Processing {event} {action} event")
+        if action == 'closed':
+            self.issues.pop(payload['issue']['number'], None)
+        else:
+            await self.crawl_issues(self.repo_url, self.github_token, self.hf_token)
+
async def crawl_issues(self, repo_url: str, github_token: str, hf_token: str) -> Tuple[bool, str]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
133 |
try:
|
134 |
+
self.repo_url = repo_url
|
135 |
+
self.github_token = github_token
|
136 |
+
self.hf_token = hf_token
|
137 |
+
self.repo = Repo.clone_from(repo_url, WORKSPACE / "repo")
|
138 |
+
headers = {"Authorization": f"token {github_token}"}
|
139 |
+
async with aiohttp.ClientSession(headers=headers) as session:
|
140 |
+
async with session.get(f"{GITHUB_API}/{repo_url}/issues") as response:
|
141 |
+
issues = await response.json()
|
142 |
+
for issue in issues:
|
143 |
+
self.issues[issue['number']] = issue
|
144 |
+
await self._cluster_similar_issues()
|
145 |
+
return True, f"Found {len(self.issues)} issues (clustered into {len(self.issue_clusters)} groups)"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
146 |
except Exception as e:
|
147 |
logger.error(f"Crawl error: {e}")
|
148 |
return False, str(e)
|
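Note that the single GET above returns only the first page of issues (30 by default). A sketch of full pagination with GitHub's documented per_page/page query parameters, reusing the session, owner and repo_name from the method above:

    page = 1
    while True:
        async with session.get(
            f"{GITHUB_API}/{owner}/{repo_name}/issues",
            params={"state": "open", "per_page": "100", "page": str(page)},
        ) as response:
            batch = await response.json()
        if not batch:
            break  # an empty page means we fetched everything
        for issue in batch:
            self.issues[issue['number']] = issue
        page += 1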
+
+    async def _cluster_similar_issues(self):
+        embeddings = await self._generate_embeddings()
+        # Use HDBSCAN for clustering (no native cosine support; see note below)
+        clusterer = HDBSCAN(min_cluster_size=2, metric='cosine')
+        clusters = clusterer.fit_predict(embeddings)
+        self.issue_clusters = {}
+        for i, cluster_id in enumerate(clusters):
+            if cluster_id not in self.issue_clusters:
+                self.issue_clusters[cluster_id] = []
+            self.issue_clusters[cluster_id].append(i)

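A caveat on the clustering above: the hdbscan package has no native cosine metric, so metric='cosine' typically fails. The usual workaround (a sketch, assuming numpy is available) is to L2-normalize the embeddings and cluster with euclidean distance, which orders pairs identically to cosine:

    import numpy as np
    from hdbscan import HDBSCAN

    def cluster_normalized(embeddings):
        X = np.asarray(embeddings, dtype=float)
        X /= np.linalg.norm(X, axis=1, keepdims=True)  # unit vectors: euclidean order matches cosine
        return HDBSCAN(min_cluster_size=2, metric="euclidean").fit_predict(X)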
+    async def _generate_embeddings(self):
+        async with aiohttp.ClientSession() as session:
+            texts = [f"{i['title']} {i['body']}" for i in self.issues.values()]
+            response = await session.post(
+                f"{HF_INFERENCE_API}/sentence-transformers/all-mpnet-base-v2",
+                headers={"Authorization": f"Bearer {self.hf_token}"},
+                json={"inputs": texts}
+            )
+            return await response.json()
+
+    async def generate_code_patch(self, issue_number: int) -> dict:
         issue = self.issues[issue_number]
+        context = await self._get_code_context(issue_number)
+        prompt = f"""<issue>
+{issue['title']}
+{issue['body']}
+</issue>
+
+<code_context>
+{context}
+</code_context>
+
+Generate a JSON patch file with specific changes needed to resolve this issue."""

+        response = self.code_model(
+            prompt,
+            max_length=1024,
+            temperature=0.2,
+            return_full_text=False
+        )
         try:
+            return json.loads(response[0]['generated_text'])
+        except json.JSONDecodeError:
+            return {"error": "Failed to parse AI-generated patch"}

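Because code models rarely emit bare JSON, the json.loads above fails whenever the model wraps its patch in prose or code fences. A more tolerant extraction helper (hypothetical, not part of this commit):

    import json
    import re

    def extract_json(text: str) -> dict:
        # grab the first {...} span; models often surround JSON with commentary
        match = re.search(r"\{.*\}", text, re.DOTALL)
        if not match:
            return {"error": "No JSON object found in model output"}
        try:
            return json.loads(match.group(0))
        except json.JSONDecodeError as exc:
            return {"error": f"Invalid JSON: {exc}"}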
+    async def _get_code_context(self, issue_number: int) -> str:
+        # note: crawl_issues clones into WORKSPACE / "repo"; this per-issue
+        # path only exists if a per-issue checkout created it
+        repo_path = WORKSPACE / f"repo-{issue_number}"
+        code_files = list(repo_path.glob('**/*.py')) + list(repo_path.glob('**/*.js'))
+        return "\n".join(f.read_text()[:1000] for f in code_files[:5])
+
+    async def suggest_resolution(self, issue_hash: str, model: str) -> str:
+        issue = self.issues[int(issue_hash)]
+        prompt = f"""
+## Issue: {issue['title']}
+
+{issue['body']}
+
+Suggest a solution to this issue.
+"""
+
         async with aiohttp.ClientSession() as session:
+            response = await session.post(
+                f"{HF_INFERENCE_API}/{model}",
+                headers={"Authorization": f"Bearer {self.hf_token}"},
+                json={"inputs": prompt}
+            )
+            return await response.json()

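The Inference API returns a list of dicts for text-generation models rather than a plain string, so the raw response.json() above is worth unwrapping before display; a hedged sketch:

    result = await response.json()
    # typical text-generation shape: [{"generated_text": "..."}]
    if isinstance(result, list) and result:
        return result[0].get("generated_text", "").strip()
    return str(result)  # surfaces error payloads such as {"error": "..."}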
+    async def broadcast_collaboration_status(self):
+        while True:
+            try:
+                await asyncio.sleep(1)
+                # Send collaborator status to all connected clients
+                # (note: the front-end script expects a {"type": "collaboration_status", ...}
+                # envelope, whereas this sends a bare list)
+                await asyncio.gather(
+                    *[client.send(json.dumps([{"name": name, "status": status}
+                                              for name, status in self.collaborators.items()]))
+                      for client in self.ws_clients]
+                )
+            except ConnectionClosed:
+                # Handle client disconnections
+                pass

+    async def handle_code_editor_update(self, issue_num: int, delta: str):
+        if issue_num not in self.code_editors:
+            return
+        self.code_editors[issue_num].apply_delta(json.loads(delta))
+        await asyncio.gather(
+            *[client.send(json.dumps({"type": "code_update", "issue_num": issue_num, "delta": delta}))
+              for client in self.ws_clients]
+        )

+# ========== Enhanced UI Components ==========
 def create_ui():
+    with gr.Blocks(theme=theme, title="AI Issue Resolver Pro", css=".gradio-container {max-width: 1200px !important}") as app:
+        gr.Markdown("""
+        # AI Issue Resolver Pro
+        *Next-generation issue resolution powered by AI collaboration*
+        """)

+        with gr.Row(variant="panel"):
+            with gr.Column(scale=2):
+                repo_url = gr.Textbox(label="GitHub Repo", placeholder="https://github.com/org/repo", info="Connect your repository")
+                github_token = gr.Textbox(label="GitHub Token", type="password")
+                hf_token = gr.Textbox(label="HF Token", type="password")

+            with gr.Column(scale=1):
+                model_select = gr.Dropdown(choices=list(HF_MODELS.keys()), value="Mistral-8x7B",
+                                           label="AI Model", info="Choose your resolution strategy")
+                crawl_btn = gr.Button("Scan Repository", variant="primary")
+
+        with gr.Tabs():
+            with gr.Tab("Issue Board", id="board"):
                 with gr.Row():
+                    issue_list = gr.Dataframe(
+                        headers=["ID", "Title", "Severity", "Cluster"],
+                        datatype=["number", "str", "str", "number"],
+                        interactive=True,
+                        height=600
+                    )
+                    with gr.Column(scale=1):
+                        stats_plot = gr.Plot()
+                        collab_status = gr.HTML("<h3>Active Collaborators</h3><div id='collab-list'></div>")
+
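crawl_issues returns a (bool, message) tuple, while the crawl button later wires its output to this Dataframe and plot, so some adapter is needed. A hypothetical flattener for the four columns above:

    def issues_to_rows(manager):
        # one row per issue: ID, Title, Severity, Cluster (-1 when unclustered)
        cluster_of = {idx: cid
                      for cid, members in manager.issue_clusters.items()
                      for idx in members}
        return [
            [num, issue.get("title", ""), issue.get("severity", "Unknown"), cluster_of.get(pos, -1)]
            for pos, (num, issue) in enumerate(manager.issues.items())
        ]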
+            with gr.Tab("Resolution Studio", id="studio"):
                 with gr.Row():
+                    with gr.Column(scale=1):
+                        issue_num = gr.Number(label="Issue #", precision=0)
+                        issue_viz = gr.HTML()
+                        ai_tools = gr.Accordion("AI Tools")
+                        with ai_tools:
+                            suggest_btn = gr.Button("Suggest Resolution")
+                            patch_btn = gr.Button("Generate Patch")
+                            test_btn = gr.Button("Create Tests")
+                            impact_btn = gr.Button("Impact Analysis")
+
+                    with gr.Column(scale=2):
+                        with gr.Tabs():
+                            with gr.Tab("Code Editor"):
+                                code_edit = gr.HTML()
+                            with gr.Tab("AI Chat"):
+                                chat = gr.ChatInterface(
+                                    self._ai_chat,  # note: create_ui has no self; a module-level handler is likely intended
+                                    additional_inputs=[issue_num]
+                                )
+
+            with gr.Tab("Analytics", id="analytics"):
+                with gr.Row():
+                    gr.Markdown("### Resolution Timeline")
+                    timeline = gr.Timeline()  # note: not a built-in Gradio component
+                with gr.Row():
+                    gr.Markdown("### Achievement System")
+                    badges = gr.HTML("<div class='badges'></div>")
+
+        # Enhanced Event Handlers
+        async def generate_patch(issue_num):
+            patch = await manager.generate_code_patch(issue_num)
+            return gr.JSON(value=patch)
+
+        def update_code_editor(files):
+            return code_editor(value=files)
+
+        # Real-time collaboration bootstrap script (defined before app.load below uses it)
+        def web_socket_js():
+            return """
+            <script>
+                const collabWs = new WebSocket('ws://localhost:8001');
+                collabWs.onmessage = function(event) {
+                    const data = JSON.parse(event.data);
+                    if (data.type === 'code_update') {
+                        const issueNum = data.issue_num;
+                        const delta = data.delta;
+                        const codeEditor = document.getElementById(`code-editor-${issueNum}`);
+                        if (codeEditor) {
+                            codeEditor.applyDelta(delta);
+                        }
+                    } else if (data.type === 'collaboration_status') {
+                        document.getElementById('collab-list').innerHTML =
+                            data.map(u => `<div class="collab-item">${u.name}: ${u.status}</div>`).join('');
+                    }
+                };
+            </script>
+            """
+
+        issue_list.select(
+            fn=lambda evt: (evt[0], manager.issues[evt[0]]['body']),
+            outputs=[issue_num, issue_body]  # note: issue_body is not defined in this UI
+        ).then(
+            fn=lambda num: generate_issue_preview(num),  # generate_issue_preview must be defined elsewhere
+            outputs=issue_viz
+        )
+
+        app.load(
+            fn=init_collaboration,  # init_collaboration must be defined elsewhere
+            inputs=[],
+            outputs=collab_status,
+            _js=web_socket_js()
+        )
+
+        crawl_btn.click(
+            fn=lambda repo, token, hf_token: manager.crawl_issues(repo, token, hf_token),
+            inputs=[repo_url, github_token, hf_token],
+            outputs=[issue_list, stats_plot]  # note: crawl_issues returns (bool, str), not these components
+        )
+
+        suggest_btn.click(
+            fn=lambda issue, model: manager.cached_suggestion(issue, model),
+            inputs=[issue_num, model_select],
+            outputs=chat
+        )
+
+        patch_btn.click(
+            fn=generate_patch,
+            inputs=[issue_num],
+            outputs=chat
+        )
+
+        # Add more event handlers for other AI tools
+        # ...
+
+        issue_num.change(
+            fn=lambda issue_num: create_code_editor(issue_num),  # create_code_editor must be defined elsewhere
+            inputs=[issue_num],
+            outputs=code_edit
+        )
+
+    # Start enhanced webhook server
+    webhook_server = HTTPServer(("", WEBHOOK_PORT), WebhookHandler)
+    threading.Thread(target=webhook_server.serve_forever, daemon=True).start()
+
+    # Start WebSocket server
+    async def start_ws_server():
+        async with websockets.serve(handle_ws_connection, "localhost", WS_PORT):
+            await asyncio.Future()
+
+    async def handle_ws_connection(websocket: websockets.WebSocketClientProtocol, path):
+        manager.ws_clients.append(websocket)
+        try:
+            async for message in websocket:
+                data = json.loads(message)
+                if data.get("type") == "code_update":
+                    await manager.handle_code_editor_update(data["issue_num"], data["delta"])
+        finally:
+            manager.ws_clients.remove(websocket)
+
+    # run both loops on daemon threads; a bare asyncio.run() here would block create_ui() forever
+    threading.Thread(target=lambda: asyncio.run(start_ws_server()), daemon=True).start()
+    threading.Thread(target=lambda: asyncio.run(manager.broadcast_collaboration_status()), daemon=True).start()

     return app

 # ========== Execution ==========
 if __name__ == "__main__":
+    manager = IssueManager()
     app = create_ui()
+    app.launch(
+        share=True,
+        auth=("admin", os.getenv("APP_PASSWORD")),  # requires APP_PASSWORD to be set; see note below
+        favicon_path="https://huggingface.co/front/assets/huggingface_logo-noborder.svg"
+    )
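One launch caveat: auth=("admin", os.getenv("APP_PASSWORD")) passes None as the password when the variable is unset, which Gradio rejects. A fail-fast guard (a sketch) before launch:

    import os

    password = os.getenv("APP_PASSWORD")
    if not password:
        raise RuntimeError("Set the APP_PASSWORD environment variable before launching")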