Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,100 +1,137 @@
 import gradio as gr
-import requests
 from huggingface_hub import InferenceClient
-… (old lines 4-20 not recoverable from the rendered diff)
-try:
-    … (old lines 22-24 not recoverable)
-    tokenizer = AutoTokenizer.from_pretrained(DEFAULT_MODEL)
-    progress(100, "Model loaded successfully!")
-    return "Model loaded successfully!"
-except Exception as e:
-… (old lines 29-78 not recoverable)
-    # Chat section
-    with gr.Row():
-        chatbot = gr.Chatbot(
-            label="Chat with Trained Bot",
-            type="messages",
-        )
-        user_input = gr.Textbox(
-            label="Your Message",
-            placeholder="Type your message and press Enter...",
-            lines=1,
-        )
-
-    # Train chatbot logic
-    train_button.click(train_chatbot, inputs=[chat_dataset], outputs=[train_status])
-
-    # Chat interaction logic
-    user_input.submit(chat_with_bot, inputs=[chatbot, user_input], outputs=chatbot)
-
-# Launch app
-if __name__ == "__main__":
-    app.launch(server_name="0.0.0.0", server_port=7860)
+import os
+import asyncio
+import aiohttp
+import json
+import logging
+import hashlib
+from typing import List, Dict, Tuple
+from transformers import pipeline
+from sentence_transformers import SentenceTransformer, util
+from pydantic import BaseModel, SecretStr
+
+# Enable detailed logging
+logging.basicConfig(level=logging.INFO)
+
+# Hugging Face Inference Client
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# Load a pre-trained model for sentence similarity
+similarity_model = SentenceTransformer('all-mpnet-base-v2')
+
+class GitHubConfig(BaseModel):
+    username: str
+    repository: str
+    api_token: SecretStr
+
+class GitHubIntegration:
+    def __init__(self, config: GitHubConfig):
+        self.config = config
+        self.headers = {
+            "Authorization": f"Bearer {self.config.api_token.get_secret_value()}",
+            "Accept": "application/vnd.github.v3+json"
+        }
+        self.url = "https://api.github.com"
+
+    async def fetch_issues(self) -> List[Dict]:
+        # Cache issues per repository, keyed by an MD5 digest of "username/repository"
+        cache_key = hashlib.md5(f"{self.config.username}/{self.config.repository}".encode()).hexdigest()
+        cached_data = await self._load_cache(cache_key)
+        if cached_data:
+            return cached_data
+
+        url = f"{self.url}/repos/{self.config.username}/{self.config.repository}/issues"
+        try:
+            async with aiohttp.ClientSession() as session:
+                async with session.get(url, headers=self.headers) as response:
+                    response.raise_for_status()
+                    issues = await response.json()
+                    await self._save_cache(cache_key, issues)
+                    return issues
+        except Exception as e:
+            logging.error(f"GitHub API error: {e}")
+            return []
+
+    async def _load_cache(self, key: str) -> List[Dict] | None:
+        # Read the cache directly from disk; aiohttp cannot fetch file:// URLs
+        cache_file = f"cache_{key}.json"
+        if os.path.exists(cache_file):
+            with open(cache_file) as f:
+                return json.load(f)
+        return None
+
+    async def _save_cache(self, key: str, data: List[Dict]):
+        cache_file = f"cache_{key}.json"
+        with open(cache_file, "w") as f:
+            json.dump(data, f)
+
+async def analyze_issues(issue_text: str, model_name: str) -> str:
+    """
+    Analyze an issue and propose a structured resolution.
+    """
+    logging.info(f"Analyzing issue with model: {model_name}")
+    prompt = f"""
+    Issue: {issue_text}
+    Please provide a comprehensive resolution in the following format:
+    ## Problem Summary:
+    ## Root Cause Analysis:
+    ## Solution Options:
+    ## Recommended Solution:
+    ## Implementation Steps:
+    ## Verification Steps:
+    """
+    try:
+        nlp = pipeline("text-generation", model=model_name, max_length=1000)
+        result = nlp(prompt)
+        return result[0]['generated_text']
+    except Exception as e:
+        logging.error(f"Error analyzing issue: {e}")
+        return "Error analyzing issue."
+
+async def find_related_issues(issue_text: str, issues: List[Dict]) -> List[Tuple[Dict, float]]:
+    """
+    Rank issues by cosine similarity to the given text and return the top three.
+    """
+    issue_embedding = similarity_model.encode(issue_text)
+    related_issues = [
+        (issue, util.cos_sim(issue_embedding, similarity_model.encode(issue['title']))[0][0].item())
+        for issue in issues
+    ]
+    return sorted(related_issues, key=lambda x: x[1], reverse=True)[:3]
+
+def respond(command: str, github_api_token: str, github_username: str, github_repository: str, selected_model: str) -> str:
+    """
+    Handle chat commands.
+    """
+    if command.startswith("/github"):
+        # parts[0] is the command itself, so a valid command has four parts
+        parts = command.split(maxsplit=3)
+        if len(parts) < 4:
+            return "❌ Format: /github <username> <repo> <token>"
+        github_client = GitHubIntegration(GitHubConfig(username=parts[1], repository=parts[2], api_token=SecretStr(parts[3])))
+        issues = asyncio.run(github_client.fetch_issues())
+        return "✅ GitHub configured successfully."
+    elif command.startswith("/analyze"):
+        issue_text = command.replace("/analyze", "").strip()
+        return asyncio.run(analyze_issues(issue_text, selected_model))
+    elif command.startswith("/list_issues"):
+        github_client = GitHubIntegration(GitHubConfig(username=github_username, repository=github_repository, api_token=SecretStr(github_api_token)))
+        issues = asyncio.run(github_client.fetch_issues())
+        return "\n".join([f"- {issue['title']} (Issue #{issue['number']})" for issue in issues])
+    else:
+        return "Unknown command. Use /help for instructions."
+
+iface = gr.Interface(
+    fn=respond,
+    inputs=[
+        gr.Textbox(label="Command"),
+        gr.Textbox(label="GitHub API Token", placeholder="Enter your GitHub API token"),
+        gr.Textbox(label="GitHub Username", placeholder="Enter your GitHub username"),
+        gr.Textbox(label="GitHub Repository", placeholder="Enter your GitHub repository"),
+        # Choices must be Hugging Face text-generation checkpoints; the original
+        # "text-davinci-003"/"text-curie-001" are OpenAI API models that pipeline() cannot load
+        gr.Dropdown(label="Model", choices=["gpt2", "distilgpt2"], value="gpt2"),
+    ],
+    outputs=gr.Textbox(label="Response"),
+    title="AI GitHub Assistant",
+    description="Interact with GitHub and analyze issues with AI.",
+)
+
+iface.launch(share=True)
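
As a quick smoke test of the new respond() handler, a sketch like the one below should work if pasted above the iface.launch(share=True) call; the octocat/hello-world repository and the ghp_XXXX token are hypothetical placeholders, not values from this commit:

    # Hypothetical smoke test for respond(); placeholder repo and token, not real credentials.
    print(respond("/github octocat hello-world ghp_XXXX", "", "", "", "gpt2"))   # configure and fetch issues
    print(respond("/list_issues", "ghp_XXXX", "octocat", "hello-world", "gpt2")) # list fetched/cached issues
    print(respond("/analyze The Space crashes on startup", "", "", "", "gpt2"))  # run the text-generation pipeline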