import gradio as gr
from huggingface_hub import InferenceClient
import os
import asyncio
import aiohttp
import json
import logging
import hashlib
from typing import List, Dict, Tuple
from transformers import pipeline
from sentence_transformers import SentenceTransformer, util
from pydantic import BaseModel, SecretStr

# Enable detailed logging
logging.basicConfig(level=logging.INFO)

# Hugging Face Inference Client (not used by the command handlers below)
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

# Pre-trained sentence-embedding model for issue similarity
similarity_model = SentenceTransformer('all-mpnet-base-v2')

class GitHubConfig(BaseModel):
    username: str
    repository: str
    api_token: SecretStr


class GitHubIntegration:
    def __init__(self, config: GitHubConfig):
        self.config = config
        self.headers = {
            "Authorization": f"Bearer {self.config.api_token.get_secret_value()}",
            "Accept": "application/vnd.github.v3+json"
        }
        self.url = "https://api.github.com"
    async def fetch_issues(self) -> List[Dict]:
        cache_key = hashlib.md5(f"{self.config.username}/{self.config.repository}".encode()).hexdigest()
        cached_data = await self._load_cache(cache_key)
        if cached_data:
            return cached_data
        url = f"{self.url}/repos/{self.config.username}/{self.config.repository}/issues"
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, headers=self.headers) as response:
                    response.raise_for_status()
                    issues = await response.json()
                    await self._save_cache(cache_key, issues)
                    return issues
        except Exception as e:
            logging.error(f"GitHub API error: {str(e)}")
            return []
    async def _load_cache(self, key: str) -> List[Dict] | None:
        # aiohttp cannot fetch file:// URLs, so read the cache file directly.
        cache_file = f"cache_{key}.json"
        if os.path.exists(cache_file):
            with open(cache_file) as f:
                return json.load(f)
        return None

    async def _save_cache(self, key: str, data: List[Dict]):
        cache_file = f"cache_{key}.json"
        with open(cache_file, "w") as f:
            json.dump(data, f)
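
# A minimal usage sketch outside of Gradio (illustrative; assumes a valid
# personal access token in a GITHUB_TOKEN environment variable, which this
# app does not otherwise require):
#
#     config = GitHubConfig(
#         username="octocat",
#         repository="hello-world",
#         api_token=SecretStr(os.environ["GITHUB_TOKEN"]),
#     )
#     issues = asyncio.run(GitHubIntegration(config).fetch_issues())
#     print(f"Fetched {len(issues)} open issues")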

async def analyze_issues(issue_text: str, model_name: str) -> str:
    """
    Analyze an issue and propose solutions.
    """
    logging.info(f"Analyzing issue with model: {model_name}")
    prompt = f"""
Issue: {issue_text}
Please provide a comprehensive resolution in the following format:
## Problem Summary:
## Root Cause Analysis:
## Solution Options:
## Recommended Solution:
## Implementation Steps:
## Verification Steps:
"""
    try:
        # Note: this loads the model on every call, which is slow; acceptable
        # for a demo, but a production app should cache the pipeline.
        nlp = pipeline("text-generation", model=model_name, max_length=1000)
        result = nlp(prompt)
        return result[0]['generated_text']
    except Exception as e:
        logging.error(f"Error analyzing issue: {e}")
        return "Error analyzing issue."

async def find_related_issues(issue_text: str, issues: list) -> List[Tuple[Dict, float]]:
    """
    Rank issues by embedding similarity to the given text and return the top three.
    """
    issue_embedding = similarity_model.encode(issue_text)
    related_issues = [
        (issue, util.cos_sim(issue_embedding, similarity_model.encode(issue['title']))[0][0].item())
        for issue in issues
    ]
    return sorted(related_issues, key=lambda x: x[1], reverse=True)[:3]
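
# Example of the expected shape (issue dicts are illustrative; only the
# "title" key is read above, and similarity scores will vary):
#
#     issues = [{"title": "App crashes on startup"}, {"title": "Typo in README"}]
#     top = asyncio.run(find_related_issues("Crash when launching", issues))
#     # -> [({"title": "App crashes on startup"}, 0.71), ...]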

def respond(command: str, github_api_token: str, github_username: str, github_repository: str, selected_model: str) -> str:
    """
    Handle chat commands.
    """
    if command.startswith("/github"):
        # parts[0] is the command itself, so four tokens are required in total.
        parts = command.split()
        if len(parts) != 4:
            return "❌ Format: /github <username> <repo> <token>"
        github_client = GitHubIntegration(GitHubConfig(username=parts[1], repository=parts[2], api_token=SecretStr(parts[3])))
        # Fetch once to validate the credentials and warm the cache.
        asyncio.run(github_client.fetch_issues())
        return "✅ GitHub configured successfully."
    elif command.startswith("/analyze"):
        issue_text = command.replace("/analyze", "").strip()
        return asyncio.run(analyze_issues(issue_text, selected_model))
    elif command.startswith("/list_issues"):
        github_client = GitHubIntegration(GitHubConfig(username=github_username, repository=github_repository, api_token=SecretStr(github_api_token)))
        issues = asyncio.run(github_client.fetch_issues())
        return "\n".join([f"- {issue['title']} (Issue #{issue['number']})" for issue in issues])
    else:
        return "Unknown command. Available commands: /github, /analyze, /list_issues."

iface = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(label="Command"),
        gr.Textbox(label="GitHub API Token", placeholder="Enter your GitHub API token", type="password"),
        gr.Textbox(label="GitHub Username", placeholder="Enter your GitHub username"),
        gr.Textbox(label="GitHub Repository", placeholder="Enter your GitHub repository"),
        # The original choices ("text-davinci-003", "text-curie-001") are OpenAI
        # models that transformers.pipeline cannot load; use Hugging Face hub IDs.
        gr.Dropdown(label="Model", choices=["gpt2", "HuggingFaceH4/zephyr-7b-beta"], value="gpt2"),
    ],
    outputs=gr.Textbox(label="Response"),
    title="AI GitHub Assistant",
    description="Interact with GitHub and analyze issues with AI.",
)

iface.launch(share=True)