from flask import Flask, render_template, request, jsonify
import requests
import os
import logging
from datetime import datetime

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, pipeline
import spaces
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the model and tokenizer
model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
@spaces.GPU  # assumed: request ZeroGPU allocation for generation (no-op outside ZeroGPU Spaces)
def stream_chat(
    message: str,
    history: list,
    system_prompt: str,
    temperature: float = 0.8,
    max_new_tokens: int = 1024,
    top_p: float = 1.0,
    top_k: int = 20,
    penalty: float = 1.2,
):
    conversation = [
        {"role": "system", "content": system_prompt}
    ]
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer},
        ])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt").to(device)
    # TextStreamer prints tokens to stdout as they are generated; skip_special_tokens is forwarded to decode()
    streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=temperature != 0,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        repetition_penalty=penalty,
        eos_token_id=tokenizer.eos_token_id,
        streamer=streamer,
    )
    output = model.generate(**generate_kwargs)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Initialize the pipeline (note: this loads a second copy of the Mixtral weights alongside `model` above)
pipe = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")

app = Flask(__name__)
# Configure logging
LOGS_DIRECTORY = 'logs'
os.makedirs(LOGS_DIRECTORY, exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(f"{LOGS_DIRECTORY}/github_bot_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)
# Note: this pipeline-based definition replaces (shadows) the generate-based stream_chat above.
def stream_chat(message: str, history: list, system_prompt: str):
    conversation = [{"role": "system", "content": system_prompt}]
    for prompt, answer in history:
        conversation.extend([
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": answer},
        ])
    conversation.append({"role": "user", "content": message})
    return pipe(conversation)
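# Illustrative only (not executed at import time); the prompts below are placeholders, not part of the app:
#   reply = stream_chat("How do I close a stale issue?", history=[], system_prompt="You are a helpful GitHub assistant.")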
@app.route('/')  # assumed route path
def index():
    return render_template('index.html')
@app.route('/fetch_issues', methods=['POST'])  # assumed route path
def fetch_issues():
    github_token = request.form.get('github_token')
    repo_url = request.form.get('repo_url')

    if not github_token or not repo_url:
        logger.error('GitHub Token and Repository URL are required')
        return jsonify({'error': 'GitHub Token and Repository URL are required'}), 400

    try:
        repo_owner, repo_name = repo_url.split('/')[-2:]
        headers = {'Authorization': f'token {github_token}'}
        response = requests.get(f'https://api.github.com/repos/{repo_owner}/{repo_name}/issues', headers=headers)
        response.raise_for_status()
        issues = response.json()
        logger.info(f'Successfully fetched issues for {repo_owner}/{repo_name}')
        return jsonify(issues)
    except requests.exceptions.RequestException as e:
        logger.error(f'Error fetching issues: {str(e)}')
        return jsonify({'error': str(e)}), 500
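# For reference, the GitHub issues endpoint returns a JSON array of issue objects,
# each with fields such as "number", "title", "state", and "body".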
@app.route('/resolve_issue', methods=['POST'])  # assumed route path
def resolve_issue():
    github_token = request.form.get('github_token')
    issue_number = request.form.get('issue_number')
    resolution = request.form.get('resolution')
    repo_url = request.form.get('repo_url')
    forked_repo_url = request.form.get('forked_repo_url')

    if not github_token or not issue_number or not resolution or not repo_url:
        logger.error('GitHub Token, Issue Number, Resolution, and Repository URL are required')
        return jsonify({'error': 'GitHub Token, Issue Number, Resolution, and Repository URL are required'}), 400

    try:
        repo_owner, repo_name = repo_url.split('/')[-2:]
        headers = {'Authorization': f'token {github_token}'}

        # Create a comment with the resolution in the issue
        comment_data = {"body": resolution}
        comment_response = requests.post(f'https://api.github.com/repos/{repo_owner}/{repo_name}/issues/{issue_number}/comments', headers=headers, json=comment_data)
        comment_response.raise_for_status()

        if forked_repo_url:
            # Extract the forked repo's owner and name
            forked_repo_owner, forked_repo_name = forked_repo_url.split('/')[-2:]

            # Close the issue in your forked repository
            close_issue_data = {"state": "closed"}
            close_response = requests.patch(f'https://api.github.com/repos/{forked_repo_owner}/{forked_repo_name}/issues/{issue_number}', headers=headers, json=close_issue_data)
            close_response.raise_for_status()

        logger.info(f'Issue #{issue_number} resolved successfully')
        return jsonify({'message': f'Issue #{issue_number} resolved successfully'})
    except requests.exceptions.RequestException as e:
        logger.error(f'Error resolving issue: {str(e)}')
        return jsonify({'error': str(e)}), 500
@app.route('/extract_info', methods=['POST'])  # assumed route path
def extract_info():
    url = request.form.get('url')

    if not url:
        logger.error('URL is required')
        return jsonify({'error': 'URL is required'}), 400

    try:
        headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36"}
        response = requests.get(url, headers=headers)
        response.raise_for_status()
        logger.info(f'Successfully extracted info from {url}')
        return jsonify({"status": "success", "response": response.text})
    except requests.exceptions.RequestException as e:
        logger.error(f'Error extracting info: {str(e)}')
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    app.run(debug=True)
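# Example requests against the assumed routes above (paths and values are illustrative placeholders):
#   curl -X POST -F github_token=<TOKEN> -F repo_url=https://github.com/<owner>/<repo> http://127.0.0.1:5000/fetch_issues
#   curl -X POST -F github_token=<TOKEN> -F issue_number=1 -F resolution="Fixed in latest commit" \
#        -F repo_url=https://github.com/<owner>/<repo> http://127.0.0.1:5000/resolve_issue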