MINEOGO committed on
Commit f16b6b6 · verified · 1 Parent(s): 66279c1

Update app.py

Files changed (1): app.py (+9, -9)
app.py CHANGED
@@ -1,28 +1,28 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
-# Load DeepSeek model
-deepseek_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1")
-deepseek_model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1", torch_dtype="auto", device_map="auto")
+# Load DeepSeek with trust_remote_code
+deepseek_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1", trust_remote_code=True)
+deepseek_model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-R1", trust_remote_code=True, torch_dtype="auto", device_map="auto")
 deepseek_pipe = pipeline("text-generation", model=deepseek_model, tokenizer=deepseek_tokenizer)
 
-# Load LLaMA model
-llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-4-Scout-17B-16E-Instruct")
-llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-4-Scout-17B-16E-Instruct", torch_dtype="auto", device_map="auto")
+# Load LLaMA with trust_remote_code
+llama_tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-4-Scout-17B-16E-Instruct", trust_remote_code=True)
+llama_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-4-Scout-17B-16E-Instruct", trust_remote_code=True, torch_dtype="auto", device_map="auto")
 llama_pipe = pipeline("text-generation", model=llama_model, tokenizer=llama_tokenizer)
 
 def generate_and_enhance_code(code_request: str, features: str):
-    # Step 1: Generate base code from DeepSeek
+    # Generate base code from DeepSeek
     base_output = deepseek_pipe(code_request, max_new_tokens=512, do_sample=True, temperature=0.7)[0]["generated_text"]
 
-    # Step 2: Ask Llama to add features
+    # Enhance with LLaMA
     enhancement_prompt = f"Hey Llama! can you please add some more features in my code?\n\nOriginal code:\n{base_output}\n\nFeatures to add:\n{features}\n\nAdd the features and pass me the code without any extra asking!"
     enhanced_output = llama_pipe(enhancement_prompt, max_new_tokens=1024, do_sample=True, temperature=0.6)[0]["generated_text"]
 
     return enhanced_output
 
 with gr.Blocks() as demo:
-    gr.Markdown("## AI Code Assistant with DeepSeek + LLaMA 4")
+    gr.Markdown("## MINEOGO: DeepSeek + LLaMA Code Assistant")
     with gr.Row():
         code_input = gr.Textbox(lines=5, label="What code do you want?")
         feature_input = gr.Textbox(lines=3, label="What features should LLaMA add?")
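
The hunk ends at line 28, so the remainder of app.py (the output component, the button wiring, and the launch call) is not shown in this commit. A minimal sketch of how that continuation presumably looks, assuming a single output box and a click handler — the names output_box and generate_btn are hypothetical, not taken from the commit:

    # Hypothetical continuation inside the `with gr.Blocks() as demo:` block;
    # the diff above covers only lines 1-28, so the real wiring may differ.
    output_box = gr.Code(label="Enhanced code", language="python")  # assumed output component
    generate_btn = gr.Button("Generate & Enhance")                  # assumed trigger button
    generate_btn.click(
        fn=generate_and_enhance_code,  # function defined in the diff above
        inputs=[code_input, feature_input],
        outputs=output_box,
    )

demo.launch()

Note that the pipeline calls run synchronously, so under this sketch the click handler blocks until both models finish generating.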