import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "flax-community/gpt-neo-125M-apps"
# Load the pretrained model and tokenizer
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# GPT-Neo has no dedicated pad token, so reuse the end-of-sequence token
tokenizer.pad_token = tokenizer.eos_token

def generate_solution(prompt, **kwargs):
    # Tokenize the problem description and remember where the prompt ends
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    start = len(input_ids[0])
    # Generate a continuation and return only the newly generated tokens
    output = model.generate(input_ids, pad_token_id=tokenizer.pad_token_id, **kwargs)
    return tokenizer.decode(output[0][start:])

# A multi-line text box where the user describes the coding problem
inputs = [
    gr.inputs.Textbox(placeholder="Define a problem here ...", lines=5)
]

# Build and launch the Gradio demo
gr.Interface(
    generate_solution,
    inputs=inputs,
    outputs="text",
    title="Coding problem solver",
).launch()
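
# A minimal sketch (not part of the original demo): generate_solution can also be
# called directly, and any extra keyword arguments are forwarded to model.generate.
# The prompt text and parameter values below are illustrative assumptions.
#
#   solution = generate_solution(
#       "Write a function that returns the sum of a list of integers.",
#       max_new_tokens=64,   # cap the length of the generated code
#       do_sample=True,      # sample instead of greedy decoding
#       temperature=0.8,
#   )
#   print(solution)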