ejschwartz commited on
Commit
6c6ef93
·
1 Parent(s): 0844fcc

Add examples

Browse files
Files changed (2) hide show
  1. README.md +0 -1
  2. app.py +6 -2
README.md CHANGED
@@ -19,4 +19,3 @@ names, when given the Hex-Rays decompiler output of a function. More
19
  ## TODO
20
 
21
  * We currently use `transformers` which de-quantizes the gguf. This is easy but inefficient. Can we use llama.cpp or Ollama with zerogpu?
22
- * Add examples
 
19
  ## TODO
20
 
21
  * We currently use `transformers` which de-quantizes the gguf. This is easy but inefficient. Can we use llama.cpp or Ollama with zerogpu?
 
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import frontmatter
2
  import gradio as gr
 
3
  import spaces
4
- import torch
5
 
6
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
7
 
@@ -29,6 +29,8 @@ example = """int __fastcall sub_B0D04(int a1, int a2)
29
  return result;
30
  }"""
31
 
 
 
32
  # Then create the pipeline with the model and tokenizer
33
  pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
34
 
@@ -70,10 +72,12 @@ def predict(code):
70
  return pipe_out[0]["generated_text"]
71
 
72
 
 
73
  demo = gr.Interface(
74
  fn=predict,
75
- inputs=gr.Text(value=example, label="Hex-Rays decompiler output"),
76
  outputs=gr.JSON(label="Aidapal Output"),
77
  description=frontmatter.load("README.md").content,
 
78
  )
79
  demo.launch()
 
1
  import frontmatter
2
  import gradio as gr
3
+ import json
4
  import spaces
 
5
 
6
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
7
 
 
29
  return result;
30
  }"""
31
 
32
+ examples = [j["input"] for j in json.load(open("gpt4_juiced_dataset.json"))]
33
+
34
  # Then create the pipeline with the model and tokenizer
35
  pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
36
 
 
72
  return pipe_out[0]["generated_text"]
73
 
74
 
75
+
76
  demo = gr.Interface(
77
  fn=predict,
78
+ inputs=gr.Text(label="Hex-Rays decompiler output"),
79
  outputs=gr.JSON(label="Aidapal Output"),
80
  description=frontmatter.load("README.md").content,
81
+ examples=examples
82
  )
83
  demo.launch()