Spaces: Running on Zero
Commit · 7a1d957
1 Parent(s): 051f31b
ui
app.py CHANGED
@@ -15,6 +15,20 @@ model = AutoModelForCausalLM.from_pretrained(
     model_id, gguf_file=filename, device_map="auto"
 )
 
+example = """int __fastcall sub_B0D04(int a1, int a2)
+{
+  unsigned int v2; // r4
+  int result; // r0
+
+  v2 = a1 + a2;
+  if ( __CFADD__(a1, a2) )
+    return 0;
+  result = _libc_alloca_cutoff();
+  if ( v2 <= 0x1000 )
+    return result | 1;
+  return result;
+}"""
+
 # Then create the pipeline with the model and tokenizer
 pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer)
 
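For orientation: the added example string is sample Hex-Rays pseudocode used only as placeholder text, and the pipe created above is presumably what predict() (named in the second hunk's context line) calls. A minimal sketch of that flow follows; the function name, generation parameters, and output handling here are assumptions, not anything shown in this commit:

# Hypothetical sketch of how predict() might use the pipeline above.
# The real predict() lives elsewhere in app.py and is not part of this diff.
def predict_sketch(code: str) -> str:
    # max_new_tokens and return_full_text are illustrative choices only.
    out = pipe(code, max_new_tokens=512, return_full_text=False)
    return out[0]["generated_text"]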
@@ -57,8 +71,8 @@ def predict(code):
 
 demo = gr.Interface(
     fn=predict,
-    inputs="
-    outputs="
+    inputs=gr.Text(placeholder=example, label="Hex-Rays decompiler output"),
+    outputs=gr.JSON(label="Aidapal Output"),
     description=frontmatter.load("README.md").content,
 )
 demo.launch()
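As a self-contained illustration of the UI change, here is a runnable toy. Only the gr.Text/gr.JSON wiring mirrors the commit; fake_predict and its return fields are hypothetical stand-ins for the app's predict():

import gradio as gr

def fake_predict(code):
    # Toy stand-in for the app's predict(); returns a dict so gr.JSON can render it.
    return {"chars": len(code), "lines": code.count("\n") + 1}

demo = gr.Interface(
    fn=fake_predict,
    inputs=gr.Text(placeholder="paste Hex-Rays pseudocode here", label="Hex-Rays decompiler output"),
    outputs=gr.JSON(label="Aidapal Output"),
)

if __name__ == "__main__":
    demo.launch()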