Pandora41 committed · 844950f · 1 Parent(s): fc7a1eb

Deploy Finance QnA AI

Files changed:
- README.md +0 -10
- app.py +31 -0
- model/adapter_config.json +28 -0
- model/adapter_model.safetensors +3 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,13 +1,3 @@
 ---
-title: LLM Finance QnA Test
-emoji: 📈
-colorFrom: purple
-colorTo: purple
-sdk: gradio
-sdk_version: 5.20.1
-app_file: app.py
-pinned: false
 license: mit
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,31 @@
+import gradio as gr
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+# Load base model & tokenizer
+base_model = "vilsonrodrigues/falcon-7b-instruct-sharded"
+tokenizer = AutoTokenizer.from_pretrained(base_model)
+model = AutoModelForCausalLM.from_pretrained(base_model, torch_dtype=torch.float16, device_map="auto")
+
+# Load LoRA adapter
+adapter_path = "./model"
+model = PeftModel.from_pretrained(model, adapter_path)
+
+def generate_response(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)  # follow device_map placement instead of hardcoding "cuda"
+    with torch.no_grad():
+        outputs = model.generate(**inputs, max_length=200)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+# Gradio Interface
+interface = gr.Interface(
+    fn=generate_response,
+    inputs=gr.Textbox(label="Enter a financial question"),
+    outputs=gr.Textbox(label="AI answer"),
+    title="Financial AI Chatbot",
+    description="Fine-tuned Falcon 7B model for financial QnA."
+)
+
+if __name__ == "__main__":
+    interface.launch()
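Once the Space is running, the same endpoint can also be called programmatically. A minimal client-side sketch using gradio_client; the Space id below is hypothetical, since the commit page does not show it:

from gradio_client import Client

client = Client("Pandora41/finance-qna")  # hypothetical Space id; substitute the real one
answer = client.predict(
    "What is dollar-cost averaging?",  # fills the question textbox
    api_name="/predict",               # default endpoint name for a gr.Interface app
)
print(answer)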
model/adapter_config.json
ADDED
@@ -0,0 +1,28 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "vilsonrodrigues/falcon-7b-instruct-sharded",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "query_key_value"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
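This is the file peft serializes alongside a trained adapter. The training script is not part of this commit, but a LoraConfig roughly like the following sketch (values read straight from the JSON above) would produce it:

from peft import LoraConfig

lora_config = LoraConfig(
    r=16,                                # "r": 16
    lora_alpha=32,                       # "lora_alpha": 32
    lora_dropout=0.05,                   # "lora_dropout": 0.05
    bias="none",                         # "bias": "none"
    target_modules=["query_key_value"],  # Falcon's fused QKV projection
    task_type="CAUSAL_LM",               # "task_type": "CAUSAL_LM"
)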
model/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f00aff546715ff8f3831c1fdf2c8556ed57db905470bb3b8099f7f15a18aed51
+size 18883912
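The size field is a plausible sanity check on the adapter: with r=16 on Falcon-7B's fused query_key_value projection (weight shape 4672×4544) across its 32 layers, the adapter holds 32 × 16 × (4544 + 4672) ≈ 4.72M parameters, about 18.9 MB at 4 bytes each, which lines up with the 18883912-byte file if the LoRA weights are stored in fp32 (an assumption; the small remainder would be the safetensors header).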
requirements.txt
ADDED
@@ -0,0 +1,7 @@
+gradio
+transformers
+torch
+peft
+accelerate
+huggingface_hub
+
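Note that accelerate is required by the device_map="auto" call in app.py, and huggingface_hub (also a transitive dependency of transformers) handles downloading the base model from the Hub; neither is imported directly by the app.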