chenzerong committed on
Commit
d8e4846
·
1 Parent(s): ae7a494

add translate

Browse files
Files changed (2) hide show
  1. app.py +5 -4
  2. tools/translate.py +19 -0
app.py CHANGED
@@ -4,7 +4,7 @@ import requests
4
  import pytz
5
  import yaml
6
  from tools.final_answer import FinalAnswerTool
7
-
8
  from Gradio_UI import GradioUI
9
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
@@ -47,15 +47,16 @@ custom_role_conversions=None,
47
  )
48
 
49
 
50
- # Import tool from Hub
51
- image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
52
 
53
  with open("prompts.yaml", 'r') as stream:
54
  prompt_templates = yaml.safe_load(stream)
55
 
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
 
4
  import pytz
5
  import yaml
6
  from tools.final_answer import FinalAnswerTool
7
+ from tools.translate import TranslateTool
8
  from Gradio_UI import GradioUI
9
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
 
47
  )
48
 
49
 
50
+ # Import tooli from Hub
51
+ image_generaton_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
52
+ translate_tool = TranslateTool()
53
 
54
  with open("prompts.yaml", 'r') as stream:
55
  prompt_templates = yaml.safe_load(stream)
56
 
57
  agent = CodeAgent(
58
  model=model,
59
+ tools=[final_answer, image_generaton_tool, translate_tool], ## add your tools here (don't remove final answer)
60
  max_steps=6,
61
  verbosity_level=1,
62
  grammar=None,
tools/translate.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoModelForCausalLM, AutoTokenizer
from smolagents.tools import Tool


class TranslateTool(Tool):
    """smolagents Tool that translates Chinese text to English.

    Wraps the ModelSpace/GemmaX2-28-9B-v0.1 translation LLM: the model and
    tokenizer are loaded once in ``__init__`` and reused for every call.
    """

    name = "translate"
    description = "Translate text from Chinese to English"
    inputs = {"text": {"type": "string", "description": "The text to translate"}}
    output_type = "string"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Load weights/tokenizer once at construction time (downloads from the
        # Hugging Face Hub on first use) rather than per forward() call.
        self.model_id = "ModelSpace/GemmaX2-28-9B-v0.1"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        self.model = AutoModelForCausalLM.from_pretrained(self.model_id)

    def forward(self, text: str) -> str:
        """Return the English translation of *text* (Chinese source).

        Args:
            text: The Chinese text to translate.

        Returns:
            The model's English translation as a plain string.
        """
        # GemmaX2 is a prompt-tuned translation model; per its model card it
        # expects this template rather than bare source text.
        prompt = (
            "Translate this from Chinese to English:\n"
            f"Chinese: {text}\nEnglish:"
        )
        inputs = self.tokenizer(prompt, return_tensors="pt")
        outputs = self.model.generate(**inputs, max_new_tokens=512)
        # generate() returns prompt + continuation for causal LMs; decode only
        # the newly generated tokens so the prompt is not echoed back to the
        # caller as part of the "translation".
        new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)