cwhuh committed on
Commit
76917a3
·
1 Parent(s): 138e240

chore: refine parsing

Browse files
__pycache__/llm_wrapper.cpython-310.pyc CHANGED
Binary files a/__pycache__/llm_wrapper.cpython-310.pyc and b/__pycache__/llm_wrapper.cpython-310.pyc differ
 
llm_wrapper.py CHANGED
@@ -88,10 +88,13 @@ def run_gemini(
88
  chat_completion = client.models.generate_content(
89
  model=model,
90
  contents=input_content,
 
 
 
91
  )
92
  print(f"Chat Completion: {chat_completion}")
93
 
94
- chat_output = chat_completion.parsed
95
  input_token = chat_completion.usage_metadata.prompt_token_count
96
  output_token = chat_completion.usage_metadata.candidates_token_count
97
  pricing = input_token / 1000000 * 0.1 * 1500 + output_token / 1000000 * 0.7 * 1500
 
88
  chat_completion = client.models.generate_content(
89
  model=model,
90
  contents=input_content,
91
+ config={
92
+ "system_instruction": system_prompt,
93
+ }
94
  )
95
  print(f"Chat Completion: {chat_completion}")
96
 
97
+ chat_output = chat_completion.candidates[0].content.parts[0].text
98
  input_token = chat_completion.usage_metadata.prompt_token_count
99
  output_token = chat_completion.usage_metadata.candidates_token_count
100
  pricing = input_token / 1000000 * 0.1 * 1500 + output_token / 1000000 * 0.7 * 1500