ZeeAI1 committed · verified
Commit 01497d7 · 1 Parent(s): ede0e3f

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -1,9 +1,9 @@
 import streamlit as st
 import pandas as pd
-from transformers import pipeline
+from huggingface_hub import InferenceClient
 
-# Initialize LLM pipeline
-parser = pipeline("text2text-generation", model="google/flan-t5-base")
+# Initialize hosted inference client
+client = InferenceClient(model="google/flan-t5-base")
 
 # Simulated chart of accounts mapping
 account_map = {
@@ -23,8 +23,8 @@ segment = {
 }
 
 def parse_prompt(prompt):
-    result = parser(f"Extract accounting entry: {prompt}")[0]['generated_text']
-    return result  # Simplified: in real app, use structured parsing
+    response = client.text_generation(prompt=f"Extract accounting entry: {prompt}", max_new_tokens=50)
+    return response
 
 def handle_gl_entry(prompt):
     # Simulate parsing response
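
For context, a minimal sketch of how the updated parse_prompt might be wired into the Streamlit UI; the rest of app.py is not shown in this commit, so the widget label and variable names below are illustrative assumptions rather than part of the change:

# Hypothetical usage of the updated parse_prompt in the Streamlit app.
# Only parse_prompt and the InferenceClient setup come from this commit;
# the text_input label and variable names here are assumed for illustration.
user_prompt = st.text_input("Describe the transaction to record")

if user_prompt:
    # parse_prompt now calls the hosted Inference API via InferenceClient
    # instead of loading a local transformers pipeline.
    parsed = parse_prompt(user_prompt)
    st.write(parsed)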