pentarosarium committed on
Commit
a0c2c81
·
1 Parent(s): c45cef9

progress more 51

Browse files
Files changed (2) hide show
  1. app.py +33 -22
  2. requirements.txt +1 -1
app.py CHANGED
@@ -31,29 +31,40 @@ rubert1 = pipeline("sentiment-analysis", model = "DeepPavlov/rubert-base-cased")
31
  rubert2 = pipeline("sentiment-analysis", model = "blanchefort/rubert-base-cased-sentiment")
32
 
33
  def init_langchain_llm():
34
- pipe = pipeline("text-generation", model="nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
35
- llm = HuggingFacePipeline(pipeline=pipe)
36
- return llm
37
-
38
- def init_langchain_llm():
39
- model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
40
- pipeline = transformers.pipeline(
41
- "text-generation",
42
- model=model_id,
43
- model_kwargs={"torch_dtype": torch.bfloat16},
44
- device_map="auto",
45
- )
46
 
47
- def llama_wrapper(prompt):
48
- messages = [
49
- {"role": "system", "content": "You are an experienced credit analyst that analyzes news and estimates their short-term or mid-term impact on profitability or risk of loss of the entity present in the news."},
50
- {"role": "user", "content": prompt},
51
- ]
52
- result = pipeline(messages, max_new_tokens=256)
53
- return result[0]["generated_text"]
54
 
55
- llm = HuggingFacePipeline(pipeline=llama_wrapper)
56
- return llm
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  def estimate_impact(llm, news_text, entity):
59
  template = """
@@ -427,7 +438,7 @@ def create_output_file(df, uploaded_file, analysis_df):
427
  return output
428
 
429
  def main():
430
- st.title("... приступим к анализу... версия 50")
431
 
432
  # Initialize session state
433
  if 'processed_df' not in st.session_state:
 
31
  rubert2 = pipeline("sentiment-analysis", model = "blanchefort/rubert-base-cased-sentiment")
32
 
33
  def init_langchain_llm():
34
+ # Authenticate using the token from Streamlit secrets
35
+ if 'hf_token' in st.secrets:
36
+ login(token=st.secrets['hf_token'])
37
+ else:
38
+ st.error("Hugging Face token not found in Streamlit secrets. Please add it to access the model.")
39
+ st.stop()
 
 
 
 
 
 
40
 
41
+ model_id = "meta-llama/Llama-2-7b-chat-hf" # or "meta-llama/Meta-Llama-3.1-8B-Instruct" if you have access
 
 
 
 
 
 
42
 
43
+ try:
44
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
45
+ model = transformers.AutoModelForCausalLM.from_pretrained(
46
+ model_id,
47
+ torch_dtype=torch.float16,
48
+ device_map="auto",
49
+ )
50
+
51
+ pipeline = transformers.pipeline(
52
+ "text-generation",
53
+ model=model,
54
+ tokenizer=tokenizer,
55
+ torch_dtype=torch.float16,
56
+ device_map="auto",
57
+ )
58
+
59
+ def llama_wrapper(prompt):
60
+ result = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7)
61
+ return result[0]['generated_text']
62
+
63
+ llm = HuggingFacePipeline(pipeline=llama_wrapper)
64
+ return llm
65
+ except Exception as e:
66
+ st.error(f"Error initializing the model: {str(e)}")
67
+ st.stop()
68
 
69
  def estimate_impact(llm, news_text, entity):
70
  template = """
 
438
  return output
439
 
440
  def main():
441
+ st.title("... приступим к анализу... версия 51")
442
 
443
  # Initialize session state
444
  if 'processed_df' not in st.session_state:
requirements.txt CHANGED
@@ -12,4 +12,4 @@ matplotlib
12
  sacremoses
13
  langchain
14
  langchain-community
15
- transformers
 
12
  sacremoses
13
  langchain
14
  langchain-community
15
+ huggingface_hub