roshnn24 committed
Commit f53ade7 · verified · 1 Parent(s): b545a36

Update app.py

Files changed (1): app.py +27 -55
app.py CHANGED
@@ -14,6 +14,7 @@ from huggingface_hub import InferenceClient
 from langchain.llms.base import LLM
 from typing import Optional, List, Any
 from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import pipeline
 
 # Initialize Flask application
 app = Flask(__name__)
@@ -142,19 +143,12 @@ def init_db():
     conn.commit()
 
 def initialize_llm():
-    """Initialize the LLM using InferenceClient."""
+    """Initialize the LLM using transformers pipeline."""
     try:
-        # Get API token from environment variable
-        api_token = os.environ.get('HF_TOKEN')
-        if not api_token:
-            raise ValueError("No API token found")
-
-        # Initialize the InferenceClient
-        client = InferenceClient(api_key=api_token)
-
+        model_name = "mistralai/Mistral-7B-Instruct-v0.3"
+        llm = pipeline("text-generation", model=model_name)
         print("LLM initialized successfully!")
-        return client
-
+        return llm
     except Exception as e:
         print(f"LLM initialization error: {str(e)}")
         return None
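For context, a minimal sketch of exercising the new pipeline-based initializer outside of Flask. The model name is the one in the diff; the test prompt, max_new_tokens, and the __main__ guard are illustrative assumptions, and loading a ~7B model locally needs substantial memory (and possibly gated-repo authentication), unlike the previous hosted InferenceClient call.

from transformers import pipeline

def initialize_llm():
    """Initialize the LLM using a local transformers text-generation pipeline (as in the diff)."""
    try:
        model_name = "mistralai/Mistral-7B-Instruct-v0.3"
        # Downloads/loads the tokenizer and weights on first use.
        return pipeline("text-generation", model=model_name)
    except Exception as e:
        print(f"LLM initialization error: {e}")
        return None

if __name__ == "__main__":
    llm = initialize_llm()
    if llm:  # None when initialization failed
        out = llm("Explain list comprehensions in Python.", max_new_tokens=64)
        print(out[0]["generated_text"])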
@@ -270,12 +264,7 @@ try:
     print("Initializing database...")
     init_db()
     print("Database initialized successfully")
-
-    print("Initializing LLM...")
-    llm_client = initialize_llm()
-    if llm_client is None:
-        raise ValueError("LLM initialization failed")
-    print("LLM initialized successfully")
+    llm = initialize_llm()
 
 
 
@@ -313,11 +302,12 @@ Output Guidelines:
 - Mark critical information with [IMPORTANT] prefix and give small explanations with some bold headings if required and in white font always.
 """
 
-prompt = PromptTemplate(
-    input_variables=["user_request", "chat_history", "important_info"],
-    template=prompt_template
-)
-llm_chain = LLMChain(llm=llm, prompt=prompt)
+def generate_prompt(user_request, chat_history, important_info):
+    return prompt_template.format(
+        user_request=user_request,
+        chat_history=chat_history,
+        important_info=important_info
+    )
 
 
 def convert_to_html(raw_text):
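As a point of reference, a hedged sketch of what the new generate_prompt helper does via str.format; the template body here is a shortened stand-in, since the full prompt_template string (with the Output Guidelines) is defined above this hunk in app.py.

# Shortened stand-in for the real prompt_template defined earlier in app.py.
prompt_template = """Important info:
{important_info}

Chat history:
{chat_history}

User request:
{user_request}
"""

def generate_prompt(user_request, chat_history, important_info):
    # str.format fills the named placeholders; literal braces elsewhere in the
    # template would need doubling ({{ }}) to avoid a KeyError/ValueError.
    return prompt_template.format(
        user_request=user_request,
        chat_history=chat_history,
        important_info=important_info
    )

print(generate_prompt("Reverse a list in Python", "", "Provide code examples."))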
@@ -469,45 +459,27 @@ def chat():
         user_input = data.get("message", "")
         print(f"Received message: {user_input}")
 
-        if not user_input:
-            return jsonify({
-                "success": False,
-                "response": "No message provided."
-            })
+        # Very simple example for the chat history and important information (can be more complex)
+        chat_history = ""  # Assuming you maintain chat history somehow
+        important_info = "Provide code examples for Python programming."
 
-        # Very simple prompt for testing
-        messages = [
-            {"role": "user", "content": user_input}
-        ]
-
-        try:
-            # Get response from the model using InferenceClient
-            if llm_client:
-                completion = llm_client.chat.completions.create(
-                    model="mistralai/Mistral-7B-Instruct-v0.3",  # Replace with the appropriate model
-                    messages=messages,
-                    max_tokens=150  # Adjust the max tokens as needed
-                )
-                response = completion['choices'][0]['message']['content']
-                print(f"Raw response received: {response}")
-
-                # Return the model's response
-                return jsonify({
-                    "success": True,
-                    "response": response
-                })
+        # Generate the prompt
+        prompt = generate_prompt(user_input, chat_history, important_info)
 
-            else:
-                raise ValueError("LLM client not initialized properly.")
+        # Get response from the model using transformers pipeline
+        if llm:
+            response = llm(prompt, max_length=150)[0]['generated_text']
+            print(f"Raw response received: {response}")
 
-        except Exception as e:
-            print(f"Model error: {str(e)}")
-            # Return a simple error message
+            # Return the model's response
             return jsonify({
-                "success": False,
-                "response": "Sorry, I couldn't generate a response right now."
+                "success": True,
+                "response": response
             })
 
+        else:
+            raise ValueError("LLM not initialized properly.")
+
     except Exception as e:
         print(f"Request error: {str(e)}")
         return jsonify({
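One behavior worth knowing for the new call in the handler: by default the text-generation pipeline's generated_text echoes the prompt followed by the continuation, and max_length counts prompt tokens as well as new ones. A small sketch of two ways to read back only the reply (the parameter choices are assumptions, not part of this commit):

outputs = llm(prompt, max_length=150)        # as written in the diff
full_text = outputs[0]["generated_text"]     # prompt + continuation by default

# Option 1 (assumption): strip the echoed prompt manually.
reply = full_text[len(prompt):].lstrip()

# Option 2 (assumption): have the pipeline drop the prompt and bound only the new tokens.
outputs = llm(prompt, max_new_tokens=150, return_full_text=False)
reply = outputs[0]["generated_text"]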
 