redfernstech committed on
Commit
0196abc
·
verified ·
1 Parent(s): 2dcf807

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +258 -47
app.py CHANGED
@@ -1,6 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import time
3
- from fastapi import FastAPI,Request
4
  from fastapi.responses import HTMLResponse
5
  from fastapi.staticfiles import StaticFiles
6
  from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
@@ -18,30 +250,28 @@ from gradio_client import Client
18
  from simple_salesforce import Salesforce, SalesforceLogin
19
  from llama_index.llms.huggingface import HuggingFaceLLM
20
 
21
-
22
  # Define Pydantic model for incoming request body
23
  class MessageRequest(BaseModel):
24
  message: str
 
25
  repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
26
  llm_client = InferenceClient(
27
  model=repo_id,
28
  token=os.getenv("HF_TOKEN"),
29
  )
30
 
31
-
32
  os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
33
  username = os.getenv("username")
34
  password = os.getenv("password")
35
  security_token = os.getenv("security_token")
36
- domain = os.getenv("domain")# Using sandbox environment
37
- session_id, sf_instance = SalesforceLogin(username=username, password=password, security_token=security_token, domain=domain)
38
 
39
- # Create Salesforce object
 
40
  sf = Salesforce(instance=sf_instance, session_id=session_id)
41
 
42
  app = FastAPI()
43
 
44
-
45
  @app.middleware("http")
46
  async def add_security_headers(request: Request, call_next):
47
  response = await call_next(request)
@@ -49,7 +279,6 @@ async def add_security_headers(request: Request, call_next):
49
  response.headers["X-Frame-Options"] = "ALLOWALL"
50
  return response
51
 
52
-
53
  # Allow CORS requests from any domain
54
  app.add_middleware(
55
  CORSMiddleware,
@@ -59,35 +288,23 @@ app.add_middleware(
59
  allow_headers=["*"],
60
  )
61
 
62
-
63
-
64
-
65
  @app.get("/favicon.ico")
66
  async def favicon():
67
  return HTMLResponse("") # or serve a real favicon if you have one
68
 
69
-
70
  app.mount("/static", StaticFiles(directory="static"), name="static")
71
-
72
  templates = Jinja2Templates(directory="static")
 
73
  # Configure Llama index settings
74
- Settings.llm = HuggingFaceInferenceAPI(
75
  model_name="meta-llama/Meta-Llama-3-8B-Instruct",
76
  tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
77
  context_window=3000,
78
- token=os.getenv("HF_TOKEN"),
79
  max_new_tokens=512,
80
  generate_kwargs={"temperature": 0.1},
 
81
  )
82
- # Configure Llama index settings
83
- # Settings.llm = HuggingFaceLLM(
84
- # model_name="google/flan-t5-small",
85
- # tokenizer_name="google/flan-t5-small",
86
- # context_window=512, # flan-t5-small has a max context window of 512 tokens
87
- # max_new_tokens=256,
88
- # generate_kwargs={"temperature": 0.1, "do_sample": True},
89
- # device_map="auto" # Automatically use GPU if available, else CPU
90
- # )
91
  Settings.embed_model = HuggingFaceEmbedding(
92
  model_name="BAAI/bge-small-en-v1.5"
93
  )
@@ -98,8 +315,10 @@ PDF_DIRECTORY = 'data'
98
  # Ensure directories exist
99
  os.makedirs(PDF_DIRECTORY, exist_ok=True)
100
  os.makedirs(PERSIST_DIR, exist_ok=True)
 
101
  chat_history = []
102
  current_chat_history = []
 
103
  def data_ingestion_from_directory():
104
  documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
105
  storage_context = StorageContext.from_defaults()
@@ -110,10 +329,10 @@ def initialize():
110
  start_time = time.time()
111
  data_ingestion_from_directory() # Process PDF ingestion at startup
112
  print(f"Data ingestion time: {time.time() - start_time} seconds")
 
113
  def split_name(full_name):
114
  # Split the name by spaces
115
  words = full_name.strip().split()
116
-
117
  # Logic for determining first name and last name
118
  if len(words) == 1:
119
  first_name = ''
@@ -124,12 +343,10 @@ def split_name(full_name):
124
  else:
125
  first_name = words[0]
126
  last_name = ' '.join(words[1:])
127
-
128
  return first_name, last_name
129
 
130
  initialize() # Run initialization tasks
131
 
132
-
133
  def handle_query(query):
134
  chat_text_qa_msgs = [
135
  (
@@ -143,7 +360,6 @@ def handle_query(query):
143
  )
144
  ]
145
  text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
146
-
147
  storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
148
  index = load_index_from_storage(storage_context)
149
  context_str = ""
@@ -151,66 +367,60 @@ def handle_query(query):
151
  if past_query.strip():
152
  context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
153
 
154
-
155
  query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
156
  answer = query_engine.query(query)
157
-
158
  if hasattr(answer, 'response'):
159
- response=answer.response
160
  elif isinstance(answer, dict) and 'response' in answer:
161
- response =answer['response']
162
  else:
163
- response ="Sorry, I couldn't find an answer."
164
  current_chat_history.append((query, response))
165
  return response
 
166
  @app.get("/ch/{id}", response_class=HTMLResponse)
167
  async def load_chat(request: Request, id: str):
168
  return templates.TemplateResponse("index.html", {"request": request, "user_id": id})
 
169
  # Route to save chat history
170
  @app.post("/hist/")
171
  async def save_chat_history(history: dict):
172
  # Check if 'userId' is present in the incoming dictionary
173
  user_id = history.get('userId')
174
  print(user_id)
175
-
176
  # Ensure user_id is defined before proceeding
177
  if user_id is None:
178
  return {"error": "userId is required"}, 400
179
-
180
  # Construct the chat history string
181
  hist = ''.join([f"'{entry['sender']}: {entry['message']}'\n" for entry in history['history']])
182
  hist = "You are a Redfernstech summarize model. Your aim is to use this conversation to identify user interests solely based on that conversation: " + hist
183
  print(hist)
184
-
185
  # Get the summarized result from the client model
186
  result = hist
187
-
188
  try:
189
  sf.Lead.update(user_id, {'Description': result})
190
  except Exception as e:
191
  return {"error": f"Failed to update lead: {str(e)}"}, 500
192
-
193
  return {"summary": result, "message": "Chat history saved"}
 
194
  @app.post("/webhook")
195
  async def receive_form_data(request: Request):
196
  form_data = await request.json()
197
  # Log in to Salesforce
198
  first_name, last_name = split_name(form_data['name'])
199
  data = {
200
- 'FirstName': first_name,
201
- 'LastName': last_name,
202
- 'Description': 'hii', # Static description
203
- 'Company': form_data['company'], # Assuming company is available in form_data
204
- 'Phone': form_data['phone'].strip(), # Phone from form data
205
- 'Email': form_data['email'], # Email from form data
206
  }
207
- a=sf.Lead.create(data)
208
  # Generate a unique ID (for tracking user)
209
  unique_id = a['id']
210
-
211
  # Here you can do something with form_data like saving it to a database
212
  print("Received form data:", form_data)
213
-
214
  # Send back the unique id to the frontend
215
  return JSONResponse({"id": unique_id})
216
 
@@ -226,6 +436,7 @@ async def chat(request: MessageRequest):
226
  }
227
  chat_history.append(message_data)
228
  return {"response": response}
 
229
  @app.get("/")
230
  def read_root():
231
  return {"message": "Welcome to the API"}
 
1
+ # import os
2
+ # import time
3
+ # from fastapi import FastAPI,Request
4
+ # from fastapi.responses import HTMLResponse
5
+ # from fastapi.staticfiles import StaticFiles
6
+ # from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
7
+ # from llama_index.embeddings.huggingface import HuggingFaceEmbedding
8
+ # from pydantic import BaseModel
9
+ # from fastapi.responses import JSONResponse
10
+ # import uuid # for generating unique IDs
11
+ # import datetime
12
+ # from fastapi.middleware.cors import CORSMiddleware
13
+ # from fastapi.templating import Jinja2Templates
14
+ # from huggingface_hub import InferenceClient
15
+ # import json
16
+ # import re
17
+ # from gradio_client import Client
18
+ # from simple_salesforce import Salesforce, SalesforceLogin
19
+ # from llama_index.llms.huggingface import HuggingFaceLLM
20
+
21
+
22
+ # # Define Pydantic model for incoming request body
23
+ # class MessageRequest(BaseModel):
24
+ # message: str
25
+ # repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
26
+ # llm_client = InferenceClient(
27
+ # model=repo_id,
28
+ # token=os.getenv("HF_TOKEN"),
29
+ # )
30
+
31
+
32
+ # os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
33
+ # username = os.getenv("username")
34
+ # password = os.getenv("password")
35
+ # security_token = os.getenv("security_token")
36
+ # domain = os.getenv("domain")# Using sandbox environment
37
+ # session_id, sf_instance = SalesforceLogin(username=username, password=password, security_token=security_token, domain=domain)
38
+
39
+ # # Create Salesforce object
40
+ # sf = Salesforce(instance=sf_instance, session_id=session_id)
41
+
42
+ # app = FastAPI()
43
+
44
+
45
+ # @app.middleware("http")
46
+ # async def add_security_headers(request: Request, call_next):
47
+ # response = await call_next(request)
48
+ # response.headers["Content-Security-Policy"] = "frame-ancestors *; frame-src *; object-src *;"
49
+ # response.headers["X-Frame-Options"] = "ALLOWALL"
50
+ # return response
51
+
52
+
53
+ # # Allow CORS requests from any domain
54
+ # app.add_middleware(
55
+ # CORSMiddleware,
56
+ # allow_origins=["*"],
57
+ # allow_credentials=True,
58
+ # allow_methods=["*"],
59
+ # allow_headers=["*"],
60
+ # )
61
+
62
+
63
+
64
+
65
+ # @app.get("/favicon.ico")
66
+ # async def favicon():
67
+ # return HTMLResponse("") # or serve a real favicon if you have one
68
+
69
+
70
+ # app.mount("/static", StaticFiles(directory="static"), name="static")
71
+
72
+ # templates = Jinja2Templates(directory="static")
73
+ # # Configure Llama index settings
74
+ # Settings.llm = HuggingFaceInferenceAPI(
75
+ # model_name="meta-llama/Meta-Llama-3-8B-Instruct",
76
+ # tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
77
+ # context_window=3000,
78
+ # token=os.getenv("HF_TOKEN"),
79
+ # max_new_tokens=512,
80
+ # generate_kwargs={"temperature": 0.1},
81
+ # )
82
+ # # Configure Llama index settings
83
+ # # Settings.llm = HuggingFaceLLM(
84
+ # # model_name="google/flan-t5-small",
85
+ # # tokenizer_name="google/flan-t5-small",
86
+ # # context_window=512, # flan-t5-small has a max context window of 512 tokens
87
+ # # max_new_tokens=256,
88
+ # # generate_kwargs={"temperature": 0.1, "do_sample": True},
89
+ # # device_map="auto" # Automatically use GPU if available, else CPU
90
+ # # )
91
+ # Settings.embed_model = HuggingFaceEmbedding(
92
+ # model_name="BAAI/bge-small-en-v1.5"
93
+ # )
94
+
95
+ # PERSIST_DIR = "db"
96
+ # PDF_DIRECTORY = 'data'
97
+
98
+ # # Ensure directories exist
99
+ # os.makedirs(PDF_DIRECTORY, exist_ok=True)
100
+ # os.makedirs(PERSIST_DIR, exist_ok=True)
101
+ # chat_history = []
102
+ # current_chat_history = []
103
+ # def data_ingestion_from_directory():
104
+ # documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
105
+ # storage_context = StorageContext.from_defaults()
106
+ # index = VectorStoreIndex.from_documents(documents)
107
+ # index.storage_context.persist(persist_dir=PERSIST_DIR)
108
+
109
+ # def initialize():
110
+ # start_time = time.time()
111
+ # data_ingestion_from_directory() # Process PDF ingestion at startup
112
+ # print(f"Data ingestion time: {time.time() - start_time} seconds")
113
+ # def split_name(full_name):
114
+ # # Split the name by spaces
115
+ # words = full_name.strip().split()
116
+
117
+ # # Logic for determining first name and last name
118
+ # if len(words) == 1:
119
+ # first_name = ''
120
+ # last_name = words[0]
121
+ # elif len(words) == 2:
122
+ # first_name = words[0]
123
+ # last_name = words[1]
124
+ # else:
125
+ # first_name = words[0]
126
+ # last_name = ' '.join(words[1:])
127
+
128
+ # return first_name, last_name
129
+
130
+ # initialize() # Run initialization tasks
131
+
132
+
133
+ # def handle_query(query):
134
+ # chat_text_qa_msgs = [
135
+ # (
136
+ # "user",
137
+ # """
138
+ # You are the Clara Redfernstech chatbot. Your goal is to provide accurate, professional, and helpful answers to user queries based on the company's data. Always ensure your responses are clear and concise. Give response within 10-15 words only
139
+ # {context_str}
140
+ # Question:
141
+ # {query_str}
142
+ # """
143
+ # )
144
+ # ]
145
+ # text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
146
+
147
+ # storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
148
+ # index = load_index_from_storage(storage_context)
149
+ # context_str = ""
150
+ # for past_query, response in reversed(current_chat_history):
151
+ # if past_query.strip():
152
+ # context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
153
+
154
+
155
+ # query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
156
+ # answer = query_engine.query(query)
157
+
158
+ # if hasattr(answer, 'response'):
159
+ # response=answer.response
160
+ # elif isinstance(answer, dict) and 'response' in answer:
161
+ # response =answer['response']
162
+ # else:
163
+ # response ="Sorry, I couldn't find an answer."
164
+ # current_chat_history.append((query, response))
165
+ # return response
166
+ # @app.get("/ch/{id}", response_class=HTMLResponse)
167
+ # async def load_chat(request: Request, id: str):
168
+ # return templates.TemplateResponse("index.html", {"request": request, "user_id": id})
169
+ # # Route to save chat history
170
+ # @app.post("/hist/")
171
+ # async def save_chat_history(history: dict):
172
+ # # Check if 'userId' is present in the incoming dictionary
173
+ # user_id = history.get('userId')
174
+ # print(user_id)
175
+
176
+ # # Ensure user_id is defined before proceeding
177
+ # if user_id is None:
178
+ # return {"error": "userId is required"}, 400
179
+
180
+ # # Construct the chat history string
181
+ # hist = ''.join([f"'{entry['sender']}: {entry['message']}'\n" for entry in history['history']])
182
+ # hist = "You are a Redfernstech summarize model. Your aim is to use this conversation to identify user interests solely based on that conversation: " + hist
183
+ # print(hist)
184
+
185
+ # # Get the summarized result from the client model
186
+ # result = hist
187
+
188
+ # try:
189
+ # sf.Lead.update(user_id, {'Description': result})
190
+ # except Exception as e:
191
+ # return {"error": f"Failed to update lead: {str(e)}"}, 500
192
+
193
+ # return {"summary": result, "message": "Chat history saved"}
194
+ # @app.post("/webhook")
195
+ # async def receive_form_data(request: Request):
196
+ # form_data = await request.json()
197
+ # # Log in to Salesforce
198
+ # first_name, last_name = split_name(form_data['name'])
199
+ # data = {
200
+ # 'FirstName': first_name,
201
+ # 'LastName': last_name,
202
+ # 'Description': 'hii', # Static description
203
+ # 'Company': form_data['company'], # Assuming company is available in form_data
204
+ # 'Phone': form_data['phone'].strip(), # Phone from form data
205
+ # 'Email': form_data['email'], # Email from form data
206
+ # }
207
+ # a=sf.Lead.create(data)
208
+ # # Generate a unique ID (for tracking user)
209
+ # unique_id = a['id']
210
+
211
+ # # Here you can do something with form_data like saving it to a database
212
+ # print("Received form data:", form_data)
213
+
214
+ # # Send back the unique id to the frontend
215
+ # return JSONResponse({"id": unique_id})
216
+
217
+ # @app.post("/chat/")
218
+ # async def chat(request: MessageRequest):
219
+ # message = request.message # Access the message from the request body
220
+ # response = handle_query(message) # Process the message
221
+ # message_data = {
222
+ # "sender": "User",
223
+ # "message": message,
224
+ # "response": response,
225
+ # "timestamp": datetime.datetime.now().isoformat()
226
+ # }
227
+ # chat_history.append(message_data)
228
+ # return {"response": response}
229
+ # @app.get("/")
230
+ # def read_root():
231
+ # return {"message": "Welcome to the API"}
232
+
233
  import os
234
  import time
235
+ from fastapi import FastAPI, Request
236
  from fastapi.responses import HTMLResponse
237
  from fastapi.staticfiles import StaticFiles
238
  from llama_index.core import StorageContext, load_index_from_storage, VectorStoreIndex, SimpleDirectoryReader, ChatPromptTemplate, Settings
 
250
  from simple_salesforce import Salesforce, SalesforceLogin
251
  from llama_index.llms.huggingface import HuggingFaceLLM
252
 
 
253
  # Define Pydantic model for incoming request body
254
  class MessageRequest(BaseModel):
255
  message: str
256
+
257
  repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
258
  llm_client = InferenceClient(
259
  model=repo_id,
260
  token=os.getenv("HF_TOKEN"),
261
  )
262
 
 
263
  os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
264
  username = os.getenv("username")
265
  password = os.getenv("password")
266
  security_token = os.getenv("security_token")
267
+ domain = os.getenv("domain") # Using sandbox environment
 
268
 
269
+ session_id, sf_instance = SalesforceLogin(username=username, password=password, security_token=security_token, domain=domain)
270
+ # Create Salesforce object
271
  sf = Salesforce(instance=sf_instance, session_id=session_id)
272
 
273
  app = FastAPI()
274
 
 
275
  @app.middleware("http")
276
  async def add_security_headers(request: Request, call_next):
277
  response = await call_next(request)
 
279
  response.headers["X-Frame-Options"] = "ALLOWALL"
280
  return response
281
 
 
282
  # Allow CORS requests from any domain
283
  app.add_middleware(
284
  CORSMiddleware,
 
288
  allow_headers=["*"],
289
  )
290
 
 
 
 
291
  @app.get("/favicon.ico")
292
  async def favicon():
293
  return HTMLResponse("") # or serve a real favicon if you have one
294
 
 
295
  app.mount("/static", StaticFiles(directory="static"), name="static")
 
296
  templates = Jinja2Templates(directory="static")
297
+
298
  # Configure Llama index settings
299
+ Settings.llm = HuggingFaceLLM(
300
  model_name="meta-llama/Meta-Llama-3-8B-Instruct",
301
  tokenizer_name="meta-llama/Meta-Llama-3-8B-Instruct",
302
  context_window=3000,
 
303
  max_new_tokens=512,
304
  generate_kwargs={"temperature": 0.1},
305
+ device_map="auto" # Automatically use GPU if available, else CPU
306
  )
307
+
 
 
 
 
 
 
 
 
308
  Settings.embed_model = HuggingFaceEmbedding(
309
  model_name="BAAI/bge-small-en-v1.5"
310
  )
 
315
  # Ensure directories exist
316
  os.makedirs(PDF_DIRECTORY, exist_ok=True)
317
  os.makedirs(PERSIST_DIR, exist_ok=True)
318
+
319
  chat_history = []
320
  current_chat_history = []
321
+
322
  def data_ingestion_from_directory():
323
  documents = SimpleDirectoryReader(PDF_DIRECTORY).load_data()
324
  storage_context = StorageContext.from_defaults()
 
329
  start_time = time.time()
330
  data_ingestion_from_directory() # Process PDF ingestion at startup
331
  print(f"Data ingestion time: {time.time() - start_time} seconds")
332
+
333
  def split_name(full_name):
334
  # Split the name by spaces
335
  words = full_name.strip().split()
 
336
  # Logic for determining first name and last name
337
  if len(words) == 1:
338
  first_name = ''
 
343
  else:
344
  first_name = words[0]
345
  last_name = ' '.join(words[1:])
 
346
  return first_name, last_name
347
 
348
  initialize() # Run initialization tasks
349
 
 
350
  def handle_query(query):
351
  chat_text_qa_msgs = [
352
  (
 
360
  )
361
  ]
362
  text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)
 
363
  storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
364
  index = load_index_from_storage(storage_context)
365
  context_str = ""
 
367
  if past_query.strip():
368
  context_str += f"User asked: '{past_query}'\nBot answered: '{response}'\n"
369
 
 
370
  query_engine = index.as_query_engine(text_qa_template=text_qa_template, context_str=context_str)
371
  answer = query_engine.query(query)
 
372
  if hasattr(answer, 'response'):
373
+ response = answer.response
374
  elif isinstance(answer, dict) and 'response' in answer:
375
+ response = answer['response']
376
  else:
377
+ response = "Sorry, I couldn't find an answer."
378
  current_chat_history.append((query, response))
379
  return response
380
+
381
  @app.get("/ch/{id}", response_class=HTMLResponse)
382
  async def load_chat(request: Request, id: str):
383
  return templates.TemplateResponse("index.html", {"request": request, "user_id": id})
384
+
385
  # Route to save chat history
386
  @app.post("/hist/")
387
  async def save_chat_history(history: dict):
388
  # Check if 'userId' is present in the incoming dictionary
389
  user_id = history.get('userId')
390
  print(user_id)
 
391
  # Ensure user_id is defined before proceeding
392
  if user_id is None:
393
  return {"error": "userId is required"}, 400
 
394
  # Construct the chat history string
395
  hist = ''.join([f"'{entry['sender']}: {entry['message']}'\n" for entry in history['history']])
396
  hist = "You are a Redfernstech summarize model. Your aim is to use this conversation to identify user interests solely based on that conversation: " + hist
397
  print(hist)
 
398
  # Get the summarized result from the client model
399
  result = hist
 
400
  try:
401
  sf.Lead.update(user_id, {'Description': result})
402
  except Exception as e:
403
  return {"error": f"Failed to update lead: {str(e)}"}, 500
 
404
  return {"summary": result, "message": "Chat history saved"}
405
+
406
  @app.post("/webhook")
407
  async def receive_form_data(request: Request):
408
  form_data = await request.json()
409
  # Log in to Salesforce
410
  first_name, last_name = split_name(form_data['name'])
411
  data = {
412
+ 'FirstName': first_name,
413
+ 'LastName': last_name,
414
+ 'Description': 'hii', # Static description
415
+ 'Company': form_data['company'], # Assuming company is available in form_data
416
+ 'Phone': form_data['phone'].strip(), # Phone from form data
417
+ 'Email': form_data['email'], # Email from form data
418
  }
419
+ a = sf.Lead.create(data)
420
  # Generate a unique ID (for tracking user)
421
  unique_id = a['id']
 
422
  # Here you can do something with form_data like saving it to a database
423
  print("Received form data:", form_data)
 
424
  # Send back the unique id to the frontend
425
  return JSONResponse({"id": unique_id})
426
 
 
436
  }
437
  chat_history.append(message_data)
438
  return {"response": response}
439
+
440
  @app.get("/")
441
  def read_root():
442
  return {"message": "Welcome to the API"}