som11 committed
Commit 7110ce1 · verified · 1 Parent(s): dbe8c56

Update app.py

Files changed (1)
  1. app.py +69 -69
app.py CHANGED
@@ -1,69 +1,69 @@
- import os
- from dotenv import load_dotenv
- from langchain_groq import ChatGroq
- from langchain_core.prompts import ChatPromptTemplate
- from fastapi import FastAPI
- from fastapi.middleware.cors import CORSMiddleware
- from pydantic import BaseModel
-
-
- load_dotenv()
-
-
- groq_api_key = os.getenv('GROQ_API_KEY')
-
-
- llm_model = ChatGroq(
-     groq_api_key=groq_api_key,
-     model_name="Llama3-8b-8192"
- )
-
-
- app = FastAPI()
-
-
- origins = ["*"]
-
-
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=origins,
-     allow_credentials=True,
-     allow_methods=["*"],
-     allow_headers=["*"],
- )
-
-
- class textFromFrontendModel(BaseModel):
-     textFromNextJSFrontend: str
-
-
-
- @app.get('/')
- def welcome():
-     return {
-         'success': True,
-         'message': 'server of "fitbites is up and running successfully '
-     }
-
-
- @app.post('/predict')
- async def predict(incomingTextFromFrontend: textFromFrontendModel):
-
-     prompt_text = incomingTextFromFrontend.textFromNextJSFrontend
-
-     prompt_template = ChatPromptTemplate.from_template(
-         """
-         {text}
-         """
-     )
-
-     chain = prompt_template | llm_model
-
-     response_from_model = chain.invoke({"text": prompt_text})
-
-     return {
-         'success': True,
-         'response_from_model': response_from_model
-     }
-
 
+ import os
+ from dotenv import load_dotenv
+ from langchain_groq import ChatGroq
+ from langchain_core.prompts import ChatPromptTemplate
+ from fastapi import FastAPI
+ from fastapi.middleware.cors import CORSMiddleware
+ from pydantic import BaseModel
+
+
+ load_dotenv()
+
+
+ groq_api_key = os.getenv('GROQ_API_KEY')
+
+
+ llm_model = ChatGroq(
+     groq_api_key=groq_api_key,
+     model_name="Llama3-8b-8192"
+ )
+
+
+ app = FastAPI()
+
+
+ origins = ["*"]
+
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=origins,
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+
+ class textFromFrontendModel(BaseModel):
+     textFromNextJSFrontend: str
+
+
+
+ @app.get('/')
+ def welcome():
+     return {
+         'success': True,
+         'message': 'server of "fitbites is up and running successfully '
+     }
+
+
+ @app.post('/predict')
+ async def predict(incomingTextFromFrontend: textFromFrontendModel):
+
+     prompt_text = incomingTextFromFrontend.textFromNextJSFrontend
+
+     prompt_template = ChatPromptTemplate.from_template(
+         """
+         {text}
+         """
+     )
+
+     chain = prompt_template | llm_model
+
+     response_from_model = chain.invoke({"text": prompt_text})
+
+     return {
+         'success': True,
+         'response_from_model': response_from_model
+     }
+
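
For reference, a minimal sketch of how a client might call the /predict route defined in this file, assuming the app is served locally (e.g. with uvicorn app:app --port 8000) and that the requests package is installed; the host, port, and sample text are placeholders and are not part of the commit.

# Hypothetical client call for the /predict endpoint in app.py above.
# The base URL assumes a local `uvicorn app:app --port 8000` run (an assumption, not part of the commit).
import requests

payload = {"textFromNextJSFrontend": "Suggest a high-protein breakfast under 500 calories."}
resp = requests.post("http://localhost:8000/predict", json=payload)

print(resp.status_code)                     # 200 on success
print(resp.json()["success"])               # True
print(resp.json()["response_from_model"])   # serialized ChatGroq message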