jameszokah commited on
Commit
84bd7df
·
verified ·
1 Parent(s): 1897f56

Synced repo using 'sync_with_huggingface' Github Action

Browse files
Files changed (9) hide show
  1. .env +4 -1
  2. Dockerfile +1 -1
  3. api.py +23 -21
  4. db/chroma.sqlite3 +0 -0
  5. get_agents.py +20 -0
  6. get_pattern.py +86 -0
  7. requirements.txt +2 -1
  8. test.yaml +38 -0
  9. trained_agents_data.pkl +3 -0
.env CHANGED
@@ -8,6 +8,9 @@ LANGCHAIN_API_KEY=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
8
  LANGCHAIN_PROJECT=pr-earnest-miracle-23
9
  TYPESENSE_API_KEY=xyz
10
  TYPESENSE_DATA_DIR=/
11
- TYPESENSE_PORT=8108
12
  DETA_TOKEN=PBM2kDUq_bqjZNN3DgFfBHPMZU9nk8nj7EGpnu5gq
13
  DETA_TOKEN=85QtEsbv_7B1UUYXKvu1FwiHX7NVZPnRcA7jLjw6E
 
 
 
 
8
  LANGCHAIN_PROJECT=pr-earnest-miracle-23
9
  TYPESENSE_API_KEY=xyz
10
  TYPESENSE_DATA_DIR=/
11
+ TYPESENSE_PORT=8108
12
  DETA_TOKEN=PBM2kDUq_bqjZNN3DgFfBHPMZU9nk8nj7EGpnu5gq
13
  DETA_TOKEN=85QtEsbv_7B1UUYXKvu1FwiHX7NVZPnRcA7jLjw6E
14
+ OPENAI_MODEL_NAME=llama3-70b-8192
15
+ OPENAI_API_KEY=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
16
+ OPENAI_API_BASE=https://api.groq.com/openai/v1
Dockerfile CHANGED
@@ -14,5 +14,5 @@ RUN pip install --no-cache-dir --upgrade -r requirements.txt
14
  EXPOSE 7860
15
 
16
  COPY --chown=user . /app
17
- CMD ["python", "main.py"]
18
 
 
 
14
  EXPOSE 7860
15
 
16
  COPY --chown=user . /app
 
17
 
18
+ CMD ["python", "main.py"]
api.py CHANGED
@@ -3,7 +3,7 @@ import os
3
  from dotenv import load_dotenv
4
 
5
  import asyncio
6
- from fastapi import FastAPI, Body, File, UploadFile
7
  from fastapi.responses import StreamingResponse
8
  from typing import List, AsyncIterable, Annotated, Optional
9
  from enum import Enum
@@ -22,7 +22,9 @@ from langchain_core.documents import Document
22
  from in_memory import load_all_documents
23
  from langchain_nomic.embeddings import Embeddings, NomicEmbeddings
24
  from loader import load_web_content, load_youtube_content
25
- from praisonai import PraisonAI
 
 
26
 
27
  # ################################### FastAPI setup ############################################
28
  app = FastAPI()
@@ -50,6 +52,7 @@ app.add_middleware(
50
  load_dotenv()
51
  GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
52
  GROQ_API_BASE = os.environ.get("GROQ_API_BASE")
 
53
  embedding_model = NomicEmbeddings(model="nomic-embed-text-v1.5")
54
 
55
 
@@ -90,7 +93,7 @@ async def generate_chunks(query: str) -> AsyncIterable[str]:
90
  openai_api_base=GROQ_API_BASE,
91
  api_key=GROQ_API_KEY,
92
  temperature=0.0,
93
- model_name="mixtral-8x7b-32768",
94
  streaming=True, # ! important
95
  verbose=True,
96
  callbacks=[callback]
@@ -136,11 +139,16 @@ async def generate_chunks(query: str) -> AsyncIterable[str]:
136
 
137
 
138
  # ################################### Models ########################################
 
 
 
 
 
139
  class Input(BaseModel):
140
  question: str
141
- type: Optional[Enum('type', ['PATTERN', 'AGENTS', 'RAG'])]
142
  pattern: Optional[str]
143
- chat_history: List[str] # Define the type for chat_history
144
 
145
 
146
  class Metadata(BaseModel):
@@ -164,32 +172,26 @@ def read_root():
164
  return {"Hello": "World from Marigen"}
165
 
166
 
167
- @app.post("/chat")
168
  async def chat(query: RequestBody = Body(...)):
169
- result = None
170
  print(query.input.question)
171
  print(query.input.type)
172
 
173
- if query.input.type == 'PATTERN':
174
  print(query.input.pattern)
 
 
 
175
 
176
- return query.input.pattern
177
-
178
- elif query.input.type == 'AGENTS':
179
- praisonai = PraisonAI(
180
- auto=query.input.question,
181
- framework="autogen"
182
- )
183
- print(praisonai.framework)
184
- result = praisonai.run()
185
- return result
186
 
187
- elif query.input.type == 'RAG':
188
  gen = generate_chunks(query.input.question)
189
  return StreamingResponse(gen, media_type="text/event-stream")
190
 
191
- return result
192
-
193
 
194
  @app.post("/uploadfiles")
195
  async def create_upload_files(
 
3
  from dotenv import load_dotenv
4
 
5
  import asyncio
6
+ from fastapi import FastAPI, Body, File, UploadFile, HTTPException
7
  from fastapi.responses import StreamingResponse
8
  from typing import List, AsyncIterable, Annotated, Optional
9
  from enum import Enum
 
22
  from in_memory import load_all_documents
23
  from langchain_nomic.embeddings import Embeddings, NomicEmbeddings
24
  from loader import load_web_content, load_youtube_content
25
+ from get_pattern import generate_pattern
26
+ from get_agents import process_agents
27
+
28
 
29
  # ################################### FastAPI setup ############################################
30
  app = FastAPI()
 
52
  load_dotenv()
53
  GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
54
  GROQ_API_BASE = os.environ.get("GROQ_API_BASE")
55
+ OPENAI_MODEL_NAME = os.environ.get("OPENAI_MODEL_NAME")
56
  embedding_model = NomicEmbeddings(model="nomic-embed-text-v1.5")
57
 
58
 
 
93
  openai_api_base=GROQ_API_BASE,
94
  api_key=GROQ_API_KEY,
95
  temperature=0.0,
96
+ model_name=OPENAI_MODEL_NAME, # "mixtral-8x7b-32768",
97
  streaming=True, # ! important
98
  verbose=True,
99
  callbacks=[callback]
 
139
 
140
 
141
  # ################################### Models ########################################
142
class QuestionType(str, Enum):
    """Routing discriminator for the /chat endpoint.

    Inherits from ``str`` so the enum compares equal to its plain
    string value and serializes cleanly in request/response bodies.
    """

    PATTERN = "PATTERN"  # answer via a named prompt pattern
    AGENTS = "AGENTS"    # answer via the autonomous-agents pipeline
    RAG = "RAG"          # answer via retrieval-augmented generation
146
+
147
class Input(BaseModel):
    """Payload for /chat: the user question plus routing information."""

    question: str
    # Which pipeline handles the question (PATTERN / AGENTS / RAG).
    type: QuestionType
    # Name of the prompt pattern; only meaningful when type == PATTERN.
    # Explicit default: under pydantic v2 an Optional annotation alone
    # does NOT make the field optional — it must carry a default.
    pattern: Optional[str] = None
    # Prior conversation turns supplied by the client.
    chat_history: List[str]
152
 
153
 
154
  class Metadata(BaseModel):
 
172
  return {"Hello": "World from Marigen"}
173
 
174
 
175
@app.post("/chat", response_class=StreamingResponse)
async def chat(query: RequestBody = Body(...)):
    """Route a chat request to the PATTERN, AGENTS, or RAG pipeline.

    Every successful branch streams server-sent-event chunks back to the
    client; an unhandled type — or a PATTERN request without a pattern
    name — yields HTTP 400.
    """
    print(query.input.question)
    print(query.input.type)

    if query.input.type == QuestionType.PATTERN:
        print(query.input.pattern)
        # `pattern` is Optional on the model; reject early instead of
        # letting generate_pattern fail on a missing pattern directory.
        if not query.input.pattern:
            raise HTTPException(
                status_code=400,
                detail="'pattern' is required when type is PATTERN",
            )
        gen = generate_pattern(pattern=query.input.pattern, query=query.input.question)
        return StreamingResponse(gen, media_type="text/event-stream")

    elif query.input.type == QuestionType.AGENTS:
        gen = process_agents(query.input.question)
        return StreamingResponse(gen, media_type="text/event-stream")

    elif query.input.type == QuestionType.RAG:
        gen = generate_chunks(query.input.question)
        return StreamingResponse(gen, media_type="text/event-stream")

    raise HTTPException(status_code=400, detail="No accurate response for your given query")
 
195
 
196
  @app.post("/uploadfiles")
197
  async def create_upload_files(
db/chroma.sqlite3 ADDED
Binary file (147 kB). View file
 
get_agents.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from praisonai import PraisonAI
3
+ from typing import List, AsyncIterable, Annotated, Optional
4
+
5
async def process_agents(query: str) -> AsyncIterable[str]:
    """Run PraisonAI in auto mode for *query* and stream its output.

    Yields whatever ``praisonai.main()`` produces, item by item, so the
    caller can wrap this generator in a StreamingResponse.
    """
    # Create PraisonAI instance and task
    praisonai = PraisonAI(auto=query)
    print(praisonai.framework)

    # Run the task. NOTE(review): main() is called synchronously here and
    # its result is iterated below. If praisonai 0.0.57's main() returns a
    # plain string rather than an iterable of chunks, this loop would
    # yield one character at a time — confirm against the praisonai API.
    task = praisonai.main()

    for item in task:
        yield item
get_pattern.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from langchain_core.prompts import ChatPromptTemplate
3
+ from langchain_openai import ChatOpenAI
4
+ from dotenv import load_dotenv
5
+ from typing import List, AsyncIterable, Annotated, Optional
6
+ from langchain.callbacks import AsyncIteratorCallbackHandler
7
+ from langchain_core.output_parsers import StrOutputParser
8
+ import asyncio
9
+ import datetime
10
+
11
+
12
+ load_dotenv()
13
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
14
+ GROQ_API_BASE = os.environ.get("GROQ_API_BASE")
15
+ GROQ_MODEL_NAME = os.environ.get("OPENAI_MODEL_NAME")
16
+
17
def read_pattern_files(pattern: str) -> tuple[str, str]:
    """Load the system.md and user.md prompt files for *pattern*.

    Looks under ``patterns/<pattern>/`` and returns
    ``(system_content, user_content)``. A missing file yields ``""`` for
    that slot rather than raising.

    NOTE(review): *pattern* originates from a client request and is joined
    into a filesystem path — a value containing ``..`` could escape the
    patterns directory. Consider validating it against a whitelist.
    """
    pattern_dir = "patterns"

    # Construct the full paths to both optional prompt files.
    system_file_path = os.path.abspath(os.path.join(pattern_dir, pattern, "system.md"))
    user_file_path = os.path.abspath(os.path.join(pattern_dir, pattern, "user.md"))

    print(system_file_path)
    print(user_file_path)

    system_content = ""
    user_content = ""

    # Each file is optional: read it only if present.
    if os.path.exists(system_file_path):
        with open(system_file_path, "r", encoding="utf-8") as file:
            system_content = file.read()

    if os.path.exists(user_file_path):
        with open(user_file_path, "r", encoding="utf-8") as file:
            user_content = file.read()

    return system_content, user_content
42
+
43
+
44
+
45
async def generate_pattern(pattern: str, query: str) -> AsyncIterable[str]:
    """Stream an LLM completion for *query* using *pattern*'s prompt files.

    Builds a system/human prompt from ``patterns/<pattern>/{system,user}.md``,
    runs it through the Groq-hosted chat model, and yields tokens as they
    arrive from the streaming callback.
    """
    callback = AsyncIteratorCallbackHandler()

    chat = ChatOpenAI(
        openai_api_base=GROQ_API_BASE,
        api_key=GROQ_API_KEY,
        temperature=0.0,
        # Use the configured model (same env var api.py uses); fall back
        # to the previously hard-coded model if the env var is unset.
        model_name=GROQ_MODEL_NAME or "mixtral-8x7b-32768",
        streaming=True,  # ! important
        verbose=True,
        callbacks=[callback]
    )

    system, usr_content = read_pattern_files(pattern=pattern)
    print('Sys Content -- > ')
    print(system)
    print('User Content --- > ')
    print(usr_content)

    # user.md acts as a prefix; the raw query is injected via {text}.
    human = usr_content + "{text}"
    prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])

    chain = prompt | chat | StrOutputParser()

    # Run the chain in the background; tokens surface through `callback`.
    task = asyncio.create_task(
        chain.ainvoke({"text": query})
    )
    index = 0
    try:
        async for token in callback.aiter():
            print(index, ": ", token, ": ", datetime.datetime.now().time())
            index = index + 1
            yield token
    except Exception as e:
        print(f"Caught exception: {e}")
    finally:
        # Unblock the iterator even if streaming failed part-way.
        callback.done.set()

    # Await the task so exceptions raised inside the chain are surfaced.
    await task
86
+
requirements.txt CHANGED
@@ -129,4 +129,5 @@ openpyxl
129
  pysqlite3-binary
130
  langchain_nomic
131
  pydub
132
- praisonai
 
 
129
  pysqlite3-binary
130
  langchain_nomic
131
  pydub
132
+ praisonai==0.0.57
133
+ langchain_groq
test.yaml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ framework: crewai
2
+ topic: what is the meaning of life, for a student
3
+ roles:
4
+ researcher:
5
+ backstory: Experienced in search queries and extracting relevant information from
6
+ various sources.
7
+ goal: Gather relevant information on the meaning of life for students
8
+ role: Researcher
9
+ tasks:
10
+ information_gathering:
11
+ description: Gather relevant information on the meaning of life for students
12
+ from various sources, including articles, books, and websites.
13
+ expected_output: Document with collected information and relevant sources.
14
+ tools:
15
+ - ''
16
+ analyst:
17
+ backstory: Skilled in pattern recognition and extracting insights from data.
18
+ goal: Analyze and identify key themes and insights
19
+ role: Analyst
20
+ tasks:
21
+ theme_identification:
22
+ description: Analyze the gathered information and identify key themes and
23
+ insights on the meaning of life for students.
24
+ expected_output: Document with identified themes and insights.
25
+ tools:
26
+ - ''
27
+ writer:
28
+ backstory: Talented in crafting compelling narratives and essays.
29
+ goal: Write a cohesive and inspiring essay on the meaning of life for students
30
+ role: Writer
31
+ tasks:
32
+ essay_writing:
33
+ description: Write a cohesive and inspiring essay on the meaning of life for
34
+ students, incorporating the identified themes and insights.
35
+ expected_output: Well-structured essay on the meaning of life for students.
36
+ tools:
37
+ - ''
38
+ dependencies: []
trained_agents_data.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:926248e52d1fa532c317e37da24ed652ae64110f8219cb5e061668bd3091f048
3
+ size 5