tensorgirl committed on
Commit ddb8c24 · verified · 1 Parent(s): c764a93

Upload 2 files

Files changed (2)
  1. app.py +58 -112
  2. main.py +3 -3
app.py CHANGED
@@ -1,112 +1,58 @@
- from langchain_community.document_loaders import TextLoader
- import os
-
- loaders = []
- folder_path = "Data"
-
- for i in range(12):
-     file_path = os.path.join(folder_path, "{}.txt".format(i))
-     loaders.append(TextLoader(file_path))
-
- docs = []
- for loader in loaders:
-     docs.extend(loader.load())
-
- from langchain.vectorstores import Chroma
- from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings
-
- HF_TOKEN = os.getenv("HF_TOKEN")
- embeddings = HuggingFaceInferenceAPIEmbeddings(
-     api_key=HF_TOKEN, model_name="sentence-transformers/all-mpnet-base-v2"
- )
-
- vectordb = Chroma.from_documents(
-     documents=docs,
-     embedding=embeddings
- )
-
- from langchain_community.llms import HuggingFaceHub
-
- llm = HuggingFaceHub(
-     repo_id="google/gemma-1.1-2b-it",
-     task="text-generation",
-     model_kwargs={
-         "max_new_tokens": 7000,
-         "top_k": 5,
-         "temperature": 0.1,
-         "repetition_penalty": 1.03,
-     },
-     huggingfacehub_api_token=HF_TOKEN
- )
-
- from langchain.prompts import PromptTemplate
-
- template = """You are a Chatbot at a Restaurant. Help the customer pick the right dish to order. The items in the context are dishes. The field below the item is the cost of the dish. About is the description of the dish. Use the context below to answer the questions
- {context}
- Question: {question}
- Helpful Answer:"""
- QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
-
- from langchain.memory import ConversationBufferMemory
- memory = ConversationBufferMemory(
-     memory_key="chat_history",
-     return_messages=True
- )
-
- from langchain.chains import ConversationalRetrievalChain
-
- retriever = vectordb.as_retriever()
- qa = ConversationalRetrievalChain.from_llm(
-     llm,
-     retriever=retriever,
-     memory=memory,
- )
-
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
- from langchain_core.output_parsers import StrOutputParser
- from langchain_core.runnables import RunnablePassthrough
-
- contextualize_q_system_prompt = """Given a chat history and the latest user question \
- which might reference context in the chat history, formulate a standalone question \
- which can be understood without the chat history. Do NOT answer the question, \
- just reformulate it if needed and otherwise return it as is."""
- contextualize_q_prompt = ChatPromptTemplate.from_messages(
-     [
-         ("system", contextualize_q_system_prompt),
-         MessagesPlaceholder(variable_name="chat_history"),
-         ("human", "{question}"),
-     ]
- )
- contextualize_q_chain = contextualize_q_prompt | llm | StrOutputParser()
-
- def contextualized_question(input: dict):
-     if input.get("chat_history"):
-         return contextualize_q_chain
-     else:
-         return input["question"]
-
-
- rag_chain = (
-     RunnablePassthrough.assign(
-         context=contextualized_question | retriever
-     )
-     | QA_CHAIN_PROMPT
-     | llm
- )
-
-
- from langchain_core.messages import AIMessage, HumanMessage
-
- os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
- os.environ["WANDB_PROJECT"] = "Restaurant_ChatBot"
-
- print("Welcome to the Restaurant. How can I help you today?")
- chat_history = []
-
- def predict(message):
-     ai_msg = rag_chain.invoke({"question": message, "chat_history": chat_history})
-     idx = ai_msg.find("Answer")
-     chat_history.extend([HumanMessage(content=message), ai_msg])
-
-     return ai_msg[idx:]
-
 
+ import os
+
+ from langchain_groq import ChatGroq
+
+ llm = ChatGroq(
+     temperature=0,
+     # API key supplied via the GROQ_API_KEY environment variable
+     groq_api_key=os.getenv("GROQ_API_KEY"),
+     model_name="llama3-8b-8192"
+ )
+
+ from crewai import Agent, Task, Crew
+
+ Code_Quality_agent = Agent(
+     role="Senior Software Engineer",
+     goal="Provide the best quality-assurance support for the code written by the members of your team",
+     backstory="You work in a financial organization. The goal is to identify bugs;"
+               " security should be a top concern. Look for common"
+               " vulnerabilities like SQL injection, cross-site scripting (XSS),"
+               " and insecure data handling practices. Ensure the code adheres to"
+               " secure coding standards established by the organization or"
+               " industry. Scrutinize how the code validates user input to prevent"
+               " malicious attacks. For code involving financial calculations"
+               " (e.g., interest rates, risk assessments), double-check the formulas"
+               " and logic for accuracy. Consider edge cases and ensure the code"
+               " behaves as intended under various scenarios. Verify that the code"
+               " maintains data integrity throughout processing. This includes"
+               " checking for potential data loss, corruption, or unauthorized access."
+               " Ensure the code complies with relevant industry standards and regulations.",
+     verbose=True,
+     allow_delegation=False,
+     llm=llm,
+ )
+
+ Code_Review = Task(
+     expected_output=(
+         "You would be given code as an input for the code review {code}. "
+         "Make sure to use everything you know to provide the best support possible. "
+         "Provide clear and actionable feedback to the code author. Maintain a"
+         " collaborative and respectful tone throughout the review process. Ensure"
+         " the code complies with relevant industry standards and regulations."
+     ),
+     description=(
+         "You would be given code as an input for the code review {code}. "
+         "Make sure to use everything you know to provide the best support possible. "
+         "You must strive to provide a complete and accurate response."
+     ),
+     llm=llm,
+     agent=Code_Quality_agent,
+ )
+
+ crew = Crew(
+     agents=[Code_Quality_agent],
+     tasks=[Code_Review],
+     verbose=2,
+ )
+
+ def predict(code):
+     inputs = {"code": code}
+     result = crew.kickoff(inputs=inputs)
+     return result
+
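A quick way to sanity-check the new crew-based predict helper is a local script along these lines; the file name, the sample snippet, and the exported GROQ_API_KEY environment variable are assumptions for illustration, not part of this commit.

# smoke_test.py (hypothetical): run the single-agent review crew on a small snippet.
# Assumes GROQ_API_KEY is set in the environment before app.py is imported.
from app import predict

sample_code = """
def withdraw(balance, amount):
    # no check that amount is positive or covered by the balance
    return balance - amount
"""

if __name__ == "__main__":
    review = predict(sample_code)  # kicks off the crew and returns its review
    print(review)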
 
main.py CHANGED
@@ -10,8 +10,8 @@ app = FastAPI()
 
  @app.get("/")
  async def root():
-     return {"Restaurant ChatBot":"Version 1.0 'First Draft'"}
+     return {"Code Review Automation":"Version 1.0 'First Draft'"}
 
- @app.post("/Predict/")
- def read_user(input_json: str):
+ @app.post("/AutomateReview/")
+ def automate_review(input_json: str):
      return predict(input_json)
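With the API served (for example, uvicorn main:app --port 8000), the renamed route could be exercised roughly as below; the host, port, and sample payload are assumptions for illustration. Because the handler declares a bare input_json: str parameter, FastAPI expects it as a query parameter.

# client_example.py (hypothetical): call the updated /AutomateReview/ endpoint.
import requests

snippet = "def add(a, b):\n    return a + b"

response = requests.post(
    "http://localhost:8000/AutomateReview/",
    params={"input_json": snippet},  # bare str parameters are read from the query string
)
print(response.status_code)
print(response.text)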