peterpeter8585 committed
Commit fb0e311 · verified · 1 Parent(s): e562172

Update app.py

Files changed (1): app.py (+99 -0)
app.py CHANGED
@@ -62,6 +62,105 @@ prompt = ChatPromptTemplate.from_messages(
  ("human", human),
  ]
  )
+ from typing import Any, Dict, List, Optional
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
+ from langchain_core.outputs import ChatResult, ChatGeneration
+ from langchain_core.callbacks.manager import CallbackManagerForLLMRun
+ from langchain_core.callbacks.manager import AsyncCallbackManagerForLLMRun
+ from langchain_core.runnables import run_in_executor
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ class Chatchat(BaseChatModel):
+     # Typed as Any so the pydantic-based BaseChatModel accepts the
+     # transformers objects without arbitrary_types_allowed.
+     model_name: str = "peterpeter8585/deepseek_1"
+     tokenizer: Any = None
+     model: Any = None
+     model_path: Optional[str] = None
+
+     def __init__(self, model_path: Optional[str] = None, **kwargs: Any) -> None:
+         super().__init__(**kwargs)
+         if model_path is not None:
+             self.model_name = model_path
+
+         self.tokenizer = AutoTokenizer.from_pretrained(self.model_name, trust_remote_code=True)
+         self.model = AutoModelForCausalLM.from_pretrained(
+             self.model_name, trust_remote_code=True)
+
+     def _call(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> str:
+         # Wrap the prompt in the model's chat template and generate a reply
+         messages = [
+             {"role": "system", "content": "You are Chatchat, a helpful assistant for code."},
+             {"role": "user", "content": prompt}
+         ]
+
+         text = self.tokenizer.apply_chat_template(
+             messages,
+             tokenize=False,
+             add_generation_prompt=True
+         )
+         model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)
+         generated_ids = self.model.generate(
+             **model_inputs,
+             max_new_tokens=512
+         )
+         # Drop the prompt tokens so only the newly generated text is decoded
+         generated_ids = [
+             output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+         ]
+
+         response = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+         return response
+
+     async def _acall(
+         self,
+         prompt: str,
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> str:
+         # Run the blocking _call in a thread pool so the event loop stays free
+         return await run_in_executor(
+             None,
+             self._call,
+             prompt,
+             stop,
+             run_manager.get_sync() if run_manager else None,
+             **kwargs,
+         )
+
+     @property
+     def _llm_type(self) -> str:
+         return "custom-llm-chat"
+
+     @property
+     def _identifying_params(self) -> Dict[str, Any]:
+         return {"model_name": self.model_name}
+
+     def _generate(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs: Any,
+     ) -> ChatResult:
+         # Assumes the first message carries the full prompt text
+         prompt = messages[0].content
+         response_text = self._call(prompt, stop, run_manager, **kwargs)
+
+         # Wrap the raw string in an AIMessage for the chat interface
+         ai_message = AIMessage(content=response_text)
+         return ChatResult(generations=[ChatGeneration(message=ai_message)])
+
+
+ llm = Chatchat()
  #from transformers import pipeline,AutoModelForCausalLM as M,AutoTokenizer as T
  #m=M.from_pretrained("peterpeter8585/syai4.3")
  #t=T.from_pretrained("peterpeter8585/syai4.3")
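
For orientation, a minimal usage sketch of the class this commit adds (not part of the diff itself); it assumes the default peterpeter8585/deepseek_1 checkpoint downloads and loads, and the prompt string is illustrative:

# Minimal usage sketch (not part of the commit); assumes the default
# peterpeter8585/deepseek_1 checkpoint loads successfully.
llm = Chatchat()

# invoke() accepts a plain string, which LangChain wraps as a HumanMessage;
# _generate() then routes that text through _call().
result = llm.invoke("Write a Python function that reverses a string.")
print(result.content)

Note that because _generate only reads messages[0].content, piping the multi-message prompt template into this model would hand it the system message rather than the human turn; invoking with a single string sidesteps that.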