prlabs2023 committed
Commit e9fb7b5 · Parent: b89bf17

Update app.py

Files changed (1)
  1. app.py +62 -25
app.py CHANGED
@@ -28,7 +28,12 @@ class Query2(BaseModel):
     filename:str
     host:str

-
+class QueryM(BaseModel):
+    text: str
+    tokens:int
+    temp:int
+    topp:int
+    topk:int



@@ -63,15 +68,61 @@ async def startup_event():

 audio_space="https://audiospace-1-u9912847.deta.app/uphoto"

-# @app.post("/code")
-# async def get_code(request: Request):
-#     data = await request.form()
-#     code = data.get("code")
-#     global audio_space
-#     print("code ="+code)
-#     audio_space= audio_space+code
-
 import threading
+
+client = InferenceClient()
+
+
+@app.post("/image")
+async def get_answer(q: Query ):
+    text = q.text
+    try:
+        global client
+        imagei = client.text_to_image(text)
+        byte_array = io.BytesIO()
+        imagei.save(byte_array, format='JPEG')
+        response = Response(content=byte_array.getvalue(), media_type="image/png")
+        return response
+
+    except:
+        return JSONResponse({"status":False})
+
+
+@app.post("/mistral")
+async def get_answer(q: QueryM ):
+    text = q.text
+    try:
+        inference = InferenceApi(repo_id="mistralai/Mistral-7B-Instruct-v0.1")
+        generate_kwargs = dict(
+            max_new_tokens=q.tokens,
+            do_sample=True,
+            top_p=q.topp,
+            top_k=q.topk,
+            temperature=q.temp,
+        )
+        params = {"candidate_labels":["refund", "legal", "faq"]}
+        x = inference(inputs,generate_kwargs )[0]['generated_text']
+        x=x.replace(inputs,'')
+        return JSONResponse({"result":x,"status":True})
+    except:
+        return JSONResponse({"status":False})
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+''' to be removed when main code is updated '''
+
 @app.post("/")
 async def get_answer(q: Query ):

@@ -124,7 +175,7 @@ from PIL import Image
 import json


-client = InferenceClient()
+
 # client = InferenceClient(model="SG161222/Realistic_Vision_V1.4")


@@ -159,21 +210,7 @@ def do_ML(filename:str,text:str,code:str,host:str):
     data={"text":text,"filename":filename}
     requests.post(host+"texttoimage2handleerror",data=data)

-@app.post("/image")
-async def get_answer(q: Query ):
-    text = q.text
-    try:
-        global client
-        imagei = client.text_to_image(text)
-        byte_array = io.BytesIO()
-        imagei.save(byte_array, format='JPEG')
-        response = Response(content=byte_array.getvalue(), media_type="image/png")
-        return response
-
-    except:
-        return JSONResponse({"status":False})
-
-
+


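As committed, the new /mistral route has a few rough edges: it references an undefined variable `inputs`, builds a `params` dict of candidate labels that is never used, reuses the coroutine name `get_answer` already taken by other routes in the file, and declares `temp` and `topp` as `int`, which rules out the usual fractional sampling values. Below is a minimal corrected sketch of what the handler presumably intends, not the committed code: `inputs` is assumed to be the request's `text`, the unused dict is dropped, the two fields are typed as float, and the handler gets its own name (`get_mistral_answer` is introduced here only for illustration).

# Hypothetical corrected /mistral handler; a sketch, not part of commit e9fb7b5.
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from huggingface_hub.inference_api import InferenceApi
from pydantic import BaseModel

app = FastAPI()

class QueryM(BaseModel):
    text: str
    tokens: int
    temp: float   # float instead of the committed int, so values like 0.7 are accepted
    topp: float
    topk: int

@app.post("/mistral")
async def get_mistral_answer(q: QueryM):
    try:
        inference = InferenceApi(repo_id="mistralai/Mistral-7B-Instruct-v0.1")
        generate_kwargs = dict(
            max_new_tokens=q.tokens,
            do_sample=True,
            top_p=q.topp,
            top_k=q.topk,
            temperature=q.temp,
        )
        inputs = q.text  # the prompt; the committed code never defines `inputs`
        generated = inference(inputs, params=generate_kwargs)[0]["generated_text"]
        generated = generated.replace(inputs, "")  # strip the echoed prompt
        return JSONResponse({"result": generated, "status": True})
    except Exception:
        return JSONResponse({"status": False})

FastAPI registers routes from the decorators, not from function names, so the duplicated `get_answer` in the commit still serves all routes; the rename above is only for readability.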
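For quick verification, the two routes added or moved in this commit can be exercised with any HTTP client. The sketch below uses `requests` against a placeholder `BASE` URL; it assumes the `Query` model read by /image accepts a JSON body with at least a `text` field (its other fields, if any, are outside this diff), while the QueryM fields are taken from the hunk above.

# Hypothetical client-side check of the updated endpoints; BASE is a placeholder.
import requests

BASE = "http://localhost:7860"  # assumption: the Space served locally on the default port

# /image returns raw JPEG bytes (labelled image/png by the handler) or {"status": false}.
img = requests.post(f"{BASE}/image", json={"text": "a lighthouse at dawn"})
if img.headers.get("content-type", "").startswith("image/"):
    with open("out.jpg", "wb") as f:
        f.write(img.content)
else:
    print(img.json())

# /mistral takes the new QueryM body: the prompt plus sampling parameters
# (sent as integers here because the committed model declares them as int).
resp = requests.post(f"{BASE}/mistral", json={
    "text": "Explain FastAPI in one sentence.",
    "tokens": 128,
    "temp": 1,
    "topp": 1,
    "topk": 50,
})
print(resp.json())  # {"result": "...", "status": true} on success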