|
|
|
# Standard library
import os
import pathlib
import textwrap

# Third party
import anvil.server
import torch
from transformers import AutoTokenizer, AutoModel

# import_ipynb must be imported before the project modules below in case
# they are provided as notebooks.
import import_ipynb

# Project local
from library import call_gpt, call_gemini, encode_gemini
from background_service import BackgroundTaskService
|
|
|
# Connect this process to Anvil as an uplink server.
# NOTE(security): the uplink key used to be hard-coded here and is therefore
# leaked in version control — rotate it in the Anvil dashboard. Prefer
# supplying it via the ANVIL_UPLINK_KEY environment variable; the old literal
# remains only as a backward-compatible fallback.
anvil.server.connect(os.environ.get('ANVIL_UPLINK_KEY',
                                    'PLMOIU5VCGGUOJH2XORIBWV3-ZXZVFLWX7QFIIAF4'))
|
|
|
|
|
# Help/banner payload listing the server functions this uplink publishes.
# (Key 'messageL' is part of the wire format consumed by clients — keep as-is.)
MESSAGED = {
    'title': 'API Server',
    'messageL': [
        'published server functions:',
        'encode(text)',
        'call_gemini(text,key)',
        'call_gpt(text,key,model)',
        'task_id<=launch(func_name,*args)',
        'poll(task_id)',
    ],
}
|
|
|
# Load the SPECTER embedding model once at startup (weights are downloaded
# and cached by Hugging Face on first run). Both objects are consumed by
# encode_anvil() below.
tokenizer = AutoTokenizer.from_pretrained('allenai/specter')

encoder = AutoModel.from_pretrained('allenai/specter')
|
|
|
# Publish the imported helpers as Anvil server functions under their own
# names (anvil.server.callable used as a plain function, not a decorator).
anvil.server.callable(call_gpt)

anvil.server.callable(call_gemini)

anvil.server.callable(encode_gemini)
|
|
|
# Background-task runner shared by launch()/poll(); capped at 10 concurrent
# tasks. Only the registered functions may be launched by name.
service = BackgroundTaskService(max_tasks=10)
for _fn in (call_gpt, call_gemini):
    service.register(_fn)
|
|
|
@anvil.server.callable
def launch(func_name, *args):
    """Start a registered function in the background service.

    Args:
        func_name: name of a function previously register()ed with the service.
        *args: positional arguments forwarded to that function.

    Returns:
        The task id to pass to poll() to retrieve the result.
    """
    new_task = service.launch_task(func_name, *args)
    print(f"Task launched with ID: {new_task}")
    return new_task
|
|
|
@anvil.server.callable
def poll(task_id):
    """Fetch the outcome of a previously launched background task.

    Returns the task's result when it has finished (JSON-friendly values are
    returned as-is, anything else is stringified), or the status strings
    'In Progress' / 'No such task'. A finished result is deleted from the
    service's results table, so it can be collected only once.
    """
    outcome = service.get_result(task_id)
    # Guard clauses for the two sentinel statuses reported by the service.
    if outcome == 'No such task':
        return str(outcome)
    if outcome == 'In Progress':
        return str(outcome)
    # Finished: consume the entry so repeated polls don't re-deliver it.
    del service.results[task_id]
    if isinstance(outcome, (int, float, str, list, dict, tuple)):
        return outcome
    # Non-portable object (e.g. an exception) — log locally, return its repr.
    print(str(outcome))
    return str(outcome)
|
|
|
@anvil.server.callable
def encode_anvil(text):
    """Embed text with the SPECTER model and return plain nested lists.

    Args:
        text: a string or list of strings (the tokenizer accepts either);
            inputs are truncated to 512 tokens.

    Returns:
        list: one embedding (list of floats) per input document — the
        hidden state of the first token of the final layer, converted to
        plain Python lists so the value is portable over Anvil's RPC.
    """
    inputs = tokenizer(text, padding=True, truncation=True,
                       return_tensors="pt", max_length=512)
    # Inference only: no_grad() skips building the autograd graph, saving
    # memory and time on every request (output is unchanged).
    with torch.no_grad():
        result = encoder(**inputs)
    # First-token hidden state serves as the document embedding.
    embeddings = result.last_hidden_state[:, 0, :]
    return embeddings.numpy().tolist()
|
|
|
@anvil.server.callable
def reset_service():
    """Replace the background-task service with a fresh, empty instance.

    Drops all pending/finished task state and re-registers the launchable
    functions. Only `service` needs a global declaration — the function
    references are merely read.
    """
    global service
    service = BackgroundTaskService(max_tasks=10)
    for fn in (call_gpt, call_gemini):
        service.register(fn)
|
|
|
@anvil.server.callable
def print_results_table():
    """Return the service's raw task-results mapping (debugging aid)."""
    return service.results
|
|
|
# Block here serving Anvil calls until the process is killed or the
# uplink connection is closed.
anvil.server.wait_forever()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|