memorease committed on
Commit
ee74553
·
verified ·
1 Parent(s): 736e2a4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -1,13 +1,12 @@
1
  from fastapi import FastAPI
2
  from pydantic import BaseModel
3
- from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import torch
5
- from transformers import AutoModel
6
 
7
  app = FastAPI()
8
 
9
- # Hugging Face modelini yükle (CPU'da çalışacak şekilde)
10
- model = AutoModel.from_pretrained("memorease/memorease-quizgen")
11
  tokenizer = AutoTokenizer.from_pretrained("memorease/memorease-quizgen")
12
 
13
  class Memory(BaseModel):
@@ -19,4 +18,4 @@ def generate(memory: Memory):
19
  inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128)
20
  outputs = model.generate(**inputs, max_new_tokens=64)
21
  question = tokenizer.decode(outputs[0], skip_special_tokens=True)
22
- return {"question": question}
 
1
  from fastapi import FastAPI
2
  from pydantic import BaseModel
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
 
5
 
6
  app = FastAPI()
7
 
8
+ # MODELİ DOĞRU ŞEKİLDE YÜKLE
9
+ model = AutoModelForCausalLM.from_pretrained("memorease/memorease-quizgen")
10
  tokenizer = AutoTokenizer.from_pretrained("memorease/memorease-quizgen")
11
 
12
  class Memory(BaseModel):
 
18
  inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128)
19
  outputs = model.generate(**inputs, max_new_tokens=64)
20
  question = tokenizer.decode(outputs[0], skip_special_tokens=True)
21
+ return {"question": question}