FlameF0X committed on
Commit
010ba06
·
verified ·
1 Parent(s): 642232a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -5
app.py CHANGED
@@ -2,11 +2,14 @@ from fastapi import FastAPI
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
 
5
 
6
- # Initialize the model and tokenizer (Tiny GPT-2)
7
- model_name = "./tiny-gpt2" # Path to your tiny-gpt2 folder
8
- tokenizer = AutoTokenizer.from_pretrained(model_name)
9
- model = AutoModelForCausalLM.from_pretrained(model_name)
 
 
10
 
11
  # FastAPI app
12
  app = FastAPI()
@@ -28,4 +31,3 @@ async def generate_text(request: PromptRequest):
28
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
29
 
30
  return {"generated_text": generated_text}
31
-
 
2
  from pydantic import BaseModel
3
  from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import torch
5
+ import os
6
 
7
+ # Define the path where the model is located
8
+ model_directory = "tiny-gpt2" # Make sure this is the correct relative or absolute path to your model folder
9
+
10
+ # Initialize the model and tokenizer using the correct directory path
11
+ tokenizer = AutoTokenizer.from_pretrained(model_directory)
12
+ model = AutoModelForCausalLM.from_pretrained(model_directory)
13
 
14
  # FastAPI app
15
  app = FastAPI()
 
31
  generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
32
 
33
  return {"generated_text": generated_text}