meraj12 committed
Commit 803eaf9 · verified · 1 Parent(s): 7ac2cd1

Delete app.py

Files changed (1)
  1. app.py +0 -100
app.py DELETED
@@ -1,100 +0,0 @@
import streamlit as st
from PIL import Image
import torch
import torch.nn as nn
from torchvision import transforms
import torchvision.transforms.functional as TF


st.set_page_config(page_title="Ghibli Style Converter", layout="centered")


class ConvLayer(nn.Module):
    """Reflection-padded convolution followed by instance norm and ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        reflection_padding = kernel_size // 2
        self.layer = nn.Sequential(
            nn.ReflectionPad2d(reflection_padding),
            nn.Conv2d(in_channels, out_channels, kernel_size, stride),
            nn.InstanceNorm2d(out_channels, affine=True),
            nn.ReLU()
        )

    def forward(self, x):
        return self.layer(x)


class ResidualBlock(nn.Module):
    """Two ConvLayers with a skip connection."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.block = nn.Sequential(
            ConvLayer(channels, channels, 3, 1),
            ConvLayer(channels, channels, 3, 1)
        )

    def forward(self, x):
        return x + self.block(x)


class Generator(nn.Module):
    """Encoder -> residual blocks -> decoder image-to-image generator."""

    def __init__(self):
        super(Generator, self).__init__()
        self.encoder = nn.Sequential(
            ConvLayer(3, 32, 7, 1),
            ConvLayer(32, 64, 3, 2),
            ConvLayer(64, 128, 3, 2),
        )
        self.res_blocks = nn.Sequential(*[ResidualBlock(128) for _ in range(5)])
        self.decoder = nn.Sequential(
            nn.Upsample(scale_factor=2),
            ConvLayer(128, 64, 3, 1),
            nn.Upsample(scale_factor=2),
            ConvLayer(64, 32, 3, 1),
            nn.ReflectionPad2d(3),
            nn.Conv2d(32, 3, 7, 1),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.res_blocks(x)
        x = self.decoder(x)
        return x


@st.cache_resource
def load_model():
    # Load the pretrained Hayao-style generator weights on CPU and
    # cache the model across Streamlit reruns.
    model = Generator()
    model.load_state_dict(torch.load("model/miyazaki_hayao.pth", map_location="cpu"))
    model.eval()
    return model


def preprocess(image):
    # Resize to 256x256 and convert to a batched tensor in [0, 1].
    image = image.convert("RGB")
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor()
    ])
    return transform(image).unsqueeze(0)


def postprocess(tensor):
    # Clamp the output to [0, 1] and convert it back to a PIL image.
    tensor = tensor.squeeze().detach().cpu()
    image = TF.to_pil_image(tensor.clamp(0, 1))
    return image


st.title("🎨 Ghibli Style Image Converter")
st.markdown("Upload your image to see it transformed into a Studio Ghibli-style artwork using AnimeGAN2!")

uploaded_file = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])

if uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="Original Image", use_column_width=True)

    with st.spinner("Converting to Ghibli Style..."):
        model = load_model()
        input_tensor = preprocess(image)
        with torch.no_grad():
            output_tensor = model(input_tensor)
        output_image = postprocess(output_tensor)

    st.image(output_image, caption="Ghibli Style Image", use_column_width=True)
    st.success("Conversion complete!")
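
Since this commit removes the app's only inference entry point, a minimal command-line sketch of the same pipeline may be useful for reference. It assumes the Generator, preprocess, and postprocess definitions from the deleted app.py have been copied into a module named ghibli_model.py (a hypothetical name), that the checkpoint model/miyazaki_hayao.pth referenced by that file is still in the repository, and that input.jpg / output.jpg are placeholder file names.

# Minimal sketch of the deleted inference path, run outside Streamlit.
# Assumes the deleted app.py's Generator, preprocess, and postprocess
# have been saved to ghibli_model.py (hypothetical module name).
import torch
from PIL import Image

from ghibli_model import Generator, preprocess, postprocess  # hypothetical module

model = Generator()
model.load_state_dict(torch.load("model/miyazaki_hayao.pth", map_location="cpu"))
model.eval()

image = Image.open("input.jpg")            # placeholder input path
with torch.no_grad():
    output = model(preprocess(image))      # 1x3x256x256 input -> stylized tensor
postprocess(output).save("output.jpg")     # placeholder output path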