radub23 committed on
Commit
b835b89
·
1 Parent(s): 35ce90a

Integrate FastAI model for warning lamp detection and update dependencies

Browse files
Files changed (3) hide show
  1. .gitignore +20 -0
  2. app.py +50 -27
  3. requirements.txt +4 -1
.gitignore ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # Jupyter Notebook
7
+ .ipynb_checkpoints
8
+ Inference_notebook_demo.ipynb
9
+
10
+ # Virtual Environment
11
+ venv/
12
+ env/
13
+ .env/
14
+
15
+ # IDE
16
+ .vscode/
17
+ .idea/
18
+
19
+ # Misc
20
+ .DS_Store
app.py CHANGED
@@ -1,40 +1,56 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
 
3
  import os
4
 
5
  """
6
- Warning Lamp Detector using Hugging Face Inference API
7
  This application allows users to upload images of warning lamps and get classification results.
8
  """
9
 
10
- # Initialize the client with your model
11
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
 
 
 
 
 
12
 
13
  def detect_warning_lamp(image, history: list[tuple[str, str]], system_message):
14
  """
15
- Process the uploaded image and return detection results
 
 
 
 
 
 
16
  """
17
- # TODO: Replace with actual model inference
18
- # This is a placeholder response - you'll need to integrate your actual model
19
- messages = [{"role": "system", "content": system_message}]
20
-
21
- # Add the image analysis request
22
- messages.append({
23
- "role": "user",
24
- "content": f"Please analyze this warning lamp image and provide a detailed classification."
25
- })
26
-
27
- response = ""
28
- for message in client.chat_completion(
29
- messages,
30
- max_tokens=512,
31
- stream=True,
32
- temperature=0.7,
33
- top_p=0.95,
34
- ):
35
- token = message.choices[0].delta.content
36
- response += token
37
- yield response
 
 
38
 
39
  # Create a custom interface with image upload
40
  with gr.Blocks(title="Warning Lamp Detector", theme=gr.themes.Soft()) as demo:
@@ -46,8 +62,14 @@ with gr.Blocks(title="Warning Lamp Detector", theme=gr.themes.Soft()) as demo:
46
  1. Upload a clear image of the warning lamp
47
  2. Wait for the analysis
48
  3. View the detailed classification results
 
 
49
  """)
50
 
 
 
 
 
51
  with gr.Row():
52
  with gr.Column(scale=1):
53
  image_input = gr.Image(
@@ -58,7 +80,8 @@ with gr.Blocks(title="Warning Lamp Detector", theme=gr.themes.Soft()) as demo:
58
  system_message = gr.Textbox(
59
  value="You are an expert in warning lamp classification. Analyze the image and provide detailed information about the type, color, and status of the warning lamp.",
60
  label="System Message",
61
- lines=3
 
62
  )
63
 
64
  with gr.Column(scale=1):
 
1
  import gradio as gr
2
+ from fastai.vision.all import *
3
+ from fastai.learner import load_learner
4
+ from pathlib import Path
5
  import os
6
 
7
  """
8
+ Warning Lamp Detector using FastAI
9
  This application allows users to upload images of warning lamps and get classification results.
10
  """
11
 
12
+ # Load the FastAI model
13
+ try:
14
+ model_path = Path("WarningLampClassifier.pkl")
15
+ learn_inf = load_learner(model_path)
16
+ print("Model loaded successfully")
17
+ except Exception as e:
18
+ print(f"Error loading model: {e}")
19
+ raise
20
 
21
  def detect_warning_lamp(image, history: list[tuple[str, str]], system_message):
22
  """
23
+ Process the uploaded image and return detection results using FastAI model
24
+ Args:
25
+ image: PIL Image from Gradio
26
+ history: Chat history
27
+ system_message: System prompt
28
+ Returns:
29
+ Updated chat history with prediction results
30
  """
31
+ try:
32
+ # Convert PIL image to FastAI compatible format
33
+ img = PILImage(image)
34
+
35
+ # Get model prediction
36
+ pred_class, pred_idx, probs = learn_inf.predict(img)
37
+
38
+ # Format the prediction results
39
+ confidence = float(probs[pred_idx]) # Convert to float for better formatting
40
+ response = f"Detected Warning Lamp: {pred_class}\nConfidence: {confidence:.2%}"
41
+
42
+ # Add probabilities for all classes
43
+ response += "\n\nProbabilities for all classes:"
44
+ for i, (cls, prob) in enumerate(zip(learn_inf.dls.vocab, probs)):
45
+ response += f"\n- {cls}: {float(prob):.2%}"
46
+
47
+ # Update chat history
48
+ history.append((None, response))
49
+ return history
50
+ except Exception as e:
51
+ error_msg = f"Error processing image: {str(e)}"
52
+ history.append((None, error_msg))
53
+ return history
54
 
55
  # Create a custom interface with image upload
56
  with gr.Blocks(title="Warning Lamp Detector", theme=gr.themes.Soft()) as demo:
 
62
  1. Upload a clear image of the warning lamp
63
  2. Wait for the analysis
64
  3. View the detailed classification results
65
+
66
+ ### Supported Warning Lamps:
67
  """)
68
 
69
+ # Display supported classes if available
70
+ if 'learn_inf' in locals():
71
+ gr.Markdown("\n".join([f"- {cls}" for cls in learn_inf.dls.vocab]))
72
+
73
  with gr.Row():
74
  with gr.Column(scale=1):
75
  image_input = gr.Image(
 
80
  system_message = gr.Textbox(
81
  value="You are an expert in warning lamp classification. Analyze the image and provide detailed information about the type, color, and status of the warning lamp.",
82
  label="System Message",
83
+ lines=3,
84
+ visible=False # Hide this since we're using direct model inference
85
  )
86
 
87
  with gr.Column(scale=1):
requirements.txt CHANGED
@@ -1,3 +1,6 @@
1
  gradio>=4.19.2
2
  huggingface-hub>=0.20.3
3
- Pillow>=10.0.0 # Required for image processing
 
 
 
 
1
  gradio>=4.19.2
2
  huggingface-hub>=0.20.3
3
+ Pillow>=10.0.0 # Required for image processing
4
+ fastai>=2.7.13 # Required for model inference
5
+ torch>=2.2.0 # Required by FastAI
6
+ torchvision>=0.17.0 # Required by FastAI