muskangoyal06 committed on
Commit
75bac2f
·
verified ·
1 Parent(s): bb99c9a

Update app.py

Files changed (1)
  1. app.py +109 -30
app.py CHANGED
@@ -2,49 +2,128 @@ import gradio as gr
  from ultralyticsplus import YOLO, render_result
  import cv2
  import time

- # Load model
  model = YOLO('foduucom/plant-leaf-detection-and-classification')

- # Model configuration
- model.overrides.update({
      'conf': 0.25,
      'iou': 0.45,
      'imgsz': 640,
-     'device': '0' if next(model.model.parameters()).is_cuda else 'cpu'
- })

  def detect_leaves(image):
-     start_time = time.time()

-     # Convert image format
-     img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

-     # Predict
-     results = model.predict(img, verbose=False)

-     # Process results
-     num_leaves = len(results[0].boxes)
-     rendered_img = render_result(model=model, image=img, result=results[0])

-     print(f"Processing time: {time.time() - start_time:.2f}s")
-     return cv2.cvtColor(rendered_img, cv2.COLOR_BGR2RGB), num_leaves
-
- # Create interface with queue support
- interface = gr.Interface(
-     fn=detect_leaves,
-     inputs=gr.Image(label="Plant Image"),
-     outputs=[
-         gr.Image(label="Detection Result", width=600),
-         gr.Number(label="Leaves Count")
-     ],
-     title="🍃 Leaf Detection",
-     flagging_mode="never"  # Updated from allow_flagging
- )

  if __name__ == "__main__":
-     interface.launch(
          server_port=7860,
-         share=False,
-         # Removed enable_queue parameter
      )
 
  from ultralyticsplus import YOLO, render_result
  import cv2
  import time
+ import torch

+ # --------------------------
+ # System Checks & Optimization
+ # --------------------------
+ print("\n" + "="*40)
+ print("System Configuration Check:")
+ print(f"PyTorch Version: {torch.__version__}")
+ print(f"CUDA Available: {torch.cuda.is_available()}")
+ print(f"CUDA Device Count: {torch.cuda.device_count()}")
+ if torch.cuda.is_available():
+     print(f"Using GPU: {torch.cuda.get_device_name(0)}")
+ else:
+     print("Using CPU - For better performance, consider using a GPU environment")
+ print("="*40 + "\n")
+
+ # --------------------------
+ # Model Configuration
+ # --------------------------
+ # Load model with performance optimizations
  model = YOLO('foduucom/plant-leaf-detection-and-classification')

+ # Configure model parameters
+ model_params = {
      'conf': 0.25,
      'iou': 0.45,
      'imgsz': 640,
+     'device': 'cuda' if torch.cuda.is_available() else 'cpu',
+     'half': True if torch.cuda.is_available() else False  # FP16 acceleration
+ }
+ model.overrides.update(model_params)
+
+ # Warmup model with dummy input
+ print("Performing model warmup...")
+ dummy_input = torch.randn(1, 3, 640, 640).to(model_params['device'])
+ if model_params['half']:
+     dummy_input = dummy_input.half()
+ model.predict(dummy_input, verbose=False)
+ print("Model warmup complete!\n")
+
+ # --------------------------
+ # Image Processing Pipeline
+ # --------------------------
+ def preprocess_image(image):
+     """Optimized image preprocessing"""
+     # Convert RGB to BGR
+     img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+     # Resize maintaining aspect ratio
+     max_size = 1280
+     h, w = img.shape[:2]
+     scale = min(max_size/h, max_size/w)
+     img = cv2.resize(img, (int(w*scale), int(h*scale)),
+                      interpolation=cv2.INTER_LINEAR)
+     return img

+ # --------------------------
+ # Detection Function
+ # --------------------------
  def detect_leaves(image):
+     try:
+         start_time = time.time()
+
+         # Step 1: Preprocessing
+         preprocess_start = time.time()
+         img = preprocess_image(image)
+         print(f"Preprocessing time: {time.time() - preprocess_start:.2f}s")
+
+         # Step 2: Prediction
+         predict_start = time.time()
+         results = model.predict(
+             source=img,
+             verbose=False,
+             stream=False,   # Disable streaming mode
+             augment=False   # Disable TTA for speed
+         )
+         print(f"Prediction time: {time.time() - predict_start:.2f}s")
+
+         # Step 3: Postprocessing
+         postprocess_start = time.time()
+         num_leaves = len(results[0].boxes)
+         rendered_img = render_result(model=model, image=img, result=results[0])
+         rendered_img = cv2.cvtColor(rendered_img, cv2.COLOR_BGR2RGB)
+         print(f"Postprocessing time: {time.time() - postprocess_start:.2f}s")
+
+         total_time = time.time() - start_time
+         print(f"\nTotal processing time: {total_time:.2f}s")
+         print(f"Detected leaves: {num_leaves}")
+         print("-"*50)
+
+         return rendered_img, num_leaves

+     except Exception as e:
+         print(f"Error processing image: {str(e)}")
+         return None, 0
+
+ # --------------------------
+ # Gradio Interface
+ # --------------------------
+ with gr.Blocks(title="Leaf Detection", theme=gr.themes.Soft()) as demo:
+     gr.Markdown("# 🍃 Real-Time Plant Leaf Detection")
+     gr.Markdown("Upload a plant image to analyze leaf count and health")
+
+     with gr.Row():
+         input_image = gr.Image(label="Input Image", type="numpy")
+         output_image = gr.Image(label="Detection Results", width=600)

+     with gr.Row():
+         leaf_count = gr.Number(label="Detected Leaves", precision=0)
+         process_btn = gr.Button("Analyze Image", variant="primary")

+     progress = gr.Textbox(label="Processing Status", visible=True)
+
+     process_btn.click(
+         fn=detect_leaves,
+         inputs=[input_image],
+         outputs=[output_image, leaf_count]
+     )

  if __name__ == "__main__":
+     demo.launch(
          server_port=7860,
+         show_error=True,
+         share=False
      )
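
Two hedged follow-up sketches, not part of the commit itself.

First, the removed interface carried comments about queue support and the dropped enable_queue launch parameter, while the new Blocks app never enables a queue. In recent Gradio releases queuing is configured on the Blocks object before launch(). A minimal sketch, assuming Gradio 4.x (the max_size value is illustrative):

    # Hypothetical variant of the __main__ block: enable request queuing so
    # long-running detections don't block concurrent users.
    if __name__ == "__main__":
        demo.queue(max_size=20)   # hold up to 20 pending requests
        demo.launch(server_port=7860, show_error=True, share=False)

Second, the ultralyticsplus examples show render_result returning a PIL image. If that holds here, passing it straight to cv2.cvtColor inside detect_leaves would fail, since OpenCV expects a NumPy array. A sketch of the conversion, under that assumption:

    import numpy as np

    # Assumption: rendered_img is a PIL.Image; convert to an array before any
    # cv2 call (check channel order against the image that was passed in).
    rendered_arr = np.array(rendered_img)
    rendered_rgb = cv2.cvtColor(rendered_arr, cv2.COLOR_BGR2RGB)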