Cloud110702 committed on
Commit 022342b · verified · 1 Parent(s): c9f11bb

Upload 2 files

Files changed (3)
  1. .gitattributes +1 -0
  2. Image_classify.keras +3 -0
  3. app.py +84 -0
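Note that app.py (below) loads a model.tflite file that is not part of this commit; only the Keras model Image_classify.keras is uploaded. A minimal, hedged sketch of how that TFLite file could be produced offline from the uploaded Keras model with TensorFlow's standard converter (assuming the .keras file loads cleanly with tf.keras):

import tensorflow as tf

# Assumption: model.tflite is generated separately from the uploaded Keras model,
# since app.py expects it but this commit does not include it.
model = tf.keras.models.load_model("Image_classify.keras")
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()

with open("model.tflite", "wb") as f:
    f.write(tflite_model)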
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ Image_classify.keras filter=lfs diff=lfs merge=lfs -text
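The added rule routes Image_classify.keras through Git LFS, so the repository itself stores only a small text pointer (shown in the next file) while the full ~11 MB model lives in LFS storage. A minimal sketch, assuming a local checkout, of detecting whether a checked-out copy is still a pointer stub rather than the real model:

LFS_SPEC = "version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    # An LFS pointer is a tiny text file whose first line names the spec.
    try:
        with open(path, "rb") as f:
            first_line = f.readline(200).decode("utf-8", errors="ignore").strip()
    except OSError:
        return False
    return first_line == LFS_SPEC

print(is_lfs_pointer("Image_classify.keras"))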
Image_classify.keras ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dc762a4e7e6c040f55e2d0c34b74ae05b30d73b046469c2d5a58e403d1f0b12
+ size 11614324
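The pointer records a SHA-256 object id and a byte size, which makes it easy to check that a fully downloaded copy of the model matches what this commit references. A small sketch, assuming the real file sits at the path below:

import hashlib
import os

EXPECTED_OID = "5dc762a4e7e6c040f55e2d0c34b74ae05b30d73b046469c2d5a58e403d1f0b12"
EXPECTED_SIZE = 11614324  # bytes, from the pointer above

def verify_lfs_object(path: str) -> bool:
    # Compare on-disk size and SHA-256 digest against the LFS pointer.
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == EXPECTED_OID

print(verify_lfs_object("Image_classify.keras"))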
app.py ADDED
@@ -0,0 +1,84 @@
+ from flask import Flask, request, jsonify
+ import numpy as np
+ import tensorflow as tf
+ from tensorflow.lite.python.interpreter import Interpreter
+ import os
+ import google.generativeai as genai
+
+ app = Flask(__name__)
+
+ # Load the TensorFlow Lite model
+ interpreter = Interpreter(model_path="model.tflite")
+ interpreter.allocate_tensors()
+
+ # Get input and output details
+ input_details = interpreter.get_input_details()
+ output_details = interpreter.get_output_details()
+
+ # Define categories
+ data_cat = ['disposable cups', 'paper', 'plastic bottle']
+ img_height, img_width = 224, 224
+
+ # Configure Gemini API
+ GEMINI_API_KEY = os.getenv('GEMINI_API_KEY', 'AIzaSyBx0A7BA-nKVZOiVn39JXzdGKgeGQqwAFg')
+ genai.configure(api_key=GEMINI_API_KEY)
+
+ # Initialize Gemini model
+ gemini_model = genai.GenerativeModel('gemini-pro')
+
+ @app.route('/predict', methods=['POST'])
+ def predict():
+     if 'image' not in request.files:
+         return jsonify({"error": "No image uploaded"}), 400
+
+     file = request.files['image']
+     try:
+         # Preprocess the image
+         img = tf.image.decode_image(file.read(), channels=3)
+         img = tf.image.resize(img, [img_height, img_width])
+         img_bat = np.expand_dims(img, 0).astype(np.float32)
+
+         # Set input tensor
+         interpreter.set_tensor(input_details[0]['index'], img_bat)
+
+         # Run inference
+         interpreter.invoke()
+
+         # Get the result
+         output_data = interpreter.get_tensor(output_details[0]['index'])
+         predicted_class = data_cat[np.argmax(output_data)]
+         confidence = float(np.max(output_data) * 100)  # cast to a plain float so jsonify can serialize it
+
+         # Generate sustainability insights with Gemini API
+         prompt = f"""
+         You are a sustainability-focused AI. Analyze the {predicted_class} (solid dry waste)
+         and generate the top three innovative, eco-friendly recommendations for repurposing it.
+         Each recommendation should:
+         - Provide a title
+         - Be practical and easy to implement
+         - Be environmentally beneficial
+         - Include a one or two-sentence explanation
+         Format each recommendation with a clear title followed by the explanation on a new line.
+         """
+
+         try:
+             # Generate insights with the Gemini API
+             response = gemini_model.generate_content(prompt)
+             insights = response.text.strip()  # response.text holds the generated text
+
+         except Exception as e:
+             insights = f"Error generating insights: {str(e)}"
+             print(f"Gemini API error: {str(e)}")  # For debugging
+
+         # Prepare the response
+         return jsonify({
+             "class": predicted_class,
+             "confidence": confidence,
+             "insights": insights
+         })
+
+     except Exception as e:
+         return jsonify({"error": str(e)}), 500
+
+ if __name__ == "__main__":
+     app.run(debug=True)
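For reference, a minimal client sketch for the /predict route above, assuming the Flask app is running locally on its default port (5000) and that test_image.jpg stands in for any local image path:

import requests

with open("test_image.jpg", "rb") as f:
    resp = requests.post(
        "http://127.0.0.1:5000/predict",
        files={"image": f},  # field name must match request.files['image']
    )

result = resp.json()
print(result["class"], result["confidence"])
print(result["insights"])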