ogirald0 committed
Commit 3b7a61c · Parent: 6d621bc

Cleanup: Keep only text analyzer

Files changed (7):
  1. README-HF.md +25 -12
  2. app.py +28 -29
  3. app_hf.py +0 -16
  4. requirements-full.txt +0 -10
  5. requirements.txt +2 -1
  6. test_api.py +0 -60
  7. test_client.py +0 -130
README-HF.md CHANGED
@@ -1,31 +1,44 @@
- # Text Classification Model Demo
+ # Oversai Models Hub

- This is a Gradio interface for text classification using a BERT-based model. The model can classify text into predefined categories.
+ A comprehensive hub hosting multiple AI models with easy-to-use Gradio interfaces. Access various machine learning models through a single, unified interface.

- ## Model Details
+ ## Available Models

+ ### 1. Text Classification
  - Base Model: prajjwal1/bert-tiny
  - Task: Text Classification
- - Interface: Gradio
+ - Interface: Simple text input with classification output
+
+ ### 2. [Future Models]
+ - Additional models will be added here
+ - Each with its own specialized interface
+ - Easy to extend and add new models

  ## Usage

- 1. Enter your text in the input textbox
- 2. Click submit
- 3. View the classification results
+ 1. Select the model tab you want to use
+ 2. Follow the specific input instructions for each model
+ 3. Get instant results through the interface

  ## Technical Details

  - Python 3.9+
  - Key Dependencies:
- - gradio
- - transformers
- - torch
- - numpy
+ - gradio >= 4.19.2
+ - transformers >= 4.38.2
+ - torch >= 2.2.1
+ - numpy >= 1.26.4

  ## Deployment

- This model is deployed using Hugging Face Spaces with a Gradio interface.
+ This model hub is deployed using Hugging Face Spaces with a Gradio interface. All models are accessible through a single, unified interface with multiple tabs.
+
+ ## Adding New Models
+
+ The hub is designed to be easily extensible. New models can be added by:
+ 1. Implementing the model in `src/models/`
+ 2. Creating a Gradio interface for it
+ 3. Adding it to the tabbed interface in `app_hf.py`

  ## License
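The "Adding New Models" recipe in the new README maps to very little code. Below is a minimal sketch of step 1 and step 2, assuming a hypothetical summarization model; the file name `src/models/summarization.py`, the class name, and the checkpoint are illustrative and not part of this commit. Only the `create_interface()` convention comes from the repo.

```python
# Hypothetical src/models/summarization.py -- illustrative only, not part of
# this commit. It follows the create_interface() convention that app.py uses.
import gradio as gr
from transformers import pipeline


class SummarizationModel:
    def __init__(self, model_name: str = "sshleifer/distilbart-cnn-12-6"):
        # Step 1: wrap a model in a class; any seq2seq summarization
        # checkpoint works here.
        self.summarizer = pipeline("summarization", model=model_name)

    def summarize(self, text: str) -> str:
        # The pipeline returns a list of dicts with a "summary_text" key.
        return self.summarizer(text)[0]["summary_text"]

    def create_interface(self) -> gr.Interface:
        # Step 2: give the model its own Gradio interface.
        return gr.Interface(
            fn=self.summarize,
            inputs=gr.Textbox(lines=8, label="Text to summarize"),
            outputs=gr.Textbox(label="Summary"),
        )
```

Step 3 then amounts to appending `SummarizationModel().create_interface()` to `interface_list` and a matching entry to `tab_names` in the `gr.TabbedInterface` call shown in the `app.py` diff below.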
 
app.py CHANGED
@@ -7,33 +7,32 @@ logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

  def create_demo():
-     try:
-         # Initialize the model
-         logger.info("Initializing Text Classification model...")
-         model = TextClassificationModel()
-
-         # Create the interface
-         logger.info("Creating Gradio interface...")
-         demo = model.create_interface()
-
-         logger.info("Gradio interface created successfully")
-         return demo
-
-     except Exception as e:
-         logger.error(f"Error creating demo: {str(e)}")
-         raise
+     # Initialize models
+     logger.info("Initializing models...")
+     text_classification_model = TextClassificationModel()
+     # Add more models here as needed
+
+     # Create the interfaces
+     logger.info("Creating Gradio interfaces...")
+     text_classification_demo = text_classification_model.create_interface()
+     # Add more model interfaces here
+
+     # Create a tabbed interface
+     demo = gr.TabbedInterface(
+         interface_list=[
+             text_classification_demo,
+             # Add more interfaces here
+         ],
+         tab_names=[
+             "Text Classification",
+             # Add more tab names here
+         ],
+         title="Oversai Models Hub",
+         description="A collection of AI models for various tasks"
+     )
+
+     return demo

- if __name__ == "__main__":
-     try:
-         logger.info("Starting the application...")
-         demo = create_demo()
-         logger.info("Launching the interface...")
-         demo.launch(
-             server_name="0.0.0.0",  # Allow external connections
-             server_port=7860,  # Specify port explicitly
-             share=True  # Enable public URL
-         )
-         logger.info("Interface launched successfully")
-     except Exception as e:
-         logger.error(f"Application error: {str(e)}")
-         raise
+ # Create and launch the demo
+ demo = create_demo()
+ demo.launch()
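Both `app.py` and the deleted `app_hf.py` call `TextClassificationModel.create_interface()`, but `src/models/text_classification.py` itself is outside this diff. A plausible minimal sketch, assuming the class wraps `prajjwal1/bert-tiny` through the `transformers` pipeline; only the class and method names are confirmed by the diff, everything else is an assumption.

```python
# Sketch of what src/models/text_classification.py might contain; the real
# implementation is not shown in this commit.
import gradio as gr
from transformers import pipeline


class TextClassificationModel:
    def __init__(self, model_name: str = "prajjwal1/bert-tiny"):
        # Note: prajjwal1/bert-tiny ships without a fine-tuned classification
        # head, so labels stay generic (LABEL_0, LABEL_1) unless a fine-tuned
        # checkpoint is substituted.
        self.classifier = pipeline("text-classification", model=model_name)

    def classify(self, text: str) -> dict:
        # top_k=None returns a score for every label; gr.Label renders the
        # resulting {label: score} dict as confidence bars.
        results = self.classifier(text, top_k=None)
        return {r["label"]: float(r["score"]) for r in results}

    def create_interface(self) -> gr.Interface:
        return gr.Interface(
            fn=self.classify,
            inputs=gr.Textbox(label="Text to classify"),
            outputs=gr.Label(label="Classification"),
        )
```
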
app_hf.py DELETED
@@ -1,16 +0,0 @@
- import gradio as gr
- from src.models.text_classification import TextClassificationModel
- import logging
-
- # Configure logging
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- # Initialize the model
- model = TextClassificationModel()
-
- # Create the interface
- demo = model.create_interface()
-
- # Launch the interface (Hugging Face will handle the server configuration)
- demo.launch()

requirements-full.txt DELETED
@@ -1,10 +0,0 @@
- gradio>=4.19.2
- fastapi>=0.110.0
- uvicorn>=0.27.1
- python-dotenv>=1.0.1
- pydantic>=2.6.3
- numpy>=1.26.4
- torch>=2.2.1
- transformers>=4.38.2
- pillow>=10.2.0
- python-multipart>=0.0.9

requirements.txt CHANGED
@@ -6,4 +6,5 @@ fastapi>=0.110.0
  uvicorn>=0.27.1
  pydantic>=2.6.3
  pydantic-settings>=2.2.1
- python-dotenv>=1.0.1
+ python-dotenv>=1.0.1
+ huggingface-hub>=0.28.1

test_api.py DELETED
@@ -1,60 +0,0 @@
- import requests
- import time
- import logging
-
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- def test_api():
-     # Wait a bit for the model to load
-     logger.info("Waiting for the server to start...")
-     time.sleep(10)
-
-     base_url = "http://127.0.0.1:7860"
-
-     # Test single prediction
-     test_texts = [
-         "This is amazing! I love it!",
-         "This is terrible, I hate it.",
-         "It's okay, nothing special."
-     ]
-
-     logger.info("\nTesting single predictions:")
-     for text in test_texts:
-         try:
-             logger.info(f"\nTesting with text: {text}")
-             response = requests.post(
-                 f"{base_url}/api/predict",
-                 json={"text": text}
-             )
-
-             if response.status_code == 200:
-                 result = response.json()
-                 logger.info(f"Result: {result}")
-             else:
-                 logger.error(f"Error: {response.status_code} - {response.text}")
-
-         except Exception as e:
-             logger.error(f"Request failed: {str(e)}")
-
-         time.sleep(1)  # Small delay between requests
-
-     # Test batch prediction
-     logger.info("\nTesting batch prediction:")
-     try:
-         response = requests.post(
-             f"{base_url}/api/predict_batch",
-             json={"texts": test_texts}
-         )
-
-         if response.status_code == 200:
-             result = response.json()
-             logger.info(f"Batch results: {result}")
-         else:
-             logger.error(f"Batch Error: {response.status_code} - {response.text}")
-
-     except Exception as e:
-         logger.error(f"Batch request failed: {str(e)}")
-
- if __name__ == "__main__":
-     test_api()
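The deleted script posted raw JSON to `/api/predict` and `/api/predict_batch`, which only works while those custom routes exist. If equivalent smoke-testing is still wanted against the Gradio app alone, `gradio_client` is the supported route. A minimal sketch, assuming the interface exposes the default `/predict` endpoint (that endpoint name is an assumption, not confirmed by this repo):

```python
# Minimal smoke test via gradio_client instead of raw HTTP. The endpoint
# name "/predict" is the Gradio default and an assumption here.
from gradio_client import Client

client = Client("http://127.0.0.1:7860")
for text in [
    "This is amazing! I love it!",
    "This is terrible, I hate it.",
    "It's okay, nothing special.",
]:
    # predict() blocks until the app returns a result.
    result = client.predict(text, api_name="/predict")
    print(f"{text!r} -> {result}")
```
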
test_client.py DELETED
@@ -1,130 +0,0 @@
- import requests
- import json
- import time
- import logging
- from typing import Dict, List, Any, Optional
-
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- class MLModelsClient:
-     """Client for interacting with the ML Models API."""
-
-     def __init__(self, base_url: str = "http://localhost:8000"):
-         self.base_url = base_url
-
-     def list_models(self) -> List[Dict]:
-         """List all available models."""
-         try:
-             logger.info("Fetching available models...")
-             response = requests.get(f"{self.base_url}/api/models")
-             response.raise_for_status()
-             models = response.json()
-             logger.info(f"Found {len(models)} models")
-             return models
-         except Exception as e:
-             logger.error(f"Error listing models: {str(e)}")
-             raise
-
-     def get_model_info(self, model_id: str) -> Dict:
-         """Get information about a specific model."""
-         try:
-             logger.info(f"Fetching info for model {model_id}...")
-             response = requests.get(f"{self.base_url}/api/models/{model_id}")
-             response.raise_for_status()
-             return response.json()
-         except Exception as e:
-             logger.error(f"Error getting model info: {str(e)}")
-             raise
-
-     def get_model_status(self, model_id: str) -> str:
-         """Get the current status of a model."""
-         try:
-             logger.info(f"Fetching status for model {model_id}...")
-             response = requests.get(f"{self.base_url}/api/models/{model_id}/status")
-             response.raise_for_status()
-             return response.json()["status"]
-         except Exception as e:
-             logger.error(f"Error getting model status: {str(e)}")
-             raise
-
-     def load_model(self, model_id: str) -> str:
-         """Load a model into memory."""
-         try:
-             logger.info(f"Loading model {model_id}...")
-             response = requests.post(f"{self.base_url}/api/models/{model_id}/load")
-             response.raise_for_status()
-             return response.json()["status"]
-         except Exception as e:
-             logger.error(f"Error loading model: {str(e)}")
-             raise
-
-     def predict(self, model_id: str, text: str) -> Dict:
-         """Make a prediction using a model."""
-         try:
-             logger.info(f"Making prediction with model {model_id}...")
-             model_info = self.get_model_info(model_id)
-             response = requests.post(
-                 f"{self.base_url}{model_info.get('api_path')}/predict",
-                 json={"text": text}
-             )
-             response.raise_for_status()
-             return response.json()
-         except Exception as e:
-             logger.error(f"Error making prediction: {str(e)}")
-             raise
-
- def test_model_workflow():
-     """Test the complete model workflow."""
-     client = MLModelsClient()
-
-     try:
-         # 1. List available models
-         logger.info("\n1. Testing model listing...")
-         models = client.list_models()
-         for model in models:
-             logger.info(f"Found model: {json.dumps(model, indent=2)}")
-
-         if not models:
-             logger.error("No models found!")
-             return
-
-         # Use the first model for testing
-         model_id = models[0]["id"]
-
-         # 2. Get model information
-         logger.info(f"\n2. Testing model info retrieval for {model_id}...")
-         model_info = client.get_model_info(model_id)
-         logger.info(f"Model info: {json.dumps(model_info, indent=2)}")
-
-         # 3. Get model status
-         logger.info(f"\n3. Testing model status retrieval for {model_id}...")
-         status = client.get_model_status(model_id)
-         logger.info(f"Model status: {status}")
-
-         # 4. Load the model
-         logger.info(f"\n4. Testing model loading for {model_id}...")
-         load_status = client.load_model(model_id)
-         logger.info(f"Load status: {load_status}")
-
-         # 5. Test predictions
-         test_texts = [
-             "This is amazing! I really love it!",
-             "This is terrible, I hate it.",
-             "It's okay, nothing special."
-         ]
-
-         logger.info(f"\n5. Testing predictions for {model_id}...")
-         for text in test_texts:
-             logger.info(f"\nPredicting for text: {text}")
-             result = client.predict(model_id, text)
-             logger.info(f"Prediction: {json.dumps(result, indent=2)}")
-             time.sleep(1)  # Small delay between predictions
-
-     except Exception as e:
-         logger.error(f"Test workflow failed: {str(e)}")
-         raise
-
- if __name__ == "__main__":
-     logger.info("Starting model testing workflow...")
-     test_model_workflow()
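For reference, the removed client was driven end-to-end by `test_model_workflow()` above; standalone usage reduces to a few calls. A sketch against the same REST routes the class wraps, with the server URL taken from the deleted code's default (the model id is whatever the server reports first):

```python
# Standalone usage of the removed MLModelsClient, kept here as a sketch.
client = MLModelsClient(base_url="http://localhost:8000")
models = client.list_models()            # GET /api/models
model_id = models[0]["id"]
client.load_model(model_id)              # POST /api/models/{id}/load
print(client.predict(model_id, "This is amazing!"))
```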