MHamdan commited on
Commit
37d1515
·
verified ·
1 Parent(s): 4596ed0

Upload tool

Browse files
Files changed (3) hide show
  1. app.py +6 -0
  2. requirements.txt +4 -0
  3. tool.py +137 -0
app.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
# Entry point: expose the web-analyzer tool as an auto-generated Gradio demo.
from smolagents import launch_gradio_demo
from tool import SimpleTool

# Instantiate the tool defined in tool.py ...
tool = SimpleTool()

# ... and let smolagents build and launch a Gradio UI from its
# declared inputs/output_type metadata.
launch_gradio_demo(tool)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ bs4
2
+ requests
3
+ transformers
4
+ smolagents
tool.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from smolagents import Tool
2
+ from typing import Any, Optional
3
+
4
class SimpleTool(Tool):
    """smolagents Tool: fetch a webpage and run one of four AI analyses on it."""

    name = "web_analyzer"
    description = "Advanced web content analyzer with AI-powered analysis."
    inputs = {"url":{"type":"string","description":"The webpage URL to analyze."},"mode":{"type":"string","nullable":True,"description":"Analysis mode ('analyze', 'summarize', 'sentiment', 'topics')."}}
    output_type = "string"

    # Star-rating labels indexed by (stars - 1).  Shared by the 'analyze'
    # and 'sentiment' modes so the two branches can never drift apart
    # (the original duplicated this list verbatim in both places).
    _SENTIMENT_LABELS = ["Very Negative", "Negative", "Neutral", "Positive", "Very Positive"]

    # Modes accepted by forward(); anything else is rejected up front.
    _VALID_MODES = ("analyze", "summarize", "sentiment", "topics")

    def forward(self, url: str, mode: str = "analyze") -> str:
        """Advanced web content analyzer with AI-powered analysis.

        Args:
            url: The webpage URL to analyze.
            mode: Analysis mode ('analyze', 'summarize', 'sentiment', 'topics').

        Returns:
            str: AI-enhanced analysis of web content, or a string starting
            with "Error" when the mode is unknown or processing fails.
        """
        # Validate the mode before doing any slow network or model work
        # (the original only detected a bad mode after fetching the page).
        if mode not in self._VALID_MODES:
            return f"Error: Unknown mode '{mode}'"

        try:
            title, text_content, soup = self._fetch_page(url)

            if mode == "analyze":
                return self._analyze(title, text_content)
            if mode == "summarize":
                return self._summarize(title, text_content)
            if mode == "sentiment":
                return self._sentiment(title, soup)
            return self._topics(title, text_content)

        except Exception as e:
            # Tool contract: report failures as a string, never raise.
            return f"Error processing webpage: {str(e)}"

    def _fetch_page(self, url: str):
        """Fetch *url* and return ``(title, cleaned_text, soup)``.

        Raises whatever ``requests`` raises on network/HTTP errors; the
        caller (``forward``) converts that into an error string.
        """
        import re
        import requests
        from bs4 import BeautifulSoup

        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')
        # Strip non-content tags so get_text() only returns visible copy.
        for tag in soup(['script', 'style', 'meta']):
            tag.decompose()

        # Bug fix: an empty <title> element has .string == None even though
        # soup.title is truthy, which used to crash re.sub() with a
        # TypeError; treat that case as a missing title instead.
        title = soup.title.string if soup.title and soup.title.string else "No title found"
        title = re.sub(r'\s+', ' ', title).strip()
        text_content = re.sub(r'\s+', ' ', soup.get_text()).strip()
        return title, text_content, soup

    def _analyze(self, title: str, text_content: str) -> str:
        """'analyze' mode: one-shot summary plus overall page sentiment."""
        from transformers import pipeline

        summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
        classifier = pipeline("text-classification",
                              model="nlptown/bert-base-multilingual-uncased-sentiment")

        # Truncate to the models' input budgets: 1024 chars for the
        # summarizer, 512 for the sentiment classifier.
        summary = summarizer(text_content[:1024], max_length=100, min_length=30)[0]['summary_text']
        sentiment = classifier(text_content[:512])[0]
        # Label looks like "5 stars"; its leading digit is the star rating.
        sent_score = int(sentiment['label'][0])
        sent_text = self._SENTIMENT_LABELS[sent_score - 1]

        return f"""📊 Content Analysis

Title: {title}
Length: {len(text_content)} characters

📝 AI Summary:
{summary}

😊 Overall Sentiment: {sent_text} ({sent_score}/5)"""

    def _summarize(self, title: str, text_content: str) -> str:
        """'summarize' mode: summarize up to three 1024-char chunks."""
        from transformers import pipeline

        summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

        chunk_size = 1024
        summaries = []
        # Cap at 3072 chars (three chunks); skip fragments too short for
        # the model to produce a meaningful summary.
        for i in range(0, min(len(text_content), 3072), chunk_size):
            chunk = text_content[i:i + chunk_size]
            if len(chunk) > 100:
                summaries.append(
                    summarizer(chunk, max_length=100, min_length=30)[0]['summary_text'])

        return f"""📝 Multi-Section Summary

Title: {title}

{' '.join(summaries)}"""

    def _sentiment(self, title: str, soup) -> str:
        """'sentiment' mode: star-rate the first 5 substantial paragraphs."""
        from transformers import pipeline

        classifier = pipeline("text-classification",
                              model="nlptown/bert-base-multilingual-uncased-sentiment")

        sentiments = ""
        count = 0
        for p in soup.find_all('p'):
            text = p.text.strip()
            # Skip fragments too short to carry a meaningful sentiment.
            if len(text) > 50:
                result = classifier(text[:512])[0]
                score = int(result['label'][0])
                mood = self._SENTIMENT_LABELS[score - 1]
                sentiments += f"\nSection {count + 1}: {mood} ({score}/5 stars)"
                count += 1
                if count >= 5:
                    break

        return f"""😊 Sentiment Analysis

Title: {title}
{sentiments}"""

    def _topics(self, title: str, text_content: str) -> str:
        """'topics' mode: zero-shot classification against a fixed topic set."""
        from transformers import pipeline

        classifier = pipeline("zero-shot-classification",
                              model="facebook/bart-large-mnli")

        topics = [
            "Technology", "AI/ML", "Business", "Science",
            "Innovation", "Research", "Industry News"
        ]
        results = classifier(text_content[:512], topics)

        topic_analysis = "Detected Topics:\n"
        for topic, score in zip(results['labels'], results['scores']):
            if score > 0.1:  # drop low-confidence labels from the report
                topic_analysis += f"- {topic}: {score*100:.1f}% confidence\n"

        return f"""🎯 Topic Classification

Title: {title}

{topic_analysis}"""