Update app.py
app.py CHANGED
@@ -1,199 +1,120 @@
 import streamlit as st
-
 import requests
-import
-
-from googleapiclient.discovery import build  # For YouTube API
-from datetime import datetime, timedelta
-import daal4py as d4p  # Intel DAAL
 import numpy as np
- ...
-    return
-
-# Fetch
-def
- ...
-    query = f"{stock_symbol} stock"
-    url = f"https://api.twitter.com/2/tweets/search/recent?query={query}&max_results={count}&tweet.fields=created_at,text"
-    response = requests.get(url, headers=headers)
-    tweets = response.json().get("data", [])
-    return [tweet["text"] for tweet in tweets]
-
-# YouTube API function to fetch recent comments
-def fetch_youtube_comments(stock_symbol, max_results=5):
-    youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
-    request = youtube.search().list(
-        q=f"{stock_symbol} stock",
-        part='snippet',
-        type='video',
-        maxResults=5
-    )
-    response = request.execute()
-    video_ids = [item['id']['videoId'] for item in response['items']]
-
     comments = []
     for video_id in video_ids:
-        comment_request = youtube.commentThreads().list(
-            part='snippet',
-            videoId=video_id,
-            maxResults=max_results
-        )
-        comment_response = comment_request.execute()
         for comment in comment_response['items']:
-            comment_text = comment['snippet']['topLevelComment']['snippet']['
             comments.append(comment_text)
-
-    return comments
-
-# Perform sentiment analysis on social media comments/tweets
-def perform_sentiment_analysis(comments):
-    sentiment_model = load_sentiment_model()
-    return sentiment_model(comments)
-
-# Compute Moving Average using Intel oneAPI AI Analytics Toolkit (daal4py)
-def compute_moving_average(prices, window=5):
-    price_array = np.array(prices, dtype=np.float64).reshape(-1, 1)
-
-    # Initialize Intel DAAL low-order moments algorithm (for moving average)
-    algorithm = d4p.low_order_moments()
-
-    moving_averages = []
-    for i in range(len(price_array) - window + 1):
-        window_data = price_array[i:i + window]
-        result = algorithm.compute(window_data)
-        moving_averages.append(result.mean[0])
-
-    return moving_averages
-
-# Technical analysis function using Intel oneAPI
-def technical_analysis(symbol):
-    data = fetch_stock_data(symbol)
 
- ...
-    return
 
-#
 def main():
-    st.title("Stock Analysis App")
- ...
-    try:
-        stock_data = fetch_stock_data(company_symbol)
-        fundamental_data = fetch_fundamental_data(company_symbol)
-
-        if stock_data:
-            st.subheader("Asset Overview")
-            st.json(stock_data)
-
-            with st.expander("Technical Analysis (Intel oneAPI)"):
-                st.subheader("Technical Analysis")
-                tech_analysis = technical_analysis(company_symbol)
-                st.write(tech_analysis)
-
-            with st.expander("Fundamental Analysis"):
-                st.subheader("Fundamental Analysis")
-                if fundamental_data:
-                    st.write(f"**Company Name**: {fundamental_data.get('Name', 'N/A')}")
-                    st.write(f"**Sector**: {fundamental_data.get('Sector', 'N/A')}")
-                    st.write(f"**Industry**: {fundamental_data.get('Industry', 'N/A')}")
-                    st.write(f"**Market Capitalization**: {fundamental_data.get('MarketCapitalization', 'N/A')}")
-                    st.write(f"**Earnings per Share (EPS)**: {fundamental_data.get('EPS', 'N/A')}")
-                    st.write(f"**Price to Earnings Ratio (P/E)**: {fundamental_data.get('PERatio', 'N/A')}")
-                    st.write(f"**Dividend Yield**: {fundamental_data.get('DividendYield', 'N/A')}")
-                    st.write(f"**Profit Margin**: {fundamental_data.get('ProfitMargin', 'N/A')}")
-                else:
-                    st.write("No fundamental data available.")
-
-            with st.expander("Sentiment Analysis"):
-                st.subheader("Sentiment Analysis")
-                st.write("Fetching views from Twitter and YouTube...")
-
-                # Fetch tweets and comments
-                tweets = fetch_tweets(company_symbol)
-                youtube_comments = fetch_youtube_comments(company_symbol)
-
-                st.write("**Tweets**:")
-                for tweet in tweets:
-                    st.write(f"- {tweet}")
-
-                st.write("**YouTube Comments**:")
-                for comment in youtube_comments:
-                    st.write(f"- {comment}")
-
-                # Perform sentiment analysis on collected data
-                all_comments = tweets + youtube_comments
-                if all_comments:
-                    sentiment_results = perform_sentiment_analysis(all_comments)
-                    st.write("Sentiment Analysis Results:")
-                    for sentiment in sentiment_results:
-                        st.write(sentiment)
-
-            with st.expander("Recommendation"):
-                st.subheader("Recommendation")
-                tokenizer, llama_model = load_llama_model()
-                stock_recommendation = llama_model.generate(
-                    tokenizer(company_symbol, return_tensors="pt").input_ids,
-                    max_new_tokens=50
-                )
-                recommendation_text = tokenizer.decode(stock_recommendation[0], skip_special_tokens=True)
-                st.write(recommendation_text)
-        else:
-            st.error(f"No data available for the symbol entered.")
 
- ...
 
 if __name__ == "__main__":
     main()
 import streamlit as st
+import yfinance as yf
+import tweepy
 import requests
+from googleapiclient.discovery import build
+from transformers import pipeline
 import numpy as np
+import daal4py as d4p
+
+# Twitter API setup
+def twitter_api_setup():
+    consumer_key = 'YOUR_TWITTER_API_KEY'
+    consumer_secret = 'YOUR_TWITTER_API_SECRET'
+    access_token = 'YOUR_TWITTER_ACCESS_TOKEN'
+    access_token_secret = 'YOUR_TWITTER_ACCESS_SECRET'
+    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
+    auth.set_access_token(access_token, access_token_secret)
+    api = tweepy.API(auth)
+    return api
+
+# YouTube API setup
+def youtube_api_setup():
+    api_key = 'YOUR_YOUTUBE_API_KEY'
+    youtube = build('youtube', 'v3', developerKey=api_key)
+    return youtube
+
+# Fetch Twitter sentiment
+def fetch_twitter_sentiment(symbol, api, sentiment_model):
+    tweets = api.search(q=symbol, lang='en', count=100, tweet_mode='extended')
+    tweet_texts = [tweet.full_text for tweet in tweets]
+    sentiments = sentiment_model(tweet_texts)
+    sentiment_scores = [s['label'] for s in sentiments]
+    positive = sentiment_scores.count('POSITIVE')
+    negative = sentiment_scores.count('NEGATIVE')
+    return positive, negative
+
+# Fetch YouTube sentiment
+def fetch_youtube_sentiment(symbol, youtube, sentiment_model):
+    search_response = youtube.search().list(q=symbol, part='snippet', maxResults=10).execute()
+    video_ids = [item['id']['videoId'] for item in search_response['items'] if 'videoId' in item['id']]
+
     comments = []
     for video_id in video_ids:
+        comment_response = youtube.commentThreads().list(part='snippet', videoId=video_id, maxResults=50).execute()
         for comment in comment_response['items']:
+            comment_text = comment['snippet']['topLevelComment']['snippet']['textOriginal']
             comments.append(comment_text)
 
+    sentiments = sentiment_model(comments)
+    sentiment_scores = [s['label'] for s in sentiments]
+    positive = sentiment_scores.count('POSITIVE')
+    negative = sentiment_scores.count('NEGATIVE')
+    return positive, negative
+
+# Moving Average (technical analysis)
+# daal4py has no dedicated moving-average algorithm, so the mean of each sliding
+# window is computed with d4p.low_order_moments, as in the previous version.
+def calculate_moving_average(stock_data, window_size):
+    prices = stock_data['Close'].to_numpy().reshape(-1, 1)
+    algorithm = d4p.low_order_moments()
+    moving_averages = []
+    for i in range(len(prices) - window_size + 1):
+        window = prices[i:i + window_size]
+        moving_averages.append(algorithm.compute(window).mean[0, 0])
+    return np.array(moving_averages)
+
+# Fetch stock data using Yahoo Finance
+def fetch_stock_data(symbol):
+    stock = yf.Ticker(symbol)
+    stock_data = stock.history(period="1y")
+    return stock_data
 
+# Main app
 def main():
+    st.title("Stock Analysis App")
+
+    # Input for stock symbol
+    stock_symbol = st.text_input("Enter Stock Symbol (e.g., AAPL, TSLA):", "AAPL")
+
+    if st.button("Analyze"):
+        # Fetch stock data
+        stock_data = fetch_stock_data(stock_symbol)
+
+        # Display stock data overview
+        st.subheader(f"Stock Overview - {stock_symbol}")
+        st.write(stock_data.tail())
 
+        # Sentiment analysis
+        sentiment_model = pipeline("sentiment-analysis")
+
+        # Twitter Sentiment
+        st.subheader("Twitter Sentiment Analysis")
+        api = twitter_api_setup()
+        positive_twitter, negative_twitter = fetch_twitter_sentiment(stock_symbol, api, sentiment_model)
+        st.write(f"Positive Tweets: {positive_twitter}, Negative Tweets: {negative_twitter}")
+
+        # YouTube Sentiment
+        st.subheader("YouTube Sentiment Analysis")
+        youtube = youtube_api_setup()
+        positive_youtube, negative_youtube = fetch_youtube_sentiment(stock_symbol, youtube, sentiment_model)
+        st.write(f"Positive Comments: {positive_youtube}, Negative Comments: {negative_youtube}")
+
+        # Technical analysis (Moving Average)
+        st.subheader("Technical Analysis (Moving Average)")
+        window_size = st.slider("Select Moving Average Window Size:", 5, 100, 20)
+        moving_avg = calculate_moving_average(stock_data, window_size)
+        st.line_chart(moving_avg)
+
+        # Fundamental analysis (pulled from Yahoo Finance metadata;
+        # field availability depends on the ticker and on yfinance)
+        st.subheader("Fundamental Analysis")
+        info = yf.Ticker(stock_symbol).info
+        st.write("Market Cap:", info.get('marketCap', 'N/A'))
+        st.write("Price-to-Earnings Ratio (P/E):", info.get('trailingPE', 'N/A'))
+
+        # Recommendation based on sentiment analysis
+        st.subheader("Stock Recommendation")
+        total_positive = positive_twitter + positive_youtube
+        total_negative = negative_twitter + negative_youtube
+        if total_positive > total_negative:
+            st.write(f"Recommendation: **BUY** {stock_symbol}")
+        elif total_negative > total_positive:
+            st.write(f"Recommendation: **SELL** {stock_symbol}")
+        else:
+            st.write(f"Recommendation: **HOLD** {stock_symbol}")
 
 if __name__ == "__main__":
     main()
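
A version note on the Twitter call above: API.search only exists in tweepy releases before 4.0; in tweepy 4.x the same v1.1 endpoint is exposed as API.search_tweets, so the Space should pin whichever tweepy version it targets. A minimal sketch of the 4.x spelling, assuming the credentials from twitter_api_setup() and that the account still has v1.1 search access (the helper name here is illustrative):

    # Sketch only: tweepy >= 4.0 variant of the search call in fetch_twitter_sentiment.
    # Assumes api = twitter_api_setup() and v1.1 standard search access.
    def search_tweets_v4(api, symbol, count=100):
        tweets = api.search_tweets(q=symbol, lang='en', count=count, tweet_mode='extended')
        return [tweet.full_text for tweet in tweets]

The returned texts can be fed to the same transformers pipeline; passing truncation=True when calling the sentiment pipeline keeps very long tweets or comments from exceeding the model's input length.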
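On the YouTube side, commentThreads().list raises googleapiclient.errors.HttpError for videos that have comments disabled, which would abort the whole analysis in fetch_youtube_sentiment. A sketch of a more defensive comment-fetching loop, using the same request parameters as above (the helper name is illustrative):

    from googleapiclient.errors import HttpError

    # Sketch only: skip videos whose comments cannot be listed instead of failing the run.
    def fetch_comments_safely(youtube, video_ids, max_results=50):
        comments = []
        for video_id in video_ids:
            try:
                response = youtube.commentThreads().list(
                    part='snippet', videoId=video_id, maxResults=max_results
                ).execute()
            except HttpError:
                continue  # comments disabled, quota exhausted, or permission error
            for item in response['items']:
                comments.append(item['snippet']['topLevelComment']['snippet']['textOriginal'])
        return comments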