Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
|
3 |
+
import requests
|
4 |
+
import os
|
5 |
+
import tweepy # For Twitter API
|
6 |
+
from googleapiclient.discovery import build # For YouTube API
|
7 |
+
from datetime import datetime, timedelta
|
8 |
+
import daal4py as d4p # Intel DAAL
|
9 |
+
import numpy as np
|
10 |
+
|
# API credentials are read from the environment so that real secrets never
# need to live in source control; the placeholder defaults keep the original
# behavior when the variables are unset.
ALPHA_VANTAGE_API_KEY = os.getenv("ALPHA_VANTAGE_API_KEY", "your_alpha_vantage_api_key")
TWITTER_BEARER_TOKEN = os.getenv("TWITTER_BEARER_TOKEN", "your_twitter_bearer_token")
YOUTUBE_API_KEY = os.getenv("YOUTUBE_API_KEY", "your_youtube_api_key")
# Hugging Face sentiment analysis pipeline
@st.cache_resource
def load_sentiment_model():
    """Build (once per session, via st.cache_resource) the default
    Hugging Face sentiment-analysis pipeline."""
    classifier = pipeline(task="sentiment-analysis")
    return classifier
# Load LLaMA model for stock recommendations
@st.cache_resource
def load_llama_model():
    """Load (once per session) the LLaMA chat tokenizer/model pair.

    Returns:
        Tuple of (tokenizer, model) for the configured Hugging Face repo.
    """
    repo_id = "meta-llama/Llama-2-7b-chat-hf"  # Change to your desired model
    return (
        AutoTokenizer.from_pretrained(repo_id),
        AutoModelForCausalLM.from_pretrained(repo_id),
    )
# Fetch stock data from Alpha Vantage API
def fetch_stock_data(symbol):
    """Return the daily time series for *symbol* from Alpha Vantage.

    Args:
        symbol: Ticker symbol, e.g. "AAPL".

    Returns:
        Mapping of ISO date string -> OHLCV dict, or {} when the payload
        has no "Time Series (Daily)" key (unknown symbol, rate limit, ...).

    Raises:
        requests.RequestException: on network failure, timeout, or an HTTP
            error status (caught by main()'s top-level handler).
    """
    url = "https://www.alphavantage.co/query"
    # Let requests build and encode the query string.
    params = {
        "function": "TIME_SERIES_DAILY",
        "symbol": symbol,
        "apikey": ALPHA_VANTAGE_API_KEY,
    }
    # A timeout keeps a slow/unreachable API from hanging the Streamlit app.
    response = requests.get(url, params=params, timeout=10)
    response.raise_for_status()
    return response.json().get("Time Series (Daily)", {})
# Fetch company overview (for fundamental analysis) from Alpha Vantage API
def fetch_fundamental_data(symbol):
    """Return the Alpha Vantage company OVERVIEW payload for *symbol*.

    Args:
        symbol: Ticker symbol, e.g. "AAPL".

    Returns:
        Parsed JSON dict; Alpha Vantage returns {} for unknown symbols.

    Raises:
        requests.RequestException: on network failure, timeout, or an HTTP
            error status (caught by main()'s top-level handler).
    """
    url = "https://www.alphavantage.co/query"
    params = {
        "function": "OVERVIEW",
        "symbol": symbol,
        "apikey": ALPHA_VANTAGE_API_KEY,
    }
    # A timeout keeps a slow/unreachable API from hanging the Streamlit app.
    response = requests.get(url, params=params, timeout=10)
    response.raise_for_status()
    return response.json()
# Twitter API function to fetch recent tweets
def fetch_tweets(stock_symbol, count=10):
    """Return the text of recent tweets mentioning *stock_symbol*.

    Args:
        stock_symbol: Ticker symbol; searched as "<symbol> stock".
        count: Requested number of tweets (Twitter v2 accepts 10-100).

    Returns:
        List of tweet-text strings; [] when the response carries no "data"
        (rate limit, bad token, or no matches) — matching the original's
        silent-empty behavior.
    """
    headers = {"Authorization": f"Bearer {TWITTER_BEARER_TOKEN}"}
    # Build the query via `params` so requests handles URL encoding,
    # instead of gluing unescaped text into the URL.
    params = {
        "query": f"{stock_symbol} stock",
        "max_results": count,
        "tweet.fields": "created_at,text",
    }
    url = "https://api.twitter.com/2/tweets/search/recent"
    # A timeout keeps a stalled API call from freezing the Streamlit app.
    response = requests.get(url, headers=headers, params=params, timeout=10)
    tweets = response.json().get("data", [])
    return [tweet["text"] for tweet in tweets]
# YouTube API function to fetch recent comments
def fetch_youtube_comments(stock_symbol, max_results=5):
    """Collect top-level comments from recent YouTube videos about a stock.

    Searches for up to 5 videos matching "<stock_symbol> stock", then pulls
    up to *max_results* top-level comments from each.

    Args:
        stock_symbol: Ticker symbol to search for.
        max_results: Maximum comments fetched per video.

    Returns:
        Flat list of comment text strings (possibly empty).
    """
    youtube = build('youtube', 'v3', developerKey=YOUTUBE_API_KEY)
    search_response = youtube.search().list(
        q=f"{stock_symbol} stock",
        part='snippet',
        type='video',
        maxResults=5
    ).execute()
    # .get(...) guards against a payload with no 'items' key.
    video_ids = [item['id']['videoId'] for item in search_response.get('items', [])]

    comments = []
    for video_id in video_ids:
        try:
            comment_response = youtube.commentThreads().list(
                part='snippet',
                videoId=video_id,
                maxResults=max_results
            ).execute()
        except Exception:
            # Videos with comments disabled make commentThreads().list raise;
            # skip that video instead of aborting the whole collection.
            continue
        for comment in comment_response.get('items', []):
            comments.append(
                comment['snippet']['topLevelComment']['snippet']['textDisplay']
            )

    return comments
# Perform sentiment analysis on social media comments/tweets
def perform_sentiment_analysis(comments):
    """Run the cached Hugging Face sentiment pipeline over *comments*.

    Args:
        comments: List of text snippets (tweets / YouTube comments).

    Returns:
        One result dict per input snippet, as produced by the pipeline.
    """
    return load_sentiment_model()(comments)
# Compute Moving Average using Intel oneAPI AI Analytics Toolkit (daal4py)
def compute_moving_average(prices, window=5):
    """Compute a simple moving average of *prices* with Intel DAAL.

    Args:
        prices: Sequence of numeric closing prices.
        window: Window length in samples; must be >= 1.

    Returns:
        List of float averages, one per full window; [] when *prices* has
        fewer than *window* samples.

    Raises:
        ValueError: if *window* is not a positive integer.
    """
    if window < 1:
        raise ValueError("window must be a positive integer")

    price_array = np.array(prices, dtype=np.float64).reshape(-1, 1)

    # Intel DAAL low-order moments algorithm: its mean over each sliding
    # window is exactly the simple moving average.
    algorithm = d4p.low_order_moments()

    moving_averages = []
    for i in range(len(price_array) - window + 1):
        window_data = price_array[i:i + window]
        result = algorithm.compute(window_data)
        # result.mean is a (1, 1) array for single-column input; unwrap it
        # to a plain float (the original appended a 1-element ndarray),
        # so values render cleanly in Streamlit.
        moving_averages.append(float(result.mean[0, 0]))

    return moving_averages
# Technical analysis function using Intel oneAPI
def technical_analysis(symbol):
    """Summarize the latest close and 5-day moving average for *symbol*.

    Args:
        symbol: Ticker symbol to analyze.

    Returns:
        Dict with "Date", "Closing Price" and "5-Day Moving Average", or
        {} when no time-series data is available.
    """
    data = fetch_stock_data(symbol)
    if not data:
        return {}

    # Sort explicitly, newest first (ISO dates sort lexicographically),
    # instead of relying on the JSON payload's key order to put the latest
    # trading day at index 0.
    dates = sorted(data.keys(), reverse=True)
    closing_prices = [float(data[d]['4. close']) for d in dates]

    # 5-day moving average; index 0 covers the most recent window.
    moving_averages = compute_moving_average(closing_prices)

    return {
        "Date": dates[0],
        "Closing Price": closing_prices[0],
        "5-Day Moving Average": moving_averages[0] if moving_averages else "N/A",
    }
# Streamlit Web App
def main():
    """Drive the Streamlit UI: take a ticker symbol, fetch market and
    social data, and render technical/fundamental/sentiment analyses plus
    an LLM-generated recommendation."""
    st.title("Stock Analysis App with Social Media Sentiment Analysis")
    st.write("""
    This app provides stock analysis using:
    - Sentiment Analysis (from Twitter and YouTube)
    - Technical Analysis (with Intel oneAPI for performance)
    - Fundamental Analysis
    - Buy/Sell/Hold Recommendations
    """)

    company_symbol = st.text_input("Enter the stock symbol (e.g., AAPL, TSLA, GOOGL):")

    if company_symbol:
        try:
            stock_data = fetch_stock_data(company_symbol)
            fundamental_data = fetch_fundamental_data(company_symbol)

            if stock_data:
                st.subheader("Asset Overview")
                st.json(stock_data)

                with st.expander("Technical Analysis (Intel oneAPI)"):
                    st.subheader("Technical Analysis")
                    tech_analysis = technical_analysis(company_symbol)
                    st.write(tech_analysis)

                with st.expander("Fundamental Analysis"):
                    st.subheader("Fundamental Analysis")
                    if fundamental_data:
                        st.write(f"**Company Name**: {fundamental_data.get('Name', 'N/A')}")
                        st.write(f"**Sector**: {fundamental_data.get('Sector', 'N/A')}")
                        st.write(f"**Industry**: {fundamental_data.get('Industry', 'N/A')}")
                        st.write(f"**Market Capitalization**: {fundamental_data.get('MarketCapitalization', 'N/A')}")
                        st.write(f"**Earnings per Share (EPS)**: {fundamental_data.get('EPS', 'N/A')}")
                        st.write(f"**Price to Earnings Ratio (P/E)**: {fundamental_data.get('PERatio', 'N/A')}")
                        st.write(f"**Dividend Yield**: {fundamental_data.get('DividendYield', 'N/A')}")
                        st.write(f"**Profit Margin**: {fundamental_data.get('ProfitMargin', 'N/A')}")
                    else:
                        st.write("No fundamental data available.")

                with st.expander("Sentiment Analysis"):
                    st.subheader("Sentiment Analysis")
                    st.write("Fetching views from Twitter and YouTube...")

                    # Fetch tweets and comments
                    tweets = fetch_tweets(company_symbol)
                    youtube_comments = fetch_youtube_comments(company_symbol)

                    st.write("**Tweets**:")
                    for tweet in tweets:
                        st.write(f"- {tweet}")

                    st.write("**YouTube Comments**:")
                    for comment in youtube_comments:
                        st.write(f"- {comment}")

                    # Perform sentiment analysis on collected data
                    all_comments = tweets + youtube_comments
                    if all_comments:
                        sentiment_results = perform_sentiment_analysis(all_comments)
                        st.write("Sentiment Analysis Results:")
                        for sentiment in sentiment_results:
                            st.write(sentiment)

                with st.expander("Recommendation"):
                    st.subheader("Recommendation")
                    tokenizer, llama_model = load_llama_model()
                    # Fix: the original passed only the bare ticker as the
                    # prompt, so generate() merely continued the ticker token.
                    # An actual instruction elicits a recommendation.
                    prompt = (
                        f"Give a brief buy, sell, or hold recommendation for "
                        f"{company_symbol} stock and explain why:"
                    )
                    stock_recommendation = llama_model.generate(
                        tokenizer(prompt, return_tensors="pt").input_ids,
                        max_new_tokens=50
                    )
                    recommendation_text = tokenizer.decode(stock_recommendation[0], skip_special_tokens=True)
                    st.write(recommendation_text)
            else:
                # Plain string: the original used an f-string without fields.
                st.error("No data available for the symbol entered.")

        except Exception as e:
            st.error(f"An error occurred: {e}")

if __name__ == "__main__":
    main()