from datetime import datetime
import gradio as gr
import pandas as pd
import duckdb
import logging
from tabs.tokens_dist import (
    get_extreme_cases,
)
from tabs.dist_gap import (
    get_distribution_plot,
    get_avg_gap_time_evolution_grouped_markets,
    get_correlation_map,
    get_kde_with_trades,
    get_kde_with_total_bet_amount,
    get_dist_gap_time_evolution,
    get_dist_gap_timeline_plotly,
)
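
# Gradio app for Olas Predict live markets: it loads sampled market data from a
# local parquet file via DuckDB and plots prediction-distribution gap metrics.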


def get_logger():
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # stream handler and formatter
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    return logger


logger = get_logger()


def prepare_data():
    """
    Get all data from the parquet file and add derived datetime columns
    """
    logger.info("Getting all data")
    con = duckdb.connect(":memory:")
    # query to fetch the live markets data
    query = """
        SELECT *
        FROM read_parquet('./live_data/markets_live_data.parquet')
    """
    df = con.execute(query).fetchdf()
    df["sample_datetime"] = df["sample_timestamp"].apply(
        lambda x: datetime.fromtimestamp(x)
    )
    df["opening_datetime"] = df["openingTimestamp"].apply(
        lambda x: datetime.fromtimestamp(int(x))
    )
    df["days_to_resolution"] = (df["opening_datetime"] - df["sample_datetime"]).dt.days
    return df
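
# Design note: reading the parquet through an in-memory DuckDB connection keeps
# SQL-side filtering available; pandas.read_parquet would work equally well for
# a plain SELECT *.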


demo = gr.Blocks()

markets_data = prepare_data()
markets_data["sample_date"] = pd.to_datetime(markets_data["sample_datetime"]).dt.date

live_markets_data = markets_data.loc[markets_data["open"] == True]
# filter only those with trades
markets_data = markets_data.loc[markets_data["total_trades"] > 0]
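
# live_markets_data (open markets only) feeds the per-market timeline plots;
# markets_data (all sampled markets with trades) feeds the aggregate metric plots.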


with demo:
    gr.HTML("<h1>Olas Predict Live Markets</h1>")
    gr.Markdown("This app shows the distributions of predictions on the live markets.")
    best_market_id, best_gap, worst_market_id, worst_gap = get_extreme_cases(
        live_markets_data
    )
    with gr.Tabs():
        with gr.TabItem("💹 Probability distributions of some markets"):
            with gr.Row():
                gr.Markdown("Best case: a market with a low gap between distributions")
            with gr.Row():
                gr.Markdown(
                    f"Market id = {best_market_id} Dist gap = {round(best_gap, 2)}"
                )
            with gr.Row():
                best_case = get_dist_gap_timeline_plotly(
                    best_market_id, live_markets_data
                )
            with gr.Row():
                gr.Markdown("Worst case: a market with a high distribution gap metric")
            with gr.Row():
                gr.Markdown(
                    f"Market id = {worst_market_id} Dist gap = {round(worst_gap, 2)}"
                )
            with gr.Row():
                worst_case = get_dist_gap_timeline_plotly(
                    worst_market_id, live_markets_data
                )
            with gr.Row():
                gr.Markdown(
                    "Time evolution of the average distribution gap percentage of markets created on the same day"
                )
            with gr.Row():
                mean_plot = get_avg_gap_time_evolution_grouped_markets(markets_data)
with gr.TabItem("๐Ÿ“ Distribution gap metric for all markets"):
# remove samples with no trades
            with gr.Row():
                gr.Markdown(
                    "This metric measures the difference between the probability distribution based on the token distribution and the one based on the price-weighted distribution."
                )
            with gr.Row():
                gr.Markdown("# Density distribution")
            with gr.Row():
                kde_plot = get_distribution_plot(markets_data)
            with gr.Row():
                with gr.Column(min_width=350):
                    gr.Markdown("# Relationship with number of trades")
                    kde_trades_plot = get_kde_with_trades(markets_data)
                with gr.Column(min_width=350):
                    gr.Markdown("# Relationship with total bet amount")
                    kde_total_bet_amount_plot = get_kde_with_total_bet_amount(
                        markets_data
                    )
            with gr.Row():
                gr.Markdown(
                    "# Correlation analysis between the metric and market variables"
                )
            with gr.Row():
                correlation_plot = get_correlation_map(markets_data)
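
# queue() enables request queuing; default_concurrency_limit=40 lets up to 40
# events run concurrently per listener (assuming Gradio 4.x semantics).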
demo.queue(default_concurrency_limit=40).launch()