# ==== zipline-live-1.1.0.5: zipline/gens/tradesimulation.py ====
from contextlib2 import ExitStack
from logbook import Logger, Processor
from pandas.tslib import normalize_date
from zipline.protocol import BarData
from zipline.utils.api_support import ZiplineAPI
from six import viewkeys
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params, data_portal, clock, benchmark_source,
restrictions, universe_func):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
self.env = algo.trading_environment
self.data_portal = data_portal
self.restrictions = restrictions
# ==============
# Algo Setup
# ==============
self.algo = algo
# ==============
# Snapshot Setup
# ==============
# This object is the way that user algorithms interact with OHLCV data,
# fetcher data, and some API methods like `data.can_trade`.
self.current_data = self._create_bar_data(universe_func)
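        # Illustrative sketch only (comments, not executed here): inside a
        # user algorithm, `data` is this BarData object, and `asset` below is
        # a hypothetical Equity looked up elsewhere:
        #
        #     def handle_data(context, data):
        #         if data.can_trade(asset):
        #             price = data.current(asset, 'price')
        #             closes = data.history(asset, 'close', 20, '1d')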
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
self.clock = clock
self.benchmark_source = benchmark_source
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def get_simulation_dt(self):
return self.simulation_dt
def _create_bar_data(self, universe_func):
return BarData(
data_portal=self.data_portal,
simulation_dt_func=self.get_simulation_dt,
data_frequency=self.sim_params.data_frequency,
trading_calendar=self.algo.trading_calendar,
restrictions=self.restrictions,
universe_func=universe_func
)
def transform(self):
"""
Main generator work loop.
"""
algo = self.algo
emission_rate = algo.perf_tracker.emission_rate
def every_bar(dt_to_use, current_data=self.current_data,
handle_data=algo.event_manager.handle_data):
# called every tick (minute or day).
algo.on_dt_changed(dt_to_use)
for capital_change in calculate_minute_capital_changes(dt_to_use):
yield capital_change
self.simulation_dt = dt_to_use
blotter = algo.blotter
perf_tracker = algo.perf_tracker
            # handle any transactions and commissions coming out of new
            # orders placed in the last bar
new_transactions, new_commissions, closed_orders = \
blotter.get_transactions(current_data)
blotter.prune_orders(closed_orders)
for transaction in new_transactions:
perf_tracker.process_transaction(transaction)
# since this order was modified, record it
order = blotter.orders[transaction.order_id]
perf_tracker.process_order(order)
if new_commissions:
for commission in new_commissions:
perf_tracker.process_commission(commission)
handle_data(algo, current_data, dt_to_use)
# grab any new orders from the blotter, then clear the list.
# this includes cancelled orders.
new_orders = blotter.new_orders
blotter.new_orders = []
# if we have any new orders, record them so that we know
# in what perf period they were placed.
if new_orders:
for new_order in new_orders:
perf_tracker.process_order(new_order)
algo.portfolio_needs_update = True
algo.account_needs_update = True
algo.performance_needs_update = True
def once_a_day(midnight_dt, current_data=self.current_data,
data_portal=self.data_portal):
perf_tracker = algo.perf_tracker
# Get the positions before updating the date so that prices are
# fetched for trading close instead of midnight
positions = algo.perf_tracker.position_tracker.positions
position_assets = algo.asset_finder.retrieve_all(positions)
# set all the timestamps
self.simulation_dt = midnight_dt
algo.on_dt_changed(midnight_dt)
# process any capital changes that came overnight
for capital_change in algo.calculate_capital_changes(
midnight_dt, emission_rate=emission_rate,
is_interday=True):
yield capital_change
# we want to wait until the clock rolls over to the next day
# before cleaning up expired assets.
self._cleanup_expired_assets(midnight_dt, position_assets)
# handle any splits that impact any positions or any open orders.
assets_we_care_about = \
viewkeys(perf_tracker.position_tracker.positions) | \
viewkeys(algo.blotter.open_orders)
if assets_we_care_about:
splits = data_portal.get_splits(assets_we_care_about,
midnight_dt)
if splits:
algo.blotter.process_splits(splits)
perf_tracker.position_tracker.handle_splits(splits)
def handle_benchmark(date, benchmark_source=self.benchmark_source):
algo.perf_tracker.all_benchmark_returns[date] = \
benchmark_source.get_value(date)
def on_exit():
# Remove references to algo, data portal, et al to break cycles
# and ensure deterministic cleanup of these objects when the
# simulation finishes.
self.algo = None
self.benchmark_source = self.current_data = self.data_portal = None
with ExitStack() as stack:
stack.callback(on_exit)
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
if algo.data_frequency == 'minute':
def execute_order_cancellation_policy():
algo.blotter.execute_cancel_policy(SESSION_END)
def calculate_minute_capital_changes(dt):
# process any capital changes that came between the last
# and current minutes
return algo.calculate_capital_changes(
dt, emission_rate=emission_rate, is_interday=False)
else:
def execute_order_cancellation_policy():
pass
def calculate_minute_capital_changes(dt):
return []
for dt, action in self.clock:
if action == BAR:
for capital_change_packet in every_bar(dt):
yield capital_change_packet
elif action == SESSION_START:
for capital_change_packet in once_a_day(dt):
yield capital_change_packet
elif action == SESSION_END:
# End of the session.
if emission_rate == 'daily':
handle_benchmark(normalize_date(dt))
execute_order_cancellation_policy()
yield self._get_daily_message(dt, algo, algo.perf_tracker)
elif action == BEFORE_TRADING_START_BAR:
self.simulation_dt = dt
algo.on_dt_changed(dt)
algo.before_trading_start(self.current_data)
elif action == MINUTE_END:
handle_benchmark(dt)
minute_msg = \
self._get_minute_message(dt, algo, algo.perf_tracker)
yield minute_msg
risk_message = algo.perf_tracker.handle_simulation_end()
yield risk_message
def _cleanup_expired_assets(self, dt, position_assets):
"""
Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders whose assets are on or after their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date.
"""
algo = self.algo
def past_auto_close_date(asset):
acd = asset.auto_close_date
return acd is not None and acd <= dt
# Remove positions in any sids that have reached their auto_close date.
assets_to_clear = \
[asset for asset in position_assets if past_auto_close_date(asset)]
perf_tracker = algo.perf_tracker
data_portal = self.data_portal
for asset in assets_to_clear:
perf_tracker.process_close_position(asset, dt, data_portal)
# Remove open orders for any sids that have reached their
# auto_close_date.
blotter = algo.blotter
assets_to_cancel = \
set([asset for asset in blotter.open_orders
if past_auto_close_date(asset)])
for asset in assets_to_cancel:
blotter.cancel_all_orders_for_asset(asset)
def _get_daily_message(self, dt, algo, perf_tracker):
"""
Get a perf message for the given datetime.
"""
perf_message = perf_tracker.handle_market_close(
dt, self.data_portal,
)
perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars
return perf_message
def _get_minute_message(self, dt, algo, perf_tracker):
"""
Get a perf message for the given datetime.
"""
rvars = algo.recorded_vars
minute_message = perf_tracker.handle_minute_close(
dt, self.data_portal,
)
minute_message['minute_perf']['recorded_vars'] = rvars
        return minute_message
# ==== zipline-live-1.1.0.5: end of zipline/gens/tradesimulation.py ====
# ==== zipline-live-1.1.0.5: zipline/gens/realtimeclock.py ====
from time import sleep
from logbook import Logger
import pandas as pd
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR
)
log = Logger('Realtime Clock')
class RealtimeClock(object):
"""Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
The key difference between the two is that the RealtimeClock's event
emission is synchronized to the (broker's) wall time clock, while
MinuteSimulationClock yields a new event on every iteration (regardless of
wall clock).
    The ``time_skew`` parameter represents the time difference between
    the Broker and the live trading machine's clock.
"""
def __init__(self,
sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission,
time_skew=pd.Timedelta("0s"),
is_broker_alive=None):
self.sessions = sessions
self.execution_opens = execution_opens
self.execution_closes = execution_closes
self.before_trading_start_minutes = before_trading_start_minutes
self.minute_emission = minute_emission
self.time_skew = time_skew
self.is_broker_alive = is_broker_alive or (lambda: True)
self._last_emit = None
self._before_trading_start_bar_yielded = False
def __iter__(self):
yield self.sessions[0], SESSION_START
while self.is_broker_alive():
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
if (server_time >= self.before_trading_start_minutes[0] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True
yield server_time, BEFORE_TRADING_START_BAR
elif server_time < self.execution_opens[0].tz_localize('UTC'):
sleep(1)
elif (self.execution_opens[0].tz_localize('UTC') <= server_time <
self.execution_closes[0].tz_localize('UTC')):
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
else:
sleep(1)
elif server_time == self.execution_closes[0].tz_localize('UTC'):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
yield server_time, SESSION_END
return
elif server_time > self.execution_closes[0].tz_localize('UTC'):
                # Return with no yield if the algo is started after hours
return
else:
# We should never end up in this branch
                raise RuntimeError("Invalid state in RealtimeClock")
# ==== zipline-live-1.1.0.5: end of zipline/gens/realtimeclock.py ====
# ==== zipline-live-1.1.0.5: zipline/gens/brokers/ib_broker.py ====
import sys
from collections import namedtuple, defaultdict, OrderedDict
from time import sleep
from math import fabs
from six import iteritems
import pandas as pd
import numpy as np
from zipline.gens.brokers.broker import Broker
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
from ib.ext.EClientSocket import EClientSocket
from ib.ext.EWrapper import EWrapper
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.ExecutionFilter import ExecutionFilter
from ib.ext.EClientErrors import EClientErrors
from logbook import Logger
if sys.version_info > (3,):
long = int
log = Logger('IB Broker')
Position = namedtuple('Position', ['contract', 'position', 'market_price',
'market_value', 'average_cost',
'unrealized_pnl', 'realized_pnl',
'account_name'])
_connection_timeout = 15 # Seconds
_poll_frequency = 0.1
symbol_to_exchange = defaultdict(lambda: 'SMART')
symbol_to_exchange['VIX'] = 'CBOE'
symbol_to_exchange['GLD'] = 'ARCA'
symbol_to_exchange['GDX'] = 'ARCA'
symbol_to_sec_type = defaultdict(lambda: 'STK')
symbol_to_sec_type['VIX'] = 'IND'
def log_message(message, mapping):
try:
del (mapping['self'])
except (KeyError,):
pass
items = list(mapping.items())
items.sort()
log.debug(('### %s' % (message,)))
for k, v in items:
log.debug((' %s:%s' % (k, v)))
def _method_params_to_dict(args):
return {k: v
for k, v in iteritems(args)
if k != 'self'}
class TWSConnection(EClientSocket, EWrapper):
def __init__(self, tws_uri):
EWrapper.__init__(self)
EClientSocket.__init__(self, anyWrapper=self)
self.tws_uri = tws_uri
host, port, client_id = self.tws_uri.split(':')
self._host = host
self._port = int(port)
self.client_id = int(client_id)
self._next_ticker_id = 0
self._next_request_id = 0
self._next_order_id = None
self.managed_accounts = None
self.symbol_to_ticker_id = {}
self.ticker_id_to_symbol = {}
self.last_tick = defaultdict(dict)
self.bars = {}
# accounts structure: accounts[account_id][currency][value]
self.accounts = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: np.NaN)))
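        # For illustration only (hypothetical values): once the account
        # download completes, this nested mapping looks roughly like
        #     self.accounts['DU123456']['USD']['NetLiquidation'] == '100000.00'
        # Values arrive as strings from updateAccountValue() and are cast to
        # float by the consumer (see IBBroker.portfolio / IBBroker.account).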
self.accounts_download_complete = False
self.positions = {}
self.portfolio = {}
self.open_orders = {}
self.order_statuses = {}
self.executions = defaultdict(OrderedDict)
self.commissions = defaultdict(OrderedDict)
self._execution_to_order_id = {}
self.time_skew = None
self.unrecoverable_error = False
self.connect()
def connect(self):
log.info("Connecting: {}:{}:{}".format(self._host, self._port,
self.client_id))
self.eConnect(self._host, self._port, self.client_id)
timeout = _connection_timeout
while timeout and not self.isConnected():
sleep(_poll_frequency)
timeout -= _poll_frequency
else:
if not self.isConnected():
raise SystemError("Connection timeout during TWS connection!")
self._download_account_details()
log.info("Managed accounts: {}".format(self.managed_accounts))
self.reqCurrentTime()
self.reqIds(1)
while self.time_skew is None or self._next_order_id is None:
sleep(_poll_frequency)
log.info("Local-Broker Time Skew: {}".format(self.time_skew))
def _download_account_details(self):
exec_filter = ExecutionFilter()
exec_filter.m_clientId = self.client_id
self.reqExecutions(self.next_request_id, exec_filter)
self.reqManagedAccts()
while self.managed_accounts is None:
sleep(_poll_frequency)
for account in self.managed_accounts:
self.reqAccountUpdates(subscribe=True, acctCode=account)
while self.accounts_download_complete is False:
sleep(_poll_frequency)
@property
def next_ticker_id(self):
ticker_id = self._next_ticker_id
self._next_ticker_id += 1
return ticker_id
@property
def next_request_id(self):
request_id = self._next_request_id
self._next_request_id += 1
return request_id
@property
def next_order_id(self):
order_id = self._next_order_id
self._next_order_id += 1
return order_id
def subscribe_to_market_data(self,
symbol,
sec_type='STK',
exchange='SMART',
currency='USD'):
if symbol in self.symbol_to_ticker_id:
# Already subscribed to market data
return
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = symbol_to_sec_type[symbol]
contract.m_exchange = symbol_to_exchange[symbol]
contract.m_currency = currency
ticker_id = self.next_ticker_id
self.symbol_to_ticker_id[symbol] = ticker_id
self.ticker_id_to_symbol[ticker_id] = symbol
tick_list = "233" # RTVolume, return tick_type == 48
self.reqMktData(ticker_id, contract, tick_list, False)
def _process_tick(self, ticker_id, tick_type, value):
try:
symbol = self.ticker_id_to_symbol[ticker_id]
except KeyError:
log.error("Tick {} for id={} is not registered".format(tick_type,
ticker_id))
return
if tick_type == 48:
# RT Volume Bar. Format:
# Last trade price; Last trade size;Last trade time;Total volume;\
# VWAP;Single trade flag
# e.g.: 701.28;1;1348075471534;67854;701.46918464;true
(last_trade_price, last_trade_size, last_trade_time, total_volume,
vwap, single_trade_flag) = value.split(';')
# Ignore this update if last_trade_price is empty:
# tickString: tickerId=0 tickType=48/RTVolume ;0;1469805548873;\
# 240304;216.648653;true
if len(last_trade_price) == 0:
return
last_trade_dt = pd.to_datetime(float(last_trade_time), unit='ms',
utc=True)
self._add_bar(symbol, float(last_trade_price),
int(last_trade_size), last_trade_dt,
int(total_volume), float(vwap),
single_trade_flag)
def _add_bar(self, symbol, last_trade_price, last_trade_size,
last_trade_time, total_volume, vwap, single_trade_flag):
bar = pd.DataFrame(index=pd.DatetimeIndex([last_trade_time]),
data={'last_trade_price': last_trade_price,
'last_trade_size': last_trade_size,
'total_volume': total_volume,
'vwap': vwap,
'single_trade_flag': single_trade_flag})
if symbol not in self.bars:
self.bars[symbol] = bar
else:
self.bars[symbol] = self.bars[symbol].append(bar)
def tickPrice(self, ticker_id, field, price, can_auto_execute):
self._process_tick(ticker_id, tick_type=field, value=price)
def tickSize(self, ticker_id, field, size):
self._process_tick(ticker_id, tick_type=field, value=size)
def tickOptionComputation(self,
ticker_id, field, implied_vol, delta, opt_price,
pv_dividend, gamma, vega, theta, und_price):
log_message('tickOptionComputation', vars())
def tickGeneric(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickString(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickEFP(self, ticker_id, tick_type, basis_points,
formatted_basis_points, implied_future, hold_days,
future_expiry, dividend_impact, dividends_to_expiry):
log_message('tickEFP', vars())
def updateAccountValue(self, key, value, currency, account_name):
self.accounts[account_name][currency][key] = value
def updatePortfolio(self,
contract,
position,
market_price,
market_value,
average_cost,
unrealized_pnl,
realized_pnl,
account_name):
symbol = contract.m_symbol
position = Position(contract=contract,
position=position,
market_price=market_price,
market_value=market_value,
average_cost=average_cost,
unrealized_pnl=unrealized_pnl,
realized_pnl=realized_pnl,
account_name=account_name)
self.positions[symbol] = position
def updateAccountTime(self, time_stamp):
pass
def accountDownloadEnd(self, account_name):
self.accounts_download_complete = True
def nextValidId(self, order_id):
self._next_order_id = order_id
def contractDetails(self, req_id, contract_details):
log_message('contractDetails', vars())
def contractDetailsEnd(self, req_id):
log_message('contractDetailsEnd', vars())
def bondContractDetails(self, req_id, contract_details):
log_message('bondContractDetails', vars())
def orderStatus(self, order_id, status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id, why_held):
self.order_statuses[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"filled={filled} remaining={remaining} "
"avg_fill_price={avg_fill_price} "
"last_fill_price={last_fill_price} ".format(
order_id=order_id,
status=self.order_statuses[order_id]['status'],
filled=self.order_statuses[order_id]['filled'],
remaining=self.order_statuses[order_id]['remaining'],
avg_fill_price=self
.order_statuses[order_id]['avg_fill_price'],
last_fill_price=self
.order_statuses[order_id]['last_fill_price']))
def openOrder(self, order_id, contract, order, state):
self.open_orders[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"{order_action} {order_count} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price}".format(
order_id=order_id,
status=state.m_status,
order_action=order.m_action,
order_count=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice))
def openOrderEnd(self):
pass
def execDetails(self, req_id, contract, exec_detail):
order_id, exec_id = exec_detail.m_orderId, exec_detail.m_execId
self.executions[order_id][exec_id] = _method_params_to_dict(vars())
self._execution_to_order_id[exec_id] = order_id
log.info(
"Order-{order_id} executed @ {exec_time}: "
"{symbol} current: {shares} @ ${price} "
"total: {cum_qty} @ ${avg_price} "
"exec_id: {exec_id} by client-{client_id}".format(
order_id=order_id, exec_id=exec_id,
exec_time=pd.to_datetime(exec_detail.m_time),
symbol=contract.m_symbol,
shares=exec_detail.m_shares,
price=exec_detail.m_price,
cum_qty=exec_detail.m_cumQty,
avg_price=exec_detail.m_avgPrice,
client_id=exec_detail.m_clientId))
def execDetailsEnd(self, req_id):
log.debug(
"Execution details completed for request {req_id}".format(
req_id=req_id))
def commissionReport(self, commission_report):
exec_id = commission_report.m_execId
order_id = self._execution_to_order_id[commission_report.m_execId]
self.commissions[order_id][exec_id] = commission_report
log.debug(
"Order-{order_id} report: "
"realized_pnl: ${realized_pnl} "
"commission: ${commission} yield: {yield_} "
"exec_id: {exec_id}".format(
order_id=order_id,
exec_id=commission_report.m_execId,
realized_pnl=commission_report.m_realizedPNL
if commission_report.m_realizedPNL != sys.float_info.max
else 0,
commission=commission_report.m_commission,
yield_=commission_report.m_yield
if commission_report.m_yield != sys.float_info.max
else 0)
)
def connectionClosed(self):
self.unrecoverable_error = True
log.error("IB Connection closed")
def error(self, id_=None, error_code=None, error_msg=None):
if isinstance(id_, Exception):
# XXX: for an unknown reason 'log' is None in this branch,
# therefore it needs to be instantiated before use
global log
if not log:
log = Logger('IB Broker')
log.exception(id_)
if isinstance(error_code, EClientErrors.CodeMsgPair):
error_msg = error_code.msg()
error_code = error_code.code()
if isinstance(error_code, int):
if error_code in (502, 503, 326):
# 502: Couldn't connect to TWS.
# 503: The TWS is out of date and must be upgraded.
                # 326: Unable to connect as the client id is already in use.
self.unrecoverable_error = True
if error_code < 1000:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.info("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
def updateMktDepth(self, ticker_id, position, operation, side, price,
size):
log_message('updateMktDepth', vars())
def updateMktDepthL2(self, ticker_id, position, market_maker, operation,
side, price, size):
log_message('updateMktDepthL2', vars())
def updateNewsBulletin(self, msg_id, msg_type, message, orig_exchange):
log_message('updateNewsBulletin', vars())
def managedAccounts(self, accounts_list):
self.managed_accounts = accounts_list.split(',')
def receiveFA(self, fa_data_type, xml):
log_message('receiveFA', vars())
def historicalData(self, req_id, date, open_, high, low, close, volume,
count, wap, has_gaps):
log_message('historicalData', vars())
def scannerParameters(self, xml):
log_message('scannerParameters', vars())
def scannerData(self, req_id, rank, contract_details, distance, benchmark,
projection, legs_str):
log_message('scannerData', vars())
def currentTime(self, time):
self.time_skew = (pd.to_datetime('now', utc=True) -
pd.to_datetime(long(time), unit='s', utc=True))
def deltaNeutralValidation(self, req_id, under_comp):
log_message('deltaNeutralValidation', vars())
def fundamentalData(self, req_id, data):
log_message('fundamentalData', vars())
def marketDataType(self, req_id, market_data_type):
log_message('marketDataType', vars())
def realtimeBar(self, req_id, time, open_, high, low, close, volume, wap,
count):
log_message('realtimeBar', vars())
def scannerDataEnd(self, req_id):
log_message('scannerDataEnd', vars())
def tickSnapshotEnd(self, req_id):
log_message('tickSnapshotEnd', vars())
def position(self, account, contract, pos, avg_cost):
log_message('position', vars())
def positionEnd(self):
log_message('positionEnd', vars())
def accountSummary(self, req_id, account, tag, value, currency):
log_message('accountSummary', vars())
def accountSummaryEnd(self, req_id):
log_message('accountSummaryEnd', vars())
class IBBroker(Broker):
def __init__(self, tws_uri, account_id=None):
self._tws_uri = tws_uri
self._orders = {}
self._transactions = {}
self._tws = TWSConnection(tws_uri)
self.account_id = (self._tws.managed_accounts[0] if account_id is None
else account_id)
self.currency = 'USD'
self._subscribed_assets = []
super(self.__class__, self).__init__()
@property
def subscribed_assets(self):
return self._subscribed_assets
def subscribe_to_market_data(self, asset):
if asset not in self.subscribed_assets:
# remove str() cast to have a fun debugging journey
self._tws.subscribe_to_market_data(str(asset.symbol))
self._subscribed_assets.append(asset)
while asset.symbol not in self._tws.bars:
sleep(_poll_frequency)
@property
def positions(self):
z_positions = zp.Positions()
for symbol in self._tws.positions:
ib_position = self._tws.positions[symbol]
try:
z_position = zp.Position(symbol_lookup(symbol))
except SymbolNotFound:
                # The symbol might not have been ingested into the db,
                # therefore it needs to be skipped.
continue
z_position.amount = int(ib_position.position)
z_position.cost_basis = float(ib_position.average_cost)
# Check if symbol exists in bars df
if symbol in self._tws.bars:
z_position.last_sale_price = \
float(self._tws.bars[symbol].last_trade_price.iloc[-1])
z_position.last_sale_date = \
self._tws.bars[symbol].index.values[-1]
else:
z_position.last_sale_price = None
z_position.last_sale_date = None
z_positions[symbol_lookup(symbol)] = z_position
return z_positions
@property
def portfolio(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
z_portfolio = zp.Portfolio()
z_portfolio.capital_used = None # TODO(tibor)
z_portfolio.starting_cash = None # TODO(tibor): Fill from state
z_portfolio.portfolio_value = float(ib_account['EquityWithLoanValue'])
z_portfolio.pnl = (float(ib_account['RealizedPnL']) +
float(ib_account['UnrealizedPnL']))
z_portfolio.returns = None # TODO(tibor): pnl / total_at_start
z_portfolio.cash = float(ib_account['TotalCashValue'])
z_portfolio.start_date = None # TODO(tibor)
z_portfolio.positions = self.positions
z_portfolio.positions_value = float(ib_account['StockMarketValue'])
z_portfolio.positions_exposure \
= (z_portfolio.positions_value /
(z_portfolio.positions_value +
float(ib_account['TotalCashValue'])))
return z_portfolio
@property
def account(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
z_account = zp.Account()
z_account.settled_cash = float(ib_account['TotalCashValue-S'])
z_account.accrued_interest = None # TODO(tibor)
z_account.buying_power = float(ib_account['BuyingPower'])
z_account.equity_with_loan = float(ib_account['EquityWithLoanValue'])
z_account.total_positions_value = float(ib_account['StockMarketValue'])
z_account.total_positions_exposure = float(
(z_account.total_positions_value /
(z_account.total_positions_value +
float(ib_account['TotalCashValue']))))
z_account.regt_equity = float(ib_account['RegTEquity'])
z_account.regt_margin = float(ib_account['RegTMargin'])
z_account.initial_margin_requirement = float(
ib_account['FullInitMarginReq'])
z_account.maintenance_margin_requirement = float(
ib_account['FullMaintMarginReq'])
z_account.available_funds = float(ib_account['AvailableFunds'])
z_account.excess_liquidity = float(ib_account['ExcessLiquidity'])
z_account.cushion = float(
self._tws.accounts[self.account_id]['']['Cushion'])
z_account.day_trades_remaining = float(
self._tws.accounts[self.account_id]['']['DayTradesRemaining'])
z_account.leverage = float(
self._tws.accounts[self.account_id]['']['Leverage-S'])
z_account.net_leverage = (
float(ib_account['StockMarketValue']) /
(float(ib_account['TotalCashValue']) +
float(ib_account['StockMarketValue'])))
z_account.net_liquidation = float(ib_account['NetLiquidation'])
return z_account
@property
def time_skew(self):
return self._tws.time_skew
def is_alive(self):
return not self._tws.unrecoverable_error
@staticmethod
def _safe_symbol_lookup(symbol):
try:
return symbol_lookup(symbol)
except SymbolNotFound:
return None
_zl_order_ref_magic = '!ZL'
@classmethod
    def _create_order_ref(cls, ib_order, dt=None):
        # Evaluate "now" at call time; a pd.to_datetime(...) default argument
        # would be evaluated only once, when the class is defined.
        if dt is None:
            dt = pd.to_datetime('now', utc=True)
        order_type = ib_order.m_orderType.replace(' ', '_')
return \
"A:{action} Q:{qty} T:{order_type} " \
"L:{limit_price} S:{stop_price} D:{date} {magic}".format(
action=ib_order.m_action,
qty=ib_order.m_totalQuantity,
order_type=order_type,
limit_price=ib_order.m_lmtPrice,
stop_price=ib_order.m_auxPrice,
date=int(dt.value / 1e9),
magic=cls._zl_order_ref_magic)
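    # For illustration (hypothetical values), _create_order_ref produces a
    # string such as
    #     "A:BUY Q:100 T:LMT L:10.5 S:0 D:1509000000 !ZL"
    # which _parse_order_ref below round-trips back into its components.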
@classmethod
def _parse_order_ref(cls, ib_order_ref):
if not ib_order_ref or \
not ib_order_ref.endswith(cls._zl_order_ref_magic):
return None
try:
action, qty, order_type, limit_price, stop_price, dt, _ = \
ib_order_ref.split(' ')
if not all(
[action.startswith('A:'),
qty.startswith('Q:'),
order_type.startswith('T:'),
limit_price.startswith('L:'),
stop_price.startswith('S:'),
dt.startswith('D:')]):
return None
return {
'action': action[2:],
'qty': int(qty[2:]),
'order_type': order_type[2:].replace('_', ' '),
'limit_price': float(limit_price[2:]),
'stop_price': float(stop_price[2:]),
'dt': pd.to_datetime(dt[2:], unit='s', utc=True)}
except ValueError:
log.warning("Error parsing order metadata: {}".format(
ib_order_ref))
return None
def order(self, asset, amount, style):
contract = Contract()
contract.m_symbol = str(asset.symbol)
contract.m_currency = self.currency
contract.m_exchange = symbol_to_exchange[str(asset.symbol)]
contract.m_secType = symbol_to_sec_type[str(asset.symbol)]
order = Order()
order.m_totalQuantity = int(fabs(amount))
order.m_action = "BUY" if amount > 0 else "SELL"
is_buy = (amount > 0)
order.m_lmtPrice = style.get_limit_price(is_buy) or 0
order.m_auxPrice = style.get_stop_price(is_buy) or 0
if isinstance(style, MarketOrder):
order.m_orderType = "MKT"
elif isinstance(style, LimitOrder):
order.m_orderType = "LMT"
elif isinstance(style, StopOrder):
order.m_orderType = "STP"
elif isinstance(style, StopLimitOrder):
order.m_orderType = "STP LMT"
order.m_tif = "DAY"
order.m_orderRef = self._create_order_ref(order)
ib_order_id = self._tws.next_order_id
zp_order = self._get_or_create_zp_order(ib_order_id, order, contract)
log.info(
"Placing order-{order_id}: "
"{action} {qty} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price} {tif}".format(
order_id=ib_order_id,
action=order.m_action,
qty=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice,
tif=order.m_tif
))
self._tws.placeOrder(ib_order_id, contract, order)
return zp_order
@property
def orders(self):
self._update_orders()
return self._orders
def _ib_to_zp_order_id(self, ib_order_id):
return "IB-{date}-{account_id}-{client_id}-{order_id}".format(
date=str(pd.to_datetime('today').date()),
account_id=self.account_id,
client_id=self._tws.client_id,
order_id=ib_order_id)
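    # Example of the generated id (hypothetical values):
    #     "IB-2017-06-01-DU123456-1-42"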
@staticmethod
def _action_qty_to_amount(action, qty):
return qty if action == 'BUY' else -1 * qty
def _get_or_create_zp_order(self, ib_order_id,
ib_order=None, ib_contract=None):
zp_order_id = self._ib_to_zp_order_id(ib_order_id)
if zp_order_id in self._orders:
return self._orders[zp_order_id]
# Try to reconstruct the order from the given information:
# open order state and execution state
symbol, order_details = None, None
if ib_order and ib_contract:
symbol = ib_contract.m_symbol
order_details = self._parse_order_ref(ib_order.m_orderRef)
if not order_details and ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]
symbol = open_order['contract'].m_symbol
order_details = self._parse_order_ref(
open_order['order'].m_orderRef)
if not order_details and ib_order_id in self._tws.executions:
executions = self._tws.executions[ib_order_id]
last_exec_detail = list(executions.values())[-1]['exec_detail']
last_exec_contract = list(executions.values())[-1]['contract']
symbol = last_exec_contract.m_symbol
order_details = self._parse_order_ref(last_exec_detail.m_orderRef)
asset = self._safe_symbol_lookup(symbol)
if not asset:
log.warning(
"Ignoring symbol {symbol} which has associated "
"order but it is not registered in bundle".format(
symbol=symbol))
return None
if order_details:
amount = self._action_qty_to_amount(order_details['action'],
order_details['qty'])
stop_price = order_details['stop_price']
limit_price = order_details['limit_price']
dt = order_details['dt']
else:
dt = pd.to_datetime('now', utc=True)
amount, stop_price, limit_price = 0, None, None
if ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]['order']
amount = self._action_qty_to_amount(
open_order.m_action, open_order.m_totalQuantity)
stop_price = open_order.m_auxPrice
limit_price = open_order.m_lmtPrice
stop_price = None if stop_price == 0 else stop_price
limit_price = None if limit_price == 0 else limit_price
self._orders[zp_order_id] = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id)
self._orders[zp_order_id].broker_order_id = ib_order_id
return self._orders[zp_order_id]
@staticmethod
def _ib_to_zp_status(ib_status):
ib_status = ib_status.lower()
if ib_status == 'submitted':
return ZP_ORDER_STATUS.OPEN
elif ib_status in ('pendingsubmit',
'pendingcancel',
'presubmitted'):
return ZP_ORDER_STATUS.HELD
elif ib_status == 'cancelled':
return ZP_ORDER_STATUS.CANCELLED
elif ib_status == 'filled':
return ZP_ORDER_STATUS.FILLED
elif ib_status == 'inactive':
return ZP_ORDER_STATUS.REJECTED
else:
return None
def _update_orders(self):
def _update_from_order_status(zp_order, ib_order_id):
if ib_order_id in self._tws.open_orders:
open_order_state = self._tws.open_orders[ib_order_id]['state']
zp_status = self._ib_to_zp_status(open_order_state.m_status)
if zp_status:
zp_order.status = zp_status
else:
log.warning(
"Order-{order_id}: "
"unknown order status: {order_status}.".format(
order_id=ib_order_id,
order_status=open_order_state.m_status))
if ib_order_id in self._tws.order_statuses:
order_status = self._tws.order_statuses[ib_order_id]
zp_order.filled = order_status['filled']
zp_status = self._ib_to_zp_status(order_status['status'])
if zp_status:
zp_order.status = zp_status
else:
log.warning("Order-{order_id}: "
"unknown order status: {order_status}."
.format(order_id=ib_order_id,
order_status=order_status['status']))
def _update_from_execution(zp_order, ib_order_id):
if ib_order_id in self._tws.executions and \
ib_order_id not in self._tws.open_orders:
zp_order.status = ZP_ORDER_STATUS.FILLED
executions = self._tws.executions[ib_order_id]
last_exec_detail = \
list(executions.values())[-1]['exec_detail']
zp_order.filled = last_exec_detail.m_cumQty
all_ib_order_ids = (set([e.broker_order_id
for e in self._orders.values()]) |
set(self._tws.open_orders.keys()) |
set(self._tws.order_statuses.keys()) |
set(self._tws.executions.keys()) |
set(self._tws.commissions.keys()))
for ib_order_id in all_ib_order_ids:
zp_order = self._get_or_create_zp_order(ib_order_id)
if zp_order:
_update_from_execution(zp_order, ib_order_id)
_update_from_order_status(zp_order, ib_order_id)
@property
def transactions(self):
self._update_transactions()
return self._transactions
def _update_transactions(self):
all_orders = list(self.orders.values())
for ib_order_id, executions in iteritems(self._tws.executions):
orders = [order
for order in all_orders
if order.broker_order_id == ib_order_id]
if not orders:
log.warning("No order found for executions: {}".format(
executions))
continue
assert len(orders) == 1
order = orders[0]
for exec_id, execution in iteritems(executions):
if exec_id in self._transactions:
continue
try:
commission = self._tws.commissions[ib_order_id][exec_id]\
.m_commission
except KeyError:
log.warning(
"Commission not found for execution: {}".format(
exec_id))
commission = 0
exec_detail = execution['exec_detail']
is_buy = order.amount > 0
amount = (exec_detail.m_shares if is_buy
else -1 * exec_detail.m_shares)
tx = Transaction(
asset=order.asset,
amount=amount,
dt=pd.to_datetime(exec_detail.m_time, utc=True),
price=exec_detail.m_price,
order_id=order.id,
commission=commission
)
self._transactions[exec_id] = tx
def cancel_order(self, zp_order_id):
ib_order_id = self.orders[zp_order_id].broker_order_id
self._tws.cancelOrder(ib_order_id)
def get_spot_value(self, assets, field, dt, data_frequency):
symbol = str(assets.symbol)
self.subscribe_to_market_data(assets)
        bars = self._tws.bars[symbol]
        # Check for an empty frame *before* indexing into it, otherwise
        # bars.index[-1] would raise for a symbol with no ticks yet.
        if bars.empty:
            return pd.NaT if field == 'last_traded' else np.NaN
        last_event_time = bars.index[-1]
        minute_start = (last_event_time - pd.Timedelta('1 min')).time()
        minute_end = last_event_time.time()
        if field == 'price':
            return bars.last_trade_price.iloc[-1]
        elif field == 'last_traded':
            return last_event_time or pd.NaT
        minute_df = bars.between_time(minute_start, minute_end,
                                      include_start=True, include_end=True)
        if minute_df.empty:
            return np.NaN
        elif field == 'open':
            return minute_df.last_trade_price.iloc[0]
        elif field == 'close':
            return minute_df.last_trade_price.iloc[-1]
        elif field == 'high':
            return minute_df.last_trade_price.max()
        elif field == 'low':
            return minute_df.last_trade_price.min()
        elif field == 'volume':
            return minute_df.last_trade_size.sum()
def get_last_traded_dt(self, asset):
self.subscribe_to_market_data(asset)
return self._tws.bars[asset.symbol].index[-1]
def get_realtime_bars(self, assets, frequency):
if frequency == '1m':
resample_freq = '1 Min'
elif frequency == '1d':
resample_freq = '24 H'
else:
raise ValueError("Invalid frequency specified: %s" % frequency)
df = pd.DataFrame()
for asset in assets:
symbol = str(asset.symbol)
self.subscribe_to_market_data(asset)
trade_prices = self._tws.bars[symbol]['last_trade_price']
trade_sizes = self._tws.bars[symbol]['last_trade_size']
ohlcv = trade_prices.resample(resample_freq).ohlc()
ohlcv['volume'] = trade_sizes.resample(resample_freq).sum()
# Add asset as level 0 column; ohlcv will be used as level 1 cols
ohlcv.columns = pd.MultiIndex.from_product([[asset, ],
ohlcv.columns])
df = pd.concat([df, ohlcv], axis=1)
        return df
# ==== zipline-live-1.1.0.5: end of zipline/gens/brokers/ib_broker.py ====
# ==== zipline-live-1.1.0.5: zipline/pipeline/mixins.py ====
from textwrap import dedent
from numpy import (
array,
full,
recarray,
vstack,
)
from pandas import NaT as pd_NaT
from zipline.errors import (
WindowLengthNotPositive,
UnsupportedDataType,
NoFurtherDataError,
)
from zipline.utils.control_flow import nullctx
from zipline.utils.input_validation import expect_types
from zipline.utils.sharedoc import (
format_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from zipline.utils.pandas_utils import nearest_unequal_elements
from .downsample_helpers import (
select_sampling_indices,
expect_downsample_frequency,
)
from .sentinels import NotSpecified
from .term import Term
class PositiveWindowLengthMixin(object):
"""
Validation mixin enforcing that a Term gets a positive WindowLength
"""
def _validate(self):
super(PositiveWindowLengthMixin, self)._validate()
if not self.windowed:
raise WindowLengthNotPositive(window_length=self.window_length)
class SingleInputMixin(object):
"""
Validation mixin enforcing that a Term gets a length-1 inputs list.
"""
def _validate(self):
super(SingleInputMixin, self)._validate()
num_inputs = len(self.inputs)
if num_inputs != 1:
raise ValueError(
"{typename} expects only one input, "
"but received {num_inputs} instead.".format(
typename=type(self).__name__,
num_inputs=num_inputs
)
)
class StandardOutputs(object):
"""
Validation mixin enforcing that a Term cannot produce non-standard outputs.
"""
def _validate(self):
super(StandardOutputs, self)._validate()
if self.outputs is not NotSpecified:
raise ValueError(
"{typename} does not support custom outputs,"
" but received custom outputs={outputs}.".format(
typename=type(self).__name__,
outputs=self.outputs,
)
)
class RestrictedDTypeMixin(object):
"""
Validation mixin enforcing that a term has a specific dtype.
"""
ALLOWED_DTYPES = NotSpecified
def _validate(self):
super(RestrictedDTypeMixin, self)._validate()
assert self.ALLOWED_DTYPES is not NotSpecified, (
"ALLOWED_DTYPES not supplied on subclass "
"of RestrictedDTypeMixin: %s." % type(self).__name__
)
if self.dtype not in self.ALLOWED_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
)
class CustomTermMixin(object):
"""
Mixin for user-defined rolling-window Terms.
Implements `_compute` in terms of a user-defined `compute` function, which
is mapped over the input windows.
Used by CustomFactor, CustomFilter, CustomClassifier, etc.
"""
ctx = nullctx()
def __new__(cls,
inputs=NotSpecified,
outputs=NotSpecified,
window_length=NotSpecified,
mask=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
ndim=NotSpecified,
**kwargs):
unexpected_keys = set(kwargs) - set(cls.params)
if unexpected_keys:
raise TypeError(
"{termname} received unexpected keyword "
"arguments {unexpected}".format(
termname=cls.__name__,
unexpected={k: kwargs[k] for k in unexpected_keys},
)
)
return super(CustomTermMixin, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
dtype=dtype,
missing_value=missing_value,
ndim=ndim,
**kwargs
)
def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError()
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
        If we have standard outputs (i.e. self.outputs is NotSpecified), the
        default is an ndarray of dtype ``self.dtype`` filled with
        ``self.missing_value``.
        If we have an outputs tuple, the default is a recarray with
        ``self.outputs`` as field names, likewise filled with
        ``self.missing_value``. Each field will have dtype ``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out
def _format_inputs(self, windows, column_mask):
inputs = []
for input_ in windows:
window = next(input_)
if window.shape[1] == 1:
# Do not mask single-column inputs.
inputs.append(window)
else:
inputs.append(window[:, column_mask])
return inputs
def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out
def short_repr(self):
return type(self).__name__ + '(%d)' % self.window_length
class LatestMixin(SingleInputMixin):
"""
Mixin for behavior shared by Custom{Factor,Filter,Classifier}.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
def _validate(self):
super(LatestMixin, self)._validate()
if self.inputs[0].dtype != self.dtype:
raise TypeError(
"{name} expected an input of dtype {expected}, "
"but got {actual} instead.".format(
name=type(self).__name__,
expected=self.dtype,
actual=self.inputs[0].dtype,
)
)
class AliasedMixin(SingleInputMixin):
"""
Mixin for aliased terms.
"""
def __new__(cls, term, name):
return super(AliasedMixin, cls).__new__(
cls,
inputs=(term,),
outputs=term.outputs,
window_length=0,
name=name,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
window_safe=term.window_safe,
)
def _init(self, name, *args, **kwargs):
self.name = name
return super(AliasedMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, name, *args, **kwargs):
return (
super(AliasedMixin, cls)._static_identity(*args, **kwargs),
name,
)
def _compute(self, inputs, dates, assets, mask):
return inputs[0]
def __repr__(self):
return '{type}({inner_type}(...), name={name!r})'.format(
type=type(self).__name__,
inner_type=type(self.inputs[0]).__name__,
name=self.name,
)
def short_repr(self):
return self.name
@classmethod
def make_aliased_type(cls, other_base):
"""
Factory for making Aliased{Filter,Factor,Classifier}.
"""
docstring = dedent(
"""
A {t} that names another {t}.
Parameters
----------
term : {t}
{{name}}
"""
).format(t=other_base.__name__)
doc = format_docstring(
owner_name=other_base.__name__,
docstring=docstring,
formatters={'name': PIPELINE_ALIAS_NAME_DOC},
)
return type(
'Aliased' + other_base.__name__,
(cls, other_base),
{'__doc__': doc,
'__module__': other_base.__module__},
)
class DownsampledMixin(StandardOutputs):
"""
Mixin for behavior shared by Downsampled{Factor,Filter,Classifier}
A downsampled term is a wrapper around the "real" term that performs actual
computation. The downsampler is responsible for calling the real term's
`compute` method at selected intervals and forward-filling the computed
values.
Downsampling is not currently supported for terms with multiple outputs.
"""
# There's no reason to take a window of a downsampled term. The whole
# point is that you're re-using the same result multiple times.
window_safe = False
@expect_types(term=Term)
@expect_downsample_frequency
def __new__(cls, term, frequency):
return super(DownsampledMixin, cls).__new__(
cls,
inputs=term.inputs,
outputs=term.outputs,
window_length=term.window_length,
mask=term.mask,
frequency=frequency,
wrapped_term=term,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
)
def _init(self, frequency, wrapped_term, *args, **kwargs):
self._frequency = frequency
self._wrapped_term = wrapped_term
return super(DownsampledMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, frequency, wrapped_term, *args, **kwargs):
return (
super(DownsampledMixin, cls)._static_identity(*args, **kwargs),
frequency,
wrapped_term,
)
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[:current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
        # If we have choices, the last choice is the first date of the
        # period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos)
def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], \
"Misaligned sampling dates in %s." % type(self).__name__
real_compute = self._wrapped_term._compute
        # Inputs will contain different kinds of values depending on whether
        # or not this is a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
# There are two things we care about doing with the input:
# 1. Preparing an input to be passed to our wrapped term.
# 2. Skipping an input if we're going to use an already-computed row.
# We perform these actions differently based on the expected kind of
# input, and we encapsulate these actions with closures so that we
# don't clutter the code below with lots of branching.
if self.windowed:
# If we're windowed, inputs are stateful AdjustedArrays. We don't
# need to do any preparation before forwarding to real_compute, but
# we need to call `next` on them if we want to skip an iteration.
def prepare_inputs():
return inputs
def skip_this_input():
for w in inputs:
next(w)
else:
# If we're not windowed, inputs are just ndarrays. We need to
# slice out a single row when forwarding to real_compute, but we
# don't need to do anything to skip an input.
def prepare_inputs():
# i is the loop iteration variable below.
return [a[[i]] for a in inputs]
def skip_this_input():
pass
results = []
samples = iter(to_sample)
next_sample = next(samples)
for i, compute_date in enumerate(dates):
if next_sample == compute_date:
results.append(
real_compute(
prepare_inputs(),
dates[i:i + 1],
assets,
mask[i:i + 1],
)
)
try:
next_sample = next(samples)
except StopIteration:
                    # No more samples to take. Set next_sample to NaT, which
                    # compares False with any other datetime.
next_sample = pd_NaT
else:
skip_this_input()
# Copy results from previous sample period.
results.append(results[-1])
# We should have exhausted our sample dates.
try:
next_sample = next(samples)
except StopIteration:
pass
else:
raise AssertionError("Unconsumed sample date: %s" % next_sample)
# Concatenate stored results.
return vstack(results)
@classmethod
def make_downsampled_type(cls, other_base):
"""
Factory for making Downsampled{Filter,Factor,Classifier}.
"""
docstring = dedent(
"""
A {t} that defers to another {t} at lower-than-daily frequency.
Parameters
----------
term : {t}
{{frequency}}
"""
).format(t=other_base.__name__)
doc = format_docstring(
owner_name=other_base.__name__,
docstring=docstring,
formatters={'frequency': PIPELINE_DOWNSAMPLING_FREQUENCY_DOC},
)
return type(
'Downsampled' + other_base.__name__,
(cls, other_base,),
{'__doc__': doc,
'__module__': other_base.__module__},
        )
# ==== zipline-live-1.1.0.5: end of zipline/pipeline/mixins.py ====
# ==== zipline-live-1.1.0.5: zipline/pipeline/visualize.py ====
from __future__ import unicode_literals
from contextlib import contextmanager
import errno
from functools import partial
from io import BytesIO
from subprocess import Popen, PIPE
from networkx import topological_sort
from six import iteritems
from zipline.pipeline.data import BoundColumn
from zipline.pipeline import Filter, Factor, Classifier, Term
from zipline.pipeline.term import AssetExists
class NoIPython(Exception):
pass
def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]])
quote = partial(delimit, '""')
bracket = partial(delimit, '[]')
def begin_graph(f, name, **attrs):
writeln(f, "strict digraph %s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def begin_cluster(f, name, **attrs):
attrs.setdefault("label", quote(name))
writeln(f, "subgraph cluster_%s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def end_graph(f):
writeln(f, '}')
@contextmanager
def graph(f, name, **attrs):
begin_graph(f, name, **attrs)
yield
end_graph(f)
@contextmanager
def cluster(f, name, **attrs):
begin_cluster(f, name, **attrs)
yield
end_graph(f)
def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0)
def filter_nodes(include_asset_exists, nodes):
if include_asset_exists:
return nodes
return filter(lambda n: n is not AssetExists(), nodes)
def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
    include_asset_exists : bool
        Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout)
def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue())
def writeln(f, s):
f.write((s + '\n').encode('utf-8'))
def fmt(obj):
if isinstance(obj, Term):
if hasattr(obj, 'short_repr'):
r = obj.short_repr()
else:
r = type(obj).__name__
else:
r = obj
return '"%s"' % r
def add_term_node(f, term):
declare_node(f, id(term), attrs_for_node(term))
def declare_node(f, name, attributes):
writeln(f, "{0} {1};".format(name, format_attrs(attributes)))
def add_edge(f, source, dest):
writeln(f, "{0} -> {1};".format(source, dest))
def attrs_for_node(term, **overrides):
attrs = {
'shape': 'box',
'colorscheme': 'pastel19',
'style': 'filled',
'label': fmt(term),
}
if isinstance(term, BoundColumn):
attrs['fillcolor'] = '1'
if isinstance(term, Factor):
attrs['fillcolor'] = '2'
elif isinstance(term, Filter):
attrs['fillcolor'] = '3'
elif isinstance(term, Classifier):
attrs['fillcolor'] = '4'
attrs.update(**overrides or {})
return attrs
def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Example
-------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
    return '[' + ', '.join(entries) + ']'
# ==== zipline-live-1.1.0.5: end of zipline/pipeline/visualize.py ====
from itertools import chain
import re
from numbers import Number
import numexpr
from numexpr.necompiler import getExprNames
from numpy import (
full,
inf,
)
from zipline.pipeline.term import Term, ComputableTerm
_VARIABLE_NAME_RE = re.compile("^(x_)([0-9]+)$")
# Map from op symbol to equivalent Python magic method name.
_ops_to_methods = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
'%': '__mod__',
'**': '__pow__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__ge__',
'>': '__gt__',
}
# Map from op symbol to equivalent Python magic method name after flipping
# arguments.
_ops_to_commuted_methods = {
'+': '__radd__',
'-': '__rsub__',
'*': '__rmul__',
'/': '__rdiv__',
'%': '__rmod__',
'**': '__rpow__',
'&': '__rand__',
'|': '__ror__',
'^': '__rxor__',
'<': '__gt__',
'<=': '__ge__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__le__',
'>': '__lt__',
}
_unary_ops_to_methods = {
'-': '__neg__',
'~': '__invert__',
}
UNARY_OPS = {'-'}
MATH_BINOPS = {'+', '-', '*', '/', '**', '%'}
FILTER_BINOPS = {'&', '|'} # NumExpr doesn't support xor.
COMPARISONS = {'<', '<=', '!=', '>=', '>', '=='}
NUMEXPR_MATH_FUNCS = {
'sin',
'cos',
'tan',
'arcsin',
'arccos',
'arctan',
'sinh',
'cosh',
'tanh',
'arcsinh',
'arccosh',
'arctanh',
'log',
'log10',
'log1p',
'exp',
'expm1',
'sqrt',
'abs',
}
def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup)
class BadBinaryOperator(TypeError):
"""
Called when a bad binary operation is encountered.
Parameters
----------
op : str
The attempted operation
left : zipline.computable.Term
The left hand side of the operation.
right : zipline.computable.Term
The right hand side of the operation.
"""
def __init__(self, op, left, right):
super(BadBinaryOperator, self).__init__(
"Can't compute {left} {op} {right}".format(
op=op,
left=type(left).__name__,
right=type(right).__name__,
)
)
def method_name_for_op(op, commute=False):
"""
Get the name of the Python magic method corresponding to `op`.
Parameters
----------
op : str {'+','-','*', '/','**','&','|','^','<','<=','==','!=','>=','>'}
The requested operation.
commute : bool
Whether to return the name of an equivalent method after flipping args.
Returns
-------
method_name : str
The name of the Python magic method corresponding to `op`.
If `commute` is True, returns the name of a method equivalent to `op`
with inputs flipped.
Examples
--------
>>> method_name_for_op('+')
'__add__'
>>> method_name_for_op('+', commute=True)
'__radd__'
>>> method_name_for_op('>')
'__gt__'
>>> method_name_for_op('>', commute=True)
'__lt__'
"""
if commute:
return _ops_to_commuted_methods[op]
return _ops_to_methods[op]
def unary_op_name(op):
return _unary_ops_to_methods[op]
def is_comparison(op):
return op in COMPARISONS
class NumericalExpression(ComputableTerm):
"""
Term binding to a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
dtype : np.dtype
The dtype for the expression.
"""
window_length = 0
def __new__(cls, expr, binds, dtype):
return super(NumericalExpression, cls).__new__(
cls,
inputs=binds,
expr=expr,
dtype=dtype,
window_safe=all(t.window_safe for t in binds),
)
def _init(self, expr, *args, **kwargs):
self._expr = expr
return super(NumericalExpression, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, expr, *args, **kwargs):
return (
super(NumericalExpression, cls)._static_identity(*args, **kwargs),
expr,
)
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s" % (
expected_indices, expr_indices,
)
)
super(NumericalExpression, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out
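    # Illustrative sketch (not part of the original source): for an expression
    # of "x_0 + x_1", the call above is equivalent to
    # numexpr.evaluate("x_0 + x_1",
    #                  local_dict={'x_0': arrays[0], 'x_1': arrays[1]},
    #                  global_dict={'inf': inf}, out=out).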
def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_")
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs
def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
            raise BadBinaryOperator(op, self, other)
return self_expr, other_expr, new_inputs
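    # Illustrative sketch (not part of the original source): if ``self`` is
    # bound to "x_0 + x_1" and ``other`` is the plain number 2.0, this returns
    # ("x_0 + x_1", "2.0", self.inputs), which the caller can combine into a
    # new expression such as "(x_0 + x_1) * (2.0)".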
@property
def bindings(self):
return {
"x_%d" % i: input_
for i, input_ in enumerate(self.inputs)
}
def __repr__(self):
return "{typename}(expr='{expr}', bindings={bindings})".format(
typename=type(self).__name__,
expr=self._expr,
bindings=self.bindings,
)
def short_repr(self):
return "Expression: {expr}".format(
expr=self._expr,
        )
# --- end of zipline/pipeline/expression.py; zipline/pipeline/pipeline.py follows ---
from zipline.errors import UnsupportedPipelineOutput
from zipline.utils.input_validation import (
expect_element,
expect_types,
optional,
)
from .graph import ExecutionPlan, TermGraph
from .filters import Filter
from .term import AssetExists, ComputableTerm, Term
class Pipeline(object):
"""
A Pipeline object represents a collection of named expressions to be
compiled and executed by a PipelineEngine.
A Pipeline has two important attributes: 'columns', a dictionary of named
`Term` instances, and 'screen', a Filter representing criteria for
including an asset in the results of a Pipeline.
To compute a pipeline in the context of a TradingAlgorithm, users must call
``attach_pipeline`` in their ``initialize`` function to register that the
pipeline should be computed each trading day. The outputs of a pipeline on
a given day can be accessed by calling ``pipeline_output`` in
``handle_data`` or ``before_trading_start``.
Parameters
----------
columns : dict, optional
Initial columns.
screen : zipline.pipeline.term.Filter, optional
Initial screen.
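    Examples
    --------
    A minimal, illustrative sketch (``SimpleMovingAverage`` comes from
    ``zipline.pipeline.factors``; the column name is arbitrary):
    >>> from zipline.pipeline.data import USEquityPricing  # doctest: +SKIP
    >>> from zipline.pipeline.factors import SimpleMovingAverage  # doctest: +SKIP
    >>> sma = SimpleMovingAverage(inputs=[USEquityPricing.close],
    ...                           window_length=10)  # doctest: +SKIP
    >>> pipe = Pipeline(columns={'sma_10': sma}, screen=sma > 1.0)  # doctest: +SKIP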
"""
__slots__ = ('_columns', '_screen', '__weakref__')
@expect_types(
columns=optional(dict),
screen=optional(Filter),
)
def __init__(self, columns=None, screen=None):
if columns is None:
columns = {}
validate_column = self.validate_column
for column_name, term in columns.items():
validate_column(column_name, term)
if not isinstance(term, ComputableTerm):
raise TypeError(
"Column {column_name!r} contains an invalid pipeline term "
"({term}). Did you mean to append '.latest'?".format(
column_name=column_name, term=term,
)
)
self._columns = columns
self._screen = screen
@property
def columns(self):
"""
The columns registered with this pipeline.
"""
return self._columns
@property
def screen(self):
"""
The screen applied to the rows of this pipeline.
"""
return self._screen
@expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
        term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
@expect_types(name=str)
def remove(self, name):
"""
Remove a column.
Parameters
----------
name : str
The name of the column to remove.
Raises
------
KeyError
If `name` is not in self.columns.
Returns
-------
removed : zipline.pipeline.term.Term
The removed term.
"""
return self.columns.pop(name)
@expect_types(screen=Filter, overwrite=(bool, int))
def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameters
----------
        screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
def to_execution_plan(self,
screen_name,
default_screen,
all_dates,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
screen_name : str
Name to supply for self.screen.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
"""
return ExecutionPlan(
self._prepare_graph_terms(screen_name, default_screen),
all_dates,
start_date,
end_date,
)
def to_simple_graph(self, screen_name, default_screen):
"""
Compile into a simple TermGraph with no extra row metadata.
Parameters
----------
screen_name : str
Name to supply for self.screen.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
"""
return TermGraph(
self._prepare_graph_terms(screen_name, default_screen)
)
def _prepare_graph_terms(self, screen_name, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[screen_name] = screen
return columns
@expect_element(format=('svg', 'png', 'jpeg'))
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph('', AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
@staticmethod
def validate_column(column_name, term):
if term.ndim == 1:
            raise UnsupportedPipelineOutput(column_name=column_name, term=term)
# --- end of zipline/pipeline/pipeline.py; zipline/pipeline/engine.py follows ---
from abc import (
ABCMeta,
abstractmethod,
)
from uuid import uuid4
from six import (
iteritems,
with_metaclass,
)
from numpy import array
from pandas import DataFrame, MultiIndex
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from zipline.utils.pandas_utils import explode
from .term import AssetExists, InputDates, LoadableTerm
class PipelineEngine(with_metaclass(ABCMeta)):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute values for `pipeline` between `start_date` and `end_date`.
Returns a DataFrame with a MultiIndex of (date, asset) pairs.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
            The columns of `result` correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of `zipline.pipeline.term.Term`.
For each date between `start_date` and `end_date`, `result` will
contain a row for each asset that passed `pipeline.screen`. A
screen of None indicates that a row should be returned for each
asset that existed each day.
"""
raise NotImplementedError("run_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace
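# An illustrative sketch (not part of the original source) of a custom
# ``populate_initial_workspace`` hook that pre-seeds terms from a hypothetical
# cache before the engine computes anything:
#
#     def populate_from_cache(initial_workspace, root_mask_term,
#                             execution_plan, dates, assets):
#         workspace = dict(initial_workspace)
#         for term in execution_plan.loadable_terms:
#             cached = my_cache.get(term)  # hypothetical cache lookup
#             if cached is not None:
#                 workspace[term] = cached
#         return workspace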
class SimplePipelineEngine(object):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
for more info.
See Also
--------
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
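    Examples
    --------
    An illustrative sketch; ``pricing_loader``, ``trading_days``, ``finder``,
    ``pipe``, ``start_date``, and ``end_date`` are hypothetical stand-ins for
    a real PipelineLoader, trading calendar, AssetFinder, Pipeline, and dates:
    >>> engine = SimplePipelineEngine(  # doctest: +SKIP
    ...     get_loader=lambda column: pricing_loader,
    ...     calendar=trading_days,
    ...     asset_finder=finder,
    ... )
    >>> result = engine.run_pipeline(pipe, start_date, end_date)  # doctest: +SKIP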
"""
__slots__ = (
'_get_loader',
'_calendar',
'_finder',
'_root_mask_term',
'_root_mask_dates_term',
'_populate_initial_workspace',
'__weakref__',
)
def __init__(self,
get_loader,
calendar,
asset_finder,
populate_initial_workspace=None):
self._get_loader = get_loader
self._calendar = calendar
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `pipeline`. Topologically
sort the graph to determine an order in which we can compute the
terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for
each known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
           the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing
pipeline.screen. The sum, N, of all these values is the total
number of rows in our output frame, so we pre-allocate an output
array of length N for each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by ``Pipeline.to_graph``.
Step 1 is performed in ``SimplePipelineEngine._compute_root_mask``.
Step 2 is performed in ``SimplePipelineEngine.compute_chunk``.
        Steps 3, 4, and 5 are performed in ``SimplePipelineEngine._to_narrow``.
See Also
--------
PipelineEngine.run_pipeline
"""
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
screen_name = uuid4().hex
graph = pipeline.to_execution_plan(
screen_name,
self._root_mask_term,
self._calendar,
start_date,
end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
return self._to_narrow(
graph.outputs,
results,
results.pop(screen_name),
dates[extra_rows:],
assets,
)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=calendar[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx],
include_start_date=False
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
        if not lifetimes.columns.is_unique:
            columns = lifetimes.columns
            duplicated = columns[columns.duplicated()].unique()
            raise AssertionError("Duplicated sids: %s" % duplicated)
# Filter out columns that didn't exist between the requested start and
# end dates.
existed = lifetimes.iloc[extra_rows:].any()
ret = lifetimes.loc[:, existed]
shape = ret.shape
assert shape[0] * shape[1] != 0, 'root mask cannot be empty'
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in term.inputs:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def get_loader(self, term):
return self._get_loader(term)
def compute_chunk(self, graph, dates, assets, initial_workspace):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.pipeline.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
assets : pd.Int64Index
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, assets, initial_workspace)
get_loader = self.get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
# If loadable terms share the same loader and extra_rows, load them all
# together.
loader_group_key = juxt(get_loader, getitem(graph.extra_rows))
loader_groups = groupby(loader_group_key, graph.loadable_terms)
refcounts = graph.initial_refcounts(workspace)
for term in graph.execution_order(refcounts):
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute loadable terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
loader = get_loader(term)
loaded = loader.load_adjusted_array(
to_load, mask_dates, assets, mask,
)
workspace.update(loaded)
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
assets,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms whose
# refcounts hit 0.
for garbage_term in graph.decref_dependencies(term, refcounts):
del workspace[garbage_term]
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`
        assets : ndarray[int64, ndim=1]
Column index for arrays `data` and `mask`
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
resolved_assets = array(self._finder.retrieve_all(assets))
dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
return DataFrame(
data=final_columns,
index=MultiIndex.from_arrays([dates_kept, assets_kept]),
).tz_localize('UTC', level=0)
def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(assets)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
            )
# --- end of zipline/pipeline/engine.py; zipline/pipeline/graph.py follows ---
from networkx import (
DiGraph,
topological_sort,
)
from six import iteritems, itervalues
from zipline.utils.memoize import lazyval
from zipline.pipeline.visualize import display_graph
from .term import LoadableTerm
class CyclicDependency(Exception):
pass
class TermGraph(object):
"""
An abstract representation of Pipeline Term dependencies.
This class does not keep any additional metadata about any term relations
other than dependency ordering. As such it is only useful in contexts
where you care exclusively about order properties (for example, when
drawing visualizations of execution order).
Parameters
----------
terms : dict
A dict mapping names to final output terms.
Attributes
----------
outputs
Methods
-------
ordered()
Return a topologically-sorted iterator over the terms in self.
See Also
--------
ExecutionPlan
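    Examples
    --------
    An illustrative sketch; ``sma`` is assumed to be any pipeline term:
    >>> graph = TermGraph({'sma': sma})  # doctest: +SKIP
    >>> list(graph.ordered())  # doctest: +SKIP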
"""
def __init__(self, terms):
self.graph = DiGraph()
self._frozen = False
parents = set()
for term in itervalues(terms):
self._add_to_graph(term, parents)
# No parents should be left between top-level terms.
assert not parents
self._outputs = terms
# Mark that no more terms should be added to the graph.
self._frozen = True
def _add_to_graph(self, term, parents):
"""
Add a term and all its children to ``graph``.
        ``parents`` is the set of all the parents of ``term`` that we've added
so far. It is only used to detect dependency cycles.
"""
if self._frozen:
raise ValueError(
"Can't mutate %s after construction." % type(self).__name__
)
# If we've seen this node already as a parent of the current traversal,
        # it means we have an unsatisfiable dependency. This should only be
# possible if the term's inputs are mutated after construction.
if term in parents:
raise CyclicDependency(term)
parents.add(term)
self.graph.add_node(term)
for dependency in term.dependencies:
self._add_to_graph(dependency, parents)
self.graph.add_edge(dependency, term)
parents.remove(term)
@property
def outputs(self):
"""
Dict mapping names to designated output terms.
"""
return self._outputs
def execution_order(self, refcounts):
"""
Return a topologically-sorted iterator over the terms in ``self`` which
need to be computed.
"""
return iter(topological_sort(
self.graph.subgraph(
{term for term, refcount in refcounts.items() if refcount > 0},
),
))
def ordered(self):
return iter(topological_sort(self.graph))
@lazyval
def loadable_terms(self):
return tuple(
term for term in self.graph if isinstance(term, LoadableTerm)
)
@lazyval
def jpeg(self):
return display_graph(self, 'jpeg')
@lazyval
def png(self):
return display_graph(self, 'png')
@lazyval
def svg(self):
return display_graph(self, 'svg')
def _repr_png_(self):
return self.png.data
def initial_refcounts(self, initial_terms):
"""
Calculate initial refcounts for execution of this graph.
Parameters
----------
initial_terms : iterable[Term]
An iterable of terms that were pre-computed before graph execution.
Each node starts with a refcount equal to its outdegree, and output
nodes get one extra reference to ensure that they're still in the graph
at the end of execution.
"""
refcounts = self.graph.out_degree()
for t in self.outputs.values():
refcounts[t] += 1
for t in initial_terms:
            self._decref_dependencies_recursive(t, refcounts, set())
return refcounts
    def _decref_dependencies_recursive(self, term, refcounts, garbage):
"""
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
"""
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
                self._decref_dependencies_recursive(parent, refcounts, garbage)
def decref_dependencies(self, term, refcounts):
"""
Decrement in-edges for ``term`` after computation.
Parameters
----------
term : zipline.pipeline.Term
The term whose parents should be decref'ed.
refcounts : dict[Term -> int]
Dictionary of refcounts.
Return
------
garbage : set[Term]
Terms whose refcounts hit zero after decrefing.
"""
garbage = set()
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
return garbage
class ExecutionPlan(TermGraph):
"""
    Graph representation of Pipeline Term dependencies that includes metadata
about extra rows required to perform computations.
Each node in the graph has an `extra_rows` attribute, indicating how many,
if any, extra rows we should compute for the node. Extra rows are most
often needed when a term is an input to a rolling window computation. For
example, if we compute a 30 day moving average of price from day X to day
Y, we need to load price data for the range from day (X - 29) to day Y.
Parameters
----------
terms : dict
A dict mapping names to final output terms.
all_dates : pd.DatetimeIndex
An index of all known trading days for which ``terms`` will be
computed.
start_date : pd.Timestamp
The first date for which output is requested for ``terms``.
end_date : pd.Timestamp
The last date for which output is requested for ``terms``.
Attributes
----------
outputs
offset
extra_rows
Methods
-------
ordered()
Return a topologically-sorted iterator over the terms in self.
"""
def __init__(self,
terms,
all_dates,
start_date,
end_date,
min_extra_rows=0):
super(ExecutionPlan, self).__init__(terms)
for term in terms.values():
self.set_extra_rows(
term,
all_dates,
start_date,
end_date,
min_extra_rows=min_extra_rows,
)
def set_extra_rows(self,
term,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Compute ``extra_rows`` for transitive dependencies of ``root_terms``
"""
# A term can require that additional extra rows beyond the minimum be
# computed. This is most often used with downsampled terms, which need
# to ensure that the first date is a computation date.
extra_rows_for_term = term.compute_extra_rows(
all_dates,
start_date,
end_date,
min_extra_rows,
)
if extra_rows_for_term < min_extra_rows:
raise ValueError(
"term %s requested fewer rows than the minimum of %d" % (
term, min_extra_rows,
)
)
self._ensure_extra_rows(term, extra_rows_for_term)
for dependency, additional_extra_rows in term.dependencies.items():
self.set_extra_rows(
dependency,
all_dates,
start_date,
end_date,
min_extra_rows=extra_rows_for_term + additional_extra_rows,
)
@lazyval
def offset(self):
"""
For all pairs (term, input) such that `input` is an input to `term`,
compute a mapping::
(term, input) -> offset(term, input)
where ``offset(term, input)`` is the number of rows that ``term``
should truncate off the raw array produced for ``input`` before using
it. We compute this value as follows::
offset(term, input) = (extra_rows_computed(input)
- extra_rows_computed(term)
- requested_extra_rows(term, input))
Examples
--------
Case 1
~~~~~~
Factor A needs 5 extra rows of USEquityPricing.close, and Factor B
needs 3 extra rows of the same. Factor A also requires 5 extra rows of
USEquityPricing.high, which no other Factor uses. We don't require any
extra rows of Factor A or Factor B
We load 5 extra rows of both `price` and `high` to ensure we can
service Factor A, and the following offsets get computed::
offset[Factor A, USEquityPricing.close] == (5 - 0) - 5 == 0
offset[Factor A, USEquityPricing.high] == (5 - 0) - 5 == 0
offset[Factor B, USEquityPricing.close] == (5 - 0) - 3 == 2
offset[Factor B, USEquityPricing.high] raises KeyError.
Case 2
~~~~~~
Factor A needs 5 extra rows of USEquityPricing.close, and Factor B
needs 3 extra rows of Factor A, and Factor B needs 2 extra rows of
USEquityPricing.close.
We load 8 extra rows of USEquityPricing.close (enough to load 5 extra
rows of Factor A), and the following offsets get computed::
offset[Factor A, USEquityPricing.close] == (8 - 3) - 5 == 0
offset[Factor B, USEquityPricing.close] == (8 - 0) - 2 == 6
offset[Factor B, Factor A] == (3 - 0) - 3 == 0
Notes
-----
`offset(term, input) >= 0` for all valid pairs, since `input` must be
an input to `term` if the pair appears in the mapping.
This value is useful because we load enough rows of each input to serve
all possible dependencies. However, for any given dependency, we only
want to compute using the actual number of required extra rows for that
dependency. We can do so by truncating off the first `offset` rows of
the loaded data for `input`.
See Also
--------
zipline.pipeline.graph.TermGraph.offset
zipline.pipeline.engine.SimplePipelineEngine._inputs_for_term
zipline.pipeline.engine.SimplePipelineEngine._mask_and_dates_for_term
"""
extra = self.extra_rows
return {
# Another way of thinking about this is:
# How much bigger is the array for ``dep`` compared to ``term``?
            # How much of that difference did I ask for?
(term, dep): (extra[dep] - extra[term]) - requested_extra_rows
for term in self.graph
for dep, requested_extra_rows in term.dependencies.items()
}
@lazyval
def extra_rows(self):
"""
A dict mapping `term` -> `# of extra rows to load/compute of `term`.
Notes
        -----
This value depends on the other terms in the graph that require `term`
**as an input**. This is not to be confused with `term.dependencies`,
which describes how many additional rows of `term`'s inputs we need to
load, and which is determined entirely by `Term` itself.
Example
-------
Our graph contains the following terms:
A = SimpleMovingAverage([USEquityPricing.high], window_length=5)
B = SimpleMovingAverage([USEquityPricing.high], window_length=10)
C = SimpleMovingAverage([USEquityPricing.low], window_length=8)
To compute N rows of A, we need N + 4 extra rows of `high`.
To compute N rows of B, we need N + 9 extra rows of `high`.
To compute N rows of C, we need N + 7 extra rows of `low`.
We store the following extra_row requirements:
self.extra_rows[high] = 9 # Ensures that we can service B.
self.extra_rows[low] = 7
See Also
--------
zipline.pipeline.graph.TermGraph.offset
zipline.pipeline.term.Term.dependencies
"""
return {
term: attrs['extra_rows']
for term, attrs in iteritems(self.graph.node)
}
def _ensure_extra_rows(self, term, N):
"""
Ensure that we're going to compute at least N extra rows of `term`.
"""
attrs = self.graph.node[term]
attrs['extra_rows'] = max(N, attrs.get('extra_rows', 0))
def mask_and_dates_for_term(self,
term,
root_mask_term,
workspace,
all_dates):
"""
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
"""
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]
# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
self.extra_rows[root_mask_term] - self.extra_rows[term]
)
        return workspace[mask][mask_offset:], all_dates[dates_offset:]
# --- end of zipline/pipeline/graph.py; zipline/pipeline/term.py follows ---
from abc import ABCMeta, abstractproperty
from bisect import insort
from collections import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
dtype as dtype_class,
ndarray,
searchsorted,
)
from six import with_metaclass
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonExistentAssetInTimeFrame,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(with_metaclass(ABCMeta, object)):
"""
Base class for terms in a Pipeline API compute graph.
"""
# These are NotSpecified because a subclass is required to provide them.
dtype = NotSpecified
domain = NotSpecified
missing_value = NotSpecified
# Subclasses aren't required to provide `params`. The default behavior is
# no params.
params = ()
# Determines if a term is safe to be used as a windowed input.
window_safe = False
# The dimensions of the term's output (1D or 2D).
ndim = 2
_term_cache = WeakValueDictionary()
def __new__(cls,
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=NotSpecified,
ndim=NotSpecified,
# params is explicitly not allowed to be passed to an instance.
*args,
**kwargs):
"""
Memoized constructor for Terms.
Caching previously-constructed Terms is useful because it allows us to
only compute equivalent sub-expressions once when traversing a Pipeline
dependency graph.
Caching previously-constructed Terms is **sane** because terms and
their inputs are both conceptually immutable.
"""
        # Subclasses can override these class-level attributes to provide
# default values.
if domain is NotSpecified:
domain = cls.domain
if dtype is NotSpecified:
dtype = cls.dtype
if missing_value is NotSpecified:
missing_value = cls.missing_value
if ndim is NotSpecified:
ndim = cls.ndim
if window_safe is NotSpecified:
window_safe = cls.window_safe
dtype, missing_value = validate_dtype(
cls.__name__,
dtype,
missing_value,
)
params = cls._pop_params(kwargs)
identity = cls._static_identity(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
try:
return cls._term_cache[identity]
except KeyError:
new_instance = cls._term_cache[identity] = \
super(Term, cls).__new__(cls)._init(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
return new_instance
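    # Illustrative consequence of the memoization above (a sketch, not part of
    # the original source): constructing a term twice with identical parameters
    # returns the same cached object, e.g.
    #     >>> from zipline.pipeline.factors import Returns
    #     >>> Returns(window_length=2) is Returns(window_length=2)
    #     True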
@classmethod
def _pop_params(cls, kwargs):
"""
Pop entries from the `kwargs` passed to cls.__new__ based on the values
in `cls.params`.
Parameters
----------
kwargs : dict
The kwargs passed to cls.__new__.
Returns
-------
params : list[(str, object)]
A list of string, value pairs containing the entries in cls.params.
Raises
------
TypeError
Raised if any parameter values are not passed or not hashable.
"""
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
# Check here that the value is hashable so that we fail here
# instead of trying to hash the param values tuple later.
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__,
name=key
)
)
except TypeError:
# Value wasn't hashable.
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values)
def __init__(self, *args, **kwargs):
"""
Noop constructor to play nicely with our caching __new__. Subclasses
should implement _init instead of this method.
When a class' __new__ returns an instance of that class, Python will
automatically call __init__ on the object, even if a new object wasn't
actually constructed. Because we memoize instances, we often return an
object that was already initialized from __new__, in which case we
don't want to call __init__ again.
Subclasses that need to initialize new instances should override _init,
which is guaranteed to be called only once.
"""
pass
@expect_types(key=Asset)
def __getitem__(self, key):
if isinstance(self, LoadableTerm):
raise NonSliceableTerm(term=self)
return Slice(self, key)
@classmethod
def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
"""
Parameters
----------
domain : object
Unused placeholder.
dtype : np.dtype
Dtype of this term's output.
params : tuple[(str, hashable)]
Tuple of key/value pairs of additional parameters.
"""
self.domain = domain
self.dtype = dtype
self.missing_value = missing_value
self.window_safe = window_safe
self.ndim = ndim
for name, value in params:
if hasattr(self, name):
raise TypeError(
"Parameter {name!r} conflicts with already-present"
" attribute with value {value!r}.".format(
name=name,
value=getattr(self, name),
)
)
# TODO: Consider setting these values as attributes and replacing
# the boilerplate in NumericalExpression, Rank, and
# PercentileFilter.
self.params = dict(params)
# Make sure that subclasses call super() in their _validate() methods
# by setting this flag. The base class implementation of _validate
# should set this flag to True.
self._subclass_called_super_validate = False
self._validate()
assert self._subclass_called_super_validate, (
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
del self._subclass_called_super_validate
return self
def _validate(self):
"""
Assert that this term is well-formed. This should be called exactly
once, at the end of Term._init().
"""
# mark that we got here to enforce that subclasses overriding _validate
# call super().
self._subclass_called_super_validate = True
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Calculate the number of extra rows needed to compute ``self``.
Must return at least ``min_extra_rows``, and the default implementation
is to just return ``min_extra_rows``. This is overridden by
downsampled terms to ensure that the first date computed is a
recomputation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. Must be at least
``min_extra_rows``.
"""
return min_extra_rows
@abstractproperty
def inputs(self):
"""
A tuple of other Terms needed as direct inputs for this Term.
"""
raise NotImplementedError('inputs')
@abstractproperty
def windowed(self):
"""
Boolean indicating whether this term is a trailing-window computation.
"""
raise NotImplementedError('windowed')
@abstractproperty
def mask(self):
"""
A Filter representing asset/date pairs to include while
computing this Term. (True means include; False means exclude.)
"""
raise NotImplementedError('mask')
@abstractproperty
def dependencies(self):
"""
A dictionary mapping terms that must be computed before `self` to the
number of extra rows needed for those terms.
"""
raise NotImplementedError('dependencies')
class AssetExists(Term):
"""
Pseudo-filter describing whether or not an asset existed on a given day.
This is the default mask for all terms that haven't been passed a mask
explicitly.
This is morally a Filter, in the sense that it produces a boolean value for
every asset on every date. We don't subclass Filter, however, because
`AssetExists` is computed directly by the PipelineEngine.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
See Also
--------
zipline.assets.AssetFinder.lifetimes
"""
dtype = bool_dtype
dataset = None
inputs = ()
dependencies = {}
mask = None
windowed = False
def __repr__(self):
return "AssetExists()"
def _compute(self, today, assets, out):
raise NotImplementedError(
"AssetExists cannot be computed directly."
" Check your PipelineEngine configuration."
)
class InputDates(Term):
"""
1-Dimensional term providing date labels for other term inputs.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
"""
ndim = 1
dataset = None
dtype = datetime64ns_dtype
inputs = ()
dependencies = {}
mask = None
windowed = False
window_safe = True
def __repr__(self):
return "InputDates()"
def _compute(self, today, assets, out):
raise NotImplementedError(
"InputDates cannot be computed directly."
" Check your PipelineEngine configuration."
)
class LoadableTerm(Term):
"""
A Term that should be loaded from an external resource by a PipelineLoader.
This is the base class for :class:`zipline.pipeline.data.BoundColumn`.
"""
windowed = False
inputs = ()
@lazyval
def dependencies(self):
return {self.mask: 0}
class ComputableTerm(Term):
"""
A Term that should be computed from a tuple of inputs.
This is the base class for :class:`zipline.pipeline.Factor`,
:class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.
"""
inputs = NotSpecified
outputs = NotSpecified
window_length = NotSpecified
mask = NotSpecified
def __new__(cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
*args, **kwargs):
if inputs is NotSpecified:
inputs = cls.inputs
# Having inputs = NotSpecified is an error, but we handle it later
# in self._validate rather than here.
if inputs is not NotSpecified:
# Allow users to specify lists as class-level defaults, but
# normalize to a tuple so that inputs is hashable.
inputs = tuple(inputs)
if outputs is NotSpecified:
outputs = cls.outputs
if outputs is not NotSpecified:
outputs = tuple(outputs)
if mask is NotSpecified:
mask = cls.mask
if mask is NotSpecified:
mask = AssetExists()
if window_length is NotSpecified:
window_length = cls.window_length
return super(ComputableTerm, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
mask=mask,
window_length=window_length,
*args, **kwargs
)
def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
self.inputs = inputs
self.outputs = outputs
self.window_length = window_length
self.mask = mask
return super(ComputableTerm, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls,
inputs,
outputs,
window_length,
mask,
*args,
**kwargs):
return (
super(ComputableTerm, cls)._static_identity(*args, **kwargs),
inputs,
outputs,
window_length,
mask,
)
def _validate(self):
super(ComputableTerm, self)._validate()
if self.inputs is NotSpecified:
raise TermInputsNotSpecified(termname=type(self).__name__)
if self.outputs is NotSpecified:
pass
elif not self.outputs:
raise TermOutputsEmpty(termname=type(self).__name__)
else:
# Raise an exception if there are any naming conflicts between the
# term's output names and certain attributes.
disallowed_names = [
attr for attr in dir(ComputableTerm)
if not attr.startswith('_')
]
# The name 'compute' is an added special case that is disallowed.
# Use insort to add it to the list in alphabetical order.
insort(disallowed_names, 'compute')
for output in self.outputs:
if output.startswith('_') or output in disallowed_names:
raise InvalidOutputName(
output_name=output,
termname=type(self).__name__,
disallowed_names=disallowed_names,
)
if self.window_length is NotSpecified:
raise WindowLengthNotSpecified(termname=type(self).__name__)
if self.mask is NotSpecified:
# This isn't user error, this is a bug in our code.
raise AssertionError("{term} has no mask".format(term=self))
if self.window_length:
for child in self.inputs:
if not child.window_safe:
raise NonWindowSafeInput(parent=self, child=child)
def _compute(self, inputs, dates, assets, mask):
"""
Subclasses should implement this to perform actual computation.
This is named ``_compute`` rather than just ``compute`` because
``compute`` is reserved for user-supplied functions in
CustomFilter/CustomFactor/CustomClassifier.
"""
raise NotImplementedError()
@lazyval
def windowed(self):
"""
Whether or not this term represents a trailing window computation.
If term.windowed is truthy, its compute_from_windows method will be
called with instances of AdjustedArray as inputs.
If term.windowed is falsey, its compute_from_baseline will be called
with instances of np.ndarray as inputs.
"""
return (
self.window_length is not NotSpecified
and self.window_length > 0
)
@lazyval
def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out
@expect_types(data=ndarray)
def postprocess(self, data):
"""
        Called with a result of ``self``, unravelled (i.e. 1-dimensional)
after any user-defined screens have been applied.
This is mostly useful for transforming the dtype of an output, e.g., to
convert a LabelArray into a pandas Categorical.
The default implementation is to just return data unchanged.
"""
return data
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values
def _downsampled_type(self, *args, **kwargs):
"""
The expression type to return from self.downsample().
"""
raise NotImplementedError(
"downsampling is not yet implemented "
"for instances of %s." % type(self).__name__
)
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def downsample(self, frequency):
"""
Make a term that computes from ``self`` at lower-than-daily frequency.
Parameters
----------
{frequency}
"""
return self._downsampled_type(term=self, frequency=frequency)
def _aliased_type(self, *args, **kwargs):
"""
The expression type to return from self.alias().
"""
raise NotImplementedError(
"alias is not yet implemented "
"for instances of %s." % type(self).__name__
)
@templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
def alias(self, name):
"""
Make a term from ``self`` that names the expression.
Parameters
----------
{name}
Returns
-------
aliased : Aliased
``self`` with a name.
Notes
-----
This is useful for giving a name to a numerical or boolean expression.
"""
return self._aliased_type(term=self, name=name)
def __repr__(self):
return (
"{type}({inputs}, window_length={window_length})"
).format(
type=type(self).__name__,
inputs=self.inputs,
window_length=self.window_length,
)
class Slice(ComputableTerm):
"""
    Term for extracting a single column of another term's output.
Parameters
----------
term : zipline.pipeline.term.Term
The term from which to extract a column of data.
asset : zipline.assets.Asset
The asset corresponding to the column of `term` to be extracted.
Notes
-----
Users should rarely construct instances of `Slice` directly. Instead, they
should construct instances via indexing, e.g. `MyFactor()[Asset(24)]`.
"""
def __new__(cls, term, asset):
return super(Slice, cls).__new__(
cls,
asset=asset,
inputs=[term],
window_length=0,
mask=term.mask,
dtype=term.dtype,
missing_value=term.missing_value,
window_safe=term.window_safe,
ndim=1,
)
def __repr__(self):
return "{type}({parent_term}, column={asset})".format(
type=type(self).__name__,
parent_term=type(self.inputs[0]).__name__,
asset=self._asset,
)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(Slice, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (super(Slice, cls)._static_identity(*args, **kwargs), asset)
def _compute(self, windows, dates, assets, mask):
asset = self._asset
asset_column = searchsorted(assets.values, asset.sid)
if assets[asset_column] != asset.sid:
raise NonExistentAssetInTimeFrame(
asset=asset, start_date=dates[0], end_date=dates[-1],
)
# Return a 2D array with one column rather than a 1D array of the
# column.
return windows[0][:, [asset_column]]
@property
def _downsampled_type(self):
raise NotImplementedError(
'downsampling of slices is not yet supported'
)
def validate_dtype(termname, dtype, missing_value):
"""
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified.
"""
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
if (dtype == categorical_dtype):
# This check is necessary because we use object dtype for
# categoricals, and numpy will allow us to promote numerical
# values to object even though we don't support them.
_assert_valid_categorical_missing_value(missing_value)
# For any other type, we can check if the missing_value is safe by
# making an array of that value and trying to safely convert it to
# the desired type.
# 'same_kind' allows casting between things like float32 and
# float64, but not str and int.
array([missing_value]).astype(dtype=dtype, casting='same_kind')
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value
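# Illustrative sketch (a hypothetical helper, not part of the original
# module): the casting check used above. A missing_value is acceptable for a
# dtype only if a 1-element array of it can be converted under numpy's
# 'same_kind' casting rule.
def _example_missing_value_casting_check():
    import numpy as np
    # A float missing value is fine for a float64 term.
    np.array([np.nan]).astype(dtype=np.dtype('float64'), casting='same_kind')
    # A string missing value is rejected for a float64 term.
    try:
        np.array(['missing']).astype(dtype=np.dtype('float64'),
                                      casting='same_kind')
    except TypeError as error:
        return error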
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
)
# --- end of zipline/pipeline/term.py ---
from __future__ import division
from numbers import Number
from numpy import (
abs,
arange,
average,
clip,
diff,
dstack,
exp,
fmax,
full,
inf,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded, expect_types
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nansum,
nanmin,
)
from zipline.utils.numpy_utils import (
float64_dtype,
ignore_nanwarnings,
rolling_window,
)
from .factor import CustomFactor
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [USEquityPricing.close]
"""
inputs = [USEquityPricing.close]
window_safe = True
def _validate(self):
super(Returns, self)._validate()
if self.window_length < 2:
raise ValueError(
"'Returns' expected a window length of at least 2, but was "
"given {window_length}. For daily returns, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
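# Illustrative sketch (a hypothetical helper, not part of the original
# module): the percent-change arithmetic that Returns.compute applies to its
# lookback window, written with plain numpy on hypothetical close prices.
def _example_returns_arithmetic():
    import numpy as np
    # A 3-day window of closes for two hypothetical assets (rows are days).
    closes = np.array([[10.0, 50.0],
                       [11.0, 45.0],
                       [12.0, 55.0]])
    # Percent change from the first row of the window to the last row.
    return (closes[-1] - closes[0]) / closes[0]   # array([0.2, 0.1])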
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [USEquityPricing.close]
**Default Window Length**: 15
"""
window_length = 15
inputs = (USEquityPricing.close,)
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
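# Illustrative sketch (a hypothetical helper, not part of the original
# module): the RSI arithmetic used above, on a single hypothetical series of
# closes, using plain numpy instead of numexpr.
def _example_rsi_arithmetic():
    import numpy as np
    closes = np.array([10.0, 11.0, 10.5, 11.5, 12.0])
    diffs = np.diff(closes)                            # [ 1. , -0.5,  1. ,  0.5]
    ups = np.mean(np.clip(diffs, 0, np.inf))           # 0.625
    downs = abs(np.mean(np.clip(diffs, -np.inf, 0)))   # 0.125
    return 100 - (100 / (1 + (ups / downs)))           # ~83.33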
class SimpleMovingAverage(CustomFactor, SingleInputMixin):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = (USEquityPricing.close, USEquityPricing.volume)
class MaxDrawdown(CustomFactor, SingleInputMixin):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = [USEquityPricing.close, USEquityPricing.volume]
def compute(self, today, assets, out, close, volume):
out[:] = nansum(close * volume, axis=0) / len(close)
def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
[decay_rate ** (length + 1), ..., decay_rate ** 3, decay_rate ** 2]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1)
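# Illustrative sketch (a hypothetical helper, not part of the original
# module): the weight vector produced by exponential_weights for a small
# hypothetical input, and why the overall scale of the weights is irrelevant
# once they are passed to numpy.average (which normalizes by the weight sum).
def _example_exponential_weights():
    import numpy as np
    length, decay_rate = 4, 0.5
    weights = np.full(length, decay_rate) ** np.arange(length + 1, 1, -1)
    # -> [0.03125, 0.0625, 0.125, 0.25]; newer rows get the larger weights.
    data = np.array([1.0, 2.0, 3.0, 4.0])
    # Scaling all weights by a constant leaves the weighted average unchanged.
    assert np.isclose(
        np.average(data, weights=weights),
        np.average(data, weights=weights * 16.0),
    )
    return np.average(data, weights=weights)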
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ('decay_rate',)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[USEquityPricing.close],
window_length=30,
span=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[USEquityPricing.close],
window_length=30,
halflife=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Example
-------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / (1 + 15.0))),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[USEquityPricing.close],
window_length=30,
center_of_mass=15,
)
Note
----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
)
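# Illustrative sketch (a hypothetical helper, not part of the original
# module): the three parameterizations accepted by the convenience
# constructors above, reduced to the decay_rate they forward. The values are
# hypothetical.
def _example_decay_rate_conversions():
    from math import exp, log
    span, halflife, center_of_mass = 15.0, 15.0, 15.0
    from_span = 1.0 - (2.0 / (1.0 + span))              # 0.875
    from_halflife = exp(log(0.5) / halflife)            # ~0.9548
    from_com = 1.0 - (1.0 / (1.0 + center_of_mass))     # 0.9375
    return from_span, from_halflife, from_com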
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:func:`pandas.ewma`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=exponential_weights(len(data), decay_rate),
)
class LinearWeightedMovingAverage(CustomFactor, SingleInputMixin):
"""
Weighted Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
ndays = data.shape[0]
# Initialize weights array
weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1)
# Compute normalizer
normalizer = (ndays * (ndays + 1)) / 2
# Weight the data
weighted_data = data * weights
# Compute weighted averages
out[:] = nansum(weighted_data, axis=0) / normalizer
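# Illustrative sketch (a hypothetical helper, not part of the original
# module): the linear weighting used above on a hypothetical 4-day window.
# The divisor n * (n + 1) / 2 is the sum of the weights 1, 2, ..., n, so the
# result is a weighted average that favors the most recent observations.
def _example_linear_weighted_average():
    import numpy as np
    data = np.array([1.0, 2.0, 3.0, 4.0])             # oldest ... newest
    ndays = len(data)
    weights = np.arange(1, ndays + 1, dtype=float)     # [1, 2, 3, 4]
    normalizer = (ndays * (ndays + 1)) / 2             # 10.0
    return np.nansum(data * weights) / normalizer      # 3.0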
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
:func:`pandas.ewmstd`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = exponential_weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
squared_weight_sum = (np_sum(weights) ** 2)
bias_correction = (
squared_weight_sum / (squared_weight_sum - np_sum(weights ** 2))
)
out[:] = sqrt(variance * bias_correction)
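# Illustrative sketch (a hypothetical helper, not part of the original
# module): the reliability (bias-correction) factor applied above, computed
# for a hypothetical weight vector. With equal weights it reduces to the
# familiar n / (n - 1) sample-variance correction.
def _example_ewmstd_bias_correction():
    import numpy as np
    weights = np.ones(5)
    squared_weight_sum = np.sum(weights) ** 2                  # 25.0
    bias_correction = squared_weight_sum / (
        squared_weight_sum - np.sum(weights ** 2)              # 25 - 5
    )
    return bias_correction                                     # 1.25 == 5 / 4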
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
"""
params = ('k',)
inputs = (USEquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
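# Illustrative sketch (a hypothetical helper, not part of the original
# module): the band arithmetic used above for a single hypothetical asset,
# with k = 2.
def _example_bollinger_bands():
    import numpy as np
    closes = np.array([10.0, 11.0, 12.0, 13.0, 14.0])
    k = 2.0
    middle = np.nanmean(closes)             # 12.0
    difference = k * np.nanstd(closes)      # 2 * sqrt(2) ~ 2.828
    upper = middle + difference             # ~14.83
    lower = middle - difference             # ~9.17
    return lower, middle, upper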
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator # noqa
**Defaults Inputs:** USEquityPricing.low, USEquityPricing.high
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
"""
inputs = (USEquityPricing.low, USEquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
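# Illustrative sketch (a hypothetical helper, not part of the original
# module): the Aroon arithmetic used above for one hypothetical asset over a
# 5-bar window. A value of 100 means the extreme occurred on the latest bar.
def _example_aroon():
    import numpy as np
    wl = 5
    highs = np.array([10.0, 12.0, 11.0, 11.5, 11.8])
    lows = np.array([9.0, 9.5, 8.0, 8.5, 9.2])
    up = 100 * np.nanargmax(highs) / (wl - 1)    # highest high at index 1 -> 25.0
    down = 100 * np.nanargmin(lows) / (wl - 1)   # lowest low at index 2 -> 50.0
    return down, up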
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile, and varies a lot when used in
market analysis. It is recommended to use the slow stochastic oscillator
or a moving average of the %K [%D].
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (USEquityPricing.close, USEquityPricing.low, USEquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
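# Illustrative sketch (a hypothetical helper, not part of the original
# module): the %K arithmetic used above for one hypothetical asset over the
# lookback window.
def _example_fast_stochastic_k():
    import numpy as np
    highs = np.array([12.0, 13.0, 12.5])
    lows = np.array([10.0, 10.5, 11.0])
    closes = np.array([11.0, 12.0, 12.4])
    highest_high = np.nanmax(highs)     # 13.0
    lowest_low = np.nanmin(lows)        # 10.0
    today_close = closes[-1]            # 12.4
    return ((today_close - lowest_low) / (highest_high - lowest_low)) * 100  # 80.0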
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud # noqa
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
"""
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (USEquityPricing.high, USEquityPricing.low, USEquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, where n equals the window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
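# Illustrative sketch (a hypothetical helper, not part of the original
# module): the rate-of-change arithmetic used above, on hypothetical prices.
def _example_rate_of_change():
    price, prev_price = 110.0, 100.0    # current close vs. close n days ago
    return ((price - prev_price) / prev_price) * 100   # 10.0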
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
USEquityPricing.high,
USEquityPricing.low,
USEquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
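# Illustrative sketch (a hypothetical helper, not part of the original
# module): the true-range arithmetic used above for one hypothetical two-bar
# window (the largest of the three candidate ranges is kept).
def _example_true_range():
    import numpy as np
    highs = np.array([11.0, 12.5])
    lows = np.array([10.0, 11.5])
    closes = np.array([10.5, 12.0])
    high_to_low = highs[1] - lows[1]                   # 1.0
    high_to_prev_close = abs(highs[1] - closes[0])     # 2.0
    low_to_prev_close = abs(lows[1] - closes[0])       # 1.0
    return np.max([high_to_low, high_to_prev_close, low_to_prev_close])  # 2.0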
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
"""
inputs = (USEquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
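# Illustrative sketch (a hypothetical helper, not part of the original
# module): how the window_length passed to super().__new__ above follows from
# the default periods, and the decay rate _ewma uses for a given period.
def _example_macd_window_length():
    fast_period, slow_period, signal_period = 12, 26, 9
    # Enough rows to produce `signal_period` values of the slow EWMA.
    window_length = slow_period + signal_period - 1            # 34
    decay_rate_for_slow = 1.0 - (2.0 / (1.0 + slow_period))    # ~0.926
    return window_length, decay_rate_for_slow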
class AnnualizedVolatility(CustomFactor):
"""
Volatility. The degree of variation of a series over time as measured by
the standard deviation of daily returns.
https://en.wikipedia.org/wiki/Volatility_(finance)
**Default Inputs:** :data:`zipline.pipeline.factors.Returns(window_length=2)` # noqa
Parameters
----------
annualization_factor : float, optional
The number of time units per year. Default is 252, the number of NYSE
trading days in a normal year.
"""
inputs = [Returns(window_length=2)]
params = {'annualization_factor': 252.0}
window_length = 252
def compute(self, today, assets, out, returns, annualization_factor):
out[:] = nanstd(returns, axis=0) * (annualization_factor ** .5)
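# Illustrative sketch (a hypothetical helper, not part of the original
# module): the annualization applied above -- daily return volatility scaled
# by the square root of the number of periods per year. Returns are
# hypothetical.
def _example_annualized_volatility():
    import numpy as np
    daily_returns = np.array([0.01, -0.02, 0.015, 0.0, -0.005])
    annualization_factor = 252.0
    return np.nanstd(daily_returns) * (annualization_factor ** 0.5)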
# Convenience aliases.
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
MACDSignal = MovingAverageConvergenceDivergenceSignal
# --- end of zipline/pipeline/factors/technical.py ---
from numpy import broadcast_arrays
from scipy.stats import (
linregress,
pearsonr,
spearmanr,
)
from zipline.errors import IncompatibleTerms
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.filters import SingleAsset
from zipline.pipeline.mixins import SingleInputMixin
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import AssetExists
from zipline.utils.input_validation import expect_bounded, expect_dtypes
from zipline.utils.numpy_utils import float64_dtype, int64_dtype
from .technical import Returns
ALLOWED_DTYPES = (float64_dtype, int64_dtype)
class _RollingCorrelation(CustomFactor, SingleInputMixin):
@expect_dtypes(base_factor=ALLOWED_DTYPES, target=ALLOWED_DTYPES)
@expect_bounded(correlation_length=(2, None))
def __new__(cls,
base_factor,
target,
correlation_length,
mask=NotSpecified):
if target.ndim == 2 and base_factor.mask is not target.mask:
raise IncompatibleTerms(term_1=base_factor, term_2=target)
return super(_RollingCorrelation, cls).__new__(
cls,
inputs=[base_factor, target],
window_length=correlation_length,
mask=mask,
)
class RollingPearson(_RollingCorrelation):
"""
A Factor that computes pearson correlation coefficients between the columns
of a given Factor and either the columns of another Factor/BoundColumn or a
slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.factors.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.pearsonr`
:meth:`Factor.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
Notes
-----
Most users should call Factor.pearsonr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
# If `target_data` is a Slice or single column of data, broadcast it
# out to the same shape as `base_data`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
target_data = broadcast_arrays(target_data, base_data)[0]
for i in range(len(out)):
out[i] = pearsonr(base_data[:, i], target_data[:, i])[0]
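# Illustrative sketch (a hypothetical helper, not part of the original
# module): how a single column of target data is broadcast against a
# (days x assets) base array and then correlated column-by-column, mirroring
# the loop above. The data is hypothetical.
def _example_columnwise_pearson():
    import numpy as np
    from scipy.stats import pearsonr as _pearsonr
    base_data = np.array([[1.0, 10.0],
                          [2.0, 8.0],
                          [3.0, 9.0],
                          [4.0, 7.0]])
    target_data = np.array([[1.0], [2.0], [3.0], [4.0]])    # one column
    target_data = np.broadcast_arrays(target_data, base_data)[0]
    out = np.empty(base_data.shape[1])
    for i in range(len(out)):
        out[i] = _pearsonr(base_data[:, i], target_data[:, i])[0]
    return out   # first entry is exactly 1.0 (a series against itself)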
class RollingSpearman(_RollingCorrelation):
"""
A Factor that computes spearman rank correlation coefficients between the
columns of a given Factor and either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.factors.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.spearmanr`
:meth:`Factor.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
Notes
-----
Most users should call Factor.spearmanr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
# If `target_data` is a Slice or single column of data, broadcast it
# out to the same shape as `base_data`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
target_data = broadcast_arrays(target_data, base_data)[0]
for i in range(len(out)):
out[i] = spearmanr(base_data[:, i], target_data[:, i])[0]
class RollingLinearRegression(CustomFactor, SingleInputMixin):
"""
A Factor that performs an ordinary least-squares regression predicting the
columns of a given Factor from either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
dependent : zipline.pipeline.factors.Factor
The factor whose columns are the predicted/dependent variable of each
regression with `independent`.
independent : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression with `dependent`. This term may be a Factor, a BoundColumn
or a Slice. If `independent` is two-dimensional, regressions are
computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `dependent` should be
regressed against `independent` each day.
See Also
--------
:func:`scipy.stats.linregress`
:meth:`Factor.linear_regression`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
Notes
-----
Most users should call Factor.linear_regression rather than directly
construct an instance of this class.
"""
outputs = ['alpha', 'beta', 'r_value', 'p_value', 'stderr']
@expect_dtypes(dependent=ALLOWED_DTYPES, independent=ALLOWED_DTYPES)
@expect_bounded(regression_length=(2, None))
def __new__(cls,
dependent,
independent,
regression_length,
mask=NotSpecified):
if independent.ndim == 2 and dependent.mask is not independent.mask:
raise IncompatibleTerms(term_1=dependent, term_2=independent)
return super(RollingLinearRegression, cls).__new__(
cls,
inputs=[dependent, independent],
window_length=regression_length,
mask=mask,
)
def compute(self, today, assets, out, dependent, independent):
alpha = out.alpha
beta = out.beta
r_value = out.r_value
p_value = out.p_value
stderr = out.stderr
def regress(y, x):
regr_results = linregress(y=y, x=x)
# `linregress` returns its results in the following order:
# slope, intercept, r-value, p-value, stderr
alpha[i] = regr_results[1]
beta[i] = regr_results[0]
r_value[i] = regr_results[2]
p_value[i] = regr_results[3]
stderr[i] = regr_results[4]
# If `independent` is a Slice or single column of data, broadcast it
# out to the same shape as `dependent`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
independent = broadcast_arrays(independent, dependent)[0]
for i in range(len(out)):
regress(y=dependent[:, i], x=independent[:, i])
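# Illustrative sketch (a hypothetical helper, not part of the original
# module): the output ordering of scipy.stats.linregress relied on above,
# checked on a tiny hypothetical data set where y = 2 * x + 1 exactly.
def _example_linregress_output_order():
    from scipy.stats import linregress as _linregress
    x = [1.0, 2.0, 3.0, 4.0]
    y = [3.0, 5.0, 7.0, 9.0]
    slope, intercept, r_value, p_value, stderr = _linregress(x=x, y=y)
    return slope, intercept, r_value   # 2.0, 1.0, 1.0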
class RollingPearsonOfReturns(RollingPearson):
"""
Calculates the Pearson product-moment correlation coefficient of the
returns of the given asset with the returns of all other assets.
Pearson correlation is what most people mean when they say "correlation
coefficient" or "R-value".
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
correlation_length : int >= 1
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Note
----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
Example
-------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in SPY's rolling returns correlation with each
stock from 2017-03-17 to 2017-03-22, using a 5-day look back window (that
is, we calculate each correlation coefficient over 5 days of data). We can
achieve this by doing::
rolling_correlations = RollingPearsonOfReturns(
target=sid(8554),
returns_length=10,
correlation_length=5,
)
The result of computing ``rolling_correlations`` from 2017-03-17 to
2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .15 -.96
2017-03-20 1 .10 -.96
2017-03-21 1 -.16 -.94
2017-03-22 1 -.16 -.85
Note that the column for SPY is all 1's, as the correlation of any data
series with itself is always 1. To understand how each of the other values
were calculated, take for example the .15 in MSFT's column. This is the
correlation coefficient between SPY's returns looking back from 2017-03-17
(-.03, -.02, -.01, 0, .01) and MSFT's returns (.03, -.03, .02, -.02, .04).
See Also
--------
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls,
target,
returns_length,
correlation_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingPearsonOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingSpearmanOfReturns(RollingSpearman):
"""
Calculates the Spearman rank correlation coefficient of the returns of the
given asset with the returns of all other assets.
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
correlation_length : int >= 1
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Note
----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls,
target,
returns_length,
correlation_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingSpearmanOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingLinearRegressionOfReturns(RollingLinearRegression):
"""
Perform an ordinary least-squares regression predicting the returns of all
other assets on the given asset.
Parameters
----------
target : zipline.assets.Asset
The asset to regress against all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
regression_length : int >= 1
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed against the target
asset each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which regressions are computed.
This factor is designed to return five outputs:
- alpha, a factor that computes the intercepts of each regression.
- beta, a factor that computes the slopes of each regression.
- r_value, a factor that computes the correlation coefficient of each
regression.
- p_value, a factor that computes, for each regression, the two-sided
p-value for a hypothesis test whose null hypothesis is that the slope is
zero.
- stderr, a factor that computes the standard error of the estimate of each
regression.
For more help on factors with multiple outputs, see
:class:`zipline.pipeline.factors.CustomFactor`.
Example
-------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in predicting each stock's returns from SPY's
over rolling 5-day look back windows. We can compute rolling regression
coefficients (alpha and beta) from 2017-03-17 to 2017-03-22 by doing::
regression_factor = RollingLinearRegressionOfReturns(
target=sid(8554),
returns_length=10,
regression_length=5,
)
alpha = regression_factor.alpha
beta = regression_factor.beta
The result of computing ``alpha`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 0 .011 .003
2017-03-20 0 -.004 .004
2017-03-21 0 .007 .006
2017-03-22 0 .002 .008
And the result of computing ``beta`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .3 -1.1
2017-03-20 1 .2 -1
2017-03-21 1 -.3 -1
2017-03-22 1 -.3 -.9
Note that SPY's column for alpha is all 0's and for beta is all 1's, as the
regression line of SPY with itself is simply the function y = x.
To understand how each of the other values were calculated, take for
example MSFT's ``alpha`` and ``beta`` values on 2017-03-17 (.011 and .3,
respectively). These values are the result of running a linear regression
predicting MSFT's returns from SPY's returns, using values starting at
2017-03-17 and looking back 5 days. That is, the regression was run with
x = [-.03, -.02, -.01, 0, .01] and y = [.03, -.03, .02, -.02, .04], and it
produced a slope of .3 and an intercept of .011.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
"""
def __new__(cls,
target,
returns_length,
regression_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingLinearRegressionOfReturns, cls).__new__(
cls,
dependent=returns,
independent=returns[target],
regression_length=regression_length,
mask=mask,
)
# --- end of zipline/pipeline/factors/statistical.py ---
from functools import wraps
from operator import attrgetter
from numbers import Number
from math import ceil
from numpy import empty_like, inf, nan, where
from scipy.stats import rankdata
from zipline.errors import BadPercentileBounds, UnknownRankMethod
from zipline.lib.normalize import naive_grouped_rowwise_apply
from zipline.lib.rank import masked_rankdata_2d, rankdata_1d_descending
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.classifiers import Classifier, Everything, Quantiles
from zipline.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from zipline.pipeline.filters import (
Filter,
NumExprFilter,
PercentileFilter,
NotNullFilter,
NullFilter,
)
from zipline.pipeline.mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from zipline.pipeline.sentinels import NotSpecified, NotSpecifiedType
from zipline.pipeline.term import ComputableTerm, Term
from zipline.utils.functional import with_doc, with_name
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import nanmean, nanstd
from zipline.utils.memoize import classlazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
coerce_to_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
)
_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])
def coerce_numbers_to_my_dtype(f):
"""
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison.
"""
@wraps(f)
def method(self, other):
if isinstance(other, Number):
other = coerce_to_dtype(self.dtype, other)
return f(self, other)
return method
def binop_return_type(op):
if is_comparison(op):
return NumExprFilter
else:
return NumExprFactor
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc
# Decorators for Factor methods.
if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}.\n"
"{method_name}() is only defined for dtype {expected_dtype}."
"To filter missing data, use isnull() or notnull()."
)
)
float64_only = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() is only defined on Factors of dtype {expected_dtype},"
" but it was called on a Factor of dtype {received_dtype}."
)
)
FACTOR_DTYPES = frozenset([datetime64ns_dtype, float64_dtype, int64_dtype])
class Factor(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline API expression producing a numerical or date-valued output.
Factors are the most commonly-used Pipeline term, representing the result
of any computation producing a numerical result.
Factors can be combined, both with other Factors and with scalar values,
via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).
This makes it easy to write complex expressions that combine multiple
Factors. For example, constructing a Factor that computes the average of
two other Factors is simply::
>>> f1 = SomeFactor(...) # doctest: +SKIP
>>> f2 = SomeOtherFactor(...) # doctest: +SKIP
>>> average = (f1 + f2) / 2.0 # doctest: +SKIP
Factors can also be converted into :class:`zipline.pipeline.Filter` objects
via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).
There are many natural operators defined on Factors besides the basic
numerical operators. These include methods identifying missing or
extreme-valued outputs (isnull, notnull, isnan, notnan), methods for
normalizing outputs (rank, demean, zscore), and methods for constructing
Filters based on rank-order properties of results (top, bottom,
percentile_between).
"""
ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin
# Dynamically add functions for creating NumExprFactor/NumExprFilter
# instances.
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
# Don't override __eq__ because it breaks comparisons on tuples of
# Factors.
for op in MATH_BINOPS.union(COMPARISONS - {'=='})
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): reflected_binary_operator(op)
for op in MATH_BINOPS
}
)
clsdict.update(
{
unary_op_name(op): unary_operator(op)
for op in UNARY_OPS
}
)
clsdict.update(
{
funcname: function_application(funcname)
for funcname in NUMEXPR_MATH_FUNCS
}
)
__truediv__ = clsdict['__div__']
__rtruediv__ = clsdict['__rdiv__']
eq = binary_operator('==')
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Example
-------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will be
written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
)
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of ``self``.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Example
-------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
)
def rank(self,
method='ordinal',
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank`
"""
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
)
@expect_types(
target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),
)
def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Example
-------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
"""
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),
)
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Example
-------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term, regression_length=int, mask=(Filter, NotSpecifiedType),
)
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Example
-------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
)
@expect_types(
min_percentile=(int, float),
max_percentile=(int, float),
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor that returns a winsorized row. Winsorizing changes
values ranked below the minimum percentile to the value at the minimum
percentile. Similarly, values ranking above the maximum percentile are
changed to the value at the maximum percentile. This is useful for
limiting the impact of extreme values.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, winsorizing the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(inp) * min_percentile)th lowest value. If low values
should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(inp) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Example
-------
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with the columns defined above, the result for a
given day could look like:
            'PRICE'  'WINSOR_1'  'WINSOR_2'  'WINSOR_3'
Asset_1        1          2           4           3
Asset_2        2          2           4           3
Asset_3        3          3           4           3
Asset_4        4          4           4           4
Asset_5        5          5           5           4
Asset_6        6          5           5           4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
)
@expect_types(bins=int, mask=(Filter, NotSpecifiedType))
def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1).
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask)
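# Illustrative usage sketch (hypothetical names): bucketing a factor's
# output into five bins per day, ignoring assets outside a base universe.
#
#     return_quintiles = returns.quantiles(5, mask=base_universe)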
@expect_types(mask=(Filter, NotSpecifiedType))
def quartiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quartiles over the output of ``self``.
Every non-NaN data point in the output is labelled with a value of either
0, 1, 2, or 3, corresponding to the first, second, third, or fourth
quartile over each row. NaN data points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quartiles.
Returns
-------
quartiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 3.
"""
return self.quantiles(bins=4, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quintiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quintile labels on ``self``.
Every non-NaN data point in the output is labelled with a value of
0, 1, 2, 3, or 4, corresponding to quintiles over each row. NaN data
points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quintiles.
Returns
-------
quintiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 4.
"""
return self.quantiles(bins=5, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def deciles(self, mask=NotSpecified):
"""
Construct a Classifier computing decile labels on ``self``.
Every non-NaN data point in the output is labelled with a value from 0 to
9, corresponding to deciles over each row. NaN data points are labelled
with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing deciles.
Returns
-------
deciles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 9.
"""
return self.quantiles(bins=10, mask=mask)
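# Illustrative usage sketch (hypothetical ``returns`` factor): because
# decile labels range from 0 to 9, the top decile can be turned into a
# Filter with the classifier's ``eq`` method.
#
#     top_decile = returns.deciles().eq(9)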
def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N
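# Illustrative usage sketch (hypothetical names): the 50 highest-valued
# assets per day within a base universe, ranked separately in each sector.
#
#     best_in_sector = returns.top(50, mask=base_universe,
#                                  groupby=my_sector_classifier)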
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating percentile
thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
)
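# Illustrative usage sketch (hypothetical ``returns`` factor): keep only the
# middle half of the distribution each day.
#
#     middle_half = returns.percentile_between(25.0, 75.0)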
def isnull(self):
"""
A Filter producing True for values where this Factor has missing data.
Equivalent to self.isnan() when ``self.dtype`` is float64.
Otherwise equivalent to ``self.eq(self.missing_value)``.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
if self.dtype == float64_dtype:
# Using isnan is more efficient when possible because we can fold
# the isnan computation with other NumExpr expressions.
return self.isnan()
else:
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this Factor has complete data.
Equivalent to ``~self.isnan()`` when ``self.dtype`` is float64.
Otherwise equivalent to ``(self != self.missing_value)``.
"""
return NotNullFilter(self)
@if_not_float64_tell_caller_to_use_isnull
def isnan(self):
"""
A Filter producing True for all values where this Factor is NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return self != self
@if_not_float64_tell_caller_to_use_isnull
def notnan(self):
"""
A Filter producing True for values where this Factor is not NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return ~self.isnan()
@if_not_float64_tell_caller_to_use_isnull
def isfinite(self):
"""
A Filter producing True for values where this Factor is anything but
NaN, inf, or -inf.
"""
return (-inf < self) & (self < inf)
@classlazyval
def _downsampled_type(self):
return DownsampledMixin.make_downsampled_type(Factor)
@classlazyval
def _aliased_type(self):
return AliasedMixin.make_aliased_type(Factor)
class NumExprFactor(NumericalExpression, Factor):
"""
Factor computed from a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
Notes
-----
NumExprFactors are constructed by numerical operators like `+` and `-`.
Users should rarely need to construct a NumExprFactor directly.
"""
pass
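# Illustrative note (behavior sketch, not from the original source):
# combining two float64 factors with an arithmetic operator, e.g. ``f + g``,
# yields a NumExprFactor roughly equivalent to
# ``NumExprFactor(expr='x_0 + x_1', binds=(f, g))``.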
class GroupedRowTransform(Factor):
"""
A Factor that transforms an input factor by applying a row-wise
shape-preserving transformation on classifier-defined groups of that
Factor.
This is most often useful for normalization operators like ``zscore`` or
``demean`` or for performing ranking using ``rank``.
Parameters
----------
transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]
Function to apply over each row group.
factor : zipline.pipeline.Factor
The factor providing baseline data to transform.
mask : zipline.pipeline.Filter
Mask of entries to ignore when calculating transforms.
groupby : zipline.pipeline.Classifier
Classifier partitioning ``factor`` into groups over which ``transform``
is applied.
transform_args : tuple[hashable]
Additional positional arguments to forward to ``transform``.
Notes
-----
Users should rarely construct instances of this factor directly. Instead,
they should construct instances via factor normalization methods like
``zscore`` and ``demean`` or using ``rank`` with ``groupby``.
See Also
--------
zipline.pipeline.factors.Factor.zscore
zipline.pipeline.factors.Factor.demean
zipline.pipeline.factors.Factor.rank
"""
window_length = 0
def __new__(cls,
transform,
transform_args,
factor,
groupby,
dtype,
missing_value,
mask,
**kwargs):
if mask is NotSpecified:
mask = factor.mask
else:
mask = mask & factor.mask
if groupby is NotSpecified:
groupby = Everything(mask=mask)
return super(GroupedRowTransform, cls).__new__(
GroupedRowTransform,
transform=transform,
transform_args=transform_args,
inputs=(factor, groupby),
missing_value=missing_value,
mask=mask,
dtype=dtype,
**kwargs
)
def _init(self, transform, transform_args, *args, **kwargs):
self._transform = transform
self._transform_args = transform_args
return super(GroupedRowTransform, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, transform, transform_args, *args, **kwargs):
return (
super(GroupedRowTransform, cls)._static_identity(*args, **kwargs),
transform,
transform_args,
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
groupby_expr = self.inputs[1]
if groupby_expr.dtype == int64_dtype:
group_labels = arrays[1]
null_label = self.inputs[1].missing_value
elif groupby_expr.dtype == categorical_dtype:
# Coerce our LabelArray into an isomorphic array of ints. This is
# necessary because np.where doesn't know about LabelArrays or the
# void dtype.
group_labels = arrays[1].as_int_array()
null_label = arrays[1].missing_value_code
else:
raise TypeError(
"Unexpected groupby dtype: %s." % groupby_expr.dtype
)
# Make a copy with the null code written to masked locations.
group_labels = where(mask, group_labels, null_label)
return where(
group_labels != null_label,
naive_grouped_rowwise_apply(
data=data,
group_labels=group_labels,
func=self._transform,
func_args=self._transform_args,
out=empty_like(data, dtype=self.dtype),
),
self.missing_value,
)
@property
def transform_name(self):
return self._transform.__name__
def short_repr(self):
return type(self).__name__ + '(%r)' % self.transform_name
class Rank(SingleInputMixin, Factor):
"""
A Factor representing the row-wise rank data of another Factor.
Parameters
----------
factor : zipline.pipeline.factors.Factor
The factor on which to compute ranks.
method : str, {'average', 'min', 'max', 'dense', 'ordinal'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for each
ranking method.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`Factor.rank`
Notes
-----
Most users should call Factor.rank rather than directly construct an
instance of this class.
"""
window_length = 0
dtype = float64_dtype
window_safe = True
def __new__(cls, factor, method, ascending, mask):
return super(Rank, cls).__new__(
cls,
inputs=(factor,),
method=method,
ascending=ascending,
mask=mask,
)
def _init(self, method, ascending, *args, **kwargs):
self._method = method
self._ascending = ascending
return super(Rank, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, method, ascending, *args, **kwargs):
return (
super(Rank, cls)._static_identity(*args, **kwargs),
method,
ascending,
)
def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def __repr__(self):
return "{type}({input_}, method='{method}', mask={mask})".format(
type=type(self).__name__,
input_=self.inputs[0],
method=self._method,
mask=self.mask,
)
class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):
'''
Base class for user-defined Factors.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is not passed to the CustomFactor constructor, we look for a
class-level attribute named `inputs`.
outputs : iterable[str], optional
An iterable of strings which represent the names of each output this
factor should compute and return. If this argument is not passed to the
CustomFactor constructor, we look for a class-level attribute named
`outputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFactor constructor, we look for a class-level attribute
named `window_length`.
mask : zipline.pipeline.Filter, optional
A Filter describing the assets on which we should compute each day.
Each call to ``CustomFactor.compute`` will only receive assets for
which ``mask`` produced True on the day for which compute is being
called.
Notes
-----
Users implementing their own Factors should subclass CustomFactor and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFactor constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
Column labels for `out` and `inputs`.
out : np.array[self.dtype, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`. If multiple outputs are
specified, `compute` should write its desired return values into
`out.<output_name>` for each output name in `self.outputs`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
``compute`` functions should expect to be passed NaN values for dates on
which no data was available for an asset. This may include dates on which
an asset did not yet exist.
For example, if a CustomFactor requires 10 rows of close price data, and
asset A started trading on Monday June 2nd, 2014, then on Tuesday, June
3rd, 2014, the column of input data for asset A will have 9 leading NaNs
for the preceding days on which data was not yet available.
Examples
--------
A CustomFactor with pre-declared defaults:
.. code-block:: python
class TenDayRange(CustomFactor):
"""
Computes the difference between the highest high in the last 10
days and the lowest low.
Pre-declares high and low as default inputs and `window_length` as
10.
"""
inputs = [USEquityPricing.high, USEquityPricing.low]
window_length = 10
def compute(self, today, assets, out, highs, lows):
from numpy import nanmin, nanmax
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
out[:] = highest_highs - lowest_lows
# Doesn't require passing inputs or window_length because they're
# pre-declared as defaults for the TenDayRange class.
ten_day_range = TenDayRange()
A CustomFactor without defaults:
.. code-block:: python
class MedianValue(CustomFactor):
"""
Computes the median value of an arbitrary single input over an
arbitrary window.
Does not declare any defaults, so values for `window_length` and
`inputs` must be passed explicitly on every construction.
"""
def compute(self, today, assets, out, data):
from numpy import nanmedian
out[:] = nanmedian(data, axis=0)
# Values for `inputs` and `window_length` must be passed explicitly to
# MedianValue.
median_close10 = MedianValue([USEquityPricing.close], window_length=10)
median_low15 = MedianValue([USEquityPricing.low], window_length=15)
A CustomFactor with multiple outputs:
.. code-block:: python
class MultipleOutputs(CustomFactor):
inputs = [USEquityPricing.close]
outputs = ['alpha', 'beta']
window_length = N
def compute(self, today, assets, out, close):
computed_alpha, computed_beta = some_function(close)
out.alpha[:] = computed_alpha
out.beta[:] = computed_beta
# Each output is returned as its own Factor upon instantiation.
alpha, beta = MultipleOutputs()
# Equivalently, we can create a single factor instance and access each
# output as an attribute of that instance.
multiple_outputs = MultipleOutputs()
alpha = multiple_outputs.alpha
beta = multiple_outputs.beta
Note: If a CustomFactor has multiple outputs, all outputs must have the
same dtype. For instance, in the example above, if alpha is a float then
beta must also be a float.
'''
dtype = float64_dtype
def __getattribute__(self, name):
outputs = object.__getattribute__(self, 'outputs')
if outputs is NotSpecified:
return super(CustomFactor, self).__getattribute__(name)
elif name in outputs:
return RecarrayField(factor=self, attribute=name)
else:
try:
return super(CustomFactor, self).__getattribute__(name)
except AttributeError:
raise AttributeError(
'Instance of {factor} has no output named {attr!r}. '
'Possible choices are: {choices}.'.format(
factor=type(self).__name__,
attr=name,
choices=self.outputs,
)
)
def __iter__(self):
if self.outputs is NotSpecified:
raise ValueError(
'{factor} does not have multiple outputs.'.format(
factor=type(self).__name__,
)
)
return (RecarrayField(self, attr) for attr in self.outputs)
class RecarrayField(SingleInputMixin, Factor):
"""
A single field from a multi-output factor.
"""
def __new__(cls, factor, attribute):
return super(RecarrayField, cls).__new__(
cls,
attribute=attribute,
inputs=[factor],
window_length=0,
mask=factor.mask,
dtype=factor.dtype,
missing_value=factor.missing_value,
)
def _init(self, attribute, *args, **kwargs):
self._attribute = attribute
return super(RecarrayField, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, attribute, *args, **kwargs):
return (
super(RecarrayField, cls)._static_identity(*args, **kwargs),
attribute,
)
def _compute(self, windows, dates, assets, mask):
return windows[0][self._attribute]
class Latest(LatestMixin, CustomFactor):
"""
Factor producing the most recently-known value of `inputs[0]` on each day.
The `.latest` attribute of DataSet columns returns an instance of this
Factor.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
# Functions to be passed to GroupedRowTransform. These aren't defined inline
# because the transformation function is part of the instance hash key.
def demean(row):
return row - nanmean(row)
def zscore(row):
return (row - nanmean(row)) / nanstd(row)
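# Worked example (illustrative): demean(np.array([1., 2., 3., 4.])) returns
# array([-1.5, -0.5, 0.5, 1.5]) because the row mean is 2.5; zscore() divides
# the same demeaned row by nanstd (population standard deviation, ddof=0).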
def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
num = a.size
idx = a.argsort()
if min_percentile > 0:
lowidx = int(min_percentile * num)
a[idx[:lowidx]] = a[idx[lowidx]]
if max_percentile < 1:
upidx = int(ceil(num * max_percentile))
# upidx could return as the length of the array, in this case
# no modification to the right tail is necessary.
if upidx < num:
a[idx[upidx:]] = a[idx[upidx - 1]]
return a
# --- end of zipline/pipeline/factors/factor.py ---
from numpy import newaxis
from zipline.utils.numpy_utils import (
NaTD,
busday_count_mask_NaT,
datetime64D_dtype,
float64_dtype,
)
from .factor import Factor
class BusinessDaysSincePreviousEvent(Factor):
"""
Abstract class for business days since a previous event.
Returns the number of **business days** (not trading days!) since
the most recent event date for each asset.
This doesn't use trading days for symmetry with
BusinessDaysUntilNextEvent.
Assets which announced or will announce the event today will produce a
value of 0.0. Assets that announced the event on the previous business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
return busday_count_mask_NaT(announce_dates, reference_dates)
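# Illustrative usage sketch (``MyEvents.previous_event_date`` is a
# hypothetical datetime64 BoundColumn, not part of this module):
#
#     days_since_event = BusinessDaysSincePreviousEvent(
#         inputs=[MyEvents.previous_event_date],
#     )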
class BusinessDaysUntilNextEvent(Factor):
"""
Abstract class for business days until the next event.
Returns the number of **business days** (not trading days!) until
the next known event date for each asset.
This doesn't use trading days because the trading calendar includes
information that may not have been available to the algorithm at the time
when `compute` is called.
For example, the NYSE closings on September 11th, 2001 would not have been
known to the algorithm on September 10th.
Assets that announced or will announce the event today will produce a value
of 0.0. Assets that will announce the event on the next upcoming business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
return busday_count_mask_NaT(reference_dates, announce_dates)
# --- end of zipline/pipeline/factors/events.py ---
from numbers import Number
import operator
import re
from numpy import where, isnan, nan, zeros
import pandas as pd
from zipline.lib.labelarray import LabelArray
from zipline.lib.quantiles import quantiles
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import ComputableTerm
from zipline.utils.compat import unicode
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import classlazyval
from zipline.utils.numpy_utils import (
categorical_dtype,
int64_dtype,
vectorized_is_element,
)
from ..filters import ArrayPredicate, NotNullFilter, NullFilter, NumExprFilter
from ..mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
string_classifiers_only = restrict_to_dtype(
dtype=categorical_dtype,
message_template=(
"{method_name}() is only defined on Classifiers producing strings"
" but it was called on a Factor of dtype {received_dtype}."
)
)
class Classifier(RestrictedDTypeMixin, ComputableTerm):
"""
A Pipeline expression computing a categorical output.
Classifiers are most commonly useful for describing grouping keys for
complex transformations on Factor outputs. For example, Factor.demean() and
Factor.zscore() can be passed a Classifier in their ``groupby`` argument,
indicating that means/standard deviations should be computed on assets for
which the classifier produced the same label.
"""
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = (int64_dtype, categorical_dtype)
categories = NotSpecified
def isnull(self):
"""
A Filter producing True for values where this term has missing data.
"""
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this term has complete data.
"""
return NotNullFilter(self)
# We explicitly don't support classifier to classifier comparisons, since
# the stored values likely don't mean the same thing. This may be relaxed
# in the future, but for now we're starting conservatively.
def eq(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other``.
"""
# We treat this as an error because missing_values have NaN semantics,
# which means this would return an array of all False, which is almost
# certainly not what the user wants.
if other == self.missing_value:
raise ValueError(
"Comparison against self.missing_value ({value!r}) in"
" {typename}.eq().\n"
"Missing values have NaN semantics, so the "
"requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
value=other,
typename=(type(self).__name__),
)
)
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"x_0 == {other}".format(other=int(other)),
binds=(self,),
)
else:
return ArrayPredicate(
term=self,
op=operator.eq,
opargs=(other,),
)
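# Illustrative usage sketch (hypothetical string classifier):
#
#     nyse_only = exchange_classifier.eq('NYSE')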
def __ne__(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` does not match ``other``.
"""
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"((x_0 != {other}) & (x_0 != {missing}))".format(
other=int(other),
missing=self.missing_value,
),
binds=(self,),
)
else:
# Numexpr doesn't know how to use LabelArrays.
return ArrayPredicate(term=self, op=operator.ne, opargs=(other,))
@string_classifiers_only
@expect_types(prefix=(bytes, unicode))
def startswith(self, prefix):
"""
Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.startswith,
opargs=(prefix,),
)
@string_classifiers_only
@expect_types(suffix=(bytes, unicode))
def endswith(self, suffix):
"""
Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string ending with ``suffix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.endswith,
opargs=(suffix,),
)
@string_classifiers_only
@expect_types(substring=(bytes, unicode))
def has_substring(self, substring):
"""
Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.has_substring,
opargs=(substring,),
)
@string_classifiers_only
@expect_types(pattern=(bytes, unicode, type(re.compile(''))))
def matches(self, pattern):
"""
Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>`
"""
return ArrayPredicate(
term=self,
op=LabelArray.matches,
opargs=(pattern,),
)
def element_of(self, choices):
"""
Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``.
"""
try:
choices = frozenset(choices)
except Exception as e:
raise TypeError(
"Expected `choices` to be an iterable of hashable values,"
" but got {} instead.\n"
"This caused the following error: {!r}.".format(choices, e)
)
if self.missing_value in choices:
raise ValueError(
"Found self.missing_value ({mv!r}) in choices supplied to"
" {typename}.{meth_name}().\n"
"Missing values have NaN semantics, so the"
" requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {choices}.".format(
mv=self.missing_value,
typename=(type(self).__name__),
choices=sorted(choices),
meth_name=self.element_of.__name__,
)
)
def only_contains(type_, values):
return all(isinstance(v, type_) for v in values)
if self.dtype == int64_dtype:
if only_contains(int, choices):
return ArrayPredicate(
term=self,
op=vectorized_is_element,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-int in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
elif self.dtype == categorical_dtype:
if only_contains((bytes, unicode), choices):
return ArrayPredicate(
term=self,
op=LabelArray.element_of,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-string in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
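# Illustrative usage sketch (hypothetical int64 sector classifier; the codes
# are arbitrary):
#
#     in_selected_sectors = sector_code.element_of([101, 205])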
def postprocess(self, data):
if self.dtype == int64_dtype:
return data
if not isinstance(data, LabelArray):
raise AssertionError("Expected a LabelArray, got %s." % type(data))
return data.as_categorical()
def to_workspace_value(self, result, assets):
"""
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
"""
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(result.values, pd.Categorical), (
'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
)
@classlazyval
def _downsampled_type(self):
return DownsampledMixin.make_downsampled_type(Classifier)
@classlazyval
def _aliased_type(self):
return AliasedMixin.make_aliased_type(Classifier)
class Everything(Classifier):
"""
A trivial classifier that classifies everything the same.
"""
dtype = int64_dtype
window_length = 0
inputs = ()
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
return where(
mask,
zeros(shape=mask.shape, dtype=int64_dtype),
self.missing_value,
)
class Quantiles(SingleInputMixin, Classifier):
"""
A classifier computing quantiles over an input.
"""
params = ('bins',)
dtype = int64_dtype
window_length = 0
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
bins = self.params['bins']
to_bin = where(mask, data, nan)
result = quantiles(to_bin, bins)
# Write self.missing_value into nan locations, whether they were
# generated by our input mask or not.
result[isnan(result)] = self.missing_value
return result.astype(int64_dtype)
def short_repr(self):
return type(self).__name__ + '(%d)' % self.params['bins']
class CustomClassifier(PositiveWindowLengthMixin,
StandardOutputs,
CustomTermMixin,
Classifier):
"""
Base class for user-defined Classifiers.
Does not support multiple outputs.
See Also
--------
zipline.pipeline.CustomFactor
zipline.pipeline.CustomFilter
"""
def _allocate_output(self, windows, shape):
"""
Override the default array allocation to produce a LabelArray when we
have a string-like dtype.
"""
if self.dtype == int64_dtype:
return super(CustomClassifier, self)._allocate_output(
windows,
shape,
)
# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape)
class Latest(LatestMixin, CustomClassifier):
"""
A classifier producing the latest value of an input.
See Also
--------
zipline.pipeline.data.dataset.BoundColumn.latest
zipline.pipeline.factors.factor.Latest
zipline.pipeline.filters.filter.Latest
"""
pass
class InvalidClassifierComparison(TypeError):
def __init__(self, classifier, compval):
super(InvalidClassifierComparison, self).__init__(
"Can't compare classifier of dtype"
" {dtype} to value {value} of type {type}.".format(
dtype=classifier.dtype,
value=compval,
type=type(compval).__name__,
)
)
# --- end of zipline/pipeline/classifiers/classifier.py ---
from functools import total_ordering
from six import (
iteritems,
with_metaclass,
)
from zipline.pipeline.classifiers import Classifier, Latest as LatestClassifier
from zipline.pipeline.factors import Factor, Latest as LatestFactor
from zipline.pipeline.filters import Filter, Latest as LatestFilter
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import (
AssetExists,
LoadableTerm,
validate_dtype,
)
from zipline.utils.input_validation import ensure_dtype
from zipline.utils.numpy_utils import NoDefaultMissingValue
from zipline.utils.preprocess import preprocess
class Column(object):
"""
An abstract column of data, not yet associated with a dataset.
"""
@preprocess(dtype=ensure_dtype)
def __init__(self, dtype, missing_value=NotSpecified):
self.dtype = dtype
self.missing_value = missing_value
def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
)
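# Illustrative usage sketch (hypothetical dataset; assumes
# ``from zipline.utils.numpy_utils import float64_dtype``):
#
#     class MyDataSet(DataSet):
#         revenue = Column(dtype=float64_dtype)
#         eps = Column(dtype=float64_dtype)
#
# Accessing ``MyDataSet.revenue`` produces a BoundColumn usable in a Pipeline.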
class _BoundColumnDescr(object):
"""
Intermediate class that sits on `DataSet` objects and returns memoized
`BoundColumn` objects when requested.
This exists so that subclasses of DataSets don't share columns with their
parent classes.
"""
def __init__(self, dtype, missing_value, name):
# Validating and calculating default missing values here guarantees
that we fail quickly if the user passes an unsupported dtype or fails
# to provide a missing value for a dtype that requires one
# (e.g. int64), but still enables us to provide an error message that
# points to the name of the failing column.
try:
self.dtype, self.missing_value = validate_dtype(
termname="Column(name={name!r})".format(name=name),
dtype=dtype,
missing_value=missing_value,
)
except NoDefaultMissingValue:
# Re-raise with a more specific message.
raise NoDefaultMissingValue(
"Failed to create Column with name {name!r} and"
" dtype {dtype} because no missing_value was provided\n\n"
"Columns with dtype {dtype} require a missing_value.\n"
"Please pass missing_value to Column() or use a different"
" dtype.".format(dtype=dtype, name=name)
)
self.name = name
def __get__(self, instance, owner):
"""
Produce a concrete BoundColumn object when accessed.
We don't bind to datasets at class creation time so that subclasses of
DataSets produce different BoundColumns.
"""
return BoundColumn(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=owner,
name=self.name,
)
class BoundColumn(LoadableTerm):
"""
A column of data that's been concretely bound to a particular dataset.
Instances of this class are dynamically created upon access to attributes
of DataSets (for example, USEquityPricing.close is an instance of this
class).
Attributes
----------
dtype : numpy.dtype
The dtype of data produced when this column is loaded.
latest : zipline.pipeline.data.Factor or zipline.pipeline.data.Filter
A Filter, Factor, or Classifier computing the most recently known value
of this column on each date.
Produces a Filter if self.dtype == ``np.bool_``.
Produces a Classifier if self.dtype == ``np.int64``.
Otherwise produces a Factor.
dataset : zipline.pipeline.data.DataSet
The dataset to which this column is bound.
name : str
The name of this column.
"""
mask = AssetExists()
window_safe = True
def __new__(cls, dtype, missing_value, dataset, name):
return super(BoundColumn, cls).__new__(
cls,
domain=dataset.domain,
dtype=dtype,
missing_value=missing_value,
dataset=dataset,
name=name,
ndim=dataset.ndim,
)
def _init(self, dataset, name, *args, **kwargs):
self._dataset = dataset
self._name = name
return super(BoundColumn, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, dataset, name, *args, **kwargs):
return (
super(BoundColumn, cls)._static_identity(*args, **kwargs),
dataset,
name,
)
@property
def dataset(self):
"""
The dataset to which this column is bound.
"""
return self._dataset
@property
def name(self):
"""
The name of this column.
"""
return self._name
@property
def qualname(self):
"""
The fully-qualified name of this column.
Generated by doing '.'.join([self.dataset.__name__, self.name]).
"""
return '.'.join([self.dataset.__name__, self.name])
@property
def latest(self):
dtype = self.dtype
if dtype in Filter.ALLOWED_DTYPES:
Latest = LatestFilter
elif dtype in Classifier.ALLOWED_DTYPES:
Latest = LatestClassifier
else:
assert dtype in Factor.ALLOWED_DTYPES, "Unknown dtype %s." % dtype
Latest = LatestFactor
return Latest(
inputs=(self,),
dtype=dtype,
missing_value=self.missing_value,
ndim=self.ndim,
)
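# Example (uses the built-in USEquityPricing dataset):
#
#     from zipline.pipeline.data import USEquityPricing
#     latest_close = USEquityPricing.close.latest  # a float64 Latest factor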
def __repr__(self):
return "{qualname}::{dtype}".format(
qualname=self.qualname,
dtype=self.dtype.name,
)
def short_repr(self):
return self.qualname
@total_ordering
class DataSetMeta(type):
"""
Metaclass for DataSets
Supplies name and dataset information to Column attributes.
"""
def __new__(mcls, name, bases, dict_):
newtype = super(DataSetMeta, mcls).__new__(mcls, name, bases, dict_)
# collect all of the column names that we inherit from our parents
column_names = set().union(
*(getattr(base, '_column_names', ()) for base in bases)
)
for maybe_colname, maybe_column in iteritems(dict_):
if isinstance(maybe_column, Column):
# add column names defined on our class
bound_column_descr = maybe_column.bind(maybe_colname)
setattr(newtype, maybe_colname, bound_column_descr)
column_names.add(maybe_colname)
newtype._column_names = frozenset(column_names)
return newtype
@property
def columns(self):
return frozenset(
getattr(self, colname) for colname in self._column_names
)
def __lt__(self, other):
return id(self) < id(other)
def __repr__(self):
return '<DataSet: %r>' % self.__name__
class DataSet(with_metaclass(DataSetMeta, object)):
domain = None
ndim = 2
# --- end of zipline/pipeline/data/dataset.py ---
from itertools import chain
from operator import attrgetter
from numpy import (
float64,
nan,
nanpercentile,
)
from zipline.errors import (
BadPercentileBounds,
NonExistentAssetInTimeFrame,
UnsupportedDataType,
)
from zipline.lib.labelarray import LabelArray
from zipline.lib.rank import is_missing
from zipline.pipeline.expression import (
BadBinaryOperator,
FILTER_BINOPS,
method_name_for_op,
NumericalExpression,
)
from zipline.pipeline.mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from zipline.pipeline.term import ComputableTerm, Term
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import classlazyval
from zipline.utils.numpy_utils import bool_dtype, repeat_first_axis
def concat_tuples(*tuples):
"""
Concatenate a sequence of tuples into one tuple.
"""
return tuple(chain(*tuples))
def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {'~'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator
class Filter(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline expression computing a boolean output.
Filters are most commonly useful for describing sets of assets to include
or exclude for some particular purpose. Many Pipeline API functions accept
a ``mask`` argument, which can be supplied a Filter indicating that only
values passing the Filter should be considered when performing the
requested computation. For example, :meth:`zipline.pipeline.Factor.top`
accepts a mask indicating that ranks should be computed only on assets that
passed the specified Filter.
The most common way to construct a Filter is via one of the comparison
operators (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``) of
:class:`~zipline.pipeline.Factor`. For example, a natural way to construct
a Filter for stocks with a 10-day VWAP less than $20.0 is to first
construct a Factor computing 10-day VWAP and compare it to the scalar value
20.0::
>>> from zipline.pipeline.factors import VWAP
>>> vwap_10 = VWAP(window_length=10)
>>> vwaps_under_20 = (vwap_10 <= 20)
Filters can also be constructed via comparisons between two Factors. For
example, to construct a Filter producing True for asset/date pairs where
the asset's 10-day VWAP was greater than its 30-day VWAP::
>>> short_vwap = VWAP(window_length=10)
>>> long_vwap = VWAP(window_length=30)
>>> higher_short_vwap = (short_vwap > long_vwap)
Filters can be combined via the ``&`` (and) and ``|`` (or) operators.
``&``-ing together two filters produces a new Filter that produces True if
**both** of the inputs produced True.
``|``-ing together two filters produces a new Filter that produces True if
**either** of its inputs produced True.
The ``~`` operator can be used to invert a Filter, swapping all True values
with Falses and vice-versa.
Filters may be set as the ``screen`` attribute of a Pipeline, indicating
asset/date pairs for which the filter produces False should be excluded
from the Pipeline's output. This is useful both for reducing noise in the
output of a Pipeline and for reducing memory consumption of Pipeline
results.
"""
# Filters are window-safe by default, since a yes/no decision means the
# same thing from all temporal perspectives.
window_safe = True
ALLOWED_DTYPES = (bool_dtype,) # Used by RestrictedDTypeMixin
dtype = bool_dtype
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
for op in FILTER_BINOPS
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): binary_operator(op)
for op in FILTER_BINOPS
}
)
__invert__ = unary_operator('~')
def _validate(self):
# Run superclass validation first so that we handle `dtype not passed`
# before this.
retval = super(Filter, self)._validate()
if self.dtype != bool_dtype:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype
)
return retval
@classlazyval
def _downsampled_type(self):
return DownsampledMixin.make_downsampled_type(Filter)
@classlazyval
def _aliased_type(self):
return AliasedMixin.make_aliased_type(Filter)
class NumExprFilter(NumericalExpression, Filter):
"""
A Filter computed from a numexpr expression.
"""
@classmethod
def create(cls, expr, binds):
"""
Helper for creating new NumExprFilters.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype)
def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask
class NullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return data.is_missing()
return is_missing(arrays[0], self.inputs[0].missing_value)
class NotNullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are **not** missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NotNullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return ~data.is_missing()
return ~is_missing(arrays[0], self.inputs[0].missing_value)
class PercentileFilter(SingleInputMixin, Filter):
"""
A Filter representing assets falling between percentile bounds of a Factor.
Parameters
----------
factor : zipline.pipeline.factor.Factor
The factor over which to compute percentile bounds.
min_percentile : float [0.0, 1.0]
The minimum percentile rank of an asset that will pass the filter.
max_percentile : float [0.0, 1.0]
The maximum percentile rank of an asset that will pass the filter.
"""
window_length = 0
def __new__(cls, factor, min_percentile, max_percentile, mask):
return super(PercentileFilter, cls).__new__(
cls,
inputs=(factor,),
mask=mask,
min_percentile=min_percentile,
max_percentile=max_percentile,
)
def _init(self, min_percentile, max_percentile, *args, **kwargs):
self._min_percentile = min_percentile
self._max_percentile = max_percentile
return super(PercentileFilter, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs):
return (
super(PercentileFilter, cls)._static_identity(*args, **kwargs),
min_percentile,
max_percentile,
)
def _validate(self):
"""
Ensure that our percentile bounds are well-formed.
"""
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
raise BadPercentileBounds(
min_percentile=self._min_percentile,
max_percentile=self._max_percentile,
upper_bound=100.0
)
return super(PercentileFilter, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a mask of all values falling between
the given percentiles.
"""
# TODO: Review whether there's a better way of handling small numbers
# of columns.
data = arrays[0].copy().astype(float64)
data[~mask] = nan
# FIXME: np.nanpercentile **should** support computing multiple bounds
# at once, but there's a bug in the logic for multiple bounds in numpy
# 1.9.2. It will be fixed in 1.10.
# c.f. https://github.com/numpy/numpy/pull/5981
lower_bounds = nanpercentile(
data,
self._min_percentile,
axis=1,
keepdims=True,
)
upper_bounds = nanpercentile(
data,
self._max_percentile,
axis=1,
keepdims=True,
)
return (lower_bounds <= data) & (data <= upper_bounds)
class CustomFilter(PositiveWindowLengthMixin, CustomTermMixin, Filter):
"""
Base class for user-defined Filters.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is not passed to the CustomFilter constructor, we look for a
class-level attribute named `inputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFilter constructor, we look for a class-level attribute
named `window_length`.
Notes
-----
Users implementing their own Filters should subclass CustomFilter and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFilter constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
Column labels for `out` and `inputs`.
out : np.array[bool, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
See the documentation for
:class:`~zipline.pipeline.factors.factor.CustomFactor` for more details on
implementing a custom ``compute`` method.
See Also
--------
zipline.pipeline.factors.factor.CustomFactor
"""
class ArrayPredicate(SingleInputMixin, Filter):
"""
A filter applying a function from (ndarray, *args) -> ndarray[bool].
Parameters
----------
term : zipline.pipeline.Term
Term producing the array over which the predicate will be computed.
op : function(ndarray, *args) -> ndarray[bool]
Function to apply to the result of `term`.
opargs : tuple[hashable]
Additional argument to apply to ``op``.
"""
window_length = 0
@expect_types(term=Term, opargs=tuple)
def __new__(cls, term, op, opargs):
hash(opargs) # fail fast if opargs isn't hashable.
return super(ArrayPredicate, cls).__new__(
ArrayPredicate,
op=op,
opargs=opargs,
inputs=(term,),
mask=term.mask,
)
def _init(self, op, opargs, *args, **kwargs):
self._op = op
self._opargs = opargs
return super(ArrayPredicate, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, op, opargs, *args, **kwargs):
return (
super(ArrayPredicate, cls)._static_identity(*args, **kwargs),
op,
opargs,
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
return self._op(data, *self._opargs) & mask
class Latest(LatestMixin, CustomFilter):
"""
Filter producing the most recently-known value of `inputs[0]` on each day.
"""
pass
class SingleAsset(Filter):
"""
A Filter that computes to True only for the given asset.
"""
inputs = []
window_length = 1
def __new__(cls, asset):
return super(SingleAsset, cls).__new__(cls, asset=asset)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(SingleAsset, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (
super(SingleAsset, cls)._static_identity(*args, **kwargs), asset,
)
def _compute(self, arrays, dates, assets, mask):
is_my_asset = (assets == self._asset.sid)
out = repeat_first_axis(is_my_asset, len(mask))
# Raise an exception if `self._asset` does not exist for the entirety
# of the timeframe over which we are computing.
if (is_my_asset.sum() != 1) or ((out & mask).sum() != len(mask)):
raise NonExistentAssetInTimeFrame(
asset=self._asset, start_date=dates[0], end_date=dates[-1],
)
return out
class StaticSids(Filter):
"""
A Filter that computes True for a specific set of predetermined sids.
``StaticSids`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of sids that are known ahead of
time.
Parameters
----------
sids : iterable[int]
An iterable of sids for which to filter.
"""
inputs = ()
window_length = 0
params = ('sids',)
def __new__(cls, sids):
sids = frozenset(sids)
return super(StaticSids, cls).__new__(cls, sids=sids)
def _compute(self, arrays, dates, sids, mask):
my_columns = sids.isin(self.params['sids'])
return repeat_first_axis(my_columns, len(mask)) & mask
class StaticAssets(StaticSids):
"""
A Filter that computes True for a specific set of predetermined assets.
``StaticAssets`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of assets that are known ahead of
time.
Parameters
----------
assets : iterable[Asset]
An iterable of assets for which to filter.
"""
def __new__(cls, assets):
sids = frozenset(asset.sid for asset in assets)
return super(StaticAssets, cls).__new__(cls, sids) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/filters/filter.py | filter.py |
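# Illustrative usage sketch (hypothetical assets/sids): these filters are
# typically used as a pipeline screen to pin computation to a known universe
# while debugging. ``Pipeline`` is zipline.pipeline.Pipeline; ``aapl`` and
# ``msft`` are assumed to be Asset objects looked up elsewhere.
#
#     pipe = Pipeline(screen=StaticAssets([aapl, msft]))
#     pipe = Pipeline(screen=StaticSids([24, 5061]))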
from numpy import (
iinfo,
uint32,
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
SQLiteAdjustmentReader,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.errors import NoFurtherDataError
from zipline.utils.calendars import get_calendar
from .base import PipelineLoader
UINT32_MAX = iinfo(uint32).max
class USEquityPricingLoader(PipelineLoader):
"""
PipelineLoader for US Equity Pricing data
Delegates loading of baselines and adjustments.
"""
def __init__(self, raw_price_loader, adjustments_loader):
self.raw_price_loader = raw_price_loader
self.adjustments_loader = adjustments_loader
cal = self.raw_price_loader.trading_calendar or \
get_calendar("NYSE")
self._all_sessions = cal.all_sessions
@classmethod
def from_files(cls, pricing_path, adjustments_path):
"""
Create a loader from a bcolz equity pricing dir and a SQLite
adjustments path.
Parameters
----------
pricing_path : str
Path to a bcolz directory written by a BcolzDailyBarWriter.
adjustments_path : str
Path to an adjustments db written by a SQLiteAdjustmentWriter.
"""
return cls(
BcolzDailyBarReader(pricing_path),
SQLiteAdjustmentReader(adjustments_path)
)
def load_adjusted_array(self, columns, dates, assets, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the start of each date. We assume that the latest data
# known on day N is the data from day (N - 1), so we shift all query
# dates back by a day.
start_date, end_date = _shift_dates(
self._all_sessions, dates[0], dates[-1], shift=1,
)
colnames = [c.name for c in columns]
raw_arrays = self.raw_price_loader.load_raw_arrays(
colnames,
start_date,
end_date,
assets,
)
adjustments = self.adjustments_loader.load_adjustments(
colnames,
dates,
assets,
)
out = {}
for c, c_raw, c_adjs in zip(columns, raw_arrays, adjustments):
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
mask,
c_adjs,
c.missing_value,
)
return out
def _shift_dates(dates, start_date, end_date, shift):
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/equity_pricing_loader.py | equity_pricing_loader.py |
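# Illustrative example of the shift performed above (hypothetical calendar):
# with five consecutive sessions, shifting by one session simply moves each
# bound back to the previous label.
#
#     >>> import pandas as pd
#     >>> sessions = pd.date_range('2015-01-05', periods=5, freq='B', tz='UTC')
#     >>> _shift_dates(sessions, sessions[2], sessions[4], shift=1)
#     # -> (sessions[1], sessions[3]), i.e. 2015-01-06 and 2015-01-08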
from numpy import (
arange,
array,
eye,
float64,
full,
iinfo,
uint32,
)
from numpy.random import RandomState
from pandas import DataFrame, Timestamp
from six import iteritems
from sqlite3 import connect as sqlite3_connect
from .base import PipelineLoader
from .frame import DataFrameLoader
from zipline.data.us_equity_pricing import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
from zipline.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
object_dtype,
)
UINT_32_MAX = iinfo(uint32).max
def nanos_to_seconds(nanos):
return nanos / (1000 * 1000 * 1000)
class PrecomputedLoader(PipelineLoader):
"""
Synthetic PipelineLoader that uses a pre-computed array for each column.
Parameters
----------
constants : dict
Map from column to values to use for that column.
Values can be anything that can be passed as the first positional
argument to a DataFrame whose indices are ``dates`` and ``sids``.
dates : iterable[datetime-like]
Row labels for input data. Can be anything that pd.DataFrame will
coerce to a DatetimeIndex.
sids : iterable[int-like]
Column labels for input data. Can be anything that pd.DataFrame will
coerce to an Int64Index.
Notes
-----
Adjustments are unsupported by this loader.
"""
def __init__(self, constants, dates, sids):
loaders = {}
for column, const in iteritems(constants):
frame = DataFrame(
const,
index=dates,
columns=sids,
dtype=column.dtype,
)
loaders[column] = DataFrameLoader(
column=column,
baseline=frame,
adjustments=None,
)
self._loaders = loaders
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders[col]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(
loader.load_adjusted_array([col], dates, assets, mask)
)
return out
class EyeLoader(PrecomputedLoader):
"""
A PrecomputedLoader that emits arrays containing 1s on the diagonal and 0s
elsewhere.
Parameters
----------
columns : list[BoundColumn]
Columns that this loader should know about.
dates : iterable[datetime-like]
Same as PrecomputedLoader.
sids : iterable[int-like]
Same as PrecomputedLoader
"""
def __init__(self, columns, dates, sids):
shape = (len(dates), len(sids))
super(EyeLoader, self).__init__(
{column: eye(*shape, dtype=column.dtype) for column in columns},
dates,
sids,
)
class SeededRandomLoader(PrecomputedLoader):
"""
A PrecomputedLoader that emits arrays randomly-generated with a given seed.
Parameters
----------
seed : int
Seed for numpy.random.RandomState.
columns : list[BoundColumn]
Columns that this loader should know about.
dates : iterable[datetime-like]
Same as PrecomputedLoader.
sids : iterable[int-like]
Same as PrecomputedLoader
"""
def __init__(self, seed, columns, dates, sids):
self._seed = seed
super(SeededRandomLoader, self).__init__(
{c: self.values(c.dtype, dates, sids) for c in columns},
dates,
sids,
)
def values(self, dtype, dates, sids):
"""
Make a random array of shape (len(dates), len(sids)) with ``dtype``.
"""
shape = (len(dates), len(sids))
return {
datetime64ns_dtype: self._datetime_values,
float64_dtype: self._float_values,
int64_dtype: self._int_values,
bool_dtype: self._bool_values,
object_dtype: self._object_values,
}[dtype](shape)
@property
def state(self):
"""
Make a new RandomState from our seed.
This ensures that every call to _*_values produces the same output
every time for a given SeededRandomLoader instance.
"""
return RandomState(self._seed)
def _float_values(self, shape):
"""
Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape)
def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return (self.state.randint(low=0, high=100, size=shape)
.astype('int64')) # default is system int
def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp('2014', tz='UTC').asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype('timedelta64[D]')
return start + offsets
def _bool_values(self, shape):
"""
Return uniformly-distributed True/False values.
"""
return self.state.randn(*shape) < 0
def _object_values(self, shape):
res = self._int_values(shape).astype(str).astype(object)
return res
OHLCV = ('open', 'high', 'low', 'close', 'volume')
OHLC = ('open', 'high', 'low', 'close')
PSEUDO_EPOCH = Timestamp('2000-01-01', tz='UTC')
def asset_start(asset_info, asset):
ret = asset_info.loc[asset]['start_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def asset_end(asset_info, asset):
ret = asset_info.loc[asset]['end_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def make_bar_data(asset_info, calendar):
"""
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (1,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : pd.DatetimeIndex
The trading calendar to use.
Yields
------
p : (int, pd.DataFrame)
A sid, data pair to be passed to BcolzDailyBarWriter.write
"""
assert (
# Using .value here to avoid having to care about UTC-aware dates.
PSEUDO_EPOCH.value <
calendar.normalize().min().value <=
asset_info['start_date'].min().value
), "calendar.min(): %s\nasset_info['start_date'].min(): %s" % (
calendar.min(),
asset_info['start_date'].min(),
)
assert (asset_info['start_date'] < asset_info['end_date']).all()
def _raw_data_for_asset(asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
datetimes = calendar[calendar.slice_indexer(
asset_start(asset_info, asset_id),
asset_end(asset_info, asset_id),
)]
data = full(
(len(datetimes), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * 100 * 1000,
dtype=uint32,
)
# Add 1,000 * column-index to OHLCV columns.
data[:, :5] += arange(5, dtype=uint32) * 1000
# Add days since Jan 1 2000 for OHLCV columns.
data[:, :5] += (datetimes - PSEUDO_EPOCH).days[:, None].astype(uint32)
frame = DataFrame(
data,
index=datetimes,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
frame['day'] = nanos_to_seconds(datetimes.asi8)
frame['id'] = asset_id
return frame
for asset in asset_info.index:
yield asset, _raw_data_for_asset(asset)
def expected_bar_value(asset_id, date, colname):
"""
Compute the expected raw value for an asset/date/column triple.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
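# Worked example (illustrative): for asset 1, the 'high' column (index 1 in
# OHLCV), one day after the pseudo-epoch:
#
#     >>> from pandas import Timestamp
#     >>> expected_bar_value(1, Timestamp('2000-01-02', tz='UTC'), 'high')
#     101001    # 100,000 * 1  +  1,000 * 1  +  1 day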
def expected_bar_values_2d(dates, asset_info, colname):
"""
Return a 2D array containing expected_bar_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Values before/after an asset's lifetime are filled with 0 for volume and
NaN for price columns.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
assets = asset_info.index
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
data[i, j] = expected_bar_value(asset, date, colname)
return data
class NullAdjustmentReader(SQLiteAdjustmentReader):
"""
A SQLiteAdjustmentReader that stores no adjustments and uses in-memory
SQLite.
"""
def __init__(self):
conn = sqlite3_connect(':memory:')
writer = SQLiteAdjustmentWriter(conn, None, None)
empty = DataFrame({
'sid': array([], dtype=uint32),
'effective_date': array([], dtype=uint32),
'ratio': array([], dtype=float),
})
empty_dividends = DataFrame({
'sid': array([], dtype=uint32),
'amount': array([], dtype=float64),
'record_date': array([], dtype='datetime64[ns]'),
'ex_date': array([], dtype='datetime64[ns]'),
'declared_date': array([], dtype='datetime64[ns]'),
'pay_date': array([], dtype='datetime64[ns]'),
})
writer.write(splits=empty, mergers=empty, dividends=empty_dividends)
super(NullAdjustmentReader, self).__init__(conn) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/synthetic.py | synthetic.py |
from functools import partial
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.numpy_utils import as_column
from .base import PipelineLoader
ADJUSTMENT_COLUMNS = Index([
'sid',
'value',
'kind',
'start_date',
'end_date',
'apply_date',
])
class DataFrameLoader(PipelineLoader):
"""
A PipelineLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.pipeline.data.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
self.baseline = baseline.values.astype(self.column.dtype)
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex_axis(ADJUSTMENT_COLUMNS, axis=1)
adjustments.sort_values(['apply_date', 'sid'], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on the sorting applied on the previous line.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load data from our stored baseline.
"""
column = self.column
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
elif columns[0] != column:
raise ValueError("Can't load unknown column %s" % columns[0])
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(assets)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=self.baseline[ix_(date_indexer, assets_indexer)],
# Mask out requested columns/rows that didn't match.
mask=(good_assets & as_column(good_dates)) & mask,
adjustments=self.format_adjustments(dates, assets),
missing_value=column.missing_value,
),
} | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/frame.py | frame.py |
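# Illustrative wiring sketch (hypothetical names): a DataFrameLoader serves a
# single BoundColumn, so it is usually registered through a get_loader function
# passed to SimplePipelineEngine. ``close_frame`` is assumed to be a
# dates-by-sids DataFrame of close prices; ``trading_days`` and
# ``asset_finder`` are assumed to be defined elsewhere.
#
#     from zipline.pipeline.data import USEquityPricing
#     from zipline.pipeline.engine import SimplePipelineEngine
#
#     close_loader = DataFrameLoader(USEquityPricing.close, close_frame)
#     engine = SimplePipelineEngine(
#         get_loader=lambda column: close_loader,
#         calendar=trading_days,
#         asset_finder=asset_finder,
#     )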
import datetime
import numpy as np
import pandas as pd
from zipline.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME
from zipline.utils.numpy_utils import categorical_dtype
from zipline.utils.pandas_utils import mask_between_time
def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all()
def validate_event_metadata(event_dates,
event_timestamps,
event_sids):
assert is_sorted_ascending(event_dates), "event dates must be sorted"
assert len(event_sids) == len(event_dates) == len(event_timestamps), \
"mismatched arrays: %d != %d != %d" % (
len(event_sids),
len(event_dates),
len(event_timestamps),
)
def next_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = all_dates.searchsorted(event_timestamps)
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
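# Illustrative usage sketch (hypothetical arrays): the indexer produced above
# is applied to a 1-D array of per-event values to build a (dates x sids)
# matrix of "next known event" values, with -1 entries masked afterwards.
#
#     indexer = next_event_indexer(all_dates, all_sids,
#                                  event_dates, event_timestamps, event_sids)
#     next_values = event_values[indexer]          # fancy-index into event data
#     next_values[indexer == -1] = missing_value   # no next event known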
def previous_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = all_dates.searchsorted(eff_dts)
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
def normalize_data_query_time(dt, time, tz):
"""Apply the correct time and timezone to a date.
Parameters
----------
dt : pd.Timestamp
The original datetime that represents the date.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
Returns
-------
query_dt : pd.Timestamp
The timestamp with the correct time and date in utc.
"""
# merge the correct date with the time in the given timezone then convert
# back to utc
return pd.Timestamp(
datetime.datetime.combine(dt.date(), time),
tz=tz,
).tz_convert('utc')
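# Worked example (illustrative): an 8:45 US/Eastern cutoff on 2014-01-06
# normalizes to 13:45 UTC on the same calendar date (EST is UTC-5 in January).
#
#     >>> import datetime, pytz, pandas as pd
#     >>> normalize_data_query_time(
#     ...     pd.Timestamp('2014-01-06', tz='utc'),
#     ...     datetime.time(8, 45),
#     ...     pytz.timezone('US/Eastern'),
#     ... )
#     # -> Timestamp('2014-01-06 13:45:00+0000', tz='UTC')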
def normalize_data_query_bounds(lower, upper, time, tz):
"""Adjust the first and last dates in the requested datetime index based on
the provided query time and tz.
Parameters
----------
lower : pd.Timestamp
The lower date requested.
upper : pd.Timestamp
The upper date requested.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
"""
# Subtract one day to grab things that happened on the first day we are
# requesting. This doesn't need to be a trading day, we are only adding
# a lower bound to limit the amount of in memory filtering that needs
# to happen.
lower -= datetime.timedelta(days=1)
if time is not None:
return normalize_data_query_time(
lower,
time,
tz,
), normalize_data_query_time(
upper,
time,
tz,
)
return lower, upper
_midnight = datetime.time(0, 0)
def normalize_timestamp_to_query_time(df,
time,
tz,
inplace=False,
ts_field='timestamp'):
"""Update the timestamp field of a dataframe to normalize dates around
some data query time/timezone.
Parameters
----------
df : pd.DataFrame
The dataframe to update. This needs a column named ``ts_field``.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
inplace : bool, optional
Update the dataframe in place.
ts_field : str, optional
The name of the timestamp field in ``df``.
Returns
-------
df : pd.DataFrame
The dataframe with the timestamp field normalized. If ``inplace`` is
true, then this will be the same object as ``df`` otherwise this will
be a copy.
"""
if not inplace:
# don't mutate the dataframe in place
df = df.copy()
# There is a pandas bug (0.18.1) where if the timestamps in a
# normalized DatetimeIndex are not sorted and one calls `tz_localize(None)`
# on that DatetimeIndex, some of the dates will be shifted by an hour
# (similar to the pandas 0.16.1 bug mentioned below). Therefore, we must sort
# the df here to ensure that we normalize the timestamps correctly.
df.sort_values(ts_field, inplace=True)
dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
dtidx_local_time = dtidx.tz_convert(tz)
to_roll_forward = mask_between_time(
dtidx_local_time,
time,
_midnight,
include_end=False,
)
# For all of the times that are greater than our query time add 1
# day and truncate to the date.
# We normalize twice here because of a bug in pandas 0.16.1 that causes
# tz_localize() to shift some timestamps by an hour if they are not grouped
# together by DST/EST.
df.loc[to_roll_forward, ts_field] = (
dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)
).normalize().tz_localize(None).tz_localize('utc').normalize()
df.loc[~to_roll_forward, ts_field] = dtidx[~to_roll_forward].normalize()
return df
def check_data_query_args(data_query_time, data_query_tz):
"""Checks the data_query_time and data_query_tz arguments for loaders
and raises a standard exception if one is None and the other is not.
Parameters
----------
data_query_time : datetime.time or None
data_query_tz : tzinfo or None
Raises
------
ValueError
Raised when only one of the arguments is None.
"""
if (data_query_time is None) ^ (data_query_tz is None):
raise ValueError(
"either 'data_query_time' and 'data_query_tz' must both be"
" None or neither may be None (got %r, %r)" % (
data_query_time,
data_query_tz,
),
)
def last_in_date_group(df,
dates,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
dates : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [dates[dates.searchsorted(
df[TS_FIELD_NAME].values.astype('datetime64[D]')
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=dates,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(dates)
return last_in_group
def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map : dict[str -> str]
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
column.name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/utils.py | utils.py |
from abc import abstractmethod, abstractproperty
import numpy as np
import pandas as pd
from six import viewvalues
from toolz import groupby
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import (
Datetime641DArrayOverwrite,
Datetime64Overwrite,
Float641DArrayOverwrite,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype
from zipline.pipeline.loaders.utils import (
ffill_across_cols,
last_in_date_group
)
INVALID_NUM_QTRS_MESSAGE = "Passed invalid number of quarters %s; " \
"must pass a number of quarters >= 0"
NEXT_FISCAL_QUARTER = 'next_fiscal_quarter'
NEXT_FISCAL_YEAR = 'next_fiscal_year'
NORMALIZED_QUARTERS = 'normalized_quarters'
PREVIOUS_FISCAL_QUARTER = 'previous_fiscal_quarter'
PREVIOUS_FISCAL_YEAR = 'previous_fiscal_year'
SHIFTED_NORMALIZED_QTRS = 'shifted_normalized_quarters'
SIMULATION_DATES = 'dates'
def normalize_quarters(years, quarters):
return years * 4 + quarters - 1
def split_normalized_quarters(normalized_quarters):
years = normalized_quarters // 4
quarters = normalized_quarters % 4
return years, quarters + 1
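# Worked example (illustrative): quarters are packed into a single integer and
# unpacked losslessly, e.g. 2015Q3 -> 2015 * 4 + 3 - 1 == 8062.
#
#     >>> normalize_quarters(2015, 3)
#     8062
#     >>> split_normalized_quarters(8062)
#     (2015, 3)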
# These metadata columns are used to align event indexers.
metadata_columns = frozenset({
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
})
def required_estimates_fields(columns):
"""
Compute the set of resource columns required to serve
`columns`.
"""
# We also expect any of the field names that our loadable columns
# are mapped to.
return metadata_columns.union(viewvalues(columns))
def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
)
def add_new_adjustments(adjustments_dict,
adjustments,
column_name,
ts):
try:
adjustments_dict[column_name][ts].extend(adjustments)
except KeyError:
adjustments_dict[column_name][ts] = adjustments
class EarningsEstimatesLoader(PipelineLoader):
"""
An abstract pipeline loader for estimates data that can load data a
variable number of quarters forwards/backwards from calendar dates
depending on the `num_announcements` attribute of the columns' dataset.
If split adjustments are to be applied, a loader, split-adjusted columns,
and the split-adjusted asof-date must be supplied.
Parameters
----------
estimates : pd.DataFrame
The raw estimates data.
``estimates`` must contain at least 5 columns:
sid : int64
The asset id associated with each estimate.
event_date : datetime64[ns]
The date on which the event that the estimate is for will/has
occurred.
timestamp : datetime64[ns]
The date on which we learned about the estimate.
fiscal_quarter : int64
The quarter during which the event has/will occur.
fiscal_year : int64
The year during which the event has/will occur.
name_map : dict[str -> str]
A map of names of BoundColumns that this loader will load to the
names of the corresponding columns in `events`.
"""
def __init__(self,
estimates,
name_map):
validate_column_specs(
estimates,
name_map
)
self.estimates = estimates[
estimates[EVENT_DATE_FIELD_NAME].notnull() &
estimates[FISCAL_QUARTER_FIELD_NAME].notnull() &
estimates[FISCAL_YEAR_FIELD_NAME].notnull()
]
self.estimates[NORMALIZED_QUARTERS] = normalize_quarters(
self.estimates[FISCAL_YEAR_FIELD_NAME],
self.estimates[FISCAL_QUARTER_FIELD_NAME],
)
self.array_overwrites_dict = {
datetime64ns_dtype: Datetime641DArrayOverwrite,
float64_dtype: Float641DArrayOverwrite,
}
self.scalar_overwrites_dict = {
datetime64ns_dtype: Datetime64Overwrite,
float64_dtype: Float64Overwrite,
}
self.name_map = name_map
@abstractmethod
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
raise NotImplementedError('get_zeroth_quarter_idx')
@abstractmethod
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
raise NotImplementedError('get_shifted_qtrs')
@abstractmethod
def create_overwrite_for_estimate(self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments,
split_adjusted_asof_idx):
raise NotImplementedError('create_overwrite_for_estimate')
@abstractproperty
def searchsorted_side(self):
return NotImplementedError('searchsorted_side')
def get_requested_quarter_data(self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
The number of announcements out the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
--------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
"""
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
split_normalized_quarters(
requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
)
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments for and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
"""
split_adjusted_asof_idx = dates.searchsorted(
self._split_adjusted_asof
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
elif self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx
def collect_overwrites_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
"""
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
group[EVENT_DATE_FIELD_NAME].values,
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(
NORMALIZED_QUARTERS
).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
# Find the quarter being requested in the quarter we're
# crossing into.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid,
].iloc[idx]
# Only add adjustments if the next quarter starts somewhere
# in our date index for this sid. Our 'next' quarter can
# never start at index 0; a starting index of 0 means that
# the next quarter's event date was NaT.
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns
)
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs):
"""
Parameters
----------
group : pd.DataFrame
The data for the given sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_to_idx : dict[int -> int]
A dictionary mapping sid to the sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index. This dictionary is for adjustments for ALL sids. It is
modified as adjustments are collected.
kwargs :
Additional arguments used in collecting adjustments; unused here.
"""
# Collect all adjustments for a given sid.
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def merge_into_adjustments_for_all_sids(self,
all_adjustments_for_sid,
col_to_all_adjustments):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids.
"""
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments,
adjs,
col_name,
ts)
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied.
"""
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs
)
return col_to_all_adjustments
def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
def overwrite_with_null(self,
column,
next_qtr_start_idx,
sid_idx):
return self.scalar_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
column.missing_value
)
def load_adjusted_array(self, columns, dates, assets, mask):
# Separate out getting the columns' datasets and the datasets'
# num_announcements attributes to ensure that we're catching the right
# AttributeError.
col_to_datasets = {col: col.dataset for col in columns}
try:
groups = groupby(lambda col:
col_to_datasets[col].num_announcements,
col_to_datasets)
except AttributeError:
raise AttributeError("Datasets loaded via the "
"EarningsEstimatesLoader must define a "
"`num_announcements` attribute that defines "
"how many quarters out the loader should load"
" the data relative to `dates`.")
if any(num_qtr < 0 for num_qtr in groups):
raise ValueError(
INVALID_NUM_QTRS_MESSAGE % ','.join(
str(qtr) for qtr in groups if qtr < 0
)
)
out = {}
# To optimize performance, only work below on assets that are
# actually in the raw data.
assets_with_data = set(assets) & set(self.estimates[SID_FIELD_NAME])
last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr(
assets_with_data,
columns,
dates
)
# Determine which quarter is immediately next/previous for each
# date.
zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr)
zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx]
for num_announcements, columns in groups.items():
requested_qtr_data = self.get_requested_quarter_data(
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates,
)
# Calculate all adjustments for the given quarter and accumulate
# them for each column.
col_to_adjustments = self.get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns
)
# Lookup the asset indexer once, this is so we can reindex
# the assets returned into the assets requested for each column.
# This depends on the fact that our column multiindex has the same
# sids for each field. This allows us to do the lookup once on
# level 1 instead of doing the lookup each time per value in
# level 0.
asset_indexer = assets.get_indexer_for(
requested_qtr_data.columns.levels[1],
)
for col in columns:
column_name = self.name_map[col.name]
# allocate the empty output with the correct missing value
output_array = np.full(
(len(dates), len(assets)),
col.missing_value,
dtype=col.dtype,
)
# overwrite the missing value with values from the computed
# data
output_array[
:,
asset_indexer,
] = requested_qtr_data[column_name].values
out[col] = AdjustedArray(
output_array,
mask,
# There may not be any adjustments at all (e.g. if
# len(date) == 1), so provide a default.
dict(col_to_adjustments.get(column_name, {})),
col.missing_value,
)
return out
def get_last_data_per_qtr(self, assets_with_data, columns, dates):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
dates : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
Returns
-------
stacked_last_per_qtr : pd.DataFrame
A DataFrame indexed by [dates, sid, normalized_quarters] that has
the latest information for each row of the index, sorted by event
date.
last_per_qtr : pd.DataFrame
A DataFrame with columns that are a MultiIndex of [
self.estimates.columns, normalized_quarters, sid].
"""
# Get a DataFrame indexed by date with a MultiIndex of columns of [
# self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
dates,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(
EVENT_DATE_FIELD_NAME,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
)
return last_per_qtr, stacked_last_per_qtr
class NextEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = 'right'
def create_overwrite_for_estimate(self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None):
return [self.array_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
last_per_qtr[
column_name,
requested_quarter,
sid,
].values[:next_qtr_start_idx],
)]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
return zero_qtrs + (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or after each simulation date and
determines the next quarter by picking out the upcoming release for
each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
next_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next event.
"""
next_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] >=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(0)
return next_releases_per_date.index
class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = 'left'
def create_overwrite_for_estimate(self,
column,
column_name,
dates,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None,
split_dict=None):
return [self.overwrite_with_null(
column,
next_qtr_start_idx,
sid_idx,
)]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
return zero_qtrs - (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or before each simulation date and
determines the previous quarter by picking out the most recent
release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
"""
previous_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(-1)
return previous_releases_per_date.index
def validate_split_adjusted_column_specs(name_map, columns):
to_be_split = set(columns)
available = set(name_map.keys())
extra = to_be_split - available
if extra:
raise ValueError(
"EarningsEstimatesLoader got the following extra columns to be "
"split-adjusted: {extra}.\n"
"Got Columns: {to_be_split}\n"
"Available Columns: {available}".format(
extra=sorted(extra),
to_be_split=sorted(to_be_split),
available=sorted(available),
)
)
class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader):
"""
Estimates loader that loads data that needs to be split-adjusted.
Parameters
----------
split_adjustments_loader : SQLiteAdjustmentReader
The loader to use for reading split adjustments.
split_adjusted_column_names : iterable of str
The column names that should be split-adjusted.
split_adjusted_asof : pd.Timestamp
The date that separates data into 2 halves: the first half is the set
of dates up to and including the split_adjusted_asof date. All
adjustments occurring during this first half are applied to all
dates in this first half. The second half is the set of dates after
the split_adjusted_asof date. All adjustments occurring during this
second half are applied sequentially as they appear in the timeline.
"""
def __init__(self,
estimates,
name_map,
split_adjustments_loader,
split_adjusted_column_names,
split_adjusted_asof):
validate_split_adjusted_column_specs(name_map,
split_adjusted_column_names)
self._split_adjustments = split_adjustments_loader
self._split_adjusted_column_names = split_adjusted_column_names
self._split_adjusted_asof = split_adjusted_asof
self._split_adjustment_dict = {}
super(SplitAdjustedEstimatesLoader, self).__init__(
estimates,
name_map
)
@abstractmethod
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
raise NotImplementedError('collect_split_adjustments')
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
"""
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
(pre_adjustments,
post_adjustments) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[
self.estimates[SID_FIELD_NAME] == sid
]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Calculates both split adjustments and overwrites for all sids.
"""
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
        # Compute the index in `dates` as of which the data was
        # split-adjusted.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(
dates
)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx
)
def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
            index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
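        # Illustration (hypothetical indices): if ``upper_bound`` is 10 and
        # fresh knowledge of the requested quarter first arrives at calendar
        # index 7, the adjustment is applied only through index 6, the day
        # before the new (already split-adjusted) information arrives.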
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
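                # Illustration (hypothetical values): for a 2-for-1 split
                # with ratio 0.5 at calendar index 3, on or before the asof
                # date, the stored estimates already reflect the split, so we
                # first multiply rows [0, asof_idx] by 1/0.5 keyed at index 0
                # to un-apply it; the loop below then re-applies the 0.5
                # ratio to the same rows, keyed at index 3, so the split only
                # takes effect from the date on which it actually occurred.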
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
1 / future_adjustment
) for future_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments
def collect_post_asof_split_adjustments(self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
"""
col_to_split_adjustments = {}
if post_adjustments:
# Get an integer index
requested_qtr_timeline = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS
][sid].reset_index()
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
# Split the data into range by quarter and determine which quarter
# was being requested in each range.
# Split integer indexes up by quarter range
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1
)
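            # ``np.diff`` marks the positions where the requested quarter
            # changes; adding 1 converts them into split points so that each
            # resulting index array covers a contiguous run of dates
            # requesting the same quarter.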
requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]]
for r in qtr_ranges_idxs]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(
*post_adjustments
):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(
col_to_split_adjustments,
[adj],
column_name,
start_idx
)
return col_to_split_adjustments
def retrieve_split_adjustment_data_for_sid(self,
dates,
sid,
split_adjusted_asof_idx):
"""
dates : pd.DatetimeIndex
The calendar dates.
sid : int
The sid for which we want to retrieve adjustments.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
Returns
-------
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
"""
adjustments = self._split_adjustments.get_adjustments_for_sid(
'splits', sid
)
        # Sort the adjustments chronologically; ``sorted`` returns a new
        # list, so the result must be assigned back.
        adjustments = sorted(adjustments, key=lambda adj: adj[0])
# Get rid of any adjustments that happen outside of our date index.
adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1],
adjustments))
adjustment_values = np.array([adj[1] for adj in adjustments])
timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments])
# We need the first date on which we would have known about each
# adjustment.
date_indexes = dates.searchsorted(timestamps)
pre_adjustment_idxs = np.where(
date_indexes <= split_adjusted_asof_idx
)[0]
last_adjustment_split_asof_idx = -1
if len(pre_adjustment_idxs):
last_adjustment_split_asof_idx = pre_adjustment_idxs.max()
pre_adjustments = (
adjustment_values[:last_adjustment_split_asof_idx + 1],
date_indexes[:last_adjustment_split_asof_idx + 1]
)
post_adjustments = (
adjustment_values[last_adjustment_split_asof_idx + 1:],
date_indexes[last_adjustment_split_asof_idx + 1:],
timestamps[last_adjustment_split_asof_idx + 1:]
)
return pre_adjustments, post_adjustments
def _collect_adjustments(self,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments(
split_adjusted_asof_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
)
post_adjustments_dict = self.collect_post_asof_split_adjustments(
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns
)
return pre_adjustments_dict, post_adjustments_dict
def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
)
class PreviousSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader
):
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
)
class NextSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader
):
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
if overwrite_ts <= split_adjusted_asof_idx \
and pre_adjustments_dict:
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
        )
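# A minimal construction sketch (the arguments below are hypothetical): given
# an estimates DataFrame with the required sid/timestamp/event_date/fiscal
# fields, a name_map from dataset columns to raw field names, and a
# SQLiteAdjustmentReader, the previous-quarter split-adjusted loader could be
# built as:
#
#     loader = PreviousSplitAdjustedEarningsEstimatesLoader(
#         estimates,
#         name_map,
#         split_adjustments_loader=adjustment_reader,
#         split_adjusted_column_names=['estimate'],
#         split_adjusted_asof=pd.Timestamp('2015-01-05'),
#     )
#
# NextSplitAdjustedEarningsEstimatesLoader takes the same arguments and
# differs only in whether it surfaces the next or the previous quarter.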
import numpy as np
import pandas as pd
from six import viewvalues
from toolz import groupby, merge
from .base import PipelineLoader
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders.utils import (
next_event_indexer,
previous_event_indexer,
)
def required_event_fields(next_value_columns, previous_value_columns):
"""
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``.
"""
# These metadata columns are used to align event indexers.
return {
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
}.union(
# We also expect any of the field names that our loadable columns
# are mapped to.
viewvalues(next_value_columns),
viewvalues(previous_value_columns),
)
def validate_column_specs(events, next_value_columns, previous_value_columns):
"""
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``.
"""
required = required_event_fields(next_value_columns,
previous_value_columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EventsLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
)
class EventsLoader(PipelineLoader):
"""
Base class for PipelineLoaders that supports loading the next and previous
value of an event field.
Does not currently support adjustments.
Parameters
----------
events : pd.DataFrame
A DataFrame representing events (e.g. share buybacks or
earnings announcements) associated with particular companies.
``events`` must contain at least three columns::
sid : int64
The asset id associated with each event.
event_date : datetime64[ns]
The date on which the event occurred.
timestamp : datetime64[ns]
The date on which we learned about the event.
next_value_columns : dict[BoundColumn -> str]
Map from dataset columns to raw field names that should be used when
searching for a next event value.
previous_value_columns : dict[BoundColumn -> str]
Map from dataset columns to raw field names that should be used when
searching for a previous event value.
"""
def __init__(self,
events,
next_value_columns,
previous_value_columns):
validate_column_specs(
events,
next_value_columns,
previous_value_columns,
)
events = events[events[EVENT_DATE_FIELD_NAME].notnull()]
# We always work with entries from ``events`` directly as numpy arrays,
# so we coerce from a frame to a dict of arrays here.
self.events = {
name: np.asarray(series)
for name, series in (
events.sort_values(EVENT_DATE_FIELD_NAME).iteritems()
)
}
# Columns to load with self.load_next_events.
self.next_value_columns = next_value_columns
# Columns to load with self.load_previous_events.
self.previous_value_columns = previous_value_columns
def split_next_and_previous_event_columns(self, requested_columns):
"""
Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
``requested_columns``, partitioned into sub-sequences based on
whether the column should produce values from the next event or the
previous event
"""
def next_or_previous(c):
if c in self.next_value_columns:
return 'next'
elif c in self.previous_value_columns:
return 'previous'
raise ValueError(
"{c} not found in next_value_columns "
"or previous_value_columns".format(c=c)
)
groups = groupby(next_or_previous, requested_columns)
return groups.get('next', ()), groups.get('previous', ())
def next_event_indexer(self, dates, sids):
return next_event_indexer(
dates,
sids,
self.events[EVENT_DATE_FIELD_NAME],
self.events[TS_FIELD_NAME],
self.events[SID_FIELD_NAME],
)
def previous_event_indexer(self, dates, sids):
return previous_event_indexer(
dates,
sids,
self.events[EVENT_DATE_FIELD_NAME],
self.events[TS_FIELD_NAME],
self.events[SID_FIELD_NAME],
)
def load_next_events(self, columns, dates, sids, mask):
if not columns:
return {}
return self._load_events(
name_map=self.next_value_columns,
indexer=self.next_event_indexer(dates, sids),
columns=columns,
dates=dates,
sids=sids,
mask=mask,
)
def load_previous_events(self, columns, dates, sids, mask):
if not columns:
return {}
return self._load_events(
name_map=self.previous_value_columns,
indexer=self.previous_event_indexer(dates, sids),
columns=columns,
dates=dates,
sids=sids,
mask=mask,
)
def _load_events(self, name_map, indexer, columns, dates, sids, mask):
def to_frame(array):
return pd.DataFrame(array, index=dates, columns=sids)
assert indexer.shape == (len(dates), len(sids))
out = {}
for c in columns:
# Array holding the value for column `c` for every event we have.
col_array = self.events[name_map[c]]
if not len(col_array):
# We don't have **any** events, so return col.missing_value
# every day for every sid. We have to special case empty events
                # because in the normal branch we depend on being able to index
# with -1 for missing values, which fails if there are no
# events at all.
raw = np.full(
(len(dates), len(sids)), c.missing_value, dtype=c.dtype
)
else:
# Slot event values into sid/date locations using `indexer`.
# This produces a 2D array of the same shape as `indexer`,
                # which must be (len(dates), len(sids)).
raw = col_array[indexer]
# indexer will be -1 for locations where we don't have a known
# value. Overwrite those locations with c.missing_value.
raw[indexer < 0] = c.missing_value
# Delegate the actual array formatting logic to a DataFrameLoader.
loader = DataFrameLoader(c, to_frame(raw), adjustments=None)
out[c] = loader.load_adjusted_array([c], dates, sids, mask)[c]
return out
def load_adjusted_array(self, columns, dates, sids, mask):
n, p = self.split_next_and_previous_event_columns(columns)
return merge(
self.load_next_events(n, dates, sids, mask),
self.load_previous_events(p, dates, sids, mask),
        )
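# A minimal subclassing sketch (the dataset and its columns are hypothetical):
#
#     class MyAnnouncementLoader(EventsLoader):
#         def __init__(self, events):
#             super(MyAnnouncementLoader, self).__init__(
#                 events,
#                 next_value_columns={MyDataSet.next_date: 'event_date'},
#                 previous_value_columns={MyDataSet.previous_date: 'event_date'},
#             )
#
# The loader then serves ``MyDataSet.next_date`` from the next known event
# and ``MyDataSet.previous_date`` from the most recent past event.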
from __future__ import division, absolute_import
from abc import ABCMeta, abstractproperty
from collections import namedtuple, defaultdict
from functools import partial
from itertools import count
import warnings
from weakref import WeakKeyDictionary
import blaze as bz
from datashape import (
Date,
DateTime,
Option,
isrecord,
isscalar,
)
import numpy as np
from odo import odo
import pandas as pd
from six import with_metaclass, PY2, itervalues, iteritems
from toolz import (
complement,
compose,
flip,
groupby,
identity,
memoize,
merge,
)
import toolz.curried.operator as op
from zipline.pipeline.common import (
AD_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME
)
from zipline.pipeline.data.dataset import DataSet, Column
from zipline.pipeline.loaders.utils import (
check_data_query_args,
last_in_date_group,
normalize_data_query_bounds,
normalize_timestamp_to_query_time,
ffill_across_cols
)
from zipline.pipeline.sentinels import NotSpecified
from zipline.lib.adjusted_array import AdjustedArray, can_represent_dtype
from zipline.lib.adjustment import make_adjustment_from_indices, OVERWRITE
from zipline.utils.input_validation import (
expect_element,
ensure_timezone,
optionally,
)
from zipline.utils.numpy_utils import bool_dtype
from zipline.utils.pool import SequentialPool
from zipline.utils.preprocess import preprocess
valid_deltas_node_types = (
bz.expr.Field,
bz.expr.ReLabel,
bz.expr.Symbol,
)
traversable_nodes = (
bz.expr.Field,
bz.expr.Label,
)
is_invalid_deltas_node = complement(flip(isinstance, valid_deltas_node_types))
get__name__ = op.attrgetter('__name__')
_expr_data_base = namedtuple(
'ExprData', 'expr deltas checkpoints odo_kwargs apply_deltas_adjustments'
)
class ExprData(_expr_data_base):
"""A pair of expressions and data resources. The expressions will be
computed using the resources as the starting scope.
Parameters
----------
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
apply_deltas_adjustments : bool, optional
Whether or not deltas adjustments should be applied to the baseline
values. If False, only novel deltas will be applied.
"""
def __new__(cls,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None,
apply_deltas_adjustments=True):
return super(ExprData, cls).__new__(
cls,
expr,
deltas,
checkpoints,
odo_kwargs or {},
apply_deltas_adjustments,
)
def __repr__(self):
# If the expressions have _resources() then the repr will
# drive computation so we take the str here.
cls = type(self)
return super(ExprData, cls).__repr__(cls(
str(self.expr),
str(self.deltas),
str(self.checkpoints),
self.odo_kwargs,
self.apply_deltas_adjustments,
))
class InvalidField(with_metaclass(ABCMeta)):
"""A field that raises an exception indicating that the
field was invalid.
Parameters
----------
field : str
The name of the field.
type_ : dshape
The shape of the field.
"""
@abstractproperty
def error_format(self): # pragma: no cover
raise NotImplementedError('error_format')
def __init__(self, field, type_):
self._field = field
self._type = type_
def __get__(self, instance, owner):
raise AttributeError(
self.error_format.format(field=self._field, type_=self._type),
)
class NonNumpyField(InvalidField):
error_format = (
"field '{field}' was a non numpy compatible type: '{type_}'"
)
class NonPipelineField(InvalidField):
error_format = (
"field '{field}' was a non Pipeline API compatible type: '{type_}'"
)
_new_names = ('BlazeDataSet_%d' % n for n in count())
def datashape_type_to_numpy(type_):
"""
Given a datashape type, return the associated numpy type. Maps
datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the
numpy datetime returned by datashape isn't supported by pipeline.
Parameters
----------
type_: datashape.coretypes.Type
The datashape type.
Returns
-------
    type_ : np.dtype
The numpy dtype.
"""
if isinstance(type_, Option):
type_ = type_.ty
if isinstance(type_, DateTime):
return np.dtype('datetime64[ns]')
else:
return type_.to_numpy_dtype()
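# For example, an optional datetime measure (``?datetime`` in datashape
# syntax) is unwrapped and mapped to ``np.dtype('datetime64[ns]')`` here,
# rather than the default numpy datetime that pipeline cannot represent.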
@memoize
def new_dataset(expr, deltas, missing_values):
"""
Creates or returns a dataset from a pair of blaze expressions.
Parameters
----------
expr : Expr
The blaze expression representing the first known values.
deltas : Expr
The blaze expression representing the deltas to the data.
    missing_values : frozenset of (name, value) pairs
Association pairs column name and missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
Returns
-------
ds : type
A new dataset type.
Notes
-----
This function is memoized. repeated calls with the same inputs will return
the same type.
"""
missing_values = dict(missing_values)
class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1}
for name, type_ in expr.dshape.measure.fields:
# Don't generate a column for sid or timestamp, since they're
# implicitly the labels if the arrays that will be passed to pipeline
# Terms.
if name in (SID_FIELD_NAME, TS_FIELD_NAME):
continue
type_ = datashape_type_to_numpy(type_)
if can_represent_dtype(type_):
col = Column(
type_,
missing_values.get(name, NotSpecified),
)
else:
col = NonPipelineField(name, type_)
class_dict[name] = col
name = expr._name
if name is None:
name = next(_new_names)
# unicode is a name error in py3 but the branch is only hit
# when we are in python 2.
if PY2 and isinstance(name, unicode): # pragma: no cover # noqa
name = name.encode('utf-8')
return type(name, (DataSet,), class_dict)
def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
)
def _check_datetime_field(name, measure):
"""Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``.
"""
if not isinstance(measure[name], (Date, DateTime)):
raise TypeError(
"'{name}' field must be a '{dt}', not: '{dshape}'".format(
name=name,
dt=DateTime(),
dshape=measure[name],
),
)
class NoMetaDataWarning(UserWarning):
"""Warning used to signal that no deltas or checkpoints could be found and
none were provided.
Parameters
----------
expr : Expr
The expression that was searched.
field : {'deltas', 'checkpoints'}
The field that was looked up.
"""
def __init__(self, expr, field):
self._expr = expr
self._field = field
def __str__(self):
return 'No %s could be inferred from expr: %s' % (
self._field,
self._expr,
)
no_metadata_rules = frozenset({'warn', 'raise', 'ignore'})
def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
"""Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use.
"""
if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
return metadata_expr
try:
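        # For ``field='deltas'`` and an expression named ``es``, this looks
        # for a sibling field named ``es_deltas`` on the parent expression.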
return expr._child['_'.join(((expr._name or ''), field))]
except (ValueError, AttributeError):
if no_metadata_rule == 'raise':
raise ValueError(
"no %s table could be reflected for %s" % (field, expr)
)
elif no_metadata_rule == 'warn':
warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
return None
def _ad_as_ts(expr):
"""Duplicate the asof_date column as the timestamp column.
Parameters
----------
expr : Expr or None
The expression to change the columns of.
Returns
-------
transformed : Expr or None
The transformed expression or None if ``expr`` is None.
"""
return (
None
if expr is None else
bz.transform(expr, **{TS_FIELD_NAME: expr[AD_FIELD_NAME]})
)
def _ensure_timestamp_field(dataset_expr, deltas, checkpoints):
"""Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
    dataset_expr, deltas, checkpoints : Expr
        The new baseline, deltas, and checkpoints expressions to use.
"""
measure = dataset_expr.dshape.measure
if TS_FIELD_NAME not in measure.names:
dataset_expr = bz.transform(
dataset_expr,
**{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]}
)
deltas = _ad_as_ts(deltas)
checkpoints = _ad_as_ts(checkpoints)
else:
_check_datetime_field(TS_FIELD_NAME, measure)
return dataset_expr, deltas, checkpoints
@expect_element(
no_deltas_rule=no_metadata_rules,
no_checkpoints_rule=no_metadata_rules,
)
def from_blaze(expr,
deltas='auto',
checkpoints='auto',
loader=None,
resources=None,
odo_kwargs=None,
missing_values=None,
no_deltas_rule='warn',
no_checkpoints_rule='warn',
apply_deltas_adjustments=True,):
"""Create a Pipeline API object from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression to use.
deltas : Expr, 'auto' or None, optional
The expression to use for the point in time adjustments.
If the string 'auto' is passed, a deltas expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_deltas'. If None is passed, no
deltas will be used.
checkpoints : Expr, 'auto' or None, optional
The expression to use for the forward fill checkpoints.
If the string 'auto' is passed, a checkpoints expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_checkpoints'. If None is passed,
no checkpoints will be used.
loader : BlazeLoader, optional
The blaze loader to attach this pipeline dataset to. If None is passed,
the global blaze loader is used.
resources : dict or any, optional
The data to execute the blaze expressions against. This is used as the
scope for ``bz.compute``.
odo_kwargs : dict, optional
The keyword arguments to pass to odo when evaluating the expressions.
missing_values : dict[str -> any], optional
A dict mapping column names to missing values for those columns.
Missing values are required for integral columns.
no_deltas_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``deltas='auto'`` but no deltas can be found.
'warn' says to raise a warning but continue.
'raise' says to raise an exception if no deltas can be found.
'ignore' says take no action and proceed with no deltas.
no_checkpoints_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``checkpoints='auto'`` but no checkpoints can be
found. 'warn' says to raise a warning but continue.
'raise' says to raise an exception if no deltas can be found.
'ignore' says take no action and proceed with no deltas.
apply_deltas_adjustments : bool, optional
Whether or not deltas adjustments should be applied for this dataset.
True by default because not applying deltas adjustments is an exception
rather than the rule.
Returns
-------
pipeline_api_obj : DataSet or BoundColumn
Either a new dataset or bound column based on the shape of the expr
passed in. If a table shaped expression is passed, this will return
a ``DataSet`` that represents the whole table. If an array-like shape
is passed, a ``BoundColumn`` on the dataset that would be constructed
from passing the parent is returned.
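
    Examples
    --------
    A minimal, hypothetical sketch (the frame, field names, and values below
    are assumptions for illustration, not part of zipline's documentation)::

        import blaze as bz
        import pandas as pd

        raw = pd.DataFrame({
            'sid': [1, 1],
            'asof_date': pd.to_datetime(['2014-01-02', '2014-01-03']),
            'value': [1.0, 2.0],
        })
        expr = bz.data(
            raw,
            dshape='var * {sid: int64, asof_date: datetime, value: float64}',
        )
        ds = from_blaze(expr, deltas=None, checkpoints=None)

    ``ds.value`` is then a ``BoundColumn`` usable in a Pipeline whose engine
    is wired to the loader the dataset was registered with (the global blaze
    loader here, since no ``loader`` was passed).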
"""
if 'auto' in {deltas, checkpoints}:
invalid_nodes = tuple(filter(is_invalid_deltas_node, expr._subterms()))
if invalid_nodes:
raise TypeError(
'expression with auto %s may only contain (%s) nodes,'
" found: %s" % (
' or '.join(
                        (['deltas'] if deltas is not None else []) +
                        (['checkpoints'] if checkpoints is not None else []),
),
', '.join(map(get__name__, valid_deltas_node_types)),
', '.join(
set(map(compose(get__name__, type), invalid_nodes)),
),
),
)
deltas = _get_metadata(
'deltas',
expr,
deltas,
no_deltas_rule,
)
checkpoints = _get_metadata(
'checkpoints',
expr,
checkpoints,
no_checkpoints_rule,
)
# Check if this is a single column out of a dataset.
if bz.ndim(expr) != 1:
raise TypeError(
'expression was not tabular or array-like,'
' %s dimensions: %d' % (
'too many' if bz.ndim(expr) > 1 else 'not enough',
bz.ndim(expr),
),
)
single_column = None
if isscalar(expr.dshape.measure):
# This is a single column. Record which column we are to return
# but create the entire dataset.
single_column = rename = expr._name
field_hit = False
if not isinstance(expr, traversable_nodes):
raise TypeError(
"expression '%s' was array-like but not a simple field of"
" some larger table" % str(expr),
)
while isinstance(expr, traversable_nodes):
if isinstance(expr, bz.expr.Field):
if not field_hit:
field_hit = True
else:
break
rename = expr._name
expr = expr._child
dataset_expr = expr.relabel({rename: single_column})
else:
dataset_expr = expr
measure = dataset_expr.dshape.measure
if not isrecord(measure) or AD_FIELD_NAME not in measure.names:
raise TypeError(
"The dataset must be a collection of records with at least an"
" '{ad}' field. Fields provided: '{fields}'\nhint: maybe you need"
" to use `relabel` to change your field names".format(
ad=AD_FIELD_NAME,
fields=measure,
),
)
_check_datetime_field(AD_FIELD_NAME, measure)
dataset_expr, deltas, checkpoints = _ensure_timestamp_field(
dataset_expr,
deltas,
checkpoints,
)
if deltas is not None and (sorted(deltas.dshape.measure.fields) !=
sorted(measure.fields)):
raise TypeError(
'baseline measure != deltas measure:\n%s != %s' % (
measure,
deltas.dshape.measure,
),
)
if (checkpoints is not None and
(sorted(checkpoints.dshape.measure.fields) !=
sorted(measure.fields))):
raise TypeError(
'baseline measure != checkpoints measure:\n%s != %s' % (
measure,
checkpoints.dshape.measure,
),
)
# Ensure that we have a data resource to execute the query against.
_check_resources('expr', dataset_expr, resources)
_check_resources('deltas', deltas, resources)
_check_resources('checkpoints', checkpoints, resources)
# Create or retrieve the Pipeline API dataset.
if missing_values is None:
missing_values = {}
ds = new_dataset(dataset_expr, deltas, frozenset(missing_values.items()))
# Register our new dataset with the loader.
(loader if loader is not None else global_loader)[ds] = ExprData(
bind_expression_to_resources(dataset_expr, resources),
bind_expression_to_resources(deltas, resources)
if deltas is not None else
None,
bind_expression_to_resources(checkpoints, resources)
if checkpoints is not None else
None,
odo_kwargs=odo_kwargs,
apply_deltas_adjustments=apply_deltas_adjustments
)
if single_column is not None:
# We were passed a single column, extract and return it.
return getattr(ds, single_column)
return ds
getdataset = op.attrgetter('dataset')
getname = op.attrgetter('name')
def overwrite_novel_deltas(baseline, deltas, dates):
"""overwrite any deltas into the baseline set that would have changed our
most recently known value.
Parameters
----------
baseline : pd.DataFrame
The first known values.
deltas : pd.DataFrame
Overwrites to the baseline data.
dates : pd.DatetimeIndex
The dates requested by the loader.
Returns
-------
    baseline : pd.DataFrame
        The baseline data with the novel deltas folded in, sorted by
        timestamp.
    non_novel_deltas : pd.DataFrame
        The deltas that do not represent a baseline value.
"""
get_indexes = dates.searchsorted
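    # Roughly speaking, a delta is "novel" when its timestamp falls no later
    # than the session containing its asof_date: such a delta would have
    # changed the most recently known value at the time, so it is folded
    # into the baseline rather than applied as an adjustment.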
novel_idx = (
get_indexes(deltas[TS_FIELD_NAME].values, 'right') -
get_indexes(deltas[AD_FIELD_NAME].values, 'left')
) <= 1
novel_deltas = deltas.loc[novel_idx]
non_novel_deltas = deltas.loc[~novel_idx]
cat = pd.concat(
(baseline, novel_deltas),
ignore_index=True,
copy=False,
)
cat.sort_values(TS_FIELD_NAME, inplace=True)
return cat, non_novel_deltas
def overwrite_from_dates(asof, dense_dates, sparse_dates, asset_idx, value):
"""Construct an Overwrite with the correct
start and end date based on the asof date of the delta,
    the dense_dates, and the sparse_dates.
Parameters
----------
asof : datetime
The asof date of the delta.
dense_dates : pd.DatetimeIndex
The dates requested by the loader.
sparse_dates : pd.DatetimeIndex
The dates that appeared in the dataset.
asset_idx : tuple of int
The index of the asset in the block. If this is a tuple, then this
is treated as the first and last index to use.
value : any
The value to overwrite with.
Returns
-------
overwrite : Float64Overwrite
The overwrite that will apply the new value to the data.
Notes
-----
    This is forward-filling all dense dates that are between the asof_date
and the next sparse date after the asof_date.
For example:
let ``asof = pd.Timestamp('2014-01-02')``,
``dense_dates = pd.date_range('2014-01-01', '2014-01-05')``
``sparse_dates = pd.to_datetime(['2014-01', '2014-02', '2014-04'])``
Then the overwrite will apply to indexes: 1, 2, 3, 4
"""
if asof is pd.NaT:
# Not an actual delta.
# This happens due to the groupby we do on the deltas.
return
first_row = dense_dates.searchsorted(asof)
next_idx = sparse_dates.searchsorted(asof.asm8, 'right')
if next_idx == len(sparse_dates):
# There is no next date in the sparse, this overwrite should apply
# through the end of the dense dates.
last_row = len(dense_dates) - 1
else:
# There is a next date in sparse dates. This means that the overwrite
# should only apply until the index of this date in the dense dates.
last_row = dense_dates.searchsorted(sparse_dates[next_idx]) - 1
if first_row > last_row:
return
first, last = asset_idx
yield make_adjustment_from_indices(
first_row, last_row, first, last, OVERWRITE, value
)
def adjustments_from_deltas_no_sids(dense_dates,
sparse_dates,
column_idx,
column_name,
asset_idx,
deltas):
"""Collect all the adjustments that occur in a dataset that does not
have a sid column.
Parameters
----------
dense_dates : pd.DatetimeIndex
The dates requested by the loader.
sparse_dates : pd.DatetimeIndex
The dates that were in the raw data.
column_idx : int
The index of the column in the dataset.
column_name : str
The name of the column to compute deltas for.
asset_idx : pd.Series[int -> int]
The mapping of sids to their index in the output.
deltas : pd.DataFrame
The overwrites that should be applied to the dataset.
Returns
-------
adjustments : dict[idx -> Float64Overwrite]
The adjustments dictionary to feed to the adjusted array.
"""
ad_series = deltas[AD_FIELD_NAME]
idx = 0, 0
return {
dense_dates.get_loc(kd): overwrite_from_dates(
ad_series.loc[kd],
dense_dates,
sparse_dates,
idx,
v,
) for kd, v in deltas[column_name].iteritems()
}
def adjustments_from_deltas_with_sids(dense_dates,
sparse_dates,
column_idx,
column_name,
asset_idx,
deltas):
"""Collect all the adjustments that occur in a dataset that has a sid
column.
Parameters
----------
dense_dates : pd.DatetimeIndex
The dates requested by the loader.
sparse_dates : pd.DatetimeIndex
The dates that were in the raw data.
column_idx : int
The index of the column in the dataset.
column_name : str
The name of the column to compute deltas for.
asset_idx : pd.Series[int -> int]
The mapping of sids to their index in the output.
deltas : pd.DataFrame
The overwrites that should be applied to the dataset.
Returns
-------
adjustments : dict[idx -> Float64Overwrite]
The adjustments dictionary to feed to the adjusted array.
"""
ad_series = deltas[AD_FIELD_NAME]
adjustments = defaultdict(list)
for sid, per_sid in deltas[column_name].iteritems():
idx = asset_idx[sid]
for kd, v in per_sid.iteritems():
adjustments[dense_dates.searchsorted(kd)].extend(
overwrite_from_dates(
ad_series.loc[kd, sid],
dense_dates,
sparse_dates,
(idx, idx),
v,
),
)
return dict(adjustments) # no subclasses of dict
class BlazeLoader(dict):
"""A PipelineLoader for datasets constructed with ``from_blaze``.
Parameters
----------
dsmap : mapping, optional
An initial mapping of datasets to ``ExprData`` objects.
NOTE: Further mutations to this map will not be reflected by this
object.
data_query_time : time, optional
The time to use for the data query cutoff.
data_query_tz : tzinfo or str, optional
        The timezone to use for the data query cutoff.
pool : Pool, optional
The pool to use to run blaze queries concurrently. This object must
support ``imap_unordered``, ``apply`` and ``apply_async`` methods.
Attributes
----------
pool : Pool
The pool to use to run blaze queries concurrently. This object must
support ``imap_unordered``, ``apply`` and ``apply_async`` methods.
It is possible to change the pool after the loader has been
constructed. This allows us to set a new pool for the ``global_loader``
like: ``global_loader.pool = multiprocessing.Pool(4)``.
See Also
--------
:class:`zipline.utils.pool.SequentialPool`
:class:`multiprocessing.Pool`
"""
@preprocess(data_query_tz=optionally(ensure_timezone))
def __init__(self,
dsmap=None,
data_query_time=None,
data_query_tz=None,
pool=SequentialPool()):
self.update(dsmap or {})
check_data_query_args(data_query_time, data_query_tz)
self._data_query_time = data_query_time
self._data_query_tz = data_query_tz
# explicitly public
self.pool = pool
@classmethod
@memoize(cache=WeakKeyDictionary())
def global_instance(cls):
return cls()
def __hash__(self):
return id(self)
def __call__(self, column):
if column.dataset in self:
return self
raise KeyError(column)
def __repr__(self):
return '<%s: %s>' % (
type(self).__name__,
super(BlazeLoader, self).__repr__(),
)
def load_adjusted_array(self, columns, dates, assets, mask):
return merge(
self.pool.imap_unordered(
partial(self._load_dataset, dates, assets, mask),
itervalues(groupby(getdataset, columns)),
),
)
def _load_dataset(self, dates, assets, mask, columns):
try:
(dataset,) = set(map(getdataset, columns))
except ValueError:
raise AssertionError('all columns must come from the same dataset')
expr, deltas, checkpoints, odo_kwargs, apply_deltas_adjustments = self[
dataset
]
have_sids = (dataset.ndim == 2)
asset_idx = pd.Series(index=assets, data=np.arange(len(assets)))
assets = list(map(int, assets)) # coerce from numpy.int64
added_query_fields = {AD_FIELD_NAME, TS_FIELD_NAME} | (
{SID_FIELD_NAME} if have_sids else set()
)
requested_columns = set(map(getname, columns))
colnames = sorted(added_query_fields | requested_columns)
data_query_time = self._data_query_time
data_query_tz = self._data_query_tz
lower_dt, upper_dt = normalize_data_query_bounds(
dates[0],
dates[-1],
data_query_time,
data_query_tz,
)
def collect_expr(e, lower):
"""Materialize the expression as a dataframe.
Parameters
----------
e : Expr
The baseline or deltas expression.
lower : datetime
The lower time bound to query.
Returns
-------
result : pd.DataFrame
The resulting dataframe.
Notes
-----
This can return more data than needed. The in memory reindex will
handle this.
"""
predicate = e[TS_FIELD_NAME] <= upper_dt
if lower is not None:
predicate &= e[TS_FIELD_NAME] >= lower
return odo(e[predicate][colnames], pd.DataFrame, **odo_kwargs)
lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints, colnames, lower_dt, odo_kwargs
)
materialized_expr = self.pool.apply_async(collect_expr, (expr, lower))
materialized_deltas = (
self.pool.apply(collect_expr, (deltas, lower))
if deltas is not None else
pd.DataFrame(columns=colnames)
)
if materialized_checkpoints is not None:
materialized_expr = pd.concat(
(
materialized_checkpoints,
materialized_expr.get(),
),
ignore_index=True,
copy=False,
)
# It's not guaranteed that assets returned by the engine will contain
# all sids from the deltas table; filter out such mismatches here.
if not materialized_deltas.empty and have_sids:
materialized_deltas = materialized_deltas[
materialized_deltas[SID_FIELD_NAME].isin(assets)
]
if data_query_time is not None:
for m in (materialized_expr, materialized_deltas):
m.loc[:, TS_FIELD_NAME] = m.loc[
:, TS_FIELD_NAME
].astype('datetime64[ns]')
normalize_timestamp_to_query_time(
m,
data_query_time,
data_query_tz,
inplace=True,
ts_field=TS_FIELD_NAME,
)
# Inline the deltas that changed our most recently known value.
# Also, we reindex by the dates to create a dense representation of
# the data.
sparse_output, non_novel_deltas = overwrite_novel_deltas(
materialized_expr,
materialized_deltas,
dates,
)
# If we ever have cases where we find out about multiple asof_dates'
# data on the same TS, we want to make sure that last_in_date_group
# selects the correct last asof_date's value.
sparse_output.sort_values(AD_FIELD_NAME, inplace=True)
non_novel_deltas.sort_values(AD_FIELD_NAME, inplace=True)
if AD_FIELD_NAME not in requested_columns:
sparse_output.drop(AD_FIELD_NAME, axis=1, inplace=True)
sparse_deltas = last_in_date_group(non_novel_deltas,
dates,
assets,
reindex=False,
have_sids=have_sids)
dense_output = last_in_date_group(sparse_output,
dates,
assets,
reindex=True,
have_sids=have_sids)
ffill_across_cols(dense_output, columns, {c.name: c.name
for c in columns})
# By default, no non-novel deltas are applied.
def no_adjustments_from_deltas(*args):
return {}
adjustments_from_deltas = no_adjustments_from_deltas
if have_sids:
if apply_deltas_adjustments:
adjustments_from_deltas = adjustments_from_deltas_with_sids
column_view = identity
else:
# If we do not have sids, use the column view to make a single
# column vector which is unassociated with any assets.
column_view = op.itemgetter(np.s_[:, np.newaxis])
if apply_deltas_adjustments:
adjustments_from_deltas = adjustments_from_deltas_no_sids
mask = np.full(
shape=(len(mask), 1), fill_value=True, dtype=bool_dtype,
)
return {
column: AdjustedArray(
column_view(
dense_output[column.name].values.astype(column.dtype),
),
mask,
adjustments_from_deltas(
dates,
sparse_output[TS_FIELD_NAME].values,
column_idx,
column.name,
asset_idx,
sparse_deltas,
),
column.missing_value,
)
for column_idx, column in enumerate(columns)
}
global_loader = BlazeLoader.global_instance()
def bind_expression_to_resources(expr, resources):
"""
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources.
"""
# bind the resources into the expression
if resources is None:
resources = {}
# _subs stands for substitute. It's not actually private, blaze just
# prefixes symbol-manipulation methods with underscores to prevent
# collisions with data column names.
return expr._subs({
k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources)
})
def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs):
"""
    Computes a lower bound and a DataFrame of materialized checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
    lower_dt : pd.Timestamp
        The lower date being queried for, which serves as an upper bound for
        the checkpoints.
    odo_kwargs : dict, optional
        The extra keyword arguments to pass to ``odo``.

    Returns
    -------
    lower : pd.Timestamp or None
        The lower date constraint implied by the checkpoints, or None if no
        checkpoint exists on or before ``lower_dt``.
    materialized_checkpoints : pd.DataFrame
        The checkpoint rows at that timestamp, or an empty frame with
        ``colnames`` as its columns when there is no usable checkpoint.
"""
if checkpoints is not None:
ts = checkpoints[TS_FIELD_NAME]
checkpoints_ts = odo(
ts[ts <= lower_dt].max(),
pd.Timestamp,
**odo_kwargs
)
if pd.isnull(checkpoints_ts):
# We don't have a checkpoint for before our start date so just
# don't constrain the lower date.
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None
else:
materialized_checkpoints = odo(
checkpoints[ts == checkpoints_ts][colnames],
pd.DataFrame,
**odo_kwargs
)
lower = checkpoints_ts
else:
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None # we don't have a good lower date constraint
return lower, materialized_checkpoints
def ffill_query_in_range(expr,
lower,
upper,
checkpoints=None,
odo_kwargs=None,
ts_field=TS_FIELD_NAME):
"""Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
A strict dataframe for the data in the given date range. This may
start before the requested start date if a value is needed to ffill.
"""
odo_kwargs = odo_kwargs or {}
computed_lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints,
expr.fields,
lower,
odo_kwargs,
)
pred = expr[ts_field] <= upper
if computed_lower is not None:
# only constrain the lower date if we computed a new lower date
pred &= expr[ts_field] >= computed_lower
raw = pd.concat(
(
materialized_checkpoints,
odo(
expr[pred],
pd.DataFrame,
**odo_kwargs
),
),
ignore_index=True,
)
raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]')
    return raw
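# A minimal usage sketch (the bound expression below is hypothetical):
#
#     raw = ffill_query_in_range(
#         bound_expr,
#         lower=pd.Timestamp('2014-01-06'),
#         upper=pd.Timestamp('2014-01-10'),
#     )
#
# ``raw`` may include rows dated earlier than ``lower`` so that values can
# be forward-filled into the requested range.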
from datashape import istabular
from .core import (
bind_expression_to_resources,
)
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.pipeline.loaders.blaze.utils import load_raw_data
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
PreviousEarningsEstimatesLoader,
required_estimates_fields,
metadata_columns,
PreviousSplitAdjustedEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader)
from zipline.pipeline.loaders.utils import (
check_data_query_args,
)
from zipline.utils.input_validation import ensure_timezone, optionally
from zipline.utils.preprocess import preprocess
class BlazeEstimatesLoader(PipelineLoader):
"""An abstract pipeline loader for the estimates datasets that loads
data from a blaze expression.
Parameters
----------
expr : Expr
The expression representing the data to load.
columns : dict[str -> str]
A dict mapping BoundColumn names to the associated names in `expr`.
resources : dict, optional
Mapping from the loadable terms of ``expr`` to actual data resources.
odo_kwargs : dict, optional
Extra keyword arguments to pass to odo when executing the expression.
data_query_time : time, optional
The time to use for the data query cutoff.
data_query_tz : tzinfo or str
        The timezone to use for the data query cutoff.
checkpoints : Expr, optional
The expression representing checkpointed data to be used for faster
forward-filling of data from `expr`.
Notes
-----
The expression should have a tabular dshape of::
Dim * {{
{SID_FIELD_NAME}: int64,
{TS_FIELD_NAME}: datetime,
{FISCAL_YEAR_FIELD_NAME}: float64,
{FISCAL_QUARTER_FIELD_NAME}: float64,
{EVENT_DATE_FIELD_NAME}: datetime,
}}
And other dataset-specific fields, where each row of the table is a
record including the sid to identify the company, the timestamp where we
learned about the announcement, and the date of the event.
    If the '{TS_FIELD_NAME}' field is not included, it is assumed that we
start the backtest with knowledge of all announcements.
"""
__doc__ = __doc__.format(
SID_FIELD_NAME=SID_FIELD_NAME,
TS_FIELD_NAME=TS_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME=FISCAL_YEAR_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME=FISCAL_QUARTER_FIELD_NAME,
EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME,
)
@preprocess(data_query_tz=optionally(ensure_timezone))
def __init__(self,
expr,
columns,
resources=None,
odo_kwargs=None,
data_query_time=None,
data_query_tz=None,
checkpoints=None):
dshape = expr.dshape
if not istabular(dshape):
raise ValueError(
'expression dshape must be tabular, got: %s' % dshape,
)
required_cols = list(
required_estimates_fields(columns)
)
self._expr = bind_expression_to_resources(
expr[required_cols],
resources,
)
self._columns = columns
self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {}
check_data_query_args(data_query_time, data_query_tz)
self._data_query_time = data_query_time
self._data_query_tz = data_query_tz
self._checkpoints = checkpoints
def load_adjusted_array(self, columns, dates, assets, mask):
# Only load requested columns.
requested_column_names = [self._columns[column.name]
for column in columns]
raw = load_raw_data(
assets,
dates,
self._data_query_time,
self._data_query_tz,
self._expr[sorted(metadata_columns.union(requested_column_names))],
self._odo_kwargs,
checkpoints=self._checkpoints,
)
return self.loader(
raw,
{column.name: self._columns[column.name] for column in columns},
).load_adjusted_array(
columns,
dates,
assets,
mask,
)
class BlazeNextEstimatesLoader(BlazeEstimatesLoader):
loader = NextEarningsEstimatesLoader
class BlazePreviousEstimatesLoader(BlazeEstimatesLoader):
loader = PreviousEarningsEstimatesLoader
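# A construction sketch for the concrete loaders above; ``expr`` stands for a
# hypothetical bound blaze expression with the dshape documented on
# ``BlazeEstimatesLoader``, and the column mapping shown is illustrative:
#
#     loader = BlazeNextEstimatesLoader(
#         expr,
#         columns={'estimate': 'estimate', 'event_date': 'event_date'},
#     )
#
# The resulting loader is typically returned from the ``get_loader`` callable
# handed to a pipeline engine for the corresponding estimates columns.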
class BlazeSplitAdjustedEstimatesLoader(BlazeEstimatesLoader):
def __init__(self,
expr,
columns,
split_adjustments_loader,
split_adjusted_column_names,
split_adjusted_asof,
**kwargs):
self._split_adjustments = split_adjustments_loader
self._split_adjusted_column_names = split_adjusted_column_names
self._split_adjusted_asof = split_adjusted_asof
super(BlazeSplitAdjustedEstimatesLoader, self).__init__(
expr,
columns,
**kwargs
)
def load_adjusted_array(self, columns, dates, assets, mask):
# Only load requested columns.
requested_column_names = [self._columns[column.name]
for column in columns]
        requested_split_adjusted_columns = [
column_name
for column_name in self._split_adjusted_column_names
if column_name in requested_column_names
]
raw = load_raw_data(
assets,
dates,
self._data_query_time,
self._data_query_tz,
self._expr[sorted(metadata_columns.union(requested_column_names))],
self._odo_kwargs,
checkpoints=self._checkpoints,
)
return self.loader(
raw,
{column.name: self._columns[column.name] for column in columns},
self._split_adjustments,
            requested_split_adjusted_columns,
self._split_adjusted_asof,
).load_adjusted_array(
columns,
dates,
assets,
mask,
)
class BlazeNextSplitAdjustedEstimatesLoader(BlazeSplitAdjustedEstimatesLoader):
loader = NextSplitAdjustedEarningsEstimatesLoader
class BlazePreviousSplitAdjustedEstimatesLoader(
BlazeSplitAdjustedEstimatesLoader
):
loader = PreviousSplitAdjustedEarningsEstimatesLoader | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/blaze/estimates.py | estimates.py |
from zipline.pipeline.common import SID_FIELD_NAME, TS_FIELD_NAME
from zipline.pipeline.loaders.blaze.core import ffill_query_in_range
from zipline.pipeline.loaders.utils import (
normalize_data_query_bounds,
normalize_timestamp_to_query_time,
)
def load_raw_data(assets,
dates,
data_query_time,
data_query_tz,
expr,
odo_kwargs,
checkpoints=None):
"""
Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
    assets : pd.Int64Index
        The assets to load data for.
    dates : pd.DatetimeIndex
        The simulation dates to load data for.
    data_query_time : datetime.time
        The time used as cutoff for new information.
    data_query_tz : tzinfo
        The timezone to normalize your dates to before comparing against
        `time`.
    expr : Expr
        The expression representing the data to load.
    odo_kwargs : dict
        Extra keyword arguments to pass to odo when executing the expression.
    checkpoints : Expr, optional
        The expression representing the checkpointed data for `expr`.
    Returns
    -------
    raw : pd.DataFrame
        The result of computing expr and materializing the result as a
        dataframe.
"""
lower_dt, upper_dt = normalize_data_query_bounds(
dates[0],
dates[-1],
data_query_time,
data_query_tz,
)
raw = ffill_query_in_range(
expr,
lower_dt,
upper_dt,
checkpoints=checkpoints,
odo_kwargs=odo_kwargs,
)
sids = raw[SID_FIELD_NAME]
raw.drop(
sids[~sids.isin(assets)].index,
inplace=True
)
if data_query_time is not None:
normalize_timestamp_to_query_time(
raw,
data_query_time,
data_query_tz,
inplace=True,
ts_field=TS_FIELD_NAME,
)
return raw | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/blaze/utils.py | utils.py |
from datashape import istabular
from .core import (
bind_expression_to_resources,
)
from zipline.pipeline.common import SID_FIELD_NAME, TS_FIELD_NAME, \
EVENT_DATE_FIELD_NAME
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.pipeline.loaders.blaze.utils import load_raw_data
from zipline.pipeline.loaders.events import (
EventsLoader,
required_event_fields,
)
from zipline.pipeline.loaders.utils import (
check_data_query_args,
)
from zipline.utils.input_validation import ensure_timezone, optionally
from zipline.utils.preprocess import preprocess
class BlazeEventsLoader(PipelineLoader):
"""An abstract pipeline loader for the events datasets that loads
data from a blaze expression.
Parameters
----------
expr : Expr
The expression representing the data to load.
next_value_columns : dict[BoundColumn -> raw column name]
A dict mapping 'next' BoundColumns to their column names in `expr`.
previous_value_columns : dict[BoundColumn -> raw column name]
A dict mapping 'previous' BoundColumns to their column names in `expr`.
resources : dict, optional
Mapping from the loadable terms of ``expr`` to actual data resources.
odo_kwargs : dict, optional
Extra keyword arguments to pass to odo when executing the expression.
data_query_time : time, optional
The time to use for the data query cutoff.
data_query_tz : tzinfo or str
The timezone to use for the data query cutoff.
Notes
-----
The expression should have a tabular dshape of::
Dim * {{
{SID_FIELD_NAME}: int64,
{TS_FIELD_NAME}: datetime,
{EVENT_DATE_FIELD_NAME}: datetime,
}}
And other dataset-specific fields, where each row of the table is a
record including the sid to identify the company, the timestamp where we
learned about the announcement, and the event date.
    If the '{TS_FIELD_NAME}' field is not included, it is assumed that we
start the backtest with knowledge of all announcements.
"""
__doc__ = __doc__.format(SID_FIELD_NAME=SID_FIELD_NAME,
TS_FIELD_NAME=TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME)
@preprocess(data_query_tz=optionally(ensure_timezone))
def __init__(self,
expr,
next_value_columns,
previous_value_columns,
resources=None,
odo_kwargs=None,
data_query_time=None,
data_query_tz=None):
dshape = expr.dshape
if not istabular(dshape):
raise ValueError(
'expression dshape must be tabular, got: %s' % dshape,
)
required_cols = list(
required_event_fields(next_value_columns, previous_value_columns)
)
self._expr = bind_expression_to_resources(
expr[required_cols],
resources,
)
self._next_value_columns = next_value_columns
self._previous_value_columns = previous_value_columns
self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {}
check_data_query_args(data_query_time, data_query_tz)
self._data_query_time = data_query_time
self._data_query_tz = data_query_tz
def load_adjusted_array(self, columns, dates, assets, mask):
raw = load_raw_data(assets,
dates,
self._data_query_time,
self._data_query_tz,
self._expr,
self._odo_kwargs)
return EventsLoader(
events=raw,
next_value_columns=self._next_value_columns,
previous_value_columns=self._previous_value_columns,
).load_adjusted_array(
columns,
dates,
assets,
mask,
) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/pipeline/loaders/blaze/events.py | events.py |
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
from logbook import Logger
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
merge,
partition_all,
sliding_window,
valmap,
)
from toolz.curried import operator as op
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MapAssetIdentifierIndexError,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
ValueNotFoundForField,
SidsNotFound,
SymbolNotFound,
)
from . import (
Asset, Equity, Future,
)
from .continuous_futures import (
ADJUSTMENT_STYLES,
CHAIN_PREDICATES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from zipline.utils.control_flow import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
    end date of the last period is pushed forward to the max Timestamp.
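    For example, two periods under the same key::

        [OwnershipPeriod(start=2014-01-01, end=2014-01-05, ...),
         OwnershipPeriod(start=2014-02-01, end=2014-02-03, ...)]

    become::

        (OwnershipPeriod(start=2014-01-01, end=2014-02-01, ...),
         OwnershipPeriod(start=2014-02-01, end=Timestamp.max, ...))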
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
rows = sa.select(table.c).execute().fetchall()
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
# B - offset (could be packed smaller since offsets of greater than 12 are
# probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
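# An illustrative call; the exact integer is determined by the byte layout
# described above and is not reproduced here:
#
#     sid = _encode_continuous_future_sid('CL', 0, 'calendar', None)
#
# packs (sid type, ord('C'), ord('L'), offset, roll-style id, adjustment id,
# 0, 0) into eight bytes and interprets them as a single integer.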
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
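    Notes
    -----
    A short usage sketch; the database path, sid and symbol below are
    hypothetical::

        finder = AssetFinder('assets.db')
        by_sid = finder.retrieve_asset(24)
        by_symbol = finder.lookup_symbol(
            'AAPL',
            pd.Timestamp('2014-01-02', tz='UTC'),
        )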
"""
# Token used as a substitute for pickling objects that contain a
# reference to an AssetFinder.
PERSISTENT_TOKEN = "<AssetFinder>"
@preprocess(engine=coerce_string_to_eng)
def __init__(self, engine, future_chain_predicates=CHAIN_PREDICATES):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._caches = (self._asset_cache, self._asset_type_cache) = {}, {}
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = None
def _reset_caches(self):
"""
Reset our asset caches.
You probably shouldn't call this method.
"""
# This method exists as a workaround for the in-place mutating behavior
# of `TradingAlgorithm._write_and_map_id_index_to_sids`. No one else
# should be calling this.
for cache in self._caches:
cache.clear()
self.reload_symbol_maps()
def reload_symbol_maps(self):
"""Clear the in memory symbol lookup maps.
This will make any changes to the underlying db available to the
symbol maps.
"""
# clear the lazyval caches, the next access will requery
try:
del type(self).symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).fuzzy_symbol_ownership_map[self]
except KeyError:
pass
try:
del type(self).equity_supplementary_map[self]
except KeyError:
pass
try:
del type(self).equity_supplementary_map_by_sid[self]
except KeyError:
pass
@lazyval
def symbol_ownership_map(self):
return build_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
)
@lazyval
def fuzzy_symbol_ownership_map(self):
fuzzy_mappings = {}
for (cs, scs), owners in iteritems(self.symbol_ownership_map):
fuzzy_owners = fuzzy_mappings.setdefault(
cs + scs,
[],
)
fuzzy_owners.extend(owners)
fuzzy_owners.sort()
return fuzzy_mappings
@lazyval
def equity_supplementary_map(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.value),
value_from_row=lambda row: row.value,
)
@lazyval
def equity_supplementary_map_by_sid(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.sid),
value_from_row=lambda row: row.value,
)
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
        Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
        Users generally shouldn't need to use this method (instead, they should
prefer the more general/friendly `retrieve_assets`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
        futures : dict[int -> Future]
Raises
------
        FutureContractsNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
symbol_cols = self.equity_symbol_mappings.c
inner = sa.select(
(symbol_cols.sid,) +
tuple(map(
op.getitem(symbol_cols),
symbol_columns,
)),
).where(
symbol_cols.sid.in_(map(int, sid_group)),
).order_by(
symbol_cols.end_date.asc(),
)
return sa.select(inner.c).group_by(inner.c.sid)
def _lookup_most_recent_symbols(self, sids):
symbols = {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
),
)
}
if len(symbols) != len(sids):
raise EquitiesNotFound(
sids=set(sids) - set(symbols),
plural=True,
)
return symbols
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
symbols=self._lookup_most_recent_symbols(sids)):
return merge(row, symbols[row['sid']])
else:
mkdict = dict
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
        ----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
def _lookup_symbol_strict(self, symbol, as_of_date):
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = self.symbol_ownership_map[
company_symbol,
share_class_symbol,
]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) > 1:
                # more than one equity has held this ticker, this is ambiguous
# without the date
raise MultipleSymbolsFound(
symbol=symbol,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this symbol, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the ticker on the given asof date
raise SymbolNotFound(symbol=symbol)
def _lookup_symbol_fuzzy(self, symbol, as_of_date):
symbol = symbol.upper()
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = self.fuzzy_symbol_ownership_map[
company_symbol + share_class_symbol
]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held a symbol matching the fuzzy symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) == 1:
# only one valid match
return self.retrieve_asset(owners[0].sid)
options = []
for _, _, sid, sym in owners:
if sym == symbol:
# there are multiple options, look for exact matches
options.append(self.retrieve_asset(sid))
if len(options) == 1:
# there was only one exact match
return options[0]
# there are more than one exact match for this fuzzy symbol
raise MultipleSymbolsFound(
symbol=symbol,
options=set(options),
)
options = {}
for start, end, sid, sym in owners:
if start <= as_of_date < end:
# see which fuzzy symbols were owned on the asof date.
options[sid] = sym
if not options:
# no equity owned the fuzzy symbol on the date requested
raise SymbolNotFound(symbol=symbol)
sid_keys = list(options.keys())
# If there was only one owner, or there is a fuzzy and non-fuzzy which
# map to the same sid, return it.
if len(options) == 1:
return self.retrieve_asset(sid_keys[0])
for sid, sym in options.items():
# Possible to have a scenario where multiple fuzzy matches have the
# same date. Want to find the one where symbol and share class
# match.
if (company_symbol, share_class_symbol) == \
split_delimited_symbol(sym):
return self.retrieve_asset(sid)
# multiple equities held tickers matching the fuzzy ticker but
# there are no exact matches
raise MultipleSymbolsFound(
symbol=symbol,
options=[self.retrieve_asset(s) for s in sid_keys],
)
def lookup_symbol(self, symbol, as_of_date, fuzzy=False):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
shareclass of ``BRK`` as ``BRK.A``, where others could write
``BRK_A``.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
return self._lookup_symbol_fuzzy(symbol, as_of_date)
return self._lookup_symbol_strict(symbol, as_of_date)
def lookup_symbols(self, symbols, as_of_date, fuzzy=False):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
Returns
-------
equities : list[Equity]
"""
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = self.lookup_symbol(sym, as_of_date, fuzzy)
append_output(equity)
return out
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def lookup_by_supplementary_field(self, field_name, value, as_of_date):
try:
owners = self.equity_supplementary_map[
field_name,
value,
]
            assert owners, 'empty owners list for %r' % ((field_name, value),)
except KeyError:
# no equity has ever held this value
raise ValueNotFoundForField(field=field_name, value=value)
if not as_of_date:
if len(owners) > 1:
                # more than one equity has held this value, this is ambiguous
# without the date
raise MultipleValuesFoundForField(
field=field_name,
value=value,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this value, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the value on the given asof date
raise ValueNotFoundForField(field=field_name, value=value)
def get_supplementary_field(
self,
sid,
field_name,
as_of_date,
):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
            If we have no values for this asset, or no value was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
            assert periods, 'empty periods list for %r' % ((field_name, sid),)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
                # This equity has held more than one value, this is ambiguous
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid)
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(fc_cols.start_date != pd.NaT.value)).order_by(
fc_cols.sid).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange=exchange,
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
        doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@lazyval
def _symbol_lookups(self):
"""
        An iterable of symbol lookup functions to use with ``lookup_generic``.
Attempts equities lookup, then futures.
"""
return (
self.lookup_symbol,
# lookup_future_symbol method does not use as_of date, since
# symbols are unique.
#
# Wrap the function in a lambda so that both methods share a
# signature, so that when the functions are iterated over
# the consumer can use the same arguments with both methods.
lambda symbol, _: self.lookup_future_symbol(symbol)
)
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidsNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
for lookup in self._symbol_lookups:
try:
matches.append(lookup(asset_convertible, as_of_date))
return
except SymbolNotFound:
continue
else:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
        Convert an AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
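        For example, given a hypothetical ``finder`` and datetime ``dt``::

            assets, missing = finder.lookup_generic(['AAPL', 24], dt)

        resolves the symbol and the sid into Assets and collects anything
        that could not be resolved into ``missing``.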
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidsNotFound(sids=[asset_convertible_or_iterable])
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# If the input is a ContinuousFuture just return it as-is.
elif isinstance(asset_convertible_or_iterable, ContinuousFuture):
return asset_convertible_or_iterable, missing
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
raise NotAssetConvertible(
"Input was not a AssetConvertible "
"or iterable of AssetConvertible."
)
for obj in iterator:
if isinstance(obj, ContinuousFuture):
matches.append(obj)
else:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
if missing:
raise ValueError("Missing assets for identifiers: %s" % missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _compute_asset_lifetimes(self):
"""
        Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).execute(),
), dtype='<f8', # use doubles so we get NaNs
)
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', '<f8'),
('start', '<f8'),
('end', '<f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', '<i8'),
('start', '<i8'),
('end', '<i8'),
])
def lifetimes(self, dates, include_start_date):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
            False, then lifetimes.loc[date, asset] will be False when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
if self._asset_lifetimes is None:
self._asset_lifetimes = self._compute_asset_lifetimes()
lifetimes = self._asset_lifetimes
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
)
def only_active_assets(reference_date_value, assets):
"""
Filter an iterable of Asset objects down to just assets that were alive at
the time corresponding to `reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
assets : iterable[Asset]
The assets to filter.
Returns
-------
active_assets : list
List of the active assets from `assets` on the requested date.
"""
return [a for a in assets if was_active(reference_date_value, a)] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/assets/assets.py | assets.py |
from itertools import product
from string import ascii_uppercase
import pandas as pd
from pandas.tseries.offsets import MonthBegin
from six import iteritems
from .futures import CME_CODE_TO_MONTH
def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
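    For example (all values illustrative)::

        make_rotating_equity_info(
            num_assets=2,
            first_start=pd.Timestamp('2014-01-06', tz='UTC'),
            frequency=pd.tseries.offsets.Day(),
            periods_between_starts=2,
            asset_lifetime=4,
        )

    creates asset 'A' spanning 2014-01-06 through 2014-01-10 and asset 'B'
    spanning 2014-01-08 through 2014-01-12.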
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': 'TEST',
'exchange_full': 'TEST FULL',
},
index=range(num_assets),
)
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
sids : array-like of int
start_date : pd.Timestamp, optional
end_date : pd.Timestamp, optional
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
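    For example (dates are illustrative)::

        make_simple_equity_info(
            sids=[1, 2],
            start_date=pd.Timestamp('2014-01-02', tz='UTC'),
            end_date=pd.Timestamp('2014-12-31', tz='UTC'),
        )

    creates two assets, with generated symbols 'A' and 'B', that both exist
    for the full date range.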
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
return pd.DataFrame(
{
'symbol': list(symbols),
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'exchange': 'TEST',
'exchange_full': 'TEST FULL',
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'exchange_full',
),
)
def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
periods_between_ends : int
        Starting after the first end date, end each asset every
        `frequency` * `periods_between_ends`.
    auto_close_delta : pd.Timedelta or None
        The offset after each asset's end_date at which to set its
        auto_close_date. Pass None to leave auto_close_date unset.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
'exchange_full': 'TEST FULL',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame
def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None):
"""
Create a DataFrame representing futures for `root_symbols` during `year`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
    start_date_func : (Timestamp) -> Timestamp
        Function to generate start dates from first of the month associated
        with each asset month code.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CME_CODE_TO_MONTH
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
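    For example, ``root_symbols=['CL']``, ``years=[2014]`` and
    ``month_codes={'F': 1}`` produce a single contract with symbol ``'CLF14'``
    and sid ``first_sid``; its dates come from the supplied date functions.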
"""
if month_codes is None:
month_codes = CME_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01)
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
iteritems(month_codes),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
            'expiration_date': expiration_date_func(month_begin),
'multiplier': 500,
'exchange': "TEST",
'exchange_full': 'TEST FULL',
})
return pd.DataFrame.from_records(contracts, index='sid')
def make_commodity_future_info(first_sid,
root_symbols,
years,
month_codes=None):
"""
Make futures testing data that simulates the notice/expiration date
behavior of physical commodities like oil.
Parameters
----------
first_sid : int
root_symbols : list[str]
years : list[int]
month_codes : dict[str -> int]
Expiration dates are on the 20th of the month prior to the month code.
        Notice dates are on the 20th two months prior to the month code.
Start dates are one year before the contract month.
See Also
--------
make_future_info
"""
nineteen_days = pd.Timedelta(days=19)
one_year = pd.Timedelta(days=365)
return make_future_info(
first_sid=first_sid,
root_symbols=root_symbols,
years=years,
notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days,
expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days,
start_date_func=lambda dt: dt - one_year,
month_codes=month_codes,
) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/assets/synthetic.py | synthetic.py |
from collections import namedtuple
import re
from contextlib2 import ExitStack
import numpy as np
import pandas as pd
import sqlalchemy as sa
from toolz import first
from zipline.errors import AssetDBVersionError
from zipline.assets.asset_db_schema import (
ASSET_DB_VERSION,
asset_db_table_names,
asset_router,
equities as equities_table,
equity_symbol_mappings,
equity_supplementary_mappings as equity_supplementary_mappings_table,
futures_contracts as futures_contracts_table,
futures_exchanges,
futures_root_symbols,
metadata,
version_info,
)
from zipline.utils.preprocess import preprocess
from zipline.utils.range import from_tuple, intersecting_ranges
from zipline.utils.sqlite_utils import coerce_string_to_eng
# Define a namedtuple for use with the load_data and _load_data methods
AssetData = namedtuple(
'AssetData', (
'equities',
'equities_mappings',
'futures',
'exchanges',
'root_symbols',
'equity_supplementary_mappings',
),
)
SQLITE_MAX_VARIABLE_NUMBER = 999
symbol_columns = frozenset({
'symbol',
'company_symbol',
'share_class_symbol',
})
mapping_columns = symbol_columns | {'start_date', 'end_date'}
# Default values for the equities DataFrame
_equities_defaults = {
'symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'auto_close_date': None,
# the canonical exchange name, like "NYSE"
'exchange': None,
# optional, something like "New York Stock Exchange"
'exchange_full': None,
}
# Default values for the futures DataFrame
_futures_defaults = {
'symbol': None,
'root_symbol': None,
'asset_name': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
'first_traded': None,
'exchange': None,
'notice_date': None,
'expiration_date': None,
'auto_close_date': None,
'tick_size': None,
'multiplier': 1,
}
# Default values for the exchanges DataFrame
_exchanges_defaults = {
'timezone': None,
}
# Default values for the root_symbols DataFrame
_root_symbols_defaults = {
'root_symbol_id': None,
'sector': None,
'description': None,
'exchange': None,
}
# Default values for the equity_supplementary_mappings DataFrame
_equity_supplementary_mappings_defaults = {
'sid': None,
'value': None,
'field': None,
'start_date': 0,
'end_date': 2 ** 62 - 1,
}
# Fuzzy symbol delimiters that may break up a company symbol and share class
_delimited_symbol_delimiters_regex = re.compile(r'[./\-_]')
_delimited_symbol_default_triggers = frozenset({np.nan, None, ''})
def split_delimited_symbol(symbol):
"""
    Takes in a symbol that may be delimited and splits it into a company
    symbol and a share class symbol.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
company_symbol : str
The company part of the symbol.
share_class_symbol : str
The share class part of a symbol.
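    Examples
    --------
    Delimiters and case are normalized away:

    >>> split_delimited_symbol('BRK.A')
    ('BRK', 'A')
    >>> split_delimited_symbol('brk_a')
    ('BRK', 'A')
    >>> split_delimited_symbol('AAPL')
    ('AAPL', '')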
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return '', ''
symbol = symbol.upper()
split_list = re.split(
pattern=_delimited_symbol_delimiters_regex,
string=symbol,
maxsplit=1,
)
# Break the list up in to its two components, the company symbol and the
# share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
return company_symbol, share_class_symbol
def _generate_output_dataframe(data_subset, defaults):
"""
    Generates an output dataframe from the given subset of user-provided
    data and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are the default values to insert in the
DataFrame if no user data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
for col in desired_cols - cols:
# write the default value for any missing columns
data_subset[col] = defaults[col]
return data_subset
def _check_asset_group(group):
row = group.sort_values('end_date').iloc[-1]
row.start_date = group.start_date.min()
row.end_date = group.end_date.max()
row.drop(list(symbol_columns), inplace=True)
return row
def _format_range(r):
return (
str(pd.Timestamp(r.start, unit='ns')),
str(pd.Timestamp(r.stop, unit='ns')),
)
def _split_symbol_mappings(df):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
df : pd.DataFrame
The dataframe with multiple rows for each symbol: sid pair.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
The dataframe of just symbol: sid mappings. The index will be
the sid, then there will be three columns: symbol, start_date, and
end_date.
"""
mappings = df[list(mapping_columns)]
    ambiguous = {}
for symbol in mappings.symbol.unique():
persymbol = mappings[mappings.symbol == symbol]
intersections = list(intersecting_ranges(map(
from_tuple,
zip(persymbol.start_date, persymbol.end_date),
)))
if intersections:
            ambiguous[symbol] = (
intersections,
persymbol[['start_date', 'end_date']].astype('datetime64[ns]'),
)
    if ambiguous:
raise ValueError(
'Ambiguous ownership for %d symbol%s, multiple assets held the'
' following symbols:\n%s' % (
                len(ambiguous),
                '' if len(ambiguous) == 1 else 's',
'\n'.join(
'%s:\n intersections: %s\n %s' % (
symbol,
tuple(map(_format_range, intersections)),
# indent the dataframe string
'\n '.join(str(df).splitlines()),
)
for symbol, (intersections, df) in sorted(
                        ambiguous.items(),
key=first,
),
),
)
)
return (
df.groupby(level=0).apply(_check_asset_group),
df[list(mapping_columns)],
)
def _dt_to_epoch_ns(dt_series):
"""Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
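    For example, a naive timestamp is localized to UTC, so the epoch itself
    maps to ``0``::

        _dt_to_epoch_ns(pd.Series([pd.Timestamp('1970-01-01')]))[0]  # -> 0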
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64)
def check_version_info(conn, version_table, expected_version):
"""
Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
        If a version is present in the table and does not equal
        ``expected_version``.
"""
# Read the version out of the table
version_from_table = conn.execute(
sa.select((version_table.c.version,)),
).scalar()
# A db without a version is considered v0
if version_from_table is None:
version_from_table = 0
# Raise an error if the versions do not match
if (version_from_table != expected_version):
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version)
def write_version_info(conn, version_table, version_value):
"""
    Inserts the version value into the version table.
Parameters
----------
conn : sa.Connection
The connection to use to execute the insert.
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write in to the database
"""
conn.execute(sa.insert(version_table, values={'version': version_value}))
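# Hedged usage sketch combining the two version helpers above; the sqlite
# path is an illustrative assumption.
#
#     engine = sa.create_engine('sqlite:///assets.db')
#     with engine.begin() as conn:
#         write_version_info(conn, version_info, ASSET_DB_VERSION)
#         check_version_info(conn, version_info, ASSET_DB_VERSION)  # passes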
class _empty(object):
columns = ()
class AssetDBWriter(object):
"""Class used to write data to an assets db.
Parameters
----------
engine : Engine or str
An SQLAlchemy engine or path to a SQL database.
"""
DEFAULT_CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER
@preprocess(engine=coerce_string_to_eng)
def __init__(self, engine):
self.engine = engine
def write(self,
equities=None,
futures=None,
exchanges=None,
root_symbols=None,
equity_supplementary_mappings=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The name of the exchange.
timezone : str
The timezone of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
            Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
The amount of rows to write to the SQLite table at once.
This defaults to the default number of bind params in sqlite.
            If you have compiled sqlite3 with more or fewer bind params you
            may want to pass that value here.
See Also
--------
zipline.assets.asset_finder
"""
with self.engine.begin() as conn:
# Create SQL tables if they do not exist.
self.init_db(conn)
# Get the data to add to SQL.
data = self._load_data(
equities if equities is not None else pd.DataFrame(),
futures if futures is not None else pd.DataFrame(),
exchanges if exchanges is not None else pd.DataFrame(),
root_symbols if root_symbols is not None else pd.DataFrame(),
(
equity_supplementary_mappings
if equity_supplementary_mappings is not None
else pd.DataFrame()
),
)
# Write the data to SQL.
self._write_df_to_table(
futures_exchanges,
data.exchanges,
conn,
chunk_size,
)
self._write_df_to_table(
futures_root_symbols,
data.root_symbols,
conn,
chunk_size,
)
self._write_df_to_table(
equity_supplementary_mappings_table,
data.equity_supplementary_mappings,
conn,
chunk_size,
idx=False,
)
self._write_assets(
'future',
data.futures,
conn,
chunk_size,
)
self._write_assets(
'equity',
data.equities,
conn,
chunk_size,
mapping_data=data.equities_mappings,
)
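    # Hedged usage sketch (not part of the class): writing a single equity.
    # The sid, symbol, dates, and exchange below are illustrative assumptions.
    #
    #     engine = sa.create_engine('sqlite:///assets.db')
    #     AssetDBWriter(engine).write(equities=pd.DataFrame(
    #         {'symbol': ['AAPL'],
    #          'asset_name': ['Apple Inc.'],
    #          'start_date': [pd.Timestamp('2010-01-04')],
    #          'end_date': [pd.Timestamp('2016-12-30')],
    #          'exchange': ['NASDAQ']},
    #         index=[0],  # sids
    #     ))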
def _write_df_to_table(
self,
tbl,
df,
txn,
chunk_size,
idx=True,
idx_label=None,
):
df.to_sql(
tbl.name,
txn.connection,
index=idx,
index_label=(
idx_label
if idx_label is not None else
first(tbl.primary_key.columns).name
),
if_exists='append',
chunksize=chunk_size,
)
def _write_assets(self,
asset_type,
assets,
txn,
chunk_size,
mapping_data=None):
if asset_type == 'future':
tbl = futures_contracts_table
if mapping_data is not None:
raise TypeError('no mapping data expected for futures')
elif asset_type == 'equity':
tbl = equities_table
if mapping_data is None:
raise TypeError('mapping data required for equities')
# write the symbol mapping data.
self._write_df_to_table(
equity_symbol_mappings,
mapping_data,
txn,
chunk_size,
idx_label='sid',
)
else:
raise ValueError(
"asset_type must be in {'future', 'equity'}, got: %s" %
asset_type,
)
self._write_df_to_table(tbl, assets, txn, chunk_size)
pd.DataFrame({
asset_router.c.sid.name: assets.index.values,
asset_router.c.asset_type.name: asset_type,
}).to_sql(
asset_router.name,
txn.connection,
if_exists='append',
index=False,
chunksize=chunk_size
)
def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False
def init_db(self, txn=None):
"""Connect to database and create tables.
Parameters
----------
txn : sa.engine.Connection, optional
The transaction to execute in. If this is not provided, a new
transaction will be started with the engine provided.
Returns
-------
metadata : sa.MetaData
The metadata that describes the new assets db.
"""
with ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self.engine.begin())
tables_already_exist = self._all_tables_present(txn)
# Create the SQL tables if they do not already exist.
metadata.create_all(txn, checkfirst=True)
if tables_already_exist:
check_version_info(txn, version_info, ASSET_DB_VERSION)
else:
write_version_info(txn, version_info, ASSET_DB_VERSION)
def _normalize_equities(self, equities):
# HACK: If 'company_name' is provided, map it to asset_name
if ('company_name' in equities.columns and
'asset_name' not in equities.columns):
equities['asset_name'] = equities['company_name']
# remap 'file_name' to 'symbol' if provided
if 'file_name' in equities.columns:
equities['symbol'] = equities['file_name']
equities_output = _generate_output_dataframe(
data_subset=equities,
defaults=_equities_defaults,
)
# Split symbols to company_symbols and share_class_symbols
tuple_series = equities_output['symbol'].apply(split_delimited_symbol)
split_symbols = pd.DataFrame(
tuple_series.tolist(),
columns=['company_symbol', 'share_class_symbol'],
index=tuple_series.index
)
equities_output = pd.concat((equities_output, split_symbols), axis=1)
# Upper-case all symbol data
for col in symbol_columns:
equities_output[col] = equities_output[col].str.upper()
# Convert date columns to UNIX Epoch integers (nanoseconds)
for col in ('start_date',
'end_date',
'first_traded',
'auto_close_date'):
equities_output[col] = _dt_to_epoch_ns(equities_output[col])
return _split_symbol_mappings(equities_output)
def _normalize_futures(self, futures):
futures_output = _generate_output_dataframe(
data_subset=futures,
defaults=_futures_defaults,
)
for col in ('symbol', 'root_symbol'):
futures_output[col] = futures_output[col].str.upper()
for col in ('start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date'):
futures_output[col] = _dt_to_epoch_ns(futures_output[col])
return futures_output
def _normalize_equity_supplementary_mappings(self, mappings):
mappings_output = _generate_output_dataframe(
data_subset=mappings,
defaults=_equity_supplementary_mappings_defaults,
)
for col in ('start_date', 'end_date'):
mappings_output[col] = _dt_to_epoch_ns(mappings_output[col])
return mappings_output
def _load_data(
self,
equities,
futures,
exchanges,
root_symbols,
equity_supplementary_mappings,
):
"""
Returns a standard set of pandas.DataFrames:
equities, futures, exchanges, root_symbols
"""
# Check whether identifier columns have been provided.
# If they have, set the index to this column.
        # If not, assume the index already contains the identifier information.
for df, id_col in [(equities, 'sid'),
(futures, 'sid'),
(exchanges, 'exchange'),
(root_symbols, 'root_symbol')]:
if id_col in df.columns:
df.set_index(id_col, inplace=True)
equities_output, equities_mappings = self._normalize_equities(equities)
futures_output = self._normalize_futures(futures)
equity_supplementary_mappings_output = (
self._normalize_equity_supplementary_mappings(
equity_supplementary_mappings,
)
)
exchanges_output = _generate_output_dataframe(
data_subset=exchanges,
defaults=_exchanges_defaults,
)
root_symbols_output = _generate_output_dataframe(
data_subset=root_symbols,
defaults=_root_symbols_defaults,
)
return AssetData(
equities=equities_output,
equities_mappings=equities_mappings,
futures=futures_output,
exchanges=exchanges_output,
root_symbols=root_symbols_output,
equity_supplementary_mappings=equity_supplementary_mappings_output,
        )
# ---- end of zipline/assets/asset_writer.py ----
from functools import wraps
from alembic.migration import MigrationContext
from alembic.operations import Operations
import sqlalchemy as sa
from toolz.curried import do, operator as op
from zipline.assets.asset_writer import write_version_info
from zipline.errors import AssetDBImpossibleDowngrade
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import coerce_string_to_eng
@preprocess(engine=coerce_string_to_eng)
def downgrade(engine, desired_version):
"""Downgrades the assets db at the given engine to the desired version.
Parameters
----------
engine : Engine
An SQLAlchemy engine to the assets database.
desired_version : int
The desired resulting version for the assets database.
"""
# Check the version of the db at the engine
with engine.begin() as conn:
metadata = sa.MetaData(conn)
metadata.reflect()
version_info_table = metadata.tables['version_info']
starting_version = sa.select((version_info_table.c.version,)).scalar()
# Check for accidental upgrade
if starting_version < desired_version:
raise AssetDBImpossibleDowngrade(db_version=starting_version,
desired_version=desired_version)
# Check if the desired version is already the db version
if starting_version == desired_version:
# No downgrade needed
return
# Create alembic context
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
# Integer keys of downgrades to run
# E.g.: [5, 4, 3, 2] would downgrade v6 to v2
downgrade_keys = range(desired_version, starting_version)[::-1]
# Disable foreign keys until all downgrades are complete
_pragma_foreign_keys(conn, False)
# Execute the downgrades in order
for downgrade_key in downgrade_keys:
_downgrade_methods[downgrade_key](op, conn, version_info_table)
# Re-enable foreign keys
_pragma_foreign_keys(conn, True)
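# Hedged usage sketch; the database path and target version are illustrative:
#
#     engine = sa.create_engine('sqlite:///assets.db')
#     downgrade(engine, desired_version=3)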
def _pragma_foreign_keys(connection, on):
"""Sets the PRAGMA foreign_keys state of the SQLite database. Disabling
the pragma allows for batch modification of tables with foreign keys.
Parameters
----------
connection : Connection
A SQLAlchemy connection to the db
on : bool
If true, PRAGMA foreign_keys will be set to ON. Otherwise, the PRAGMA
foreign_keys will be set to OFF.
"""
connection.execute("PRAGMA foreign_keys=%s" % ("ON" if on else "OFF"))
# This dict contains references to downgrade methods that can be applied to an
# assets db. The resulting db's version is the key.
# e.g. The method at key '0' is the downgrade method from v1 to v0
_downgrade_methods = {}
def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
"""
def _(f):
destination = src - 1
@do(op.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _
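# Hedged sketch of how a hypothetical new downgrade would be registered with
# the decorator above (the version number and column name are made up):
#
#     @downgrades(7)
#     def _downgrade_v7(op):
#         with op.batch_alter_table('equities') as batch_op:
#             batch_op.drop_column('some_new_column')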
@downgrades(1)
def _downgrade_v1(op):
"""
Downgrade assets db by removing the 'tick_size' column and renaming the
'multiplier' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_futures_contracts_root_symbol')
op.drop_index('ix_futures_contracts_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('futures_contracts') as batch_op:
# Rename 'multiplier'
batch_op.alter_column(column_name='multiplier',
new_column_name='contract_multiplier')
# Delete 'tick_size'
batch_op.drop_column('tick_size')
# Recreate indices after batch
op.create_index('ix_futures_contracts_root_symbol',
table_name='futures_contracts',
columns=['root_symbol'])
op.create_index('ix_futures_contracts_symbol',
table_name='futures_contracts',
columns=['symbol'],
unique=True)
@downgrades(2)
def _downgrade_v2(op):
"""
Downgrade assets db by removing the 'auto_close_date' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('auto_close_date')
# Recreate indices after batch
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol'])
@downgrades(3)
def _downgrade_v3(op):
"""
Downgrade assets db by adding a not null constraint on
``equities.first_traded``
"""
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
)
op.execute(
"""
insert into _new_equities
select * from equities
where equities.first_traded is not null
""",
)
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
# we need to make sure the indices have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
)
@downgrades(4)
def _downgrade_v4(op):
"""
Downgrades assets db by copying the `exchange_full` column to `exchange`,
then dropping the `exchange_full` column.
"""
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
op.execute("UPDATE equities SET exchange = exchange_full")
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('exchange_full')
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol'])
@downgrades(5)
def _downgrade_v5(op):
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
sa.Column('exchange_full', sa.Text)
)
op.execute(
"""
insert into _new_equities
select
equities.sid as sid,
sym.symbol as symbol,
sym.company_symbol as company_symbol,
sym.share_class_symbol as share_class_symbol,
sym.company_symbol || sym.share_class_symbol as fuzzy_symbol,
equities.asset_name as asset_name,
equities.start_date as start_date,
equities.end_date as end_date,
equities.first_traded as first_traded,
equities.auto_close_date as auto_close_date,
equities.exchange as exchange,
equities.exchange_full as exchange_full
from
equities
inner join
-- Nested select here to take the most recently held ticker
-- for each sid. The group by with no aggregation function will
-- take the last element in the group, so we first order by
-- the end date ascending to ensure that the groupby takes
-- the last ticker.
(select
*
from
(select
*
from
equity_symbol_mappings
order by
equity_symbol_mappings.end_date asc)
group by
sid) sym
on
equities.sid == sym.sid
""",
)
op.drop_table('equity_symbol_mappings')
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
# we need to make sure the indicies have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
)
@downgrades(6)
def _downgrade_v6(op):
    op.drop_table('equity_supplementary_mappings')
# ---- end of zipline/assets/asset_db_migrations.py ----
from abc import ABCMeta, abstractmethod
from six import with_metaclass
class RollFinder(with_metaclass(ABCMeta, object)):
"""
Abstract base class for calculating when futures contracts are the active
contract.
"""
@abstractmethod
def _active_contract(self, oc, front, back, dt):
raise NotImplementedError
def get_contract_center(self, root_symbol, dt, offset):
"""
Parameters
----------
root_symbol : str
The root symbol for the contract chain.
dt : Timestamp
The datetime for which to retrieve the current contract.
offset : int
The offset from the primary contract.
0 is the primary, 1 is the secondary, etc.
Returns
-------
Future
            The contract at the given offset from the primary (active)
            contract at the given dt.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
session = self.trading_calendar.minute_to_session_label(dt)
front = oc.contract_before_auto_close(session.value)
back = oc.contract_at_offset(front, 1, dt.value)
if back is None:
return front
primary = self._active_contract(oc, front, back, session)
return oc.contract_at_offset(primary, offset, session.value)
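    # Hedged usage sketch: given a concrete RollFinder subclass instance named
    # `roll_finder` (an assumption), the primary and secondary contracts for
    # an illustrative 'CL' root symbol could be fetched as:
    #
    #     primary = roll_finder.get_contract_center('CL', dt, offset=0)
    #     secondary = roll_finder.get_contract_center('CL', dt, offset=1)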
def get_rolls(self, root_symbol, start, end, offset):
"""
Get the rolls, i.e. the session at which to hop from contract to
contract in the chain.
Parameters
----------
root_symbol : str
The root symbol for which to calculate rolls.
start : Timestamp
Start of the date range.
end : Timestamp
End of the date range.
offset : int
Offset from the primary.
Returns
-------
        rolls : list[tuple(sid, roll_date)]
            A list of rolls, where the first value of each pair is the active
            `sid` and the second is the `roll_date` on which to hop to the
            next contract. The last pair in the chain has a `roll_date` of
            `None`, since its roll falls after the requested range.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
front = self.get_contract_center(root_symbol, end, 0)
back = oc.contract_at_offset(front, 1, end.value)
if back is not None:
end_session = self.trading_calendar.minute_to_session_label(end)
first = self._active_contract(oc, front, back, end_session)
else:
first = front
first_contract = oc.sid_to_contract[first]
rolls = [((first_contract >> offset).contract.sid, None)]
tc = self.trading_calendar
sessions = tc.sessions_in_range(tc.minute_to_session_label(start),
tc.minute_to_session_label(end))
freq = sessions.freq
if first == front:
curr = first_contract << 1
else:
curr = first_contract << 2
session = sessions[-1]
while session > start and curr is not None:
front = curr.contract.sid
back = rolls[0][0]
prev_c = curr.prev
while session > start:
prev = session - freq
if prev_c is not None:
if prev < prev_c.contract.auto_close_date:
break
if back != self._active_contract(oc, front, back, prev):
# TODO: Instead of listing each contract with its roll date
# as tuples, create a series which maps every day to the
# active contract on that day.
rolls.insert(0, ((curr >> offset).contract.sid, session))
break
session = prev
curr = curr.prev
if curr is not None:
session = curr.contract.auto_close_date
return rolls
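# Hedged sketch of the list returned by get_rolls (sids and dates are made up):
#
#     [(2001, Timestamp('2016-02-19', tz='UTC')),  # roll out of 2001 here
#      (2002, Timestamp('2016-03-18', tz='UTC')),
#      (2003, None)]                               # active through `end`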
class CalendarRollFinder(RollFinder):
"""
The CalendarRollFinder calculates contract rolls based purely on the
contract's auto close date.
"""
def __init__(self, trading_calendar, asset_finder):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
def _active_contract(self, oc, front, back, dt):
contract = oc.sid_to_contract[front].contract
auto_close_date = contract.auto_close_date
auto_closed = dt >= auto_close_date
return back if auto_closed else front
class VolumeRollFinder(RollFinder):
"""
    The VolumeRollFinder calculates contract rolls based on when
volume activity transfers from one contract to another.
"""
GRACE_DAYS = 7
THRESHOLD = 0.10
def __init__(self, trading_calendar, asset_finder, session_reader):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self.session_reader = session_reader
def _active_contract(self, oc, front, back, dt):
"""
Return the active contract based on the previous trading day's volume.
In the rare case that a double volume switch occurs we treat the first
switch as the roll. Take the following case for example:
| +++++ _____
| + __ / <--- 'G'
| ++/++\++++/++
| _/ \__/ +
| / +
| ____/ + <--- 'F'
|_________|__|___|________
a b c <--- Switches
We should treat 'a' as the roll date rather than 'c' because from the
perspective of 'a', if a switch happens and we are pretty close to the
auto-close date, we would probably assume it is time to roll. This
means that for every date after 'a', `data.current(cf, 'contract')`
should return the 'G' contract.
"""
tc = self.trading_calendar
trading_day = tc.day
prev = dt - trading_day
get_value = self.session_reader.get_value
front_vol = get_value(front, prev, 'volume')
back_vol = get_value(back, prev, 'volume')
front_contract = oc.sid_to_contract[front].contract
if dt >= front_contract.auto_close_date or back_vol > front_vol:
return back
gap_start = \
front_contract.auto_close_date - (trading_day * self.GRACE_DAYS)
gap_end = prev - trading_day
if dt < gap_start:
return front
# If we are within `self.GRACE_DAYS` of the front contract's auto close
# date, and a volume flip happened during that period, return the back
# contract as the active one.
sessions = tc.sessions_in_range(
tc.minute_to_session_label(gap_start),
tc.minute_to_session_label(gap_end),
)
for session in sessions:
front_vol = get_value(front, session, 'volume')
back_vol = get_value(back, session, 'volume')
if back_vol > front_vol:
return back
        return front
# ---- end of zipline/assets/roll_finder.py ----
import sys
import logbook
import numpy as np
from zipline.finance import commission
zipline_logging = logbook.NestedSetup([
logbook.NullHandler(),
logbook.StreamHandler(sys.stdout, level=logbook.INFO),
logbook.StreamHandler(sys.stderr, level=logbook.ERROR),
])
zipline_logging.push_application()
STOCKS = ['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM']
# On-Line Portfolio Moving Average Reversion
# More info can be found in the corresponding paper:
# http://icml.cc/2012/papers/168.pdf
def initialize(algo, eps=1, window_length=5):
algo.stocks = STOCKS
algo.sids = [algo.symbol(symbol) for symbol in algo.stocks]
algo.m = len(algo.stocks)
algo.price = {}
algo.b_t = np.ones(algo.m) / algo.m
algo.last_desired_port = np.ones(algo.m) / algo.m
algo.eps = eps
algo.init = True
algo.days = 0
algo.window_length = window_length
algo.set_commission(commission.PerShare(cost=0))
def handle_data(algo, data):
algo.days += 1
if algo.days < algo.window_length:
return
if algo.init:
rebalance_portfolio(algo, data, algo.b_t)
algo.init = False
return
m = algo.m
x_tilde = np.zeros(m)
# find relative moving average price for each asset
mavgs = data.history(algo.sids, 'price', algo.window_length, '1d').mean()
for i, sid in enumerate(algo.sids):
price = data.current(sid, "price")
# Relative mean deviation
x_tilde[i] = mavgs[sid] / price
###########################
# Inside of OLMAR (algo 2)
x_bar = x_tilde.mean()
# market relative deviation
mark_rel_dev = x_tilde - x_bar
# Expected return with current portfolio
exp_return = np.dot(algo.b_t, x_tilde)
weight = algo.eps - exp_return
variability = (np.linalg.norm(mark_rel_dev)) ** 2
# test for divide-by-zero case
if variability == 0.0:
step_size = 0
else:
step_size = max(0, weight / variability)
b = algo.b_t + step_size * mark_rel_dev
b_norm = simplex_projection(b)
np.testing.assert_almost_equal(b_norm.sum(), 1)
rebalance_portfolio(algo, data, b_norm)
# update portfolio
algo.b_t = b_norm
def rebalance_portfolio(algo, data, desired_port):
# rebalance portfolio
desired_amount = np.zeros_like(desired_port)
current_amount = np.zeros_like(desired_port)
prices = np.zeros_like(desired_port)
if algo.init:
positions_value = algo.portfolio.starting_cash
else:
positions_value = algo.portfolio.positions_value + \
algo.portfolio.cash
for i, sid in enumerate(algo.sids):
current_amount[i] = algo.portfolio.positions[sid].amount
prices[i] = data.current(sid, "price")
desired_amount = np.round(desired_port * positions_value / prices)
algo.last_desired_port = desired_port
diff_amount = desired_amount - current_amount
for i, sid in enumerate(algo.sids):
algo.order(sid, diff_amount[i])
def simplex_projection(v, b=1):
"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
    Optimization Problem: min_{w} \| w - v \|_{2}^{2}
    s.t. \sum_{i=1}^{m} w_{i} = z, w_{i} \geq 0
Input: A vector v \in R^{m}, and a scalar z > 0 (default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2013 by Thomas Wiecki ([email protected]).
"""
v = np.asarray(v)
p = len(v)
# Sort v into u in descending order
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)
rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho + 1)])
w = (v - theta)
w[w < 0] = 0
return w
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
results.portfolio_value.plot(ax=ax)
ax.set_ylabel('Portfolio value (USD)')
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2004', tz='utc'),
'end': pd.Timestamp('2008', tz='utc'),
    }
# ---- end of zipline/examples/olmar.py ----
from zipline.api import order, record, symbol
# Import exponential moving average from talib wrapper
from talib import EMA
def initialize(context):
context.asset = symbol('AAPL')
# To keep track of whether we invested in the stock or not
context.invested = False
def handle_data(context, data):
trailing_window = data.history(context.asset, 'price', 40, '1d')
if trailing_window.isnull().values.any():
return
short_ema = EMA(trailing_window.values, timeperiod=20)
long_ema = EMA(trailing_window.values, timeperiod=40)
buy = False
sell = False
if (short_ema[-1] > long_ema[-1]) and not context.invested:
order(context.asset, 100)
context.invested = True
buy = True
elif (short_ema[-1] < long_ema[-1]) and context.invested:
order(context.asset, -100)
context.invested = False
sell = True
record(AAPL=data.current(context.asset, "price"),
short_ema=short_ema[-1],
long_ema=long_ema[-1],
buy=buy,
sell=sell)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if 'AAPL' in results and 'short_ema' in results and 'long_ema' in results:
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(results.ix[results.buy].index, results.short_ema[results.buy],
'^', markersize=10, color='m')
ax2.plot(results.ix[results.sell].index,
results.short_ema[results.sell],
'v', markersize=10, color='k')
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
else:
msg = 'AAPL, short_ema and long_ema data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2014-01-01', tz='utc'),
'end': pd.Timestamp('2014-11-01', tz='utc'),
    }
# ---- end of zipline/examples/dual_ema_talib.py ----
from six import viewkeys
from zipline.api import (
attach_pipeline,
date_rules,
order_target_percent,
pipeline_output,
record,
schedule_function,
)
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import RSI
def make_pipeline():
rsi = RSI()
return Pipeline(
columns={
'longs': rsi.top(3),
'shorts': rsi.bottom(3),
},
)
def rebalance(context, data):
# Pipeline data will be a dataframe with boolean columns named 'longs' and
# 'shorts'.
pipeline_data = context.pipeline_data
all_assets = pipeline_data.index
longs = all_assets[pipeline_data.longs]
shorts = all_assets[pipeline_data.shorts]
record(universe_size=len(all_assets))
# Build a 2x-leveraged, equal-weight, long-short portfolio.
one_third = 1.0 / 3.0
for asset in longs:
order_target_percent(asset, one_third)
for asset in shorts:
order_target_percent(asset, -one_third)
# Remove any assets that should no longer be in our portfolio.
portfolio_assets = longs | shorts
positions = context.portfolio.positions
for asset in viewkeys(positions) - set(portfolio_assets):
# This will fail if the asset was removed from our portfolio because it
# was delisted.
if data.can_trade(asset):
order_target_percent(asset, 0)
def initialize(context):
attach_pipeline(make_pipeline(), 'my_pipeline')
# Rebalance each day. In daily mode, this is equivalent to putting
# `rebalance` in our handle_data, but in minute mode, it's equivalent to
# running at the start of the day each day.
schedule_function(rebalance, date_rules.every_day())
def before_trading_start(context, data):
context.pipeline_data = pipeline_output('my_pipeline')
def _test_args():
"""
Extra arguments to use when zipline's automated tests run this example.
Notes for testers:
Gross leverage should be roughly 2.0 on every day except the first.
Net leverage should be roughly 2.0 on every day except the first.
Longs Count should always be 3 after the first day.
Shorts Count should be 3 after the first day, except on 2013-10-30, when it
dips to 2 for a day because DELL is delisted.
"""
import pandas as pd
return {
# We run through october of 2013 because DELL is in the test data and
# it went private on 2013-10-29.
'start': pd.Timestamp('2013-10-07', tz='utc'),
'end': pd.Timestamp('2013-11-30', tz='utc'),
'capital_base': 100000,
    }
# ---- end of zipline/examples/momentum_pipeline.py ----
from importlib import import_module
import os
from toolz import merge
from zipline import run_algorithm
# These are used by test_examples.py to discover the examples to run.
from zipline.utils.calendars import register_calendar, get_calendar
EXAMPLE_MODULES = {}
for f in os.listdir(os.path.dirname(__file__)):
if not f.endswith('.py') or f == '__init__.py':
continue
modname = f[:-len('.py')]
mod = import_module('.' + modname, package=__name__)
EXAMPLE_MODULES[modname] = mod
globals()[modname] = mod
# Remove noise from loop variables.
del f, modname, mod
# Columns that we expect to be reliably deterministic.
# Doesn't include fields that contain UUIDs.
_cols_to_check = [
'algo_volatility',
'algorithm_period_return',
'alpha',
'benchmark_period_return',
'benchmark_volatility',
'beta',
'capital_used',
'ending_cash',
'ending_exposure',
'ending_value',
'excess_return',
'gross_leverage',
'long_exposure',
'long_value',
'longs_count',
'max_drawdown',
'max_leverage',
'net_leverage',
'period_close',
'period_label',
'period_open',
'pnl',
'portfolio_value',
'positions',
'returns',
'short_exposure',
'short_value',
'shorts_count',
'sortino',
'starting_cash',
'starting_exposure',
'starting_value',
'trading_days',
'treasury_period_return',
]
def run_example(example_name, environ):
"""
Run an example module from zipline.examples.
"""
mod = EXAMPLE_MODULES[example_name]
register_calendar("YAHOO", get_calendar("NYSE"), force=True)
return run_algorithm(
initialize=getattr(mod, 'initialize', None),
handle_data=getattr(mod, 'handle_data', None),
before_trading_start=getattr(mod, 'before_trading_start', None),
analyze=getattr(mod, 'analyze', None),
bundle='test',
environ=environ,
# Provide a default capital base, but allow the test to override.
**merge({'capital_base': 1e7}, mod._test_args())
    )
# ---- end of zipline/examples/__init__.py ----
from zipline.api import order_target, record, symbol
def initialize(context):
context.sym = symbol('AAPL')
context.i = 0
def handle_data(context, data):
# Skip first 300 days to get full windows
context.i += 1
if context.i < 300:
return
    # Compute averages.
    # data.history() returns a pandas Series for a single asset and field,
    # so .mean() averages the price over the trailing window.
short_mavg = data.history(context.sym, 'price', 100, '1d').mean()
long_mavg = data.history(context.sym, 'price', 300, '1d').mean()
# Trading logic
if short_mavg > long_mavg:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
order_target(context.sym, 100)
elif short_mavg < long_mavg:
order_target(context.sym, 0)
# Save values for later inspection
record(AAPL=data.current(context.sym, "price"),
short_mavg=short_mavg,
long_mavg=long_mavg)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if ('AAPL' in results and 'short_mavg' in results and
'long_mavg' in results):
results['AAPL'].plot(ax=ax2)
results[['short_mavg', 'long_mavg']].plot(ax=ax2)
trans = results.ix[[t != [] for t in results.transactions]]
buys = trans.ix[[t[0]['amount'] > 0 for t in
trans.transactions]]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]]
ax2.plot(buys.index, results.short_mavg.ix[buys.index],
'^', markersize=10, color='m')
ax2.plot(sells.index, results.short_mavg.ix[sells.index],
'v', markersize=10, color='k')
plt.legend(loc=0)
else:
msg = 'AAPL, short_mavg & long_mavg data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2011', tz='utc'),
'end': pd.Timestamp('2013', tz='utc'),
    }
# ---- end of zipline/examples/dual_moving_average.py ----
import pandas as pd
from zipline.errors import (
InvalidBenchmarkAsset,
BenchmarkAssetNotAvailableTooEarly,
BenchmarkAssetNotAvailableTooLate
)
class BenchmarkSource(object):
def __init__(self,
benchmark_asset,
trading_calendar,
sessions,
data_portal,
emission_rate="daily",
benchmark_returns=None):
self.benchmark_asset = benchmark_asset
self.sessions = sessions
self.emission_rate = emission_rate
self.data_portal = data_portal
if len(sessions) == 0:
self._precalculated_series = pd.Series()
elif benchmark_asset is not None:
self._validate_benchmark(benchmark_asset)
self._precalculated_series = \
self._initialize_precalculated_series(
benchmark_asset,
trading_calendar,
self.sessions,
self.data_portal
)
elif benchmark_returns is not None:
daily_series = benchmark_returns[sessions[0]:sessions[-1]]
if self.emission_rate == "minute":
# we need to take the env's benchmark returns, which are daily,
# and resample them to minute
minutes = trading_calendar.minutes_for_sessions_in_range(
sessions[0],
sessions[-1]
)
minute_series = daily_series.reindex(
index=minutes,
method="ffill"
)
self._precalculated_series = minute_series
else:
self._precalculated_series = daily_series
else:
raise Exception("Must provide either benchmark_asset or "
"benchmark_returns.")
def get_value(self, dt):
return self._precalculated_series.loc[dt]
def get_range(self, start_dt, end_dt):
return self._precalculated_series.loc[start_dt:end_dt]
def _validate_benchmark(self, benchmark_asset):
# check if this security has a stock dividend. if so, raise an
# error suggesting that the user pick a different asset to use
# as benchmark.
stock_dividends = \
self.data_portal.get_stock_dividends(self.benchmark_asset,
self.sessions)
if len(stock_dividends) > 0:
raise InvalidBenchmarkAsset(
sid=str(self.benchmark_asset),
dt=stock_dividends[0]["ex_date"]
)
if benchmark_asset.start_date > self.sessions[0]:
# the asset started trading after the first simulation day
raise BenchmarkAssetNotAvailableTooEarly(
sid=str(self.benchmark_asset),
dt=self.sessions[0],
start_dt=benchmark_asset.start_date
)
if benchmark_asset.end_date < self.sessions[-1]:
# the asset stopped trading before the last simulation day
raise BenchmarkAssetNotAvailableTooLate(
sid=str(self.benchmark_asset),
dt=self.sessions[-1],
end_dt=benchmark_asset.end_date
)
def _initialize_precalculated_series(self, asset, trading_calendar,
trading_days, data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
A pd.Series, indexed by trading day, whose values represent the %
change from close to close.
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return benchmark_series.pct_change()[1:]
else:
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return benchmark_series.pct_change()[1:]
elif start_date == trading_days[0]:
                # Attempt to handle the case where stock data starts on the
                # first day; in that case, use the open-to-close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
                # compute the first day's return from its daily open and close
first_open = data_portal.get_spot_value(
asset, 'open', trading_days[0], 'daily')
first_close = data_portal.get_spot_value(
asset, 'close', trading_days[0], 'daily')
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
                return returns
# ---- end of zipline/sources/benchmark_source.py ----
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import hashlib
from textwrap import dedent
import warnings
from logbook import Logger
import numpy
import pandas as pd
from pandas import read_csv
import pytz
import requests
from six import StringIO, iteritems, with_metaclass
from zipline.errors import (
MultipleSymbolsFound,
SymbolNotFound,
ZiplineError
)
from zipline.protocol import (
DATASOURCE_TYPE,
Event
)
from zipline.assets import Equity
logger = Logger('Requests Source Logger')
def roll_dts_to_midnight(dts, trading_day):
if len(dts) == 0:
return dts
return pd.DatetimeIndex(
(dts.tz_convert('US/Eastern') - pd.Timedelta(hours=16)).date,
tz='UTC',
) + trading_day
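# Hedged example of the rolling behaviour above (dates are illustrative and
# `trading_day` is assumed to be a one-session CustomBusinessDay offset): a
# stamp during the US session maps to that session's midnight, while a stamp
# at or after the 4pm Eastern close rolls forward to the next session.
#
#     roll_dts_to_midnight(
#         pd.DatetimeIndex(['2014-01-06 14:30', '2014-01-06 21:30'], tz='UTC'),
#         trading_day,
#     )
#     # -> approximately DatetimeIndex(['2014-01-06', '2014-01-07'], tz='UTC')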
class FetcherEvent(Event):
pass
class FetcherCSVRedirectError(ZiplineError):
msg = dedent(
"""\
Attempt to fetch_csv from a redirected url. {url}
must be changed to {new_url}
"""
)
def __init__(self, *args, **kwargs):
self.url = kwargs["url"]
self.new_url = kwargs["new_url"]
self.extra = kwargs["extra"]
super(FetcherCSVRedirectError, self).__init__(*args, **kwargs)
# The following optional arguments are supported for
# requests backed data sources.
# see http://docs.python-requests.org/en/latest/api/#main-interface
# for a full list.
ALLOWED_REQUESTS_KWARGS = {
'params',
'headers',
'auth',
'cert'
}
# The following optional arguments are supported for pandas' read_csv
# function, and may be passed as kwargs to the datasource below.
# see http://pandas.pydata.org/
# pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
ALLOWED_READ_CSV_KWARGS = {
'sep',
'dialect',
'doublequote',
'escapechar',
'quotechar',
'quoting',
'skipinitialspace',
'lineterminator',
'header',
'index_col',
'names',
'prefix',
'skiprows',
'skipfooter',
'skip_footer',
'na_values',
'true_values',
'false_values',
'delimiter',
'converters',
'dtype',
'delim_whitespace',
'as_recarray',
'na_filter',
'compact_ints',
'use_unsigned',
'buffer_lines',
'warn_bad_lines',
'error_bad_lines',
'keep_default_na',
'thousands',
'comment',
'decimal',
'keep_date_col',
'nrows',
'chunksize',
'encoding',
'usecols'
}
SHARED_REQUESTS_KWARGS = {
'stream': True,
'allow_redirects': False,
}
def mask_requests_args(url, validating=False, params_checker=None, **kwargs):
requests_kwargs = {key: val for (key, val) in iteritems(kwargs)
if key in ALLOWED_REQUESTS_KWARGS}
if params_checker is not None:
url, s_params = params_checker(url)
if s_params:
if 'params' in requests_kwargs:
requests_kwargs['params'].update(s_params)
else:
requests_kwargs['params'] = s_params
# Giving the connection 30 seconds. This timeout does not
# apply to the download of the response body.
# (Note that Quandl links can take >10 seconds to return their
# first byte on occasion)
requests_kwargs['timeout'] = 1.0 if validating else 30.0
requests_kwargs.update(SHARED_REQUESTS_KWARGS)
request_pair = namedtuple("RequestPair", ("requests_kwargs", "url"))
return request_pair(requests_kwargs, url)
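# Hedged example (values are illustrative): only whitelisted requests kwargs
# survive, and the shared kwargs plus a timeout are merged in.
#
#     kwargs, url = mask_requests_args(
#         'https://example.com/data.csv',
#         params={'api_key': 'XYZ'},
#         sep=',',  # a read_csv kwarg, silently dropped here
#     )
#     # kwargs -> {'params': {'api_key': 'XYZ'}, 'timeout': 30.0,
#     #            'stream': True, 'allow_redirects': False}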
class PandasCSV(with_metaclass(ABCMeta, object)):
def __init__(self,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
**kwargs):
self.start_date = start_date
self.end_date = end_date
self.date_column = date_column
self.date_format = date_format
self.timezone = timezone
self.mask = mask
self.symbol_column = symbol_column or "symbol"
self.data_frequency = data_frequency
invalid_kwargs = set(kwargs) - ALLOWED_READ_CSV_KWARGS
if invalid_kwargs:
raise TypeError(
"Unexpected keyword arguments: %s" % invalid_kwargs,
)
self.pandas_kwargs = self.mask_pandas_args(kwargs)
self.symbol = symbol
self.finder = asset_finder
self.trading_day = trading_day
self.pre_func = pre_func
self.post_func = post_func
@property
def fields(self):
return self.df.columns.tolist()
def get_hash(self):
return self.namestring
@abstractmethod
def fetch_data(self):
return
@staticmethod
def parse_date_str_series(format_str, tz, date_str_series, data_frequency,
trading_day):
"""
Efficient parsing for a 1d Pandas/numpy object containing string
representations of dates.
Note: pd.to_datetime is significantly faster when no format string is
passed, and in pandas 0.12.0 the %p strptime directive is not correctly
handled if a format string is explicitly passed, but AM/PM is handled
properly if format=None.
Moreover, we were previously ignoring this parameter unintentionally
because we were incorrectly passing it as a positional. For all these
reasons, we ignore the format_str parameter when parsing datetimes.
"""
# Explicitly ignoring this parameter. See note above.
if format_str is not None:
logger.warn(
"The 'format_str' parameter to fetch_csv is deprecated. "
"Ignoring and defaulting to pandas default date parsing."
)
format_str = None
tz_str = str(tz)
if tz_str == pytz.utc.zone:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
utc=True,
errors='coerce',
)
else:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
errors='coerce',
).tz_localize(tz_str).tz_convert('UTC')
if data_frequency == 'daily':
parsed = roll_dts_to_midnight(parsed, trading_day)
return parsed
def mask_pandas_args(self, kwargs):
pandas_kwargs = {key: val for (key, val) in iteritems(kwargs)
if key in ALLOWED_READ_CSV_KWARGS}
if 'usecols' in pandas_kwargs:
usecols = pandas_kwargs['usecols']
if usecols and self.date_column not in usecols:
# make a new list so we don't modify user's,
# and to ensure it is mutable
with_date = list(usecols)
with_date.append(self.date_column)
pandas_kwargs['usecols'] = with_date
# No strings in the 'symbol' column should be interpreted as NaNs
pandas_kwargs.setdefault('keep_default_na', False)
pandas_kwargs.setdefault('na_values', {'symbol': []})
return pandas_kwargs
def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(uppered, as_of_date=None)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan
def load_df(self):
df = self.fetch_data()
if self.pre_func:
df = self.pre_func(df)
        # Batch-convert the user-specified date column into timestamps.
df['dt'] = self.parse_date_str_series(
self.date_format,
self.timezone,
df[self.date_column],
self.data_frequency,
self.trading_day,
).values
# ignore rows whose dates we couldn't parse
df = df[df['dt'].notnull()]
if self.symbol is not None:
df['sid'] = self.symbol
elif self.finder:
df.sort_values(by=self.symbol_column, inplace=True)
# Pop the 'sid' column off of the DataFrame, just in case the user
# has assigned it, and throw a warning
try:
df.pop('sid')
warnings.warn(
"Assignment of the 'sid' column of a DataFrame is "
"not supported by Fetcher. The 'sid' column has been "
"overwritten.",
category=UserWarning,
stacklevel=2,
)
except KeyError:
# There was no 'sid' column, so no warning is necessary
pass
# Fill entries for any symbols that don't require a date to
# uniquely identify. Entries for which multiple securities exist
# are replaced with zeroes, while entries for which no asset
# exists are replaced with NaNs.
unique_symbols = df[self.symbol_column].unique()
sid_series = pd.Series(
data=map(self._lookup_unconflicted_symbol, unique_symbols),
index=unique_symbols,
name='sid',
)
df = df.join(sid_series, on=self.symbol_column)
# Fill any zero entries left in our sid column by doing a lookup
# using both symbol and the row date.
conflict_rows = df[df['sid'] == 0]
for row_idx, row in conflict_rows.iterrows():
try:
asset = self.finder.lookup_symbol(
row[self.symbol_column],
# Replacing tzinfo here is necessary because of the
# timezone metadata bug described below.
row['dt'].replace(tzinfo=pytz.utc),
# It's possible that no asset comes back here if our
# lookup date is from before any asset held the
# requested symbol. Mark such cases as NaN so that
# they get dropped in the next step.
) or numpy.nan
except SymbolNotFound:
asset = numpy.nan
# Assign the resolved asset to the cell
df.ix[row_idx, 'sid'] = asset
# Filter out rows containing symbols that we failed to find.
length_before_drop = len(df)
df = df[df['sid'].notnull()]
no_sid_count = length_before_drop - len(df)
if no_sid_count:
                logger.warn(
                    "Dropped {} rows from fetched csv.".format(no_sid_count),
                    extra={'syslog': True},
                )
else:
df['sid'] = df['symbol']
# Dates are localized to UTC when they come out of
# parse_date_str_series, but we need to re-localize them here because
# of a bug that wasn't fixed until
# https://github.com/pydata/pandas/pull/7092.
# We should be able to remove the call to tz_localize once we're on
# pandas 0.14.0
# We don't set 'dt' as the index until here because the Symbol parsing
# operations above depend on having a unique index for the dataframe,
# and the 'dt' column can contain multiple dates for the same entry.
        df = df.drop_duplicates(["sid", "dt"])
df.set_index(['dt'], inplace=True)
df = df.tz_localize('UTC')
df.sort_index(inplace=True)
cols_to_drop = [self.date_column]
if self.symbol is None:
cols_to_drop.append(self.symbol_column)
df = df[df.columns.drop(cols_to_drop)]
if self.post_func:
df = self.post_func(df)
return df
def __iter__(self):
asset_cache = {}
for dt, series in self.df.iterrows():
if dt < self.start_date:
continue
if dt > self.end_date:
return
event = FetcherEvent()
# when dt column is converted to be the dataframe's index
# the dt column is dropped. So, we need to manually copy
# dt into the event.
event.dt = dt
for k, v in series.iteritems():
# convert numpy integer types to
# int. This assumes we are on a 64bit
# platform that will not lose information
# by casting.
# TODO: this is only necessary on the
# amazon qexec instances. would be good
# to figure out how to use the numpy dtypes
# without this check and casting.
if isinstance(v, numpy.integer):
v = int(v)
setattr(event, k, v)
# If it has start_date, then it's already an Asset
# object from asset_for_symbol, and we don't have to
# transform it any further. Checking for start_date is
# faster than isinstance.
if event.sid in asset_cache:
event.sid = asset_cache[event.sid]
elif hasattr(event.sid, 'start_date'):
# Clone for user algo code, if we haven't already.
asset_cache[event.sid] = event.sid
elif self.finder and isinstance(event.sid, int):
asset = self.finder.retrieve_asset(event.sid,
default_none=True)
if asset:
# Clone for user algo code.
event.sid = asset_cache[asset] = asset
elif self.mask:
# When masking drop all non-mappable values.
continue
elif self.symbol is None:
# If the event's sid property is an int we coerce
# it into an Equity.
event.sid = asset_cache[event.sid] = Equity(event.sid)
event.type = DATASOURCE_TYPE.CUSTOM
event.source_id = self.namestring
yield event
class PandasRequestsCSV(PandasCSV):
# maximum 100 megs to prevent DDoS
MAX_DOCUMENT_SIZE = (1024 * 1024) * 100
# maximum number of bytes to read in at a time
CONTENT_CHUNK_SIZE = 4096
def __init__(self,
url,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
special_params_checker=None,
**kwargs):
# Peel off extra requests kwargs, forwarding the remaining kwargs to
# the superclass.
        # mask_requests_args also returns a possibly https-updated url if an
        # http quandl data-source url was given; if the url hasn't changed,
        # the original is returned.
self._requests_kwargs, self.url =\
mask_requests_args(url,
params_checker=special_params_checker,
**kwargs)
remaining_kwargs = {
k: v for k, v in iteritems(kwargs)
if k not in self.requests_kwargs
}
self.namestring = type(self).__name__
super(PandasRequestsCSV, self).__init__(
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
**remaining_kwargs
)
self.fetch_size = None
self.fetch_hash = None
self.df = self.load_df()
self.special_params_checker = special_params_checker
@property
def requests_kwargs(self):
return self._requests_kwargs
def fetch_url(self, url):
info = "checking {url} with {params}"
logger.info(info.format(url=url, params=self.requests_kwargs))
# setting decode_unicode=True sometimes results in a
# UnicodeEncodeError exception, so instead we'll use
# pandas logic for decoding content
try:
response = requests.get(url, **self.requests_kwargs)
except requests.exceptions.ConnectionError:
raise Exception('Could not connect to %s' % url)
if not response.ok:
raise Exception('Problem reaching %s' % url)
elif response.is_redirect:
            # On the off chance we don't catch a redirect URL
            # in validation, this will catch it.
new_url = response.headers['location']
raise FetcherCSVRedirectError(
url=url,
new_url=new_url,
extra={
'old_url': url,
'new_url': new_url
}
)
content_length = 0
logger.info('{} connection established in {:.1f} seconds'.format(
url, response.elapsed.total_seconds()))
# use the decode_unicode flag to ensure that the output of this is
# a string, and not bytes.
for chunk in response.iter_content(self.CONTENT_CHUNK_SIZE,
decode_unicode=True):
if content_length > self.MAX_DOCUMENT_SIZE:
raise Exception('Document size too big.')
if chunk:
content_length += len(chunk)
yield chunk
return
def fetch_data(self):
# create a data frame directly from the full text of
# the response from the returned file-descriptor.
data = self.fetch_url(self.url)
fd = StringIO()
if isinstance(data, str):
fd.write(data)
else:
for chunk in data:
fd.write(chunk)
self.fetch_size = fd.tell()
fd.seek(0)
try:
# see if pandas can parse csv data
frames = read_csv(fd, **self.pandas_kwargs)
frames_hash = hashlib.md5(str(fd.getvalue()).encode('utf-8'))
self.fetch_hash = frames_hash.hexdigest()
except pd.parser.CParserError:
# could not parse the data, raise exception
raise Exception('Error parsing remote CSV data.')
finally:
fd.close()
        return frames
# ---- end of zipline/sources/requests_csv.py ----
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
from numpy import concatenate
from lru import LRU
from pandas import isnull
from pandas.tslib import normalize_date
from toolz import sliding_window
from six import with_metaclass
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.lib._int64window import AdjustedArrayWindow as Int64Window
from zipline.lib._float64window import AdjustedArrayWindow as Float64Window
from zipline.lib.adjustment import Float64Multiply, Float64Add
from zipline.utils.cache import ExpiringCache
from zipline.utils.math_utils import number_of_decimal_places
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import float64_dtype
from zipline.utils.pandas_utils import find_in_sorted_index
# Default number of decimal places used for rounding asset prices.
DEFAULT_ASSET_PRICE_DECIMALS = 3
class HistoryCompatibleUSEquityAdjustmentReader(object):
def __init__(self, adjustment_reader):
self._adjustments_reader = adjustment_reader
def load_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out
def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
For the use of AdjustedArrayWindow in the loader, which looks back
from the current simulation time to a window of data, the dictionary is
structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
The asset for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != 'volume':
mergers = self._adjustments_reader.get_adjustments_for_sid(
'mergers', sid)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
m[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
'dividends', sid)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
d[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid(
'splits', sid)
for s in splits:
dt = s[0]
if start < dt <= end:
if field == 'volume':
ratio = 1.0 / s[1]
else:
ratio = s[1]
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
ratio)
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
return adjs
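# --- Editor's illustrative sketch (not part of the original module) ---
# A worked example of the dict layout documented above, assuming a single
# hypothetical 2:1 split (ratio 0.5) effective 2015-05-26 inside a five
# session window. The dates and ratio are made up for illustration only.
def _example_equity_adjustments_layout():  # pragma: no cover - illustration
    import pandas as pd

    dts = pd.date_range('2015-05-22', periods=5, freq='B', tz='UTC')
    split_dt = pd.Timestamp('2015-05-26', tz='UTC')
    ratio = 0.5
    # Mirror the loop above: the key is where the split day falls in the
    # window, and the multiplier covers every row strictly before it.
    end_loc = int(dts.searchsorted(split_dt))
    return {end_loc: [Float64Multiply(0, end_loc - 1, 0, 0, ratio)]}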
class ContinuousFutureAdjustmentReader(object):
"""
Calculates adjustments for continuous futures, based on the
close and open of the contracts on either side of each roll.
"""
def __init__(self,
trading_calendar,
asset_finder,
bar_reader,
roll_finders,
frequency):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._bar_reader = bar_reader
self._roll_finders = roll_finders
self._frequency = frequency
def load_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out
def _make_adjustment(self,
adjustment_type,
front_close,
back_close,
end_loc):
adj_base = back_close - front_close
if adjustment_type == 'mul':
adj_value = 1.0 + adj_base / front_close
adj_class = Float64Multiply
elif adjustment_type == 'add':
adj_value = adj_base
adj_class = Float64Add
else:
# Guard against an unrecognized adjustment style instead of failing
# later with an UnboundLocalError.
raise ValueError(
"Invalid adjustment type: {0!r}; expected 'mul' or 'add'".format(
adjustment_type))
return adj_class(0,
end_loc,
0,
0,
adj_value)
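# Editor's note (illustration, not original code): with a hypothetical
# front_close of 100.0 and back_close of 98.0, the 'mul' style above yields a
# multiplier of 1.0 + (98.0 - 100.0) / 100.0 == 0.98, while the 'add' style
# yields an additive offset of -2.0; both are applied to rows 0..end_loc.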
def _get_adjustments_in_range(self, cf, dts, field):
if field == 'volume' or field == 'sid':
return {}
if cf.adjustment is None:
return {}
rf = self._roll_finders[cf.roll_style]
partitions = []
rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1],
cf.offset)
tc = self._trading_calendar
adjs = {}
for front, back in sliding_window(2, rolls):
front_sid, roll_dt = front
back_sid = back[0]
dt = tc.previous_session_label(roll_dt)
if self._frequency == 'minute':
dt = tc.open_and_close_for_session(dt)[1]
roll_dt = tc.open_and_close_for_session(roll_dt)[0]
partitions.append((front_sid,
back_sid,
dt,
roll_dt))
for partition in partitions:
front_sid, back_sid, dt, roll_dt = partition
last_front_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(front_sid), dt)
last_back_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(back_sid), dt)
if isnull(last_front_dt) or isnull(last_back_dt):
continue
front_close = self._bar_reader.get_value(
front_sid, last_front_dt, 'close')
back_close = self._bar_reader.get_value(
back_sid, last_back_dt, 'close')
adj_loc = dts.searchsorted(roll_dt)
end_loc = adj_loc - 1
adj = self._make_adjustment(cf.adjustment,
front_close,
back_close,
end_loc)
try:
adjs[adj_loc].append(adj)
except KeyError:
adjs[adj_loc] = [adj]
return adjs
class SlidingWindow(object):
"""
Wrapper around an AdjustedArrayWindow which supports monotonically
increasing (by datetime) requests for a sized window of data.
Parameters
----------
window : AdjustedArrayWindow
Window of pricing data with prefetched values beyond the current
simulation dt.
cal_start : int
Index in the overall calendar at which the window starts.
"""
def __init__(self, window, size, cal_start, offset):
self.window = window
self.cal_start = cal_start
self.current = next(window)
self.offset = offset
self.most_recent_ix = self.cal_start + size
def get(self, end_ix):
"""
Returns
-------
out : A np.ndarray of the equity pricing up to end_ix after adjustments
and rounding have been applied.
"""
if self.most_recent_ix == end_ix:
return self.current
target = end_ix - self.cal_start - self.offset + 1
self.current = self.window.seek(target)
self.most_recent_ix = end_ix
return self.current
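# Editor's note (illustration, not original code): for a window created with
# cal_start=10 and offset=0, a call to `get(16)` seeks the underlying
# AdjustedArrayWindow to target = 16 - 10 - 0 + 1 = 7, i.e. seven rows into
# the prefetched block, and remembers 16 so a repeated call is served from
# cache.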
class HistoryLoader(with_metaclass(ABCMeta)):
"""
Loader for sliding history windows, with support for adjustments.
Parameters
----------
trading_calendar: TradingCalendar
Contains the grouping logic needed to assign minutes to periods.
reader : DailyBarReader, MinuteBarReader
Reader for pricing bars.
adjustment_reader : SQLiteAdjustmentReader
Reader for adjustment data.
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume', 'sid')
def __init__(self, trading_calendar, reader, equity_adjustment_reader,
asset_finder,
roll_finders=None,
sid_cache_size=1000,
prefetch_length=0):
self.trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._reader = reader
self._adjustment_readers = {}
if equity_adjustment_reader is not None:
self._adjustment_readers[Equity] = \
HistoryCompatibleUSEquityAdjustmentReader(
equity_adjustment_reader)
if roll_finders:
self._adjustment_readers[ContinuousFuture] =\
ContinuousFutureAdjustmentReader(trading_calendar,
asset_finder,
reader,
roll_finders,
self._frequency)
self._window_blocks = {
field: ExpiringCache(LRU(sid_cache_size))
for field in self.FIELDS
}
self._prefetch_length = prefetch_length
@abstractproperty
def _frequency(self):
pass
@abstractproperty
def _calendar(self):
pass
@abstractmethod
def _array(self, start, end, assets, field):
pass
def _decimal_places_for_asset(self, asset, reference_date):
if isinstance(asset, Future) and asset.tick_size:
return number_of_decimal_places(asset.tick_size)
elif isinstance(asset, ContinuousFuture):
# Tick size should be the same for all contracts of a continuous
# future, so arbitrarily get the contract with next upcoming auto
# close date.
oc = self._asset_finder.get_ordered_contracts(asset.root_symbol)
contract_sid = oc.contract_before_auto_close(reference_date.value)
if contract_sid is not None:
contract = self._asset_finder.retrieve_asset(contract_sid)
if contract.tick_size:
return number_of_decimal_places(contract.tick_size)
return DEFAULT_ASSET_PRICE_DECIMALS
def _ensure_sliding_windows(self, assets, dts, field,
is_perspective_after):
"""
Ensure that there is a sliding pricing window for each asset that can
provide data for the given parameters.
If the corresponding window for the (assets, len(dts), field) does not
exist, then create a new one.
If a corresponding window does exist for (assets, len(dts), field), but
can not provide data for the current dts range, then create a new
one and replace the expired window.
Parameters
----------
assets : iterable of Assets
The assets in the window
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Assumes that all dts are present and contiguous in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
see: `HistoryLoader.history`
Returns
-------
out : list of SlidingWindow with sufficient data so that each asset's
window can provide `get` for the index corresponding with the last
value in `dts`
"""
end = dts[-1]
size = len(dts)
asset_windows = {}
needed_assets = []
cal = self._calendar
assets = self._asset_finder.retrieve_all(assets)
end_ix = find_in_sorted_index(cal, end)
for asset in assets:
try:
window = self._window_blocks[field].get(
(asset, size, is_perspective_after), end)
except KeyError:
needed_assets.append(asset)
else:
if end_ix < window.most_recent_ix:
# Window needs reset. Requested end index occurs before the
# end index from the previous history call for this window.
# Grab new window instead of rewinding adjustments.
needed_assets.append(asset)
else:
asset_windows[asset] = window
if needed_assets:
offset = 0
start_ix = find_in_sorted_index(cal, dts[0])
prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)
prefetch_end = cal[prefetch_end_ix]
prefetch_dts = cal[start_ix:prefetch_end_ix + 1]
if is_perspective_after:
adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1)
adj_dts = cal[start_ix:adj_end_ix + 1]
else:
adj_dts = prefetch_dts
prefetch_len = len(prefetch_dts)
array = self._array(prefetch_dts, needed_assets, field)
if field == 'sid':
window_type = Int64Window
else:
window_type = Float64Window
view_kwargs = {}
if field == 'volume':
array = array.astype(float64_dtype)
for i, asset in enumerate(needed_assets):
try:
adj_reader = self._adjustment_readers[type(asset)]
except KeyError:
adj_reader = None
if adj_reader is not None:
adjs = adj_reader.load_adjustments(
[field], adj_dts, [asset])[0]
else:
adjs = {}
window = window_type(
array[:, i].reshape(prefetch_len, 1),
view_kwargs,
adjs,
offset,
size,
int(is_perspective_after),
self._decimal_places_for_asset(asset, dts[-1]),
)
sliding_window = SlidingWindow(window, size, start_ix, offset)
asset_windows[asset] = sliding_window
self._window_blocks[field].set(
(asset, size, is_perspective_after),
sliding_window,
prefetch_end)
return [asset_windows[asset] for asset in assets]
def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
Assumes that all dts are present and contiguous in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
Consider an example equity with a 0.5 split ratio dated 05-27, and the
dts for a history call of 5 bars at a '1d' frequency made at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
|     |       |       |       | last dt | <-- viewer is here |
|     | 05-23 | 05-24 | 05-25 | 05-26   | 05-27 9:31         |
| raw | 10.10 | 10.20 | 10.30 | 10.40   |                    |
| adj |  5.05 |  5.10 |  5.15 |  5.20   |                    |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
Consider an example equity with a 0.5 split ratio dated 05-27, and the
dts for a history call of 5 bars at a '1d' frequency made at
05-27 0:00. Simulation frequency is 'daily'.
|     |       |       |       |       | <-- viewer is here |
|     |       |       |       |       | last dt            |
|     | 05-23 | 05-24 | 05-25 | 05-26 | 05-27              |
| raw | 10.10 | 10.20 | 10.30 | 10.40 |  5.25              |
| adj |  5.05 |  5.10 |  5.15 |  5.20 |  5.25              |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
)
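# Editor's note (illustration, not original code): a typical call from the
# data portal looks roughly like
#     loader.history([asset], sessions[-5:], 'close', is_perspective_after=False)
# which returns a (5, 1) float64 array of adjusted closes; the asset,
# session slice, and field used here are hypothetical.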
class DailyHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return 'daily'
@property
def _calendar(self):
return self._reader.sessions
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
class MinuteHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return 'minute'
@lazyval
def _calendar(self):
mm = self.trading_calendar.all_minutes
start = mm.searchsorted(self._reader.first_trading_day)
end = mm.searchsorted(self._reader.last_available_dt, side='right')
return mm[start:end]
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
# --- end of zipline/data/history_loader.py ---
from abc import ABCMeta, abstractmethod
from numpy import (
full,
nan,
int64,
zeros
)
from six import iteritems, with_metaclass
from zipline.utils.memoize import lazyval
class AssetDispatchBarReader(with_metaclass(ABCMeta)):
"""
Parameters
----------
- trading_calendar : zipline.utils.trading_calendar.TradingCalendar
- asset_finder : zipline.assets.AssetFinder
- readers : dict
A dict mapping Asset type to the corresponding
[Minute|Session]BarReader
- last_available_dt : pd.Timestamp or None, optional
If not provided, infers it by using the min of the
last_available_dt values of the underlying readers.
"""
def __init__(
self,
trading_calendar,
asset_finder,
readers,
last_available_dt=None,
):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._readers = readers
self._last_available_dt = last_available_dt
for t, r in iteritems(self._readers):
assert trading_calendar == r.trading_calendar, \
"All readers must share target trading_calendar. " \
"Reader={0} for type={1} uses calendar={2} which does not " \
"match the desired shared calendar={3} ".format(
r, t, r.trading_calendar, trading_calendar)
@abstractmethod
def _dt_window_size(self, start_dt, end_dt):
pass
@property
def _asset_types(self):
return self._readers.keys()
def _make_raw_array_shape(self, start_dt, end_dt, num_sids):
return self._dt_window_size(start_dt, end_dt), num_sids
def _make_raw_array_out(self, field, shape):
if field != 'volume' and field != 'sid':
out = full(shape, nan)
else:
out = zeros(shape, dtype=int64)
return out
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def last_available_dt(self):
if self._last_available_dt is not None:
return self._last_available_dt
else:
return min(r.last_available_dt for r in self._readers.values())
@lazyval
def first_trading_day(self):
return max(r.first_trading_day for r in self._readers.values())
def get_value(self, sid, dt, field):
asset = self._asset_finder.retrieve_asset(sid)
r = self._readers[type(asset)]
return r.get_value(asset, dt, field)
def get_last_traded_dt(self, asset, dt):
r = self._readers[type(asset)]
return r.get_last_traded_dt(asset, dt)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
asset_types = self._asset_types
sid_groups = {t: [] for t in asset_types}
out_pos = {t: [] for t in asset_types}
assets = self._asset_finder.retrieve_all(sids)
for i, asset in enumerate(assets):
t = type(asset)
sid_groups[t].append(asset)
out_pos[t].append(i)
batched_arrays = {
t: self._readers[t].load_raw_arrays(fields,
start_dt,
end_dt,
sid_groups[t])
for t in asset_types if sid_groups[t]}
results = []
shape = self._make_raw_array_shape(start_dt, end_dt, len(sids))
for i, field in enumerate(fields):
out = self._make_raw_array_out(field, shape)
for t, arrays in iteritems(batched_arrays):
out[:, out_pos[t]] = arrays[i]
results.append(out)
return results
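# Editor's note (illustration, not original code): given hypothetical sids
# resolving to [Equity(1), Future(2), Equity(3)], the grouping above produces
# sid_groups == {Equity: [Equity(1), Equity(3)], Future: [Future(2)]} and
# out_pos == {Equity: [0, 2], Future: [1]}, so each reader's columns are
# scattered back into their original positions in the combined output.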
class AssetDispatchMinuteBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.minutes_in_range(start_dt, end_dt))
class AssetDispatchSessionBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.sessions_in_range(start_dt, end_dt))
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self.first_trading_day,
self.last_available_dt)
# --- end of zipline/data/dispatch_bar_reader.py ---
from abc import ABCMeta, abstractmethod
import json
import os
from glob import glob
from os.path import join
from textwrap import dedent
from lru import LRU
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import logbook
import numpy as np
import pandas as pd
from pandas import HDFStore
import tables
from six import with_metaclass
from toolz import keymap, valmap
from zipline.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal
)
from zipline.gens.sim_engine import NANOS_IN_MINUTE
from zipline.data.bar_reader import BarReader, NoDataOnDate
from zipline.data.us_equity_pricing import check_uint32_safe
from zipline.utils.calendars import get_calendar
from zipline.utils.cli import maybe_show_progress
from zipline.utils.memoize import lazyval
logger = logbook.Logger('MinuteBars')
US_EQUITIES_MINUTES_PER_DAY = 390
FUTURES_MINUTES_PER_DAY = 1440
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 1000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
class MinuteBarReader(BarReader):
@property
def data_frequency(self):
return "minute"
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day,
dtype='datetime64[ns]')
deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]')
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True, box=True)
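# --- Editor's illustrative sketch (not part of the original module) ---
# Shows the shape of the index built above for two hypothetical sessions:
# each market open contributes `minutes_per_day` consecutive minutes.
def _example_calc_minute_index():  # pragma: no cover - illustration only
    opens = pd.DatetimeIndex(
        ['2016-01-04 14:31', '2016-01-05 14:31'], tz='UTC')
    index = _calc_minute_index(opens, US_EQUITIES_MINUTES_PER_DAY)
    # 2 sessions * 390 minutes -> 780 entries; entry 0 is the first open and
    # entry 390 is the second session's open.
    assert len(index) == 2 * US_EQUITIES_MINUTES_PER_DAY
    return index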
def _sid_subdir_path(sid):
"""
Format the subdir path to limit the number of directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = np.nan_to_num(cols['open']) * scale_factor
scaled_highs = np.nan_to_num(cols['high']) * scale_factor
scaled_lows = np.nan_to_num(cols['low']) * scale_factor
scaled_closes = np.nan_to_num(cols['close']) * scale_factor
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column, so accumulate the per-column masks with |=
# (using &= here would leave the all-False mask unchanged).
exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
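# --- Editor's illustrative sketch (not part of the original module) ---
# Demonstrates the uint32 scaling performed above with made-up prices, an
# arbitrary sid of 1, and the default OHLC_RATIO of 1000.
def _example_convert_cols():  # pragma: no cover - illustration only
    cols = {
        'open': np.array([10.25]),
        'high': np.array([10.50]),
        'low': np.array([10.00]),
        'close': np.array([10.40]),
        'volume': np.array([1500.0]),
    }
    opens, highs, lows, closes, volumes = convert_cols(
        cols, OHLC_RATIO, 1, 'warn')
    # 10.25 * 1000 is stored as the integer 10250; the reader later
    # multiplies by 1.0 / OHLC_RATIO to recover the quoted price.
    return opens, highs, lows, closes, volumes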
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
calendar : zipline.utils.calendars.trading_calendar.TradingCalendar
The TradingCalendar on which the minute bars are based.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
minutes_per_day : int
The number of minutes per each period.
"""
FORMAT_VERSION = 3
METADATA_FILENAME = 'metadata.json'
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
try:
version = raw_data['version']
except KeyError:
# Version was first written with version 1, assume 0,
# if version does not match.
version = 0
default_ohlc_ratio = raw_data['ohlc_ratio']
if version >= 1:
minutes_per_day = raw_data['minutes_per_day']
else:
# version 0 always assumed US equities.
minutes_per_day = US_EQUITIES_MINUTES_PER_DAY
if version >= 2:
calendar = get_calendar(raw_data['calendar_name'])
start_session = pd.Timestamp(
raw_data['start_session'], tz='UTC')
end_session = pd.Timestamp(raw_data['end_session'], tz='UTC')
else:
# No calendar info included in older versions, so
# default to NYSE.
calendar = get_calendar('NYSE')
start_session = pd.Timestamp(
raw_data['first_trading_day'], tz='UTC')
end_session = calendar.minute_to_session_label(
pd.Timestamp(
raw_data['market_closes'][-1], unit='m', tz='UTC')
)
if version >= 3:
ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid']
if ohlc_ratios_per_sid is not None:
ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
else:
ohlc_ratios_per_sid = None
return cls(
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=version,
)
def __init__(
self,
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=FORMAT_VERSION,
):
self.calendar = calendar
self.start_session = start_session
self.end_session = end_session
self.default_ohlc_ratio = default_ohlc_ratio
self.ohlc_ratios_per_sid = ohlc_ratios_per_sid
self.minutes_per_day = minutes_per_day
self.version = version
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the floats from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : zipline.utils.calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint32. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (1000).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
multiply the pricing data to convert the floats from floats to
an integer to fit within the np.uint32.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, and close columns are integers which are 1000 times
the quoted price, so that the data can be represented and stored as an
np.uint32, supporting market prices quoted to the thousandths place.
volume is stored as an np.uint32 with no scaling applied.
The 'index' for each individual asset is a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
day will have zeros for all pricing data up until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ('open', 'high', 'low', 'close', 'volume')
def __init__(self,
rootdir,
calendar,
start_session,
end_session,
minutes_per_day,
default_ohlc_ratio=OHLC_RATIO,
ohlc_ratios_per_sid=None,
expectedlen=DEFAULT_EXPECTEDLEN,
write_metadata=True):
self._rootdir = rootdir
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
slicer = (
calendar.schedule.index.slice_indexer(start_session, end_session))
self._schedule = calendar.schedule[slicer]
self._session_labels = self._schedule.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._default_ohlc_ratio = default_ohlc_ratio
self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
self._minute_index = _calc_minute_index(
self._schedule.market_open, self._minutes_per_day)
if write_metadata:
metadata = BcolzMinuteBarMetadata(
self._default_ohlc_ratio,
self._ohlc_ratios_per_sid,
self._calendar,
self._start_session,
self._end_session,
self._minutes_per_day,
)
metadata.write(self._rootdir)
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
@property
def first_trading_day(self):
return self._start_session
def ohlc_ratio_for_sid(self, sid):
if self._ohlc_ratios_per_sid is not None:
try:
return self._ohlc_ratios_per_sid[sid]
except KeyError:
pass
# If no ohlc_ratios_per_sid dict is passed, or if the specified
# sid is not in the dict, fallback to the general ohlc_ratio.
return self._default_ohlc_ratio
def sidpath(self, sid):
"""
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters:
-----------
sid : int
Asset identifier.
Returns:
--------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters:
-----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint32)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minutes_per_day`
worth of zeros.
Parameters:
-----------
sid : int
The asset identifier for the data being written.
date : datetime-like
The date used to calculate how many slots to pad.
The padding is done through the date, i.e. after the padding is
done the `last_date_in_output_for_sid` will be equal to `date`
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
# Use an identity check: NaT does not compare equal to itself.
if last_date is pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the bcolz ctable does not yet extend through the date before the
first day provided, fill the ctable with 0s up to that date.
Parameters:
-----------
sid : int
The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
If the bcolz ctable does not yet extend through the date before the
first day provided, fill the ctable with 0s up to that date.
Parameters:
-----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters:
-----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
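# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal write/read round trip with hypothetical data. It assumes the
# 'NYSE' calendar is registered (as in a standard zipline install) and that
# `rootdir` is an empty, writable directory.
def _example_minute_bar_round_trip(rootdir):  # pragma: no cover - illustration
    calendar = get_calendar('NYSE')
    session = pd.Timestamp('2016-01-05', tz='UTC')
    writer = BcolzMinuteBarWriter(
        rootdir, calendar, session, session, US_EQUITIES_MINUTES_PER_DAY)
    minutes = calendar.minutes_for_session(session)[:3]
    df = pd.DataFrame(
        {'open': [10.0, 10.1, 10.2],
         'high': [10.1, 10.2, 10.3],
         'low': [9.9, 10.0, 10.1],
         'close': [10.05, 10.15, 10.25],
         'volume': [100.0, 200.0, 300.0]},
        index=minutes)
    writer.write_sid(1, df)
    reader = BcolzMinuteBarReader(rootdir)
    return reader.get_value(1, minutes[-1], 'close')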
class BcolzMinuteBarReader(MinuteBarReader):
"""
Reader for data written by BcolzMinuteBarWriter
Parameters:
-----------
rootdir : string
The root directory containing the metadata and asset bcolz
directories.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarWriter
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume')
def __init__(self, rootdir, sid_cache_size=1000):
self._rootdir = rootdir
metadata = self._get_metadata()
self._start_session = metadata.start_session
self._end_session = metadata.end_session
self.calendar = metadata.calendar
slicer = self.calendar.schedule.index.slice_indexer(
self._start_session,
self._end_session,
)
self._schedule = self.calendar.schedule[slicer]
self._market_opens = self._schedule.market_open
self._market_open_values = self._market_opens.values.\
astype('datetime64[m]').astype(np.int64)
self._market_closes = self._schedule.market_close
self._market_close_values = self._market_closes.values.\
astype('datetime64[m]').astype(np.int64)
self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio
ohlc_ratios = metadata.ohlc_ratios_per_sid
if ohlc_ratios:
self._ohlc_inverses_per_sid = (
valmap(lambda x: 1.0 / x, ohlc_ratios))
else:
self._ohlc_inverses_per_sid = None
self._minutes_per_day = metadata.minutes_per_day
self._carrays = {
field: LRU(sid_cache_size)
for field in self.FIELDS
}
self._last_get_value_dt_position = None
self._last_get_value_dt_value = None
# This is to avoid any bad data or other performance-killing situation
# where there is a consecutive streak of 0 (no volume) starting at an
# asset's start date.
# if asset 1 started on 2015-01-03 but its first trade is 2015-01-06
# 10:31 AM US/Eastern, this dict would store {1: 23675971},
# which is the minute epoch of that date.
self._known_zero_volume_dict = {}
def _get_metadata(self):
return BcolzMinuteBarMetadata.read(self._rootdir)
@property
def trading_calendar(self):
return self.calendar
@lazyval
def last_available_dt(self):
_, close = self.calendar.open_and_close_for_session(self._end_session)
return close
@property
def first_trading_day(self):
return self._start_session
def _ohlc_ratio_inverse_for_sid(self, sid):
if self._ohlc_inverses_per_sid is not None:
try:
return self._ohlc_inverses_per_sid[sid]
except KeyError:
pass
# If we can not get a sid-specific OHLC inverse for this sid,
# fallback to the default.
return self._default_ohlc_inverse
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns:
--------
List of DatetimeIndex representing the minutes to exclude because
of early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
@lazyval
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored as such in support of a fast answer to the question,
does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
self._minutes_per_day
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
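# Editor's note (illustration, not original code): on a regular 390-minute US
# equities day opening at 14:31 UTC, a hypothetical early close at 18:00 UTC
# maps to an exclusion span covering the 180 positions for 18:01 through
# 21:00, which load_raw_arrays later deletes from any window spanning that
# day.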
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_carray_path(self, sid, field):
sid_subdir = _sid_subdir_path(sid)
# carrays are subdirectories of the sid's rootdir
return os.path.join(self._rootdir, sid_subdir, field)
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
carray = self._carrays[field][sid] = \
bcolz.carray(rootdir=self._get_carray_path(sid, field),
mode='r')
return carray
def table_len(self, sid):
"""Returns the length of the underlying table for this sid."""
return len(self._open_minute_file('close', sid))
def get_sid_attr(self, sid, name):
sid_subdir = _sid_subdir_path(sid)
sid_path = os.path.join(self._rootdir, sid_subdir)
attrs = bcolz.attrs.attrs(sid_path, 'r')
try:
return attrs[name]
except KeyError:
return None
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters:
-----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns:
--------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
def get_last_traded_dt(self, asset, dt):
minute_pos = self._find_last_traded_position(asset, dt)
if minute_pos == -1:
return pd.NaT
return self._pos_to_minute(minute_pos)
def _find_last_traded_position(self, asset, dt):
volumes = self._open_minute_file('volume', asset)
start_date_minute = asset.start_date.value / NANOS_IN_MINUTE
dt_minute = dt.value / NANOS_IN_MINUTE
try:
# if we know of a dt before which this asset has no volume,
# don't look before that dt
earliest_dt_to_search = self._known_zero_volume_dict[asset.sid]
except KeyError:
earliest_dt_to_search = start_date_minute
if dt_minute < earliest_dt_to_search:
return -1
pos = find_last_traded_position_internal(
self._market_open_values,
self._market_close_values,
dt_minute,
earliest_dt_to_search,
volumes,
self._minutes_per_day,
)
if pos == -1:
# if we didn't find any volume before this dt, save it to avoid
# work in the future.
try:
self._known_zero_volume_dict[asset.sid] = max(
dt_minute,
self._known_zero_volume_dict[asset.sid]
)
except KeyError:
self._known_zero_volume_dict[asset.sid] = dt_minute
return pos
def _pos_to_minute(self, pos):
minute_epoch = minute_value(
self._market_open_values,
pos,
self._minutes_per_day
)
return pd.Timestamp(minute_epoch, tz='UTC', unit="m")
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[
excl_start - start_idx:excl_stop - start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
if field != 'volume':
out[:len(where), i][where] = (
values[where] * self._ohlc_ratio_inverse_for_sid(sid))
else:
out[:len(where), i][where] = values[where]
results.append(out)
return results
class MinuteBarUpdateReader(with_metaclass(ABCMeta, object)):
"""
Abstract base class for minute update readers.
"""
@abstractmethod
def read(self, dts, sids):
"""
Read and return pricing update data.
Parameters
----------
dts : DatetimeIndex
The minutes for which to read the pricing updates.
sids : iter[int]
The sids for which to read the pricing updates.
Returns
-------
data : iter[(int, DataFrame)]
Returns an iterable of (sid, DataFrame) pairs of OHLCV data.
"""
raise NotImplementedError()
class H5MinuteBarUpdateWriter(object):
"""
Writer for files containing minute bar updates for consumption by a writer
for a ``MinuteBarReader`` format.
Parameters
----------
path : str
The destination path.
complevel : int, optional
The HDF5 complevel, defaults to ``5``.
complib : str, optional
The HDF5 complib, defaults to ``zlib``.
"""
FORMAT_VERSION = 0
_COMPLEVEL = 5
_COMPLIB = 'zlib'
def __init__(self, path, complevel=None, complib=None):
self._complevel = complevel if complevel \
is not None else self._COMPLEVEL
self._complib = complib if complib \
is not None else self._COMPLIB
self._path = path
def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
An iterable of (sid, DataFrame) pairs or a mapping from sid to the
corresponding OHLCV pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
h5file.set_node_attr('/', 'version', 0)
class H5MinuteBarUpdateReader(MinuteBarUpdateReader):
"""
Reader for minute bar updates stored in HDF5 files.
Parameters
----------
path : str
The path of the HDF5 file from which to source data.
"""
def __init__(self, path):
self._panel = pd.read_hdf(path)
def read(self, dts, sids):
panel = self._panel[sids, dts, :]
return panel.iteritems()
# --- end of zipline/data/minute_bars.py ---
from operator import itemgetter
import re
import numpy as np
import pandas as pd
get_unit_and_periods = itemgetter('unit', 'periods')
def parse_treasury_csv_column(column):
"""
Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year.
"""
column_re = re.compile(
r"^(?P<prefix>RIFLGFC)"
"(?P<unit>[YM])"
"(?P<periods>[0-9]{2})"
"(?P<suffix>_N.B)$"
)
match = column_re.match(column)
if match is None:
raise ValueError("Couldn't parse CSV column %r." % column)
unit, periods = get_unit_and_periods(match.groupdict())
# Roundtrip through int to coerce '06' into '6'.
return str(int(periods)) + ('year' if unit == 'Y' else 'month')
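# --- Editor's illustrative sketch (not part of the original module) ---
# Example column names in the format the parser above expects, and the
# tenor strings they map to.
def _example_parse_treasury_columns():  # pragma: no cover - illustration only
    assert parse_treasury_csv_column('RIFLGFCM03_N.B') == '3month'
    assert parse_treasury_csv_column('RIFLGFCY10_N.B') == '10year'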
def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
# The US Treasury actually has data going back further than this, but it's
# pretty rare to find pricing data going back that far, and there's no
# reason to make people download benchmarks back to 1950 that they'll never
# be able to use.
return pd.Timestamp('1980', tz='UTC')
def get_treasury_data(start_date, end_date):
return pd.read_csv(
"https://www.federalreserve.gov/datadownload/Output.aspx"
"?rel=H15"
"&series=bf17364827e38702b42a58cf8eaa3f78"
"&lastObs="
"&from=" # An unbounded query is ~2x faster than specifying dates.
"&to="
"&filetype=csv"
"&layout=seriescolumn"
"&type=package",
skiprows=1, # First row is a useless header.
parse_dates=['Time Period'],
na_values=['ND'], # Presumably this stands for "No Data".
index_col=0,
).loc[
start_date:end_date
].dropna(
how='all'
).rename(
columns=parse_treasury_csv_column
).tz_localize('UTC') * 0.01 # Convert from 2.57% to 0.0257.
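# Editorial sketch (not part of the original source): a hedged example of
# pulling a short window of treasury yields with ``get_treasury_data``. The
# dates are arbitrary assumptions and the call requires network access to the
# Federal Reserve download service.
def _example_recent_treasury_curves():
    curves = get_treasury_data('2017-01-03', '2017-01-31')
    # Columns are renamed to tenors such as '3month' or '10year', and rates
    # are already scaled to decimals (0.0257 rather than 2.57).
    return curves[['3month', '10year']]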
def dataconverter(s):
    try:
        return float(s) / 100
    except (TypeError, ValueError):
        # Non-numeric entries (e.g. 'ND' for "No Data") become NaN.
        return np.nan
def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
                       squeeze=True)
# end of zipline/data/treasuries.py (package: zipline-live)
from operator import mul
from logbook import Logger
import numpy as np
from numpy import float64, int64, nan
import pandas as pd
from pandas import isnull
from pandas.tslib import normalize_date
from six import iteritems
from six.moves import reduce
from zipline.assets import (
Asset,
AssetConvertible,
Equity,
Future,
PricingDataAssociable,
)
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.data.continuous_future_reader import (
ContinuousFutureSessionBarReader,
ContinuousFutureMinuteBarReader
)
from zipline.assets.roll_finder import (
CalendarRollFinder,
VolumeRollFinder
)
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader
)
from zipline.data.resample import (
DailyHistoryAggregator,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.data.history_loader import (
DailyHistoryLoader,
MinuteHistoryLoader,
)
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.utils.pandas_utils import timedelta_to_integral_minutes
from zipline.errors import (
NoTradeDataAvailableTooEarly,
NoTradeDataAvailableTooLate,
HistoryWindowStartsBeforeData,
)
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open",
"high",
"low",
"close",
"volume",
"price",
"contract",
"sid",
"last_traded",
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
DEFAULT_MINUTE_HISTORY_PREFETCH = 1560
DEFAULT_DAILY_HISTORY_PREFETCH = 40
_DEF_M_HIST_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
_DEF_D_HIST_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
asset_finder : zipline.assets.assets.AssetFinder
The AssetFinder instance used to resolve assets.
trading_calendar: zipline.utils.calendar.exchange_calendar.TradingCalendar
The calendar instance used to provide minute->session information.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzFutureMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
    adjustment_reader : SQLiteAdjustmentReader, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
last_available_session : pd.Timestamp, optional
The last session to make available in session-level data.
last_available_minute : pd.Timestamp, optional
The last minute to make available in minute-level data.
"""
def __init__(self,
asset_finder,
trading_calendar,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None,
last_available_session=None,
last_available_minute=None,
minute_history_prefetch_length=_DEF_M_HIST_PREFETCH,
daily_history_prefetch_length=_DEF_D_HIST_PREFETCH):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Cache of sid -> the first trading day of an asset.
self._asset_start_dates = {}
self._asset_end_dates = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._first_available_session = first_trading_day
if last_available_session:
self._last_available_session = last_available_session
else:
# Infer the last session from the provided readers.
last_sessions = [
reader.last_available_dt
for reader in [equity_daily_reader, future_daily_reader]
if reader is not None
]
if last_sessions:
self._last_available_session = min(last_sessions)
else:
self._last_available_session = None
if last_available_minute:
self._last_available_minute = last_available_minute
else:
# Infer the last minute from the provided readers.
last_minutes = [
reader.last_available_dt
for reader in [equity_minute_reader, future_minute_reader]
if reader is not None
]
if last_minutes:
self._last_available_minute = min(last_minutes)
else:
self._last_available_minute = None
aligned_equity_minute_reader = self._ensure_reader_aligned(
equity_minute_reader)
aligned_equity_session_reader = self._ensure_reader_aligned(
equity_daily_reader)
aligned_future_minute_reader = self._ensure_reader_aligned(
future_minute_reader)
aligned_future_session_reader = self._ensure_reader_aligned(
future_daily_reader)
self._roll_finders = {
'calendar': CalendarRollFinder(self.trading_calendar,
self.asset_finder),
}
aligned_minute_readers = {}
aligned_session_readers = {}
if aligned_equity_minute_reader is not None:
aligned_minute_readers[Equity] = aligned_equity_minute_reader
if aligned_equity_session_reader is not None:
aligned_session_readers[Equity] = aligned_equity_session_reader
if aligned_future_minute_reader is not None:
aligned_minute_readers[Future] = aligned_future_minute_reader
aligned_minute_readers[ContinuousFuture] = \
ContinuousFutureMinuteBarReader(
aligned_future_minute_reader,
self._roll_finders,
)
if aligned_future_session_reader is not None:
aligned_session_readers[Future] = aligned_future_session_reader
self._roll_finders['volume'] = VolumeRollFinder(
self.trading_calendar,
self.asset_finder,
aligned_future_session_reader,
)
aligned_session_readers[ContinuousFuture] = \
ContinuousFutureSessionBarReader(
aligned_future_session_reader,
self._roll_finders,
)
_dispatch_minute_reader = AssetDispatchMinuteBarReader(
self.trading_calendar,
self.asset_finder,
aligned_minute_readers,
self._last_available_minute,
)
_dispatch_session_reader = AssetDispatchSessionBarReader(
self.trading_calendar,
self.asset_finder,
aligned_session_readers,
self._last_available_session,
)
self._pricing_readers = {
'minute': _dispatch_minute_reader,
'daily': _dispatch_session_reader,
}
self._daily_aggregator = DailyHistoryAggregator(
self.trading_calendar.schedule.market_open,
_dispatch_minute_reader,
self.trading_calendar
)
self._history_loader = DailyHistoryLoader(
self.trading_calendar,
_dispatch_session_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=daily_history_prefetch_length,
)
self._minute_history_loader = MinuteHistoryLoader(
self.trading_calendar,
_dispatch_minute_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=minute_history_prefetch_length,
)
self._first_trading_day = first_trading_day
# Get the first trading minute
self._first_trading_minute, _ = (
self.trading_calendar.open_and_close_for_session(
self._first_trading_day
)
if self._first_trading_day is not None else (None, None)
)
# Store the locs of the first day and first minute
self._first_trading_day_loc = (
self.trading_calendar.all_sessions.get_loc(self._first_trading_day)
if self._first_trading_day is not None else None
)
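    # Editorial usage sketch (not part of the original source): a minimal
    # construction, assuming ``bundle`` is the result of loading a data bundle
    # and that ``get_calendar`` is imported from zipline.utils.calendars:
    #
    #     portal = DataPortal(
    #         asset_finder=bundle.asset_finder,
    #         trading_calendar=get_calendar('NYSE'),
    #         first_trading_day=bundle.equity_daily_bar_reader.first_trading_day,
    #         equity_daily_reader=bundle.equity_daily_bar_reader,
    #         equity_minute_reader=bundle.equity_minute_bar_reader,
    #         adjustment_reader=bundle.adjustment_reader,
    #     )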
def _ensure_reader_aligned(self, reader):
if reader is None:
return
if reader.trading_calendar.name == self.trading_calendar.name:
return reader
elif reader.data_frequency == 'minute':
return ReindexMinuteBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
elif reader.data_frequency == 'session':
return ReindexSessionBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
# call
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Before reindexing, save the earliest and latest dates
earliest_date = df.index[0]
latest_date = df.index[-1]
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
if not isinstance(identifier, Asset):
# for fake assets we need to store a start/end date
self._asset_start_dates[identifier] = earliest_date
self._asset_end_dates[identifier] = latest_date
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _get_pricing_reader(self, data_frequency):
return self._pricing_readers[data_frequency]
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is dt provided.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and
(isinstance(asset, (Asset, ContinuousFuture))))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def get_spot_value(self, assets, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
assets : Asset, ContinuousFuture, or iterable of same.
The asset or assets whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
            ``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
assets_is_scalar = False
if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
assets_is_scalar = True
else:
# If 'assets' was not one of the expected types then it should be
# an iterable.
try:
iter(assets)
except TypeError:
raise TypeError(
"Unexpected 'assets' value of type {}."
.format(type(assets))
)
session_label = self.trading_calendar.minute_to_session_label(dt)
def get_single_asset_value(asset):
if self._is_extra_source(
asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if dt < asset.start_date or \
(data_frequency == "daily" and
session_label > asset.end_date) or \
(data_frequency == "minute" and
session_label > asset.end_date):
if field == "volume":
return 0
elif field == "contract":
return None
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
if field == "contract":
return self._get_current_contract(asset, session_label)
else:
return self._get_daily_spot_value(
asset, field, session_label,
)
else:
if field == "last_traded":
return self.get_last_traded_dt(asset, dt, 'minute')
elif field == "price":
return self._get_minute_spot_value(
asset, "close", dt, ffill=True,
)
elif field == "contract":
return self._get_current_contract(asset, dt)
else:
return self._get_minute_spot_value(asset, field, dt)
if assets_is_scalar:
return get_single_asset_value(assets)
else:
return list(map(get_single_asset_value, assets))
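    # Editorial usage sketch (not part of the original source): assuming
    # ``portal`` is a constructed DataPortal, ``aapl`` and ``msft`` are
    # Equities, and ``dt`` is a trading minute, spot lookups might look like:
    #
    #     last_price = portal.get_spot_value(aapl, 'price', dt, 'minute')
    #     closes = portal.get_spot_value([aapl, msft], 'close', dt, 'daily')
    #
    # A scalar asset returns a scalar; an iterable returns a list of values in
    # the same order as the assets passed in.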
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
        Returns
        -------
        adjustments : list[float]
            The cumulative adjustment ratio of ``field`` for each asset,
            composed from any applicable splits, mergers, and dividends
            between ``dt`` and ``perspective_dt``.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt <= adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
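    # Editorial usage sketch (not part of the original source): assuming a 2:1
    # split (stored ratio 0.5) occurs between ``dt`` and ``perspective_dt``,
    # the adjusted close is roughly half the raw close recorded at ``dt``:
    #
    #     raw = portal.get_spot_value(aapl, 'close', dt, 'daily')
    #     adj = portal.get_adjusted_value(aapl, 'close', dt, perspective_dt,
    #                                     'daily')
    #     # adj ~= raw * 0.5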
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
reader = self._get_pricing_reader('minute')
if ffill:
# If forward filling, we want the last minute with values (up to
# and including dt).
query_dt = reader.get_last_traded_dt(asset, dt)
if pd.isnull(query_dt):
# no last traded dt, bail
if column == 'volume':
return 0
else:
return np.nan
else:
# If not forward filling, we just want dt.
query_dt = dt
try:
result = reader.get_value(asset.sid, query_dt, column)
except NoDataOnDate:
if column == 'volume':
return 0
else:
return np.nan
if not ffill or (dt == query_dt) or (dt.date() == query_dt.date()):
return result
# the value we found came from a different day, so we have to adjust
# the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, query_dt,
dt, "minute", spot_value=result
)
def _get_daily_spot_value(self, asset, column, dt):
reader = self._get_pricing_reader('daily')
if column == "last_traded":
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
return reader.get_value(asset, dt, column)
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = reader.get_value(
asset, found_dt, "close"
)
if not isnull(value):
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= self.trading_calendar.day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.trading_calendar.all_sessions
end_loc = tds.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < self._first_trading_day_loc:
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[
self._first_trading_day_loc + bar_count
].date(),
)
return tds[start_loc:end_loc + 1]
def _get_history_daily_window(self,
assets,
end_dt,
bar_count,
field_to_use,
data_frequency):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_data(self,
assets,
days_for_window,
end_dt,
field_to_use,
data_frequency):
if data_frequency == 'daily':
# two cases where we use daily data for the whole range:
# 1) the history window ends at midnight utc.
# 2) the last desired day of the window is after the
# last trading day, use daily data for the whole range.
return self._get_daily_window_data(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_data(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._daily_aggregator.volumes(
assets, end_dt)
elif field_to_use == 'sid':
minute_value = [
int(self._get_current_contract(asset, end_dt))
for asset in assets]
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _handle_minute_history_out_of_bounds(self, bar_count):
cal = self.trading_calendar
first_trading_minute_loc = (
cal.all_minutes.get_loc(
self._first_trading_minute
)
if self._first_trading_minute is not None else None
)
suggested_start_day = cal.minute_to_session_label(
cal.all_minutes[
first_trading_minute_loc + bar_count
] + cal.day
)
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day.date(),
)
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_minute_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_minute_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_data(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS and field != 'sid':
raise ValueError("Invalid field: {0}".format(field))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close", data_frequency)
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field, data_frequency)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if ffill and field == "price":
if frequency == "1m":
data_frequency = 'minute'
elif frequency == "1d":
data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]
history_start, history_end = df.index[[0, -1]]
initial_values = []
for asset in df.columns[assets_with_leading_nan]:
last_traded = self.get_last_traded_dt(
asset,
history_start,
data_frequency,
)
if isnull(last_traded):
initial_values.append(nan)
else:
initial_values.append(
self.get_adjusted_value(
asset,
field,
dt=last_traded,
perspective_dt=history_end,
data_frequency=data_frequency,
)
)
# Set leading values for assets that were missing data, then ffill.
df.ix[0, assets_with_leading_nan] = np.array(
initial_values,
dtype=np.float64
)
df.fillna(method='ffill', inplace=True)
# forward-filling will incorrectly produce values after the end of
# an asset's lifetime, so write NaNs back over the asset's
# end_date.
normed_index = df.index.normalize()
for asset in df.columns:
if history_end >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
df.loc[normed_index > asset.end_date, asset] = nan
return df
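    # Editorial usage sketch (not part of the original source): assuming
    # ``portal``, an Equity ``aapl``, and a minute ``dt`` inside a minute-mode
    # simulation, a ten-day adjusted price window might be requested as:
    #
    #     prices = portal.get_history_window(
    #         [aapl], dt, bar_count=10, frequency='1d', field='price',
    #         data_frequency='minute', ffill=True,
    #     )
    #
    # The result is a DataFrame indexed by session with one column per asset;
    # the final row is the partial, minute-aggregated value for the current
    # session.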
def _get_minute_window_data(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close_price", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._minute_history_loader.history(assets,
minutes_for_window,
field,
False)
def _get_daily_window_data(self,
assets,
field,
days_in_window,
extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
        assets : iterable[Asset]
            The assets whose data is desired.
        field: string
            The specific field to return. "open", "high", "close", etc.
        days_in_window: pd.DatetimeIndex
            The sessions representing the desired window of data.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
dtype = float64 if field != 'sid' else int64
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
return_array = np.zeros((bar_count, len(assets)), dtype=dtype)
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(assets,
days_in_window,
field,
extra_slot)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
            A list of (pd.Timestamp, multiplier) pairs, earliest first.
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def _check_is_currently_alive(self, asset, dt):
sid = int(asset)
if sid not in self._asset_start_dates:
self._get_asset_start_date(asset)
start_date = self._asset_start_dates[sid]
if self._asset_start_dates[sid] > dt:
raise NoTradeDataAvailableTooEarly(
sid=sid,
dt=normalize_date(dt),
start_dt=start_date
)
end_date = self._asset_end_dates[sid]
if self._asset_end_dates[sid] < dt:
raise NoTradeDataAvailableTooLate(
sid=sid,
dt=normalize_date(dt),
end_dt=end_date
)
def _get_asset_start_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_start_dates[asset]
def _get_asset_end_date(self, asset):
self._ensure_asset_dates(asset)
return self._asset_end_dates[asset]
def _ensure_asset_dates(self, asset):
sid = int(asset)
if sid not in self._asset_start_dates:
if self._first_trading_day is not None:
self._asset_start_dates[sid] = \
max(asset.start_date, self._first_trading_day)
else:
self._asset_start_dates[sid] = asset.start_date
self._asset_end_dates[sid] = asset.end_date
def get_splits(self, assets, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
List of splits, where each split is a (asset, ratio) tuple.
"""
if self._adjustment_reader is None or not assets:
return []
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in assets]
splits = [(self.asset_finder.retrieve_asset(split[0]), split[1])
for split in splits]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
        list: A list of dicts with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# This function works in three steps.
# Step 1. Count the minutes from ``ending_minute`` to the start of its
# session.
# Step 2. Count the minutes from the prior ``days_count - 1`` sessions.
# Step 3. Return the sum of the results from steps (1) and (2).
# Example (NYSE Calendar)
        # ending_minute = 2015-12-28 9:40 AM US/Eastern
# days_count = 3
# Step 1. Calculate that there are 10 minutes in the ending session.
# Step 2. Calculate that there are 390 + 210 = 600 minutes in the prior
# two sessions. (Prior sessions are 2015-12-23 and 2015-12-24.)
# 2015-12-24 is a half day.
# Step 3. Return 600 + 10 = 610.
cal = self.trading_calendar
ending_session = cal.minute_to_session_label(
ending_minute,
direction="none", # It's an error to pass a non-trading minute.
)
# Assume that calendar days are always full of contiguous minutes,
# which means we can just take 1 + (number of minutes between the last
# minute and the start of the session). We add one so that we include
# the ending minute in the total.
ending_session_minute_count = timedelta_to_integral_minutes(
ending_minute - cal.open_and_close_for_session(ending_session)[0]
) + 1
if days_count == 1:
# We just need sessions for the active day.
return ending_session_minute_count
# XXX: We're subtracting 2 here to account for two offsets:
# 1. We only want ``days_count - 1`` sessions, since we've already
# accounted for the ending session above.
# 2. The API of ``sessions_window`` is to return one more session than
# the requested number. I don't think any consumers actually want
# that behavior, but it's the tested and documented behavior right
# now, so we have to request one less session than we actually want.
completed_sessions = cal.sessions_window(
cal.previous_session_label(ending_session),
2 - days_count,
)
completed_sessions_minute_count = (
self.trading_calendar.minutes_count_for_sessions_in_range(
completed_sessions[0],
completed_sessions[-1]
)
)
return ending_session_minute_count + completed_sessions_minute_count
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset],
dt,
2,
"1d",
"price",
data_frequency,
ffill=True,
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = int(self._get_minute_count_for_transform(
dt, bars
))
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset],
dt,
calculated_bar_count,
freq_str,
"price",
data_frequency,
ffill=True,
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset],
dt,
calculated_bar_count,
freq_str,
"volume",
data_frequency,
ffill=True,
)[asset]
vol_sum = nansum(volume_arr)
try:
ret = nansum(price_arr * volume_arr) / vol_sum
except ZeroDivisionError:
ret = np.nan
return ret
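    # Editorial usage sketch (not part of the original source): assuming
    # ``portal``, an Equity ``aapl``, and a minute ``dt``, the deprecated
    # transform helpers might be invoked as:
    #
    #     daily_return = portal.get_simple_transform(aapl, 'returns', dt,
    #                                                'minute')
    #     ten_day_vwap = portal.get_simple_transform(aapl, 'vwap', dt,
    #                                                'minute', bars=10)
    #
    # 'returns' always spans the last two sessions; the other transforms
    # ('mavg', 'stddev', 'vwap') require ``bars`` and operate on minutes when
    # data_frequency is 'minute'.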
def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns:
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain)
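    # Editorial usage sketch (not part of the original source): assuming ``cl``
    # is a ContinuousFuture for a root symbol such as 'CL' and ``dt`` is a
    # trading minute, the active chain might be inspected as:
    #
    #     chain = portal.get_current_future_chain(cl, dt)
    #     front_contract, next_contract = chain[0], chain[1]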
def _get_current_contract(self, continuous_future, dt):
rf = self._roll_finders[continuous_future.roll_style]
contract_sid = rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset)
if contract_sid is None:
return None
        return self.asset_finder.retrieve_asset(contract_sid)
# end of zipline/data/data_portal.py (package: zipline-live)
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : zipline.utils.calendars.trading_calendar.TradingCalendar
A TradingCalendar on which session labels to resample from minute
to session.
Return
------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
return minute_frame.groupby(calendar.minute_to_session_label).agg(how)
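# Editorial sketch (not part of the original source): a hedged illustration of
# the OHLCV aggregation rules above applied to two consecutive minutes of one
# session. The prices, the session date, and the ``calendar`` argument (a
# TradingCalendar such as the NYSE calendar) are assumptions for the example.
def _example_two_minute_rollup(calendar):
    minutes = calendar.minutes_for_session(
        pd.Timestamp('2016-01-05', tz='UTC'))[:2]
    minute_frame = pd.DataFrame(
        {'open': [10.0, 10.1], 'high': [10.2, 10.3], 'low': [9.9, 10.0],
         'close': [10.1, 10.2], 'volume': [100, 200]},
        index=minutes,
    )
    # Expected single-session row: open=10.0, high=10.3, low=9.9, close=10.2,
    # volume=300.
    return minute_frame_to_session_frame(minute_frame, calendar)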
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
        The first value should align with the market open of the first session
        and the last value with the market close of the last session,
        containing values for all minutes of all sessions in between.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during the course of the simulation day.
Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their
    respective methods.
"""
def __init__(self, market_opens, minute_reader, trading_calendar):
self._market_opens = market_opens
self._minute_reader = minute_reader
self._trading_calendar = trading_calendar
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
        # When the requested dt's date is different from the cached date, the
        # cache is flushed, so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
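    # Editorial usage sketch (not part of the original source): assuming
    # ``agg`` is a DailyHistoryAggregator built from the NYSE market opens and
    # a minute bar reader, the partial-day values as of 10:00 AM might be:
    #
    #     partial_highs = agg.highs([aapl, msft], ten_am_dt)
    #     partial_volumes = agg.volumes([aapl, msft], ten_am_dt)
    #
    # Each call returns a numpy array aligned with the assets passed in,
    # aggregated from the session open up to and including ``dt``.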
def _prelude(self, dt, field):
session = self._trading_calendar.minute_to_session_label(dt)
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != session:
market_open = self._market_opens.loc[session]
cache = self._caches[field] = (session, market_open, {})
_, market_open, entries = cache
market_open = market_open.tz_localize('UTC')
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
        for the day. If there has been no data on or before the `dt`, the open
        is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
Returns the most recent non-nan close for the asset in this
session. If there has been no data in this session on or before the
`dt`, returns `nan`
"""
window = self._minute_reader.load_raw_arrays(
['close'],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
        between the market open and the `dt`.
        If there has been no data on or before the `dt`, the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
def __init__(self, calendar, minute_bar_reader):
self._calendar = calendar
self._minute_bar_reader = minute_bar_reader
def _get_resampled(self, columns, start_session, end_session, assets):
range_open = self._calendar.session_open(start_session)
range_close = self._calendar.session_close(end_session)
minute_data = self._minute_bar_reader.load_raw_arrays(
columns,
range_open,
range_close,
assets,
)
# Get the index of the close minute for each session in the range.
# If the range contains only one session, the only close in the range
# is the last minute in the data. Otherwise, we need to get all the
# session closes and find their indices in the range of minutes.
if start_session == end_session:
close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
else:
minutes = self._calendar.minutes_in_range(
range_open,
range_close,
)
session_closes = self._calendar.session_closes_in_range(
start_session,
end_session,
)
close_ilocs = minutes.searchsorted(session_closes.values)
results = []
shape = (len(close_ilocs), len(assets))
for col in columns:
if col != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
results.append(out)
for i in range(len(assets)):
for j, column in enumerate(columns):
data = minute_data[j][:, i]
minute_to_session(column, close_ilocs, data, results[j][:, i])
return results
@property
def trading_calendar(self):
return self._calendar
def load_raw_arrays(self, columns, start_dt, end_dt, sids):
return self._get_resampled(columns, start_dt, end_dt, sids)
def get_value(self, sid, session, colname):
# WARNING: This will need caching or other optimization if used in a
# tight loop.
        # This was developed to complete the interface, but has not been tuned
# for real world use.
return self._get_resampled([colname], session, session, [sid])[0][0][0]
@lazyval
def sessions(self):
cal = self._calendar
first = self._minute_bar_reader.first_trading_day
last = cal.minute_to_session_label(
self._minute_bar_reader.last_available_dt)
return cal.sessions_in_range(first, last)
@lazyval
def last_available_dt(self):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.last_available_dt
)
@property
def first_trading_day(self):
return self._minute_bar_reader.first_trading_day
def get_last_traded_dt(self, asset, dt):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.get_last_traded_dt(asset, dt))
class ReindexBarReader(with_metaclass(ABCMeta)):
"""
A base class for readers which reindexes results, filling in the additional
indices with empty data.
    Used to align readers for assets which trade on different calendars.
Currently only supports a ``trading_calendar`` which is a superset of the
``reader``'s calendar.
Parameters
----------
- trading_calendar : zipline.utils.trading_calendar.TradingCalendar
The calendar to use when indexing results from the reader.
- reader : MinuteBarReader|SessionBarReader
The reader which has a calendar that is a subset of the desired
``trading_calendar``.
- first_trading_session : pd.Timestamp
The first trading session the reader should provide. Must be specified,
since the ``reader``'s first session may not exactly align with the
desired calendar. Specifically, in the case where the first session
on the target calendar is a holiday on the ``reader``'s calendar.
- last_trading_session : pd.Timestamp
The last trading session the reader should provide. Must be specified,
since the ``reader``'s last session may not exactly align with the
desired calendar. Specifically, in the case where the last session
on the target calendar is a holiday on the ``reader``'s calendar.
"""
def __init__(self,
trading_calendar,
reader,
first_trading_session,
last_trading_session):
self._trading_calendar = trading_calendar
self._reader = reader
self._first_trading_session = first_trading_session
self._last_trading_session = last_trading_session
@property
def last_available_dt(self):
return self._reader.last_available_dt
def get_last_traded_dt(self, sid, dt):
return self._reader.get_last_traded_dt(sid, dt)
@property
def first_trading_day(self):
return self._reader.first_trading_day
def get_value(self, sid, dt, field):
# Give an empty result if no data is present.
try:
return self._reader.get_value(sid, dt, field)
except NoDataOnDate:
if field == 'volume':
return 0
else:
return np.nan
@abstractmethod
def _outer_dts(self, start_dt, end_dt):
raise NotImplementedError
@abstractmethod
def _inner_dts(self, start_dt, end_dt):
raise NotImplementedError
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self._first_trading_session,
self._last_trading_session
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
outer_dts = self._outer_dts(start_dt, end_dt)
inner_dts = self._inner_dts(start_dt, end_dt)
indices = outer_dts.searchsorted(inner_dts)
shape = len(outer_dts), len(sids)
outer_results = []
if len(inner_dts) > 0:
inner_results = self._reader.load_raw_arrays(
fields, inner_dts[0], inner_dts[-1], sids)
else:
inner_results = None
for i, field in enumerate(fields):
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
if inner_results is not None:
out[indices] = inner_results[i]
outer_results.append(out)
return outer_results
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self._trading_calendar.minutes_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.calendar.minutes_in_range(start_dt, end_dt)
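# Editorial usage sketch (not part of the original source): assuming
# ``inner_reader`` is a minute reader whose own calendar is a subset of the
# target calendar, a reindexed view might be built as:
#
#     reindexed = ReindexMinuteBarReader(
#         target_calendar,        # e.g. the NYSE TradingCalendar
#         inner_reader,
#         first_trading_session,
#         last_trading_session,
#     )
#
# Minutes absent from the inner reader's calendar come back as NaN (or 0 for
# volume) from ``load_raw_arrays``.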
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self.trading_calendar.sessions_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.trading_calendar.sessions_in_range(
            start_dt, end_dt)
# end of zipline/data/resample.py (package: zipline-live)
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas_datareader.data import DataReader
import pytz
from six import iteritems
from six.moves.urllib_error import HTTPError
from .benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from ..utils.paths import (
cache_root,
data_root,
)
from zipline.utils.calendars import get_calendar
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'SPY':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name, environ=None):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root(environ)
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
def load_market_data(trading_day=None, trading_days=None, bm_symbol='SPY',
environ=None):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from IEX Trading. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to 'SPY', the ticker
for the S&P 500, provided by IEX Trading.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
if trading_day is None:
trading_day = get_calendar('NYSE').trading_day
if trading_days is None:
trading_days = get_calendar('NYSE').all_sessions
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
environ,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
environ,
)
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
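# Example usage (editor's sketch, not part of zipline). Requires network
# access or a previously warmed ~/.zipline/data cache; shown with the default
# NYSE calendar and 'SPY' benchmark.
def _load_market_data_example():
    benchmark_returns, treasury_curves = load_market_data(bm_symbol='SPY')
    # benchmark_returns: pd.Series of daily returns indexed at UTC midnight.
    # treasury_curves: pd.DataFrame with one column per tenor
    # ('1month', ..., '30year').
    return benchmark_returns.tail(), treasury_curves.tail()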
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day,
environ=None):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
"""
filename = get_benchmark_filename(symbol)
data = _load_cached_data(filename, first_date, last_date, now, 'benchmark',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info('Downloading benchmark data for {symbol!r}.', symbol=symbol)
try:
data = get_benchmark_returns(symbol)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('failed to cache the new benchmark returns')
raise
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(symbol, first_date, last_date, now, environ=None):
"""
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
symbol, INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())
data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info('Downloading treasury data for {symbol!r}.', symbol=symbol)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_cached_data(filename, first_date, last_date, now, resource_name,
environ=None):
if resource_name == 'benchmark':
from_csv = pd.Series.from_csv
else:
from_csv = pd.DataFrame.from_csv
# Path for the cache.
path = get_data_filepath(filename, environ)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new {resource} data because a "
"download succeeded at {time}.",
resource=resource_name,
time=last_download_time,
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].",
path=path,
error=e,
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n",
start=first_date,
end=last_date,
path=path,
)
return None
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': 'SPY'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
        start : datetime (Default: datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc))
            Retrieve prices from start date on.
        end : datetime (Default: None)
            Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
logger.info('Loading stock: {}'.format(stock))
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
logger.info('Loading index: {} ({})'.format(name, ticker))
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
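# Example usage (editor's sketch, not part of zipline). The Yahoo endpoint
# used by pandas-datareader has been unreliable or unavailable at times, so
# this call may raise; it is shown only to illustrate the expected output
# shape.
def _load_from_yahoo_example():
    import datetime
    start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
    end = datetime.datetime(2014, 12, 31, tzinfo=pytz.utc)
    prices = load_from_yahoo(stocks=['AAPL', 'MSFT'], start=start, end=end)
    # prices: DataFrame with one column per ticker and a UTC-localized
    # DatetimeIndex of trading days.
    return prices.head()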
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/loader.py | loader.py |
import numpy as np
import pandas as pd
from zipline.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        fields : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
            (sessions in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_date, end_date, asset.offset)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date))
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != 'volume' and column != 'sid':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
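    # Worked example of the partitioning above (editor's note): suppose the
    # requested window spans sessions [Jan 19, Jan 20, Jan 21] and
    # rolls = [(sid=1, roll_date=Jan 20), (sid=2, roll_date=None)]. The first
    # partition covers sid 1 from Jan 19 through Jan 19 (the session before
    # the roll date), i.e. locations 0..0; the second covers sid 2 from
    # Jan 20 through the end of the window, locations 1..2. Each partition is
    # then filled from the underlying bar reader for its own sid, so a single
    # output column stitches the two contracts together at the roll.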
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV field name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
            All session labels (unioning the ranges of all assets) which the
reader can provide.
"""
return self._bar_reader.sessions
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV field name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/continuous_future_reader.py | continuous_future_reader.py |
from errno import ENOENT
from functools import partial
from os import remove
import sqlite3
import warnings
from bcolz import (
carray,
ctable,
)
from collections import namedtuple
import logbook
import numpy as np
from numpy import (
array,
int64,
float64,
full,
iinfo,
integer,
issubdtype,
nan,
uint32,
)
from pandas import (
DataFrame,
DatetimeIndex,
isnull,
NaT,
read_csv,
read_sql,
to_datetime,
Timestamp,
)
from pandas.tslib import iNaT
from six import (
iteritems,
string_types,
viewkeys,
)
from toolz import compose
from zipline.data.session_bars import SessionBarReader
from zipline.data.bar_reader import (
NoDataAfterDate,
NoDataBeforeDate,
NoDataOnDate,
)
from zipline.utils.calendars import get_calendar
from zipline.utils.functional import apply
from zipline.utils.preprocess import call
from zipline.utils.input_validation import (
expect_element,
preprocess,
verify_indices_all_unique,
)
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_conn
from zipline.utils.memoize import lazyval
from zipline.utils.cli import maybe_show_progress
from ._equities import _compute_row_slices, _read_bcolz_data
from ._adjustments import load_adjustments_from_sqlite
logger = logbook.Logger('UsEquityPricing')
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = (
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
)
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': float,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'amount': float,
}
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'payment_sid': integer,
'ratio': float,
}
UINT32_MAX = iinfo(uint32).max
def check_uint32_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
*columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df
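# Quick illustration (editor's sketch, not part of zipline) of the behavior
# documented above: values that cannot fit in a uint32 are zeroed out, and
# 'ignore' suppresses the warning/exception.
def _winsorise_uint32_example():
    df = DataFrame({
        'open': [10.0, 11.0],
        'high': [10.5, 11.5],
        'low': [9.5, 10.5],
        'close': [10.2, 11.2],
        'volume': [100, 2 ** 33],  # the second value does not fit in uint32
    })
    out = winsorise_uint32(df, 'ignore', 'volume', *OHLC)
    # The oversized volume is zeroed out; the in-range price columns are
    # unchanged.
    return out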
class BcolzDailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that can
be read efficiently by BcolzDailyOHLCVReader.
Parameters
----------
filename : str
The location at which we should write our output.
calendar : zipline.utils.calendar.trading_calendar
Calendar to use to compute asset calendar offsets.
start_session: pd.Timestamp
Midnight UTC session label.
end_session: pd.Timestamp
Midnight UTC session label.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarReader
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, filename, calendar, start_session, end_session):
self._filename = filename
if start_session != end_session:
if not calendar.is_session(start_session):
raise ValueError(
"Start session %s is invalid!" % start_session
)
if not calendar.is_session(end_session):
raise ValueError(
"End session %s is invalid!" % end_session
)
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
@property
def progress_bar_message(self):
return "Merging daily equity files:"
def progress_bar_item_show_func(self, value):
return value if value is None else str(value[0])
def write(self,
data,
assets=None,
show_progress=False,
invalid_data_behavior='warn'):
"""
Parameters
----------
data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]
The data chunks to write. Each chunk should be a tuple of sid
and the data for that asset.
assets : set[int], optional
The assets that should be in ``data``. If this is provided
we will check ``data`` against the assets and provide better
progress information.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional
What to do when data is encountered that is outside the range of
a uint32.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
ctx = maybe_show_progress(
(
(sid, self.to_ctable(df, invalid_data_behavior))
for sid, df in data
),
show_progress=show_progress,
item_show_func=self.progress_bar_item_show_func,
label=self.progress_bar_message,
length=len(assets) if assets is not None else None,
)
with ctx as it:
return self._write_internal(it, assets)
def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
)
def _write_internal(self, iterator, assets):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
earliest_date = None
sessions = self._calendar.sessions_in_range(
self._start_session, self._end_session
)
if assets is not None:
@apply
def iterator(iterator=iterator, assets=set(assets)):
for asset_id, table in iterator:
if asset_id not in assets:
raise ValueError('unknown asset id %r' % asset_id)
yield asset_id, table
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(
full((nrows,), asset_id, dtype='uint32'),
)
continue
columns[column_name].append(table[column_name])
if earliest_date is None:
earliest_date = table["day"][0]
else:
earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
table_day_to_session = compose(
self._calendar.minute_to_session_label,
partial(Timestamp, unit='s', tz='UTC'),
)
asset_first_day = table_day_to_session(table['day'][0])
asset_last_day = table_day_to_session(table['day'][-1])
asset_sessions = sessions[
sessions.slice_indexer(asset_first_day, asset_last_day)
]
assert len(table) == len(asset_sessions), (
'Got {} rows for daily bars table with first day={}, last '
'day={}, expected {} rows.\n'
'Missing sessions: {}\n'
'Extra sessions: {}'.format(
len(table),
asset_first_day.date(),
asset_last_day.date(),
len(asset_sessions),
asset_sessions.difference(
to_datetime(
np.array(table['day']),
unit='s',
utc=True,
)
).tolist(),
to_datetime(
np.array(table['day']),
unit='s',
utc=True,
).difference(asset_sessions).tolist(),
)
)
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
            # offset is used for output alignment by the reader.
calendar_offset[asset_key] = sessions.get_loc(asset_first_day)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=self._filename,
mode='w',
)
full_table.attrs['first_trading_day'] = (
earliest_date if earliest_date is not None else iNaT
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar_name'] = self._calendar.name
full_table.attrs['start_session_ns'] = self._start_session.value
full_table.attrs['end_session_ns'] = self._end_session.value
full_table.flush()
return full_table
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def to_ctable(self, raw_data, invalid_data_behavior):
if isinstance(raw_data, ctable):
# we already have a ctable so do nothing
return raw_data
winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)
processed = (raw_data[list(OHLC)] * 1000).astype('uint32')
dates = raw_data.index.values.astype('datetime64[s]')
check_uint32_safe(dates.max().view(np.int64), 'day')
processed['day'] = dates.astype('uint32')
processed['volume'] = raw_data.volume.astype('uint32')
return ctable.fromdataframe(processed)
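# Example usage (editor's sketch, not part of zipline): writing daily bars to
# a new bcolz directory. Assumes ``sessions`` is a contiguous slice of NYSE
# sessions and each DataFrame in ``bars_by_sid`` is indexed by exactly the
# sessions between its own first and last row, with OHLCV columns.
def _daily_bar_writer_example(output_dir, sessions, bars_by_sid):
    calendar = get_calendar('NYSE')
    writer = BcolzDailyBarWriter(
        output_dir, calendar, sessions[0], sessions[-1])
    return writer.write(
        iteritems(bars_by_sid),
        assets=set(bars_by_sid),
        show_progress=False,
    )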
class BcolzDailyBarReader(SessionBarReader):
"""
Reader for raw pricing data written by BcolzDailyOHLCVWriter.
Parameters
----------
table : bcolz.ctable
The ctable contaning the pricing data, with attrs corresponding to the
Attributes list below.
read_all_threshold : int
        The number of equities at which the read strategy switches: at or
        below this threshold, the data is read as a slice from the carray per
        asset; above it, the data is read by pulling all of the data for all
        assets into memory and then indexing into that array for each day and
        asset pair. Used to tune performance of reads when using a small or
        large number of equities.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
start_session_ns: int
Epoch ns of the first session used in this dataset.
end_session_ns: int
Epoch ns of the last session used in this dataset.
calendar_name: str
String identifier of trading calendar used (ie, "NYSE").
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
Notes
------
A Bcolz CTable is comprised of Columns and Attributes.
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
    The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the asset blocks generally have unequal lengths. The
    blocks are clipped to the known start and end date of each asset to cut
    down on the number of empty values that would need to be included to
    make a regular/cubic dataset.
    When reading across columns, the open, high, low, close, and volume
    entries at the same index represent the same asset and day.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarWriter
"""
def __init__(self, table, read_all_threshold=3000):
self._maybe_table_rootdir = table
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
self.PRICE_ADJUSTMENT_FACTOR = 0.001
self._read_all_threshold = read_all_threshold
@lazyval
def _table(self):
maybe_table_rootdir = self._maybe_table_rootdir
if isinstance(maybe_table_rootdir, ctable):
return maybe_table_rootdir
return ctable(rootdir=maybe_table_rootdir, mode='r')
@lazyval
def sessions(self):
if 'calendar' in self._table.attrs.attrs:
# backwards compatibility with old formats, will remove
return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')
else:
cal = get_calendar(self._table.attrs['calendar_name'])
start_session_ns = self._table.attrs['start_session_ns']
start_session = Timestamp(start_session_ns, tz='UTC')
end_session_ns = self._table.attrs['end_session_ns']
end_session = Timestamp(end_session_ns, tz='UTC')
sessions = cal.sessions_in_range(start_session, end_session)
return sessions
@lazyval
def _first_rows(self):
return {
int(asset_id): start_index
for asset_id, start_index in iteritems(
self._table.attrs['first_row'],
)
}
@lazyval
def _last_rows(self):
return {
int(asset_id): end_index
for asset_id, end_index in iteritems(
self._table.attrs['last_row'],
)
}
@lazyval
def _calendar_offsets(self):
return {
int(id_): offset
for id_, offset in iteritems(
self._table.attrs['calendar_offset'],
)
}
@lazyval
def first_trading_day(self):
try:
return Timestamp(
self._table.attrs['first_trading_day'],
unit='s',
tz='UTC'
)
except KeyError:
return None
@lazyval
def trading_calendar(self):
if 'calendar_name' in self._table.attrs.attrs:
return get_calendar(self._table.attrs['calendar_name'])
else:
return None
@property
def last_available_dt(self):
return self.sessions[-1]
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
# Assumes that the given dates are actually in calendar.
start_idx = self.sessions.get_loc(start_date)
end_idx = self.sessions.get_loc(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
read_all = len(assets) > self._read_all_threshold
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
list(columns),
first_rows,
last_rows,
offsets,
read_all,
)
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
            The name of an OHLCV carray in the daily_bar_table.
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col
def get_last_traded_dt(self, asset, day):
volumes = self._spot_col('volume')
search_day = day
while True:
try:
ix = self.sid_day_index(asset, search_day)
except NoDataBeforeDate:
return NaT
except NoDataAfterDate:
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
continue
except NoDataOnDate:
return NaT
if volumes[ix] != 0:
return search_day
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
else:
return NaT
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
"""
try:
day_loc = self.sessions.get_loc(day)
        except KeyError:
raise NoDataOnDate("day={0} is outside of calendar={1}".format(
day, self.sessions))
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataBeforeDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataAfterDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
colname : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
            Returns NaN if the day is within the date range, but the price
            is 0 (volume is returned as stored).
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != 'volume':
if price == 0:
return nan
else:
return price * 0.001
else:
return price
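# Example usage (editor's sketch, not part of zipline): reading values back
# from a table written by BcolzDailyBarWriter. ``rootdir`` is the directory
# given to the writer, and ``sid``/``session`` must be covered by the data.
def _daily_bar_reader_example(rootdir, sid, session):
    reader = BcolzDailyBarReader(rootdir)
    spot_close = reader.get_value(sid, session, 'close')
    closes, volumes = reader.load_raw_arrays(
        ['close', 'volume'], session, session,
        np.array([sid], dtype='int64'),
    )
    return spot_close, closes, volumes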
class PanelBarReader(SessionBarReader):
"""
Reader for data passed as Panel.
    DataPanel Structure
    -------------------
    items : Int64Index
        Asset identifiers. Must be unique.
    major_axis : DatetimeIndex
        Dates for data provided by the Panel. Must be unique.
minor_axis : ['open', 'high', 'low', 'close', 'volume']
Price attributes. Must be unique.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
panel : pd.Panel
The panel from which to read OHLCV data.
first_trading_day : pd.Timestamp
The first trading day in the dataset.
"""
@preprocess(panel=call(verify_indices_all_unique))
@expect_element(data_frequency={'daily', 'minute'})
def __init__(self, trading_calendar, panel, data_frequency):
panel = panel.copy()
if 'volume' not in panel.minor_axis:
# Fake volume if it does not exist.
panel.loc[:, :, 'volume'] = int(1e9)
self.trading_calendar = trading_calendar
self._first_trading_day = trading_calendar.minute_to_session_label(
panel.major_axis[0]
)
last_trading_day = trading_calendar.minute_to_session_label(
panel.major_axis[-1]
)
self.sessions = trading_calendar.sessions_in_range(
self.first_trading_day,
last_trading_day
)
if data_frequency == 'daily':
self._calendar = self.sessions
elif data_frequency == 'minute':
self._calendar = trading_calendar.minutes_for_sessions_in_range(
self.first_trading_day,
last_trading_day
)
self.panel = panel
sessions = None
@property
def last_available_dt(self):
return self._calendar[-1]
trading_calendar = None
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
cal = self._calendar
return self.panel.loc[
list(assets),
start_dt:end_dt,
list(columns)
].reindex(major_axis=cal[cal.slice_indexer(start_dt, end_dt)]).values.T
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
            The value stored in the panel for ``field`` of the given sid on
            the given day.
"""
return self.panel.loc[sid, dt, field]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset at or before ``dt``;
NaT if no trade is found before the given dt.
"""
try:
return self.panel.loc[int(asset), :dt, 'close'].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._first_trading_day
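# Example usage (editor's sketch, not part of zipline): wrapping a pandas
# Panel of daily OHLCV data, mirroring the structure documented above
# (items=sids, major_axis=sessions, minor_axis=fields). Note that pd.Panel is
# deprecated in newer pandas versions.
def _panel_bar_reader_example(trading_calendar, sessions):
    import pandas as pd
    panel = pd.Panel(
        np.random.rand(1, len(sessions), 5),
        items=[1],  # a single sid
        major_axis=sessions,
        minor_axis=['open', 'high', 'low', 'close', 'volume'],
    )
    reader = PanelBarReader(trading_calendar, panel, 'daily')
    return reader.get_value(1, sessions[0], 'close')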
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
equity_daily_bar_reader : BcolzDailyBarReader
Daily bar reader to use for dividend writes.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
def __init__(self,
conn_or_path,
equity_daily_bar_reader,
calendar,
overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, string_types):
if overwrite:
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
self.uri = conn_or_path
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
self._equity_daily_bar_reader = equity_daily_bar_reader
self._calendar = calendar
def _write(self, tablename, expected_dtypes, frame):
if frame is None or frame.empty:
# keeping the dtypes correct for empty frames is not easy
frame = DataFrame(
np.array([], dtype=list(expected_dtypes.items())),
)
else:
if frozenset(frame.columns) != frozenset(expected_dtypes):
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
set(expected_dtypes),
frame.columns.tolist(),
)
)
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column"
" '{colname}', but got '{actual}'.".format(
expected=expected,
colname=colname,
actual=actual,
),
)
frame.to_sql(
tablename,
self.conn,
if_exists='append',
chunksize=50000,
)
def write_frame(self, tablename, frame):
if tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename,
SQLITE_ADJUSTMENT_TABLENAMES,
)
)
if not (frame is None or frame.empty):
frame = frame.copy()
frame['effective_date'] = frame['effective_date'].values.astype(
'datetime64[s]',
).astype('int64')
return self._write(
tablename,
SQLITE_ADJUSTMENT_COLUMN_DTYPES,
frame,
)
def write_dividend_payouts(self, frame):
"""
Write dividend payout data to SQLite table `dividend_payouts`.
"""
return self._write(
'dividend_payouts',
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def write_stock_dividend_payouts(self, frame):
return self._write(
'stock_dividend_payouts',
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None or dividends.empty:
return DataFrame(np.array(
[],
dtype=[
('sid', uint32),
('effective_date', uint32),
('ratio', float64),
],
))
ex_dates = dividends.ex_date.values
sids = dividends.sid.values
amounts = dividends.amount.values
ratios = full(len(amounts), nan)
equity_daily_bar_reader = self._equity_daily_bar_reader
effective_dates = full(len(amounts), -1, dtype=int64)
calendar = self._calendar
# Calculate locs against a tz-naive cal, as the ex_dates are tz-
# naive.
#
# TODO: A better approach here would be to localize ex_date to
# the tz of the calendar, but currently get_indexer does not
# preserve tz of the target when method='bfill', which throws
# off the comparison.
tz_naive_calendar = calendar.tz_localize(None)
day_locs = tz_naive_calendar.get_indexer(ex_dates, method='bfill')
for i, amount in enumerate(amounts):
sid = sids[i]
ex_date = ex_dates[i]
day_loc = day_locs[i]
prev_close_date = calendar[day_loc - 1]
try:
prev_close = equity_daily_bar_reader.get_value(
sid, prev_close_date, 'close')
if not isnull(prev_close):
ratio = 1.0 - amount / prev_close
ratios[i] = ratio
# only assign effective_date when data is found
effective_dates[i] = ex_date
except NoDataOnDate:
logger.warn("Couldn't compute ratio for dividend %s" % {
'sid': sid,
'ex_date': ex_date,
'amount': amount,
})
continue
# Create a mask to filter out indices in the effective_date, sid, and
# ratio vectors for which a ratio was not calculable.
effective_mask = effective_dates != -1
effective_dates = effective_dates[effective_mask]
effective_dates = effective_dates.astype('datetime64[ns]').\
astype('datetime64[s]').astype(uint32)
sids = sids[effective_mask]
ratios = ratios[effective_mask]
return DataFrame({
'sid': sids,
'effective_date': effective_dates,
'ratio': ratios,
})
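    # Worked example of the ratio above (editor's note): a $1.00 cash dividend
    # on an equity whose close on the session before the ex_date was $50.00
    # yields ratio = 1.0 - 1.00 / 50.00 = 0.98, i.e. prices before the
    # ex_date are scaled down by 2% when the adjustment is applied.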
def _write_dividends(self, dividends):
if dividends is None:
dividend_payouts = None
else:
dividend_payouts = dividends.copy()
dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['record_date'] = \
dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['declared_date'] = \
dividend_payouts['declared_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['pay_date'] = \
dividend_payouts['pay_date'].values.astype('datetime64[s]').\
astype(integer)
self.write_dividend_payouts(dividend_payouts)
def _write_stock_dividends(self, stock_dividends):
if stock_dividends is None:
stock_dividend_payouts = None
else:
stock_dividend_payouts = stock_dividends.copy()
stock_dividend_payouts['ex_date'] = \
stock_dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['record_date'] = \
stock_dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['declared_date'] = \
stock_dividend_payouts['declared_date'].\
values.astype('datetime64[s]').astype(integer)
stock_dividend_payouts['pay_date'] = \
stock_dividend_payouts['pay_date'].\
values.astype('datetime64[s]').astype(integer)
self.write_stock_dividend_payouts(stock_dividend_payouts)
def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def write(self,
splits=None,
mergers=None,
dividends=None,
stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
)
def close(self):
self.conn.close()
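# Example usage (editor's sketch, not part of zipline): writing a single
# 2-for-1 split to a fresh adjustments database. The daily bar reader and
# session calendar are only consulted when dividends are written.
def _adjustment_writer_example(db_path, daily_bar_reader, calendar_sessions):
    splits = DataFrame({
        'sid': [1],
        'ratio': [0.5],  # prices before the effective date are halved
        'effective_date': [
            Timestamp('2016-01-06', tz='UTC').value // 10 ** 9,
        ],
    })
    with SQLiteAdjustmentWriter(db_path, daily_bar_reader, calendar_sessions,
                                overwrite=True) as writer:
        writer.write(splits=splits)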
UNPAID_QUERY_TEMPLATE = """
SELECT sid, amount, pay_date from dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
Dividend = namedtuple('Dividend', ['asset', 'amount', 'pay_date'])
UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """
SELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
StockDividend = namedtuple(
'StockDividend',
['asset', 'payment_asset', 'ratio', 'pay_date'])
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
See Also
--------
:class:`zipline.data.us_equity_pricing.SQLiteAdjustmentWriter`
"""
@preprocess(conn=coerce_string_to_conn)
def __init__(self, conn):
self.conn = conn
        # Maps each table in the adjustments db to the column names that
        # contain dates which have been coerced into ints (seconds since
        # epoch).
self._datetime_int_cols = {
'dividend_payouts': ('declared_date', 'ex_date', 'pay_date',
'record_date'),
'dividends': ('effective_date',),
'mergers': ('effective_date',),
'splits': ('effective_date',),
'stock_dividend_payouts': ('declared_date', 'ex_date', 'pay_date',
'record_date')
}
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
list(columns),
dates,
assets,
)
def get_adjustments_for_sid(self, table_name, sid):
t = (sid,)
c = self.conn.cursor()
adjustments_for_sid = c.execute(
"SELECT effective_date, ratio FROM %s WHERE sid = ?" %
table_name, t).fetchall()
c.close()
return [[Timestamp(adjustment[0], unit='s', tz='UTC'), adjustment[1]]
for adjustment in
adjustments_for_sid]
def get_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
div = Dividend(
asset_finder.retrieve_asset(row[0]),
row[1], Timestamp(row[2], unit='s', tz='UTC'))
divs.append(div)
c.close()
return divs
def get_stock_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
stock_divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
stock_div = StockDividend(
asset_finder.retrieve_asset(row[0]), # asset
asset_finder.retrieve_asset(row[1]), # payment_asset
row[2],
Timestamp(row[3], unit='s', tz='UTC'))
stock_divs.append(stock_div)
c.close()
return stock_divs
def unpack_db_to_component_dfs(self, convert_dates=False):
"""Returns the set of known tables in the adjustments file in DataFrame
form.
Parameters
----------
convert_dates : bool, optional
By default, dates are returned in seconds since EPOCH. If
convert_dates is True, all ints in date columns will be converted
to datetimes.
Returns
-------
dfs : dict{str->DataFrame}
Dictionary which maps table name to the corresponding DataFrame
version of the table, where all date columns have been coerced back
from int to datetime.
"""
def _get_df_from_table(table_name, date_cols):
# Dates are stored in second resolution as ints in adj.db tables.
# Need to specifically convert them as UTC, not local time.
kwargs = (
{'parse_dates': {col: {'unit': 's', 'utc': True}
for col in date_cols}
}
if convert_dates
else {}
)
return read_sql(
'select * from "{}"'.format(table_name),
self.conn,
index_col='index',
**kwargs
).rename_axis(None)
return {
t_name: _get_df_from_table(
t_name,
date_cols
)
for t_name, date_cols in self._datetime_int_cols.items()
} | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/us_equity_pricing.py | us_equity_pricing.py |
from zipline.data.data_portal import DataPortal
from logbook import Logger
log = Logger('DataPortalLive')
class DataPortalLive(DataPortal):
def __init__(self, broker, *args, **kwargs):
self.broker = broker
super(DataPortalLive, self).__init__(*args, **kwargs)
def get_last_traded_dt(self, asset, dt, data_frequency):
return self.broker.get_last_traded_dt(asset)
def get_spot_value(self, assets, field, dt, data_frequency):
return self.broker.get_spot_value(assets, field, dt, data_frequency)
def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
# This method is responsible for merging the ingested historical data
# with the real-time collected data through the Broker.
# DataPortal.get_history_window() is called with ffill=False to mark
        # the missing fields with NaNs. After merging the historical and
        # real-time data, the missing values (NaNs) are filled based on their
# next available values in the requested time window.
#
        # Warning: setting ffill=True in the DataPortal.get_history_window()
        # call results in wrong behavior: the last available value reported by
        # get_spot_value() would be used to fill the missing data, which
        # always represents the current spot price presented by the Broker.
historical_bars = super(DataPortalLive, self).get_history_window(
assets, end_dt, bar_count, frequency, field, data_frequency,
ffill=False)
realtime_bars = self.broker.get_realtime_bars(
assets, frequency)
        # Broker.get_realtime_bars() returns the assets as level 0 columns and
        # open, high, low, close, volume as level 1 columns.
        # To filter by field, the levels need to be swapped.
realtime_bars = realtime_bars.swaplevel(0, 1, axis=1)
ohlcv_field = 'close' if field == 'price' else field
# TODO: end_dt is ignored when historical & realtime bars are merged.
# Should not cause issues as end_dt is set to current time in live
# trading, but would be more proper if merge would make use of it.
combined_bars = historical_bars.combine_first(
realtime_bars[ohlcv_field])
if ffill and field == 'price':
            # A simple forward fill is not enough here as the last ingested
            # value might be outside of the requested time window. In that
            # case the time series starts with NaN and forward filling won't
            # help. To provide values for such cases we backward fill as well.
# Backward fill as a second operation will have no effect if the
# forward-fill was successful.
combined_bars.fillna(method='ffill', inplace=True)
combined_bars.fillna(method='bfill', inplace=True)
return combined_bars[-bar_count:] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/data_portal_live.py | data_portal_live.py |
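# Illustrative sketch (not part of the original zipline-live source) of the
# merge strategy used in get_history_window() above: historical bars whose
# tail is NaN are combined with real-time bars, then forward- and
# backward-filled. The prices and timestamps are made-up examples.
def _example_merge_history_with_realtime():  # pragma: no cover
    import numpy as np
    import pandas as pd
    idx = pd.date_range('2017-01-02 14:31', periods=4, freq='T', tz='UTC')
    historical = pd.Series([10.0, 10.1, np.nan, np.nan], index=idx)
    realtime = pd.Series([np.nan, np.nan, 10.2, 10.3], index=idx)
    combined = historical.combine_first(realtime)
    combined.fillna(method='ffill', inplace=True)
    combined.fillna(method='bfill', inplace=True)
    return combined  # 10.0, 10.1, 10.2, 10.3 with no NaNs left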
from abc import ABCMeta, abstractmethod, abstractproperty
from six import with_metaclass
class NoDataOnDate(Exception):
"""
Raised when a spot price cannot be found for the sid and date.
"""
pass
class NoDataBeforeDate(NoDataOnDate):
pass
class NoDataAfterDate(NoDataOnDate):
pass
class BarReader(with_metaclass(ABCMeta, object)):
@abstractproperty
def data_frequency(self):
pass
@abstractmethod
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', or 'volume'
        start_date : Timestamp
            Beginning of the window range.
        end_date : Timestamp
            End of the window range.
        assets : list of int
            The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
pass
@abstractproperty
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
pass
@abstractproperty
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
pass
@abstractproperty
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
pass
@abstractmethod
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
pass
@abstractmethod
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
pass | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/bar_reader.py | bar_reader.py |
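# Minimal illustrative subclass (not part of the original zipline-live
# source): it sketches how the abstract interface above might be satisfied by
# a toy reader backed by a dict of {(sid, dt, field): value}. All names here
# are hypothetical.
class _DictBarReader(BarReader):  # pragma: no cover
    def __init__(self, data, calendar, frequency='daily'):
        self._data = data
        self._calendar = calendar
        self._frequency = frequency
    @property
    def data_frequency(self):
        return self._frequency
    @property
    def trading_calendar(self):
        return self._calendar
    @property
    def last_available_dt(self):
        return max(dt for _, dt, _ in self._data)
    @property
    def first_trading_day(self):
        return min(dt for _, dt, _ in self._data)
    def load_raw_arrays(self, columns, start_date, end_date, assets):
        raise NotImplementedError('omitted from this sketch')
    def get_value(self, sid, dt, field):
        try:
            return self._data[(sid, dt, field)]
        except KeyError:
            raise NoDataOnDate(dt)
    def get_last_traded_dt(self, asset, dt):
        import pandas as pd
        traded = [d for s, d, _ in self._data if s == asset and d <= dt]
        return max(traded) if traded else pd.NaT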
import pandas as pd
import six
from toolz import curry
from toolz.curried.operator import add as prepend
COLUMN_NAMES = {
"V39063": '1month',
"V39065": '3month',
"V39066": '6month',
"V39067": '1year',
"V39051": '2year',
"V39052": '3year',
"V39053": '5year',
"V39054": '7year',
"V39055": '10year',
# Bank of Canada refers to this as 'Long' Rate, approximately 30 years.
"V39056": '30year',
}
BILL_IDS = ['V39063', 'V39065', 'V39066', 'V39067']
BOND_IDS = ['V39051', 'V39052', 'V39053', 'V39054', 'V39055', 'V39056']
@curry
def _format_url(instrument_type,
instrument_ids,
start_date,
end_date,
earliest_allowed_date):
"""
Format a URL for loading data from Bank of Canada.
"""
return (
"http://www.bankofcanada.ca/stats/results/csv"
"?lP=lookup_{instrument_type}_yields.php"
"&sR={restrict}"
"&se={instrument_ids}"
"&dF={start}"
"&dT={end}".format(
instrument_type=instrument_type,
instrument_ids='-'.join(map(prepend("L_"), instrument_ids)),
restrict=earliest_allowed_date.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"),
)
)
format_bill_url = _format_url('tbill', BILL_IDS)
format_bond_url = _format_url('bond', BOND_IDS)
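# Illustrative sketch (not part of the original zipline-live source): because
# _format_url is curried, the two partials above only need the date
# arguments. The dates used below are arbitrary examples.
def _example_format_bill_url():  # pragma: no cover
    start = pd.Timestamp('2016-01-04')
    end = pd.Timestamp('2016-06-30')
    # The third argument (earliest_allowed_date) fills the 'sR' restrict
    # parameter in the query string.
    return format_bill_url(start, end, start)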
def load_frame(url, skiprows):
"""
Load a DataFrame of data from a Bank of Canada site.
"""
return pd.read_csv(
url,
skiprows=skiprows,
skipinitialspace=True,
na_values=["Bank holiday", "Not available"],
parse_dates=["Date"],
index_col="Date",
).dropna(how='all') \
.tz_localize('UTC') \
.rename(columns=COLUMN_NAMES)
def check_known_inconsistencies(bill_data, bond_data):
"""
There are a couple quirks in the data provided by Bank of Canada.
Check that no new quirks have been introduced in the latest download.
"""
inconsistent_dates = bill_data.index.sym_diff(bond_data.index)
known_inconsistencies = [
# bill_data has an entry for 2010-02-15, which bond_data doesn't.
# bond_data has an entry for 2006-09-04, which bill_data doesn't.
# Both of these dates are bank holidays (Flag Day and Labor Day,
# respectively).
pd.Timestamp('2006-09-04', tz='UTC'),
pd.Timestamp('2010-02-15', tz='UTC'),
# 2013-07-25 comes back as "Not available" from the bills endpoint.
# This date doesn't seem to be a bank holiday, but the previous
# calendar implementation dropped this entry, so we drop it as well.
# If someone cares deeply about the integrity of the Canadian trading
# calendar, they may want to consider forward-filling here rather than
# dropping the row.
pd.Timestamp('2013-07-25', tz='UTC'),
]
    unexpected_inconsistencies = inconsistent_dates.drop(
        known_inconsistencies,
    )
    if len(unexpected_inconsistencies):
in_bills = bill_data.index.difference(bond_data.index).difference(
known_inconsistencies
)
in_bonds = bond_data.index.difference(bill_data.index).difference(
known_inconsistencies
)
raise ValueError(
"Inconsistent dates for Canadian treasury bills vs bonds. \n"
"Dates with bills but not bonds: {in_bills}.\n"
"Dates with bonds but not bills: {in_bonds}.".format(
in_bills=in_bills,
in_bonds=in_bonds,
)
)
def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
today = pd.Timestamp('now', tz='UTC').normalize()
# Bank of Canada only has the last 10 years of data at any given time.
return today.replace(year=today.year - 10)
def get_treasury_data(start_date, end_date):
bill_data = load_frame(
format_bill_url(start_date, end_date, start_date),
# We skip fewer rows here because we query for fewer bill fields,
# which makes the header smaller.
skiprows=18,
)
bond_data = load_frame(
format_bond_url(start_date, end_date, start_date),
skiprows=22,
)
check_known_inconsistencies(bill_data, bond_data)
# dropna('any') removes the rows for which we only had data for one of
# bills/bonds.
out = pd.concat([bond_data, bill_data], axis=1).dropna(how='any')
assert set(out.columns) == set(six.itervalues(COLUMN_NAMES))
# Multiply by 0.01 to convert from percentages to expected output format.
return out * 0.01 | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/treasuries_can.py | treasuries_can.py |
import os
import sys
from logbook import Logger, StreamHandler
from numpy import empty
from pandas import DataFrame, read_csv, Index, Timedelta, NaT
from zipline.utils.calendars import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from . import core as bundles
handler = StreamHandler(sys.stdout, format_string=" | {record.message}")
logger = Logger(__name__)
logger.handlers.append(handler)
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for custom data bundle
This function can be used in ~/.zipline/extension.py
    to register a bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest
class CSVDIRBundle:
"""
Wrapper class to call csvdir_bundle with provided
list of time frames and a path to the csvdir directory
"""
def __init__(self, tframes=None, csvdir=None):
self.tframes = tframes
self.csvdir = csvdir
def ingest(self,
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
self.tframes,
self.csvdir)
@bundles.register("csvdir")
def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
"""
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs'])
def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress):
with maybe_show_progress(symbols, show_progress,
label='Loading custom pricing data: ') as it:
files = os.listdir(csvdir)
for sid, symbol in enumerate(it):
logger.debug('%s: sid %s' % (symbol, sid))
try:
fname = [fname for fname in files
if '%s.csv' % symbol in fname][0]
except IndexError:
raise ValueError("%s.csv file is not in %s" % (symbol, csvdir))
dfr = read_csv(os.path.join(csvdir, fname),
parse_dates=[0],
infer_datetime_format=True,
index_col=0).sort_index()
start_date = dfr.index[0]
end_date = dfr.index[-1]
# The auto_close date is the day after the last trade.
ac_date = end_date + Timedelta(days=1)
metadata.iloc[sid] = start_date, end_date, ac_date, symbol
if 'split' in dfr.columns:
tmp = 1. / dfr[dfr['split'] != 1.0]['split']
split = DataFrame(data=tmp.index.tolist(),
columns=['effective_date'])
split['ratio'] = tmp.tolist()
split['sid'] = sid
splits = divs_splits['splits']
index = Index(range(splits.shape[0],
splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
divs_splits['splits'] = splits.append(split)
if 'dividend' in dfr.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = dfr[dfr['dividend'] != 0.0]['dividend']
div = DataFrame(data=tmp.index.tolist(), columns=['ex_date'])
div['record_date'] = NaT
div['declared_date'] = NaT
div['pay_date'] = NaT
div['amount'] = tmp.tolist()
div['sid'] = sid
divs = divs_splits['divs']
ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
divs_splits['divs'] = divs.append(div)
yield sid, dfr
register_calendar_alias("CSVDIR", "NYSE") | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/bundles/csvdir.py | csvdir.py |
from collections import namedtuple
import errno
import os
import shutil
import warnings
from contextlib2 import ExitStack
import click
import pandas as pd
from toolz import curry, complement, take
from ..us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
)
from zipline.assets import AssetDBWriter, AssetFinder, ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import downgrade
from zipline.utils.cache import (
dataframe_cache,
working_dir,
working_file,
)
from zipline.utils.compat import mappingproxy
from zipline.utils.input_validation import ensure_timestamp, optionally
import zipline.utils.paths as pth
from zipline.utils.preprocess import preprocess
from zipline.utils.calendars import get_calendar
def asset_db_path(bundle_name, timestr, environ=None, db_version=None):
return pth.data_path(
asset_db_relative(bundle_name, timestr, environ, db_version),
environ=environ,
)
def minute_equity_path(bundle_name, timestr, environ=None):
return pth.data_path(
minute_equity_relative(bundle_name, timestr, environ),
environ=environ,
)
def daily_equity_path(bundle_name, timestr, environ=None):
return pth.data_path(
daily_equity_relative(bundle_name, timestr, environ),
environ=environ,
)
def adjustment_db_path(bundle_name, timestr, environ=None):
return pth.data_path(
adjustment_db_relative(bundle_name, timestr, environ),
environ=environ,
)
def cache_path(bundle_name, environ=None):
return pth.data_path(
cache_relative(bundle_name, environ),
environ=environ,
)
def adjustment_db_relative(bundle_name, timestr, environ=None):
return bundle_name, timestr, 'adjustments.sqlite'
def cache_relative(bundle_name, timestr, environ=None):
return bundle_name, '.cache'
def daily_equity_relative(bundle_name, timestr, environ=None):
return bundle_name, timestr, 'daily_equities.bcolz'
def minute_equity_relative(bundle_name, timestr, environ=None):
return bundle_name, timestr, 'minute_equities.bcolz'
def asset_db_relative(bundle_name, timestr, environ=None, db_version=None):
db_version = ASSET_DB_VERSION if db_version is None else db_version
return bundle_name, timestr, 'assets-%d.sqlite' % db_version
def to_bundle_ingest_dirname(ts):
"""Convert a pandas Timestamp into the name of the directory for the
ingestion.
Parameters
----------
ts : pandas.Timestamp
        The time of the ingestion.
Returns
-------
name : str
The name of the directory for this ingestion.
"""
return ts.isoformat().replace(':', ';')
def from_bundle_ingest_dirname(cs):
"""Read a bundle ingestion directory name into a pandas Timestamp.
Parameters
----------
cs : str
The name of the directory.
Returns
-------
ts : pandas.Timestamp
The time when this ingestion happened.
"""
return pd.Timestamp(cs.replace(';', ':'))
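# Illustrative sketch (not part of the original zipline-live source): the two
# helpers above are inverses of each other; ':' is replaced with ';' because
# colons are not legal in paths on some filesystems (notably Windows).
def _example_ingest_dirname_roundtrip():  # pragma: no cover
    ts = pd.Timestamp('2016-01-04 09:31:00')
    dirname = to_bundle_ingest_dirname(ts)  # '2016-01-04T09;31;00'
    assert from_bundle_ingest_dirname(dirname) == ts
    return dirname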
def ingestions_for_bundle(bundle, environ=None):
return sorted(
(from_bundle_ingest_dirname(ing)
for ing in os.listdir(pth.data_path([bundle], environ))
if not pth.hidden(ing)),
reverse=True,
)
RegisteredBundle = namedtuple(
'RegisteredBundle',
['calendar_name',
'start_session',
'end_session',
'minutes_per_day',
'ingest',
'create_writers']
)
BundleData = namedtuple(
'BundleData',
'asset_finder equity_minute_bar_reader equity_daily_bar_reader '
'adjustment_reader',
)
BundleCore = namedtuple(
'BundleCore',
'bundles register unregister ingest load clean',
)
class UnknownBundle(click.ClickException, LookupError):
"""Raised if no bundle with the given name was registered.
"""
exit_code = 1
def __init__(self, name):
super(UnknownBundle, self).__init__(
'No bundle registered with the name %r' % name,
)
self.name = name
def __str__(self):
return self.message
class BadClean(click.ClickException, ValueError):
"""Exception indicating that an invalid argument set was passed to
``clean``.
Parameters
----------
before, after, keep_last : any
The bad arguments to ``clean``.
See Also
--------
clean
"""
def __init__(self, before, after, keep_last):
super(BadClean, self).__init__(
            'Cannot pass a combination of `before` and `after` with '
            '`keep_last`. Got: before=%r, after=%r, keep_last=%r\n' % (
before,
after,
keep_last,
),
)
def __str__(self):
return self.message
def _make_bundle_core():
"""Create a family of data bundle functions that read from the same
bundle mapping.
Returns
-------
bundles : mappingproxy
The mapping of bundles to bundle payloads.
register : callable
The function which registers new bundles in the ``bundles`` mapping.
unregister : callable
The function which deregisters bundles from the ``bundles`` mapping.
ingest : callable
The function which downloads and write data for a given data bundle.
load : callable
The function which loads the ingested bundles back into memory.
clean : callable
The function which cleans up data written with ``ingest``.
"""
_bundles = {} # the registered bundles
# Expose _bundles through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another bundle.
bundles = mappingproxy(_bundles)
@curry
def register(name,
f,
calendar_name='NYSE',
start_session=None,
end_session=None,
minutes_per_day=390,
create_writers=True):
"""Register a data bundle ingest function.
Parameters
----------
name : str
The name of the bundle.
f : callable
The ingest function. This function will be passed:
environ : mapping
The environment this is being run with.
asset_db_writer : AssetDBWriter
The asset db writer to write into.
minute_bar_writer : BcolzMinuteBarWriter
The minute bar writer to write into.
daily_bar_writer : BcolzDailyBarWriter
The daily bar writer to write into.
adjustment_writer : SQLiteAdjustmentWriter
The adjustment db writer to write into.
calendar : zipline.utils.calendars.TradingCalendar
The trading calendar to ingest for.
start_session : pd.Timestamp
The first session of data to ingest.
end_session : pd.Timestamp
The last session of data to ingest.
cache : DataFrameCache
A mapping object to temporarily store dataframes.
This should be used to cache intermediates in case the load
fails. This will be automatically cleaned up after a
successful load.
show_progress : bool
Show the progress for the current load where possible.
calendar_name : str, optional
The name of a calendar used to align bundle data.
Default is 'NYSE'.
start_session : pd.Timestamp, optional
The first session for which we want data. If not provided,
or if the date lies outside the range supported by the
calendar, the first_session of the calendar is used.
end_session : pd.Timestamp, optional
The last session for which we want data. If not provided,
or if the date lies outside the range supported by the
calendar, the last_session of the calendar is used.
minutes_per_day : int, optional
The number of minutes in each normal trading day.
create_writers : bool, optional
Should the ingest machinery create the writers for the ingest
function. This can be disabled as an optimization for cases where
they are not needed, like the ``quantopian-quandl`` bundle.
Notes
-----
        This function may be used as a decorator, for example:
.. code-block:: python
@register('quandl')
def quandl_ingest_function(...):
...
See Also
--------
zipline.data.bundles.bundles
"""
if name in bundles:
warnings.warn(
'Overwriting bundle with name %r' % name,
stacklevel=3,
)
# NOTE: We don't eagerly compute calendar values here because
# `register` is called at module scope in zipline, and creating a
# calendar currently takes between 0.5 and 1 seconds, which causes a
# noticeable delay on the zipline CLI.
_bundles[name] = RegisteredBundle(
calendar_name=calendar_name,
start_session=start_session,
end_session=end_session,
minutes_per_day=minutes_per_day,
ingest=f,
create_writers=create_writers,
)
return f
def unregister(name):
"""Unregister a bundle.
Parameters
----------
name : str
The name of the bundle to unregister.
Raises
------
UnknownBundle
Raised when no bundle has been registered with the given name.
See Also
--------
zipline.data.bundles.bundles
"""
try:
del _bundles[name]
except KeyError:
raise UnknownBundle(name)
def ingest(name,
environ=os.environ,
timestamp=None,
assets_versions=(),
show_progress=False):
"""Ingest data for a given bundle.
Parameters
----------
name : str
The name of the bundle.
environ : mapping, optional
The environment variables. By default this is os.environ.
timestamp : datetime, optional
The timestamp to use for the load.
By default this is the current time.
assets_versions : Iterable[int], optional
Versions of the assets db to which to downgrade.
show_progress : bool, optional
Tell the ingest function to display the progress where possible.
"""
try:
bundle = bundles[name]
except KeyError:
raise UnknownBundle(name)
calendar = get_calendar(bundle.calendar_name)
start_session = bundle.start_session
end_session = bundle.end_session
if start_session is None or start_session < calendar.first_session:
start_session = calendar.first_session
if end_session is None or end_session > calendar.last_session:
end_session = calendar.last_session
if timestamp is None:
timestamp = pd.Timestamp.utcnow()
timestamp = timestamp.tz_convert('utc').tz_localize(None)
timestr = to_bundle_ingest_dirname(timestamp)
cachepath = cache_path(name, environ=environ)
pth.ensure_directory(pth.data_path([name, timestr], environ=environ))
pth.ensure_directory(cachepath)
with dataframe_cache(cachepath, clean_on_failure=False) as cache, \
ExitStack() as stack:
            # we use `clean_on_failure=False` so that we don't purge the
# cache directory if the load fails in the middle
if bundle.create_writers:
wd = stack.enter_context(working_dir(
pth.data_path([], environ=environ))
)
daily_bars_path = wd.ensure_dir(
*daily_equity_relative(
name, timestr, environ=environ,
)
)
daily_bar_writer = BcolzDailyBarWriter(
daily_bars_path,
calendar,
start_session,
end_session,
)
# Do an empty write to ensure that the daily ctables exist
# when we create the SQLiteAdjustmentWriter below. The
# SQLiteAdjustmentWriter needs to open the daily ctables so
# that it can compute the adjustment ratios for the dividends.
daily_bar_writer.write(())
minute_bar_writer = BcolzMinuteBarWriter(
wd.ensure_dir(*minute_equity_relative(
name, timestr, environ=environ)
),
calendar,
start_session,
end_session,
minutes_per_day=bundle.minutes_per_day,
)
assets_db_path = wd.getpath(*asset_db_relative(
name, timestr, environ=environ,
))
asset_db_writer = AssetDBWriter(assets_db_path)
adjustment_db_writer = stack.enter_context(
SQLiteAdjustmentWriter(
wd.getpath(*adjustment_db_relative(
name, timestr, environ=environ)),
BcolzDailyBarReader(daily_bars_path),
calendar.all_sessions,
overwrite=True,
)
)
else:
daily_bar_writer = None
minute_bar_writer = None
asset_db_writer = None
adjustment_db_writer = None
if assets_versions:
raise ValueError('Need to ingest a bundle that creates '
'writers in order to downgrade the assets'
' db.')
bundle.ingest(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_db_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
pth.data_path([name, timestr], environ=environ),
)
for version in sorted(set(assets_versions), reverse=True):
version_path = wd.getpath(*asset_db_relative(
name, timestr, environ=environ, db_version=version,
))
with working_file(version_path) as wf:
shutil.copy2(assets_db_path, wf.path)
downgrade(wf.path, version)
def most_recent_data(bundle_name, timestamp, environ=None):
"""Get the path to the most recent data after ``date``for the
given bundle.
Parameters
----------
bundle_name : str
The name of the bundle to lookup.
timestamp : datetime
The timestamp to begin searching on or before.
environ : dict, optional
An environment dict to forward to zipline_root.
"""
if bundle_name not in bundles:
raise UnknownBundle(bundle_name)
try:
candidates = os.listdir(
pth.data_path([bundle_name], environ=environ),
)
return pth.data_path(
[bundle_name,
max(
filter(complement(pth.hidden), candidates),
key=from_bundle_ingest_dirname,
)],
environ=environ,
)
except (ValueError, OSError) as e:
if getattr(e, 'errno', errno.ENOENT) != errno.ENOENT:
raise
raise ValueError(
'no data for bundle {bundle!r} on or before {timestamp}\n'
'maybe you need to run: $ zipline ingest -b {bundle}'.format(
bundle=bundle_name,
timestamp=timestamp,
),
)
def load(name, environ=os.environ, timestamp=None):
"""Loads a previously ingested bundle.
Parameters
----------
name : str
The name of the bundle.
environ : mapping, optional
The environment variables. Defaults of os.environ.
timestamp : datetime, optional
The timestamp of the data to lookup.
Defaults to the current time.
Returns
-------
bundle_data : BundleData
The raw data readers for this bundle.
"""
if timestamp is None:
timestamp = pd.Timestamp.utcnow()
timestr = most_recent_data(name, timestamp, environ=environ)
return BundleData(
asset_finder=AssetFinder(
asset_db_path(name, timestr, environ=environ),
),
equity_minute_bar_reader=BcolzMinuteBarReader(
minute_equity_path(name, timestr, environ=environ),
),
equity_daily_bar_reader=BcolzDailyBarReader(
daily_equity_path(name, timestr, environ=environ),
),
adjustment_reader=SQLiteAdjustmentReader(
adjustment_db_path(name, timestr, environ=environ),
),
)
@preprocess(
before=optionally(ensure_timestamp),
after=optionally(ensure_timestamp),
)
def clean(name,
before=None,
after=None,
keep_last=None,
environ=os.environ):
"""Clean up data that was created with ``ingest`` or
``$ python -m zipline ingest``
Parameters
----------
name : str
The name of the bundle to remove data for.
before : datetime, optional
Remove data ingested before this date.
This argument is mutually exclusive with: keep_last
after : datetime, optional
Remove data ingested after this date.
This argument is mutually exclusive with: keep_last
keep_last : int, optional
Remove all but the last ``keep_last`` ingestions.
This argument is mutually exclusive with:
before
after
environ : mapping, optional
The environment variables. Defaults of os.environ.
Returns
-------
cleaned : set[str]
The names of the runs that were removed.
Raises
------
BadClean
Raised when ``before`` and or ``after`` are passed with
``keep_last``. This is a subclass of ``ValueError``.
"""
try:
all_runs = sorted(
filter(
complement(pth.hidden),
os.listdir(pth.data_path([name], environ=environ)),
),
key=from_bundle_ingest_dirname,
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
raise UnknownBundle(name)
if ((before is not None or after is not None) and
keep_last is not None):
raise BadClean(before, after, keep_last)
if keep_last is None:
def should_clean(name):
dt = from_bundle_ingest_dirname(name)
return (
(before is not None and dt < before) or
(after is not None and dt > after)
)
elif keep_last >= 0:
last_n_dts = set(take(keep_last, reversed(all_runs)))
def should_clean(name):
return name not in last_n_dts
else:
raise BadClean(before, after, keep_last)
cleaned = set()
for run in all_runs:
if should_clean(run):
path = pth.data_path([name, run], environ=environ)
shutil.rmtree(path)
cleaned.add(path)
return cleaned
return BundleCore(bundles, register, unregister, ingest, load, clean)
bundles, register, unregister, ingest, load, clean = _make_bundle_core() | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/bundles/core.py | core.py |
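# Illustrative sketch (not part of the original zipline-live source) of the
# public API returned by _make_bundle_core() above. The bundle name is
# hypothetical and the registered ingest function is a stub that writes
# nothing.
def _example_register_and_ingest():  # pragma: no cover
    @register('example-bundle', calendar_name='NYSE')
    def example_ingest(environ, asset_db_writer, minute_bar_writer,
                       daily_bar_writer, adjustment_writer, calendar,
                       start_session, end_session, cache, show_progress,
                       output_dir):
        # A real ingest function would write asset metadata, bars and
        # adjustments through the writers passed in here.
        pass
    # Write a new timestamped ingestion under the zipline data root.
    ingest('example-bundle', show_progress=False)
    # load('example-bundle') would then return a BundleData of readers over
    # the most recent ingestion, and clean('example-bundle', keep_last=1)
    # would remove older ingestions.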
from io import BytesIO
from itertools import count
import tarfile
from time import time, sleep
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from zipline.utils.calendars import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from . import core as bundles
log = Logger(__name__)
seconds_per_call = (pd.Timedelta('10 minutes') / 2000).total_seconds()
# Invalid symbols that quandl has had in its metadata:
excluded_symbols = frozenset({'TEST123456789'})
def _fetch_raw_metadata(api_key, cache, retries, environ):
"""Generator that yields each page of data from the metadata endpoint
as a dataframe.
"""
for page_number in count(1):
key = 'metadata-page-%d' % page_number
try:
raw = cache[key]
except KeyError:
for _ in range(retries):
try:
raw = pd.read_csv(
format_metadata_url(api_key, page_number),
parse_dates=[
'oldest_available_date',
'newest_available_date',
],
usecols=[
'dataset_code',
'name',
'oldest_available_date',
'newest_available_date',
],
)
break
except ValueError:
# when we are past the last page we will get a value
# error because there will be no columns
raw = pd.DataFrame([])
break
except Exception:
pass
else:
raise ValueError(
'Failed to download metadata page %d after %d'
' attempts.' % (page_number, retries),
)
cache[key] = raw
if raw.empty:
# use the empty dataframe to signal completion
break
yield raw
def fetch_symbol_metadata_frame(api_key,
cache,
retries=5,
environ=None,
show_progress=False):
"""
Download Quandl symbol metadata.
Parameters
----------
api_key : str
The quandl api key to use. If this is None then no api key will be
sent.
cache : DataFrameCache
The cache to use for persisting the intermediate data.
retries : int, optional
The number of times to retry each request before failing.
environ : mapping[str -> str], optional
The environment to use to find the zipline home. By default this
is ``os.environ``.
show_progress : bool, optional
Show a progress bar for the download of this data.
Returns
-------
metadata_frame : pd.DataFrame
A dataframe with the following columns:
symbol: the asset's symbol
name: the full name of the asset
start_date: the first date of data for this asset
end_date: the last date of data for this asset
auto_close_date: end_date + one day
            exchange: the exchange for the asset; this is always 'QUANDL'
The index of the dataframe will be used for symbol->sid mappings but
otherwise does not have specific meaning.
"""
raw_iter = _fetch_raw_metadata(api_key, cache, retries, environ)
def item_show_func(_, _it=iter(count())):
        return 'Downloading page: %d' % next(_it)
with maybe_show_progress(raw_iter,
show_progress,
item_show_func=item_show_func,
label='Downloading WIKI metadata: ') as blocks:
data = pd.concat(blocks, ignore_index=True).rename(columns={
'dataset_code': 'symbol',
'name': 'asset_name',
'oldest_available_date': 'start_date',
'newest_available_date': 'end_date',
}).sort_values('symbol')
data = data[~data.symbol.isin(excluded_symbols)]
# cut out all the other stuff in the name column
# we need to escape the paren because it is actually splitting on a regex
data.asset_name = data.asset_name.str.split(r' \(', 1).str.get(0)
data['exchange'] = 'QUANDL'
data['auto_close_date'] = data['end_date'] + pd.Timedelta(days=1)
return data
def format_metadata_url(api_key, page_number):
"""Build the query RL for the quandl WIKI metadata.
"""
query_params = [
('per_page', '100'),
('sort_by', 'id'),
('page', str(page_number)),
('database_code', 'WIKI'),
]
if api_key is not None:
query_params = [('api_key', api_key)] + query_params
return (
'https://www.quandl.com/api/v3/datasets.csv?' + urlencode(query_params)
)
def format_wiki_url(api_key, symbol, start_date, end_date):
"""
Build a query URL for a quandl WIKI dataset.
"""
query_params = [
('start_date', start_date.strftime('%Y-%m-%d')),
('end_date', end_date.strftime('%Y-%m-%d')),
('order', 'asc'),
]
if api_key is not None:
query_params = [('api_key', api_key)] + query_params
return (
"https://www.quandl.com/api/v3/datasets/WIKI/"
"{symbol}.csv?{query}".format(
symbol=symbol,
query=urlencode(query_params),
)
)
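# Illustrative sketch (not part of the original zipline-live source): the URL
# built for a hypothetical AAPL request, with no api key supplied.
def _example_format_wiki_url():  # pragma: no cover
    url = format_wiki_url(
        None,
        'AAPL',
        pd.Timestamp('2016-01-04'),
        pd.Timestamp('2016-01-29'),
    )
    # https://www.quandl.com/api/v3/datasets/WIKI/AAPL.csv?
    #     start_date=2016-01-04&end_date=2016-01-29&order=asc
    return url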
def fetch_single_equity(api_key,
symbol,
start_date,
end_date,
retries=5):
"""
Download data for a single equity.
"""
for _ in range(retries):
try:
return pd.read_csv(
format_wiki_url(api_key, symbol, start_date, end_date),
parse_dates=['Date'],
index_col='Date',
usecols=[
'Open',
'High',
'Low',
'Close',
'Volume',
'Date',
'Ex-Dividend',
'Split Ratio',
],
na_values=['NA'],
).rename(columns={
'Open': 'open',
'High': 'high',
'Low': 'low',
'Close': 'close',
'Volume': 'volume',
'Date': 'date',
'Ex-Dividend': 'ex_dividend',
'Split Ratio': 'split_ratio',
})
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download data for %r after %d attempts." % (
symbol, retries
)
)
def _update_splits(splits, asset_id, raw_data):
split_ratios = raw_data.split_ratio
df = pd.DataFrame({'ratio': 1 / split_ratios[split_ratios != 1]})
df.index.name = 'effective_date'
df.reset_index(inplace=True)
df['sid'] = asset_id
splits.append(df)
def _update_dividends(dividends, asset_id, raw_data):
divs = raw_data.ex_dividend
df = pd.DataFrame({'amount': divs[divs != 0]})
df.index.name = 'ex_date'
df.reset_index(inplace=True)
df['sid'] = asset_id
# we do not have this data in the WIKI dataset
df['record_date'] = df['declared_date'] = df['pay_date'] = pd.NaT
dividends.append(df)
def gen_symbol_data(api_key,
cache,
symbol_map,
calendar,
start_session,
end_session,
splits,
dividends,
retries):
for asset_id, symbol in symbol_map.iteritems():
start_time = time()
try:
# see if we have this data cached.
raw_data = cache[symbol]
should_sleep = False
except KeyError:
# we need to fetch the data and then write it to our cache
raw_data = cache[symbol] = fetch_single_equity(
api_key,
symbol,
start_date=start_session,
end_date=end_session,
)
should_sleep = True
_update_splits(splits, asset_id, raw_data)
_update_dividends(dividends, asset_id, raw_data)
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data = raw_data.reindex(
sessions.tz_localize(None),
copy=False,
).fillna(0.0)
yield asset_id, raw_data
if should_sleep:
            remaining = seconds_per_call - (time() - start_time)
if remaining > 0:
sleep(remaining)
@bundles.register('quandl')
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
"""Build a zipline data bundle from the Quandl WIKI dataset.
"""
api_key = environ.get('QUANDL_API_KEY')
metadata = fetch_symbol_metadata_frame(
api_key,
cache=cache,
show_progress=show_progress,
)
symbol_map = metadata.symbol
# data we will collect in `gen_symbol_data`
splits = []
dividends = []
asset_db_writer.write(metadata)
daily_bar_writer.write(
gen_symbol_data(
api_key,
cache,
symbol_map,
calendar,
start_session,
end_session,
splits,
dividends,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5),
),
show_progress=show_progress,
)
adjustment_writer.write(
splits=pd.concat(splits, ignore_index=True),
dividends=pd.concat(dividends, ignore_index=True),
)
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
QUANTOPIAN_QUANDL_URL = (
'https://s3.amazonaws.com/quantopian-public-zipline-data/quandl'
)
ONE_MEGABYTE = 1024 * 1024
@bundles.register('quantopian-quandl', create_writers=False)
def quantopian_quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
if show_progress:
data = download_with_progress(
QUANTOPIAN_QUANDL_URL,
chunk_size=ONE_MEGABYTE,
label="Downloading Bundle: quantopian-quandl",
)
else:
data = download_without_progress(QUANTOPIAN_QUANDL_URL)
with tarfile.open('r', fileobj=data) as tar:
if show_progress:
print("Writing data to %s." % output_dir)
tar.extractall(output_dir)
register_calendar_alias("QUANDL", "NYSE") | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/data/bundles/quandl.py | quandl.py |
import datetime
from copy import deepcopy
import numpy as np
import pandas as pd
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
        Prepend the given missing dates to the front of the window, resizing
        the internal buffers to hold the larger window.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
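# Illustrative sketch (not part of the original zipline-live source) of how
# RollingPanel is fed one frame per bar and then queried for the trailing
# window. The fields, sids, dates and prices are arbitrary examples.
def _example_rolling_panel():  # pragma: no cover
    rp = RollingPanel(window=2, items=['price', 'volume'], sids=[1, 2])
    for i, dt in enumerate(pd.date_range('2016-01-04', periods=3, tz='utc')):
        frame = pd.DataFrame(
            {1: [100.0 + i, 1000.0], 2: [200.0 + i, 2000.0]},
            index=['price', 'volume'],
        )
        rp.add_frame(dt, frame)
    # Only the trailing `window` frames (here the last two days) are in view.
    return rp.get_current()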
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
self.buffer = new_buffer | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/data.py | data.py |
from functools import reduce
from pprint import pformat
from six import viewkeys
from six.moves import map, zip
from toolz import curry, flip
from .sentinel import sentinel
@curry
def apply(f, *args, **kwargs):
"""Apply a function to arguments.
Parameters
----------
f : callable
The function to call.
    *args, **kwargs
        Arguments to feed to the callable.
Returns
-------
a : any
The result of ``f(*args, **kwargs)``
Examples
--------
>>> from toolz.curried.operator import add, sub
>>> fs = add(1), sub(1)
>>> tuple(map(apply, fs, (1, 2)))
(2, -1)
Class decorator
>>> instance = apply
>>> @instance
... class obj:
... def f(self):
... return 'f'
...
>>> obj.f()
'f'
>>> issubclass(obj, object)
Traceback (most recent call last):
...
TypeError: issubclass() arg 1 must be a class
>>> isinstance(obj, type)
False
See Also
--------
unpack_apply
mapply
"""
return f(*args, **kwargs)
# Alias for use as a class decorator.
instance = apply
def mapall(funcs, seq):
"""
Parameters
----------
funcs : iterable[function]
Sequence of functions to map over `seq`.
seq : iterable
Sequence over which to map funcs.
Yields
------
elem : object
Concatenated result of mapping each ``func`` over ``seq``.
Example
-------
>>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
[2, 3, 4, 0, 1, 2]
"""
for func in funcs:
for elem in seq:
yield func(elem)
def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Example
-------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
"""
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest)
def _format_unequal_keys(dicts):
return pformat([sorted(d.keys()) for d in dicts])
def dzip_exact(*dicts):
"""
Parameters
----------
*dicts : iterable[dict]
A sequence of dicts all sharing the same keys.
Returns
-------
zipped : dict
A dict whose keys are the union of all keys in *dicts, and whose values
are tuples of length len(dicts) containing the result of looking up
each key in each dict.
Raises
------
ValueError
If dicts don't all have the same keys.
Example
-------
>>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
>>> result == {'a': (1, 3), 'b': (2, 4)}
True
"""
if not same(*map(viewkeys, dicts)):
raise ValueError(
"dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
)
return {k: tuple(d[k] for d in dicts) for k in dicts[0]}
def _gen_unzip(it, elem_len):
"""Helper for unzip which checks the lengths of each element in it.
Parameters
----------
it : iterable[tuple]
        An iterable of tuples. ``unzip`` maps ``tuple`` over the input
        sequence to ensure that these are already tuples.
elem_len : int or None
        The expected element length. If this is None it is inferred from the
length of the first element.
Yields
------
elem : tuple
Each element of ``it``.
Raises
------
ValueError
Raised when the lengths do not match the ``elem_len``.
"""
elem = next(it)
first_elem_len = len(elem)
if elem_len is not None and elem_len != first_elem_len:
raise ValueError(
'element at index 0 was length %d, expected %d' % (
first_elem_len,
elem_len,
)
)
else:
elem_len = first_elem_len
yield elem
for n, elem in enumerate(it, 1):
if len(elem) != elem_len:
raise ValueError(
'element at index %d was length %d, expected %d' % (
n,
len(elem),
elem_len,
),
)
yield elem
def unzip(seq, elem_len=None):
"""Unzip a length n sequence of length m sequences into m seperate length
n sequences.
Parameters
----------
seq : iterable[iterable]
The sequence to unzip.
elem_len : int, optional
The expected length of each element of ``seq``. If not provided this
        will be inferred from the length of the first element of ``seq``. This
can be used to ensure that code like: ``a, b = unzip(seq)`` does not
fail even when ``seq`` is empty.
Returns
-------
seqs : iterable[iterable]
The new sequences pulled out of the first iterable.
Raises
------
ValueError
Raised when ``seq`` is empty and ``elem_len`` is not provided.
Raised when elements of ``seq`` do not match the given ``elem_len`` or
the length of the first element of ``seq``.
Examples
--------
>>> seq = [('a', 1), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq)
>>> cs
('a', 'b', 'c')
>>> ns
(1, 2, 3)
# checks that the elements are the same length
>>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
>>> cs, ns = unzip(seq)
Traceback (most recent call last):
...
ValueError: element at index 2 was length 3, expected 2
    # allows an explicit element length instead of inferring
>>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq, 2)
Traceback (most recent call last):
...
ValueError: element at index 0 was length 3, expected 2
# handles empty sequences when a length is given
>>> cs, ns = unzip([], elem_len=2)
>>> cs == ns == ()
True
Notes
-----
This function will force ``seq`` to completion.
"""
ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
if ret:
return ret
if elem_len is None:
raise ValueError("cannot unzip empty sequence without 'elem_len'")
return ((),) * elem_len
_no_default = sentinel('_no_default')
def getattrs(value, attrs, default=_no_default):
"""
Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Example
-------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default'
"""
try:
for attr in attrs:
value = getattr(value, attr)
except AttributeError:
if default is _no_default:
raise
value = default
return value
@curry
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Usage
-----
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator
# Decorators for setting the __name__ and __doc__ properties of a decorated
# function.
# Example:
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
        For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(
flip(f),
reversed(seq),
*(default,) if default is not _no_default else ()
) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/functional.py | functional.py |
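# Illustrative example (not part of the original zipline-live source): folding
# subtraction with right associativity.
def _example_foldr():  # pragma: no cover
    from operator import sub
    # 1 - (2 - (3 - 0)) == 2, whereas a left fold, reduce(sub, [1, 2, 3], 0),
    # gives ((0 - 1) - 2) - 3 == -6.
    return foldr(sub, [1, 2, 3], 0)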
from six.moves import map as imap
from toolz import compose, identity
class ApplyAsyncResult(object):
"""An object that boxes results for calls to
:meth:`~zipline.utils.pool.SequentialPool.apply_async`.
Parameters
----------
value : any
The result of calling the function, or any exception that was raised.
successful : bool
If ``True``, ``value`` is the return value of the function.
If ``False``, ``value`` is the exception that was raised when calling
        the function.
"""
def __init__(self, value, successful):
self._value = value
self._successful = successful
def successful(self):
"""Did the function execute without raising an exception?
"""
return self._successful
def get(self):
"""Return the result of calling the function or reraise any exceptions
that were raised.
"""
if not self._successful:
raise self._value
return self._value
def ready(self):
"""Has the function finished executing.
Notes
-----
In the :class:`~zipline.utils.pool.SequentialPool` case, this is always
``True``.
"""
return True
def wait(self):
"""Wait until the function is finished executing.
Notes
-----
In the :class:`~zipline.utils.pool.SequentialPool` case, this is a nop
because the function is computed eagerly in the same thread as the
call to :meth:`~zipline.utils.pool.SequentialPool.apply_async`.
"""
pass
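# Illustrative usage sketch (not part of the original module): boxing a
# successful result and a failure in ``ApplyAsyncResult``. All values below
# are hypothetical.
def _example_apply_async_result():
    ok = ApplyAsyncResult(42, successful=True)
    failed = ApplyAsyncResult(ValueError('boom'), successful=False)
    assert ok.ready() and ok.successful() and ok.get() == 42
    assert failed.ready() and not failed.successful()
    try:
        failed.get()
    except ValueError:
        pass  # ``get`` re-raises the boxed exception
    return ok, failed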
class SequentialPool(object):
"""A dummy pool object that iterates sequentially in a single thread.
Methods
-------
map(f: callable[A, B], iterable: iterable[A]) -> list[B]
Apply a function to each of the elements of ``iterable``.
imap(f: callable[A, B], iterable: iterable[A]) -> iterable[B]
Lazily apply a function to each of the elements of ``iterable``.
imap_unordered(f: callable[A, B], iterable: iterable[A]) -> iterable[B]
Lazily apply a function to each of the elements of ``iterable`` but
yield values as they become available. The resulting iterable is
unordered.
Notes
-----
This object is useful for testing to mock out the ``Pool`` interface
provided by gevent or multiprocessing.
See Also
--------
:class:`multiprocessing.Pool`
"""
map = staticmethod(compose(list, imap))
imap = imap_unordered = staticmethod(imap)
@staticmethod
def apply_async(f, args=(), kwargs=None, callback=None):
"""Apply a function but emulate the API of an asynchronous call.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
        kwargs : dict, optional
            The keyword arguments.
        callback : callable, optional
            If provided, it is applied to the return value of ``f`` before the
            result is boxed.
Returns
-------
future : ApplyAsyncResult
The result of calling the function boxed in a future-like api.
Notes
-----
This calls the function eagerly but wraps it so that ``SequentialPool``
can be used where a :class:`multiprocessing.Pool` or
:class:`gevent.pool.Pool` would be used.
"""
try:
value = (identity if callback is None else callback)(
f(*args, **kwargs or {}),
)
successful = True
except Exception as e:
value = e
successful = False
return ApplyAsyncResult(value, successful)
@staticmethod
def apply(f, args=(), kwargs=None):
"""Apply a function.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
kwargs : dict, optional
The keyword arguments.
Returns
-------
result : any
f(*args, **kwargs)
"""
return f(*args, **kwargs or {}) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/pool.py | pool.py |
import re
from six import iteritems
from textwrap import dedent
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC = dedent(
"""\
frequency : {'year_start', 'quarter_start', 'month_start', 'week_start'}
A string indicating desired sampling dates:
* 'year_start' -> first trading day of each year
* 'quarter_start' -> first trading day of January, April, July, October
* 'month_start' -> first trading day of each month
* 'week_start' -> first trading_day of each week
"""
)
PIPELINE_ALIAS_NAME_DOC = dedent(
"""\
name : str
The name to alias this term as.
""",
)
def pad_lines_after_first(prefix, s):
"""Apply a prefix to each line in s after the first."""
return ('\n' + prefix).join(s.splitlines())
def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
        Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
# Search for '{name}', with optional leading whitespace.
regex = re.compile('^(\s*)' + '({' + target + '})$', re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(
target, owner_name
)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params)
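# Illustrative usage sketch (not part of the original module): each template
# parameter must sit alone on its own line in the docstring; its indentation
# is applied to every line of the substituted text. The docstring and values
# below are hypothetical.
def _example_format_docstring():
    doc = "Parameters\n----------\n{frequency}\n"
    return format_docstring(
        owner_name='example_func',
        docstring=doc,
        formatters={'frequency': 'frequency : str\n    How often to sample.'},
    )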
def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Usage
-----
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/sharedoc.py | sharedoc.py |
from abc import ABCMeta, abstractmethod
from six import with_metaclass, iteritems
# Consistent error to be thrown in various cases regarding overriding
# `final` attributes.
_type_error = TypeError('Cannot override final attribute')
def bases_mro(bases):
"""
Yield classes in the order that methods should be looked up from the
base classes of an object.
"""
for base in bases:
for class_ in base.__mro__:
yield class_
def is_final(name, mro):
"""
Checks if `name` is a `final` object in the given `mro`.
We need to check the mro because we need to directly go into the __dict__
    of the classes. Because `final` objects are descriptors, we need to grab
them _BEFORE_ the `__call__` is invoked.
"""
return any(isinstance(getattr(c, '__dict__', {}).get(name), final)
for c in bases_mro(mro))
class FinalMeta(type):
"""A metaclass template for classes the want to prevent subclassess from
overriding a some methods or attributes.
"""
def __new__(mcls, name, bases, dict_):
for k, v in iteritems(dict_):
if is_final(k, bases):
raise _type_error
setattr_ = dict_.get('__setattr__')
if setattr_ is None:
# No `__setattr__` was explicitly defined, look up the super
# class's. `bases[0]` will have a `__setattr__` because
# `object` does so we don't need to worry about the mro.
setattr_ = bases[0].__setattr__
if not is_final('__setattr__', bases) \
and not isinstance(setattr_, final):
# implicitly make the `__setattr__` a `final` object so that
# users cannot just avoid the descriptor protocol.
dict_['__setattr__'] = final(setattr_)
return super(FinalMeta, mcls).__new__(mcls, name, bases, dict_)
def __setattr__(self, name, value):
"""This stops the `final` attributes from being reassigned on the
class object.
"""
if is_final(name, self.__mro__):
raise _type_error
super(FinalMeta, self).__setattr__(name, value)
class final(with_metaclass(ABCMeta)):
"""
An attribute that cannot be overridden.
This is like the final modifier in Java.
Example usage:
>>> from six import with_metaclass
>>> class C(with_metaclass(FinalMeta, object)):
... @final
... def f(self):
... return 'value'
...
This constructs a class with final method `f`. This cannot be overridden
on the class object or on any instance. You cannot override this by
subclassing `C`; attempting to do so will raise a `TypeError` at class
construction time.
"""
def __new__(cls, attr):
# Decide if this is a method wrapper or an attribute wrapper.
# We are going to cache the `callable` check by creating a
# method or attribute wrapper.
if hasattr(attr, '__get__'):
return object.__new__(finaldescriptor)
else:
return object.__new__(finalvalue)
def __init__(self, attr):
self._attr = attr
def __set__(self, instance, value):
"""
        `final` objects cannot be reassigned. This is the most important concept
about `final`s.
Unlike a `property` object, this will raise a `TypeError` when you
attempt to reassign it.
"""
raise _type_error
@abstractmethod
def __get__(self, instance, owner):
raise NotImplementedError('__get__')
class finalvalue(final):
"""
A wrapper for a non-descriptor attribute.
"""
def __get__(self, instance, owner):
return self._attr
class finaldescriptor(final):
"""
A final wrapper around a descriptor.
"""
def __get__(self, instance, owner):
return self._attr.__get__(instance, owner) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/final.py | final.py |
import warnings
from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
from zipline.errors import SymbolNotFound
from zipline.finance.asset_restrictions import SecurityListRestrictions
from zipline.zipline_warnings import ZiplineDeprecationWarning
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists')
class SecurityList(object):
def __init__(self, data, current_date_func, asset_finder):
"""
data: a nested dictionary:
knowledge_date -> lookup_date ->
              {'add': [symbol list], 'delete': [symbol list]}
current_date_func: function taking no parameters, returning
current datetime
"""
self.data = data
self._cache = {}
self._knowledge_dates = self.make_knowledge_dates(self.data)
self.current_date = current_date_func
self.count = 0
self._current_set = set()
self.asset_finder = asset_finder
def make_knowledge_dates(self, data):
knowledge_dates = sorted(
[pd.Timestamp(k) for k in data.keys()])
return knowledge_dates
def __iter__(self):
warnings.warn(
'Iterating over security_lists is deprecated. Use '
'`for sid in <security_list>.current_securities(dt)` instead.',
category=ZiplineDeprecationWarning,
stacklevel=2
)
return iter(self.current_securities(self.current_date()))
def __contains__(self, item):
warnings.warn(
'Evaluating inclusion in security_lists is deprecated. Use '
'`sid in <security_list>.current_securities(dt)` instead.',
category=ZiplineDeprecationWarning,
stacklevel=2
)
return item in self.current_securities(self.current_date())
def current_securities(self, dt):
for kd in self._knowledge_dates:
if dt < kd:
break
if kd in self._cache:
self._current_set = self._cache[kd]
continue
for effective_date, changes in iter(self.data[kd].items()):
self.update_current(
effective_date,
changes['add'],
self._current_set.add
)
self.update_current(
effective_date,
changes['delete'],
self._current_set.remove
)
self._cache[kd] = self._current_set
return self._current_set
def update_current(self, effective_date, symbols, change_func):
for symbol in symbols:
try:
asset = self.asset_finder.lookup_symbol(
symbol,
as_of_date=effective_date
)
# Pass if no Asset exists for the symbol
except SymbolNotFound:
continue
change_func(asset.sid)
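# Illustrative sketch (not part of the original module): the shape of the
# nested ``data`` mapping consumed by ``SecurityList``. Dates and symbols
# below are hypothetical; keys are knowledge dates, then effective (lookup)
# dates, then 'add'/'delete' symbol lists.
_EXAMPLE_SECURITY_LIST_DATA = {
    datetime(2015, 1, 5, tzinfo=pytz.utc): {
        datetime(2015, 1, 5, tzinfo=pytz.utc): {
            'add': ['AAAA', 'BBBB'],
            'delete': [],
        },
    },
    datetime(2015, 2, 2, tzinfo=pytz.utc): {
        datetime(2015, 2, 2, tzinfo=pytz.utc): {
            'add': [],
            'delete': ['AAAA'],
        },
    },
}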
class SecurityListSet(object):
# provide a cut point to substitute other security
# list implementations.
security_list_type = SecurityList
def __init__(self, current_date_func, asset_finder):
self.current_date_func = current_date_func
self.asset_finder = asset_finder
self._leveraged_etf = None
@property
def leveraged_etf_list(self):
if self._leveraged_etf is None:
self._leveraged_etf = self.security_list_type(
load_from_directory('leveraged_etf_list'),
self.current_date_func,
asset_finder=self.asset_finder
)
return self._leveraged_etf
@property
def restrict_leveraged_etfs(self):
return SecurityListRestrictions(self.leveraged_etf_list)
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
    New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
{add: [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/security_list.py | security_list.py |
import pandas as pd
import pytz
# import warnings
from datetime import datetime
from dateutil import rrule
from functools import partial
# from zipline.zipline_warnings import ZiplineDeprecationWarning
# IMPORTANT: This module is deprecated and is only here for temporary backwards
# compatibility. Look at the `zipline.utils.calendars.trading_schedule`
# module, as well as the calendar definitions in `zipline.utils.calendars`.
# TODO: The new calendar API is currently in flux, so the deprecation
# warning for this module is currently disabled. Re-enable once
# the new API is stabilized.
#
# warnings.warn(
# "The `tradingcalendar` module is deprecated. See the "
# "`zipline.utils.calendars.trading_schedule` module, as well as the "
# "calendar definitions in `zipline.utils.calendars`.",
# category=ZiplineDeprecationWarning,
# stacklevel=1,
# )
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
    # If Christmas falls on a Saturday, then the 24th (a Friday) is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
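# Illustrative usage sketch (not part of the original module): computing the
# UTC open/close for a single, hypothetical session. Passing a day that is in
# ``early_closes`` would yield a 1 PM Eastern close instead.
def _example_get_open_and_close():
    day = pd.Timestamp('2014-07-07', tz='UTC')
    market_open, market_close = get_open_and_close(day, early_closes=[])
    return market_open, market_close  # 13:31 UTC and 20:00 UTC for this date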
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
get_open_and_close) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/tradingcalendar.py | tradingcalendar.py |
from ctypes import (
Structure,
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort,
sizeof,
)
import numpy as np
import pandas as pd
from six.moves import range
_inttypes_map = {
sizeof(t) - 1: t for t in {
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort
}
}
_inttypes = list(
pd.Series(_inttypes_map).reindex(
range(max(_inttypes_map.keys())),
method='bfill',
),
)
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
    option : str
        The name of the first field for the enum.
    *options : str
        The names of any additional fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
return _enum(*rangeob) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/enum.py | enum.py |
from operator import attrgetter
import six
def compose_types(a, *cs):
"""Compose multiple classes together.
Parameters
----------
*mcls : tuple[type]
The classes that you would like to compose
Returns
-------
cls : type
A type that subclasses all of the types in ``mcls``.
Notes
-----
A common use case for this is to build composed metaclasses, for example,
imagine you have some simple metaclass ``M`` and some instance of ``M``
named ``C`` like so:
.. code-block:: python
>>> class M(type):
... def __new__(mcls, name, bases, dict_):
... dict_['ayy'] = 'lmao'
... return super(M, mcls).__new__(mcls, name, bases, dict_)
>>> from six import with_metaclass
>>> class C(with_metaclass(M, object)):
... pass
    We now want to create a subclass of ``C`` that is also an abstract class.
We can use ``compose_types`` to create a new metaclass that is a subclass
of ``M`` and ``ABCMeta``. This is needed because a subclass of a class
with a metaclass must have a metaclass which is a subclass of the metaclass
of the superclass.
.. code-block:: python
>>> from abc import ABCMeta, abstractmethod
>>> class D(with_metaclass(compose_types(M, ABCMeta), C)):
... @abstractmethod
... def f(self):
... raise NotImplementedError('f')
We can see that this class has both metaclasses applied to it:
.. code-block:: python
>>> D.ayy
'lmao'
>>> D()
Traceback (most recent call last):
...
TypeError: Can't instantiate abstract class D with abstract methods f
An important note here is that ``M`` did not use ``type.__new__`` and
instead used ``super()``. This is to support cooperative multiple
    inheritance which is needed for ``compose_types`` to work as intended.
After we have composed these types ``M.__new__``\'s super will actually
go to ``ABCMeta.__new__`` and not ``type.__new__``.
    Always using ``super()`` to dispatch to your superclass is best practice
    anyway, so most classes should compose without much special consideration.
"""
if not cs:
# if there are no types to compose then just return the single type
return a
mcls = (a,) + cs
return type(
'compose_types(%s)' % ', '.join(map(attrgetter('__name__'), mcls)),
mcls,
{},
)
def with_metaclasses(metaclasses, *bases):
"""Make a class inheriting from ``bases`` whose metaclass inherits from
all of ``metaclasses``.
Like :func:`six.with_metaclass`, but allows multiple metaclasses.
Parameters
----------
metaclasses : iterable[type]
A tuple of types to use as metaclasses.
*bases : tuple[type]
A tuple of types to use as bases.
Returns
-------
base : type
A subtype of ``bases`` whose metaclass is a subtype of ``metaclasses``.
Notes
-----
The metaclasses must be written to support cooperative multiple
inheritance. This means that they must delegate all calls to ``super()``
instead of inlining their super class by name.
"""
return six.with_metaclass(compose_types(*metaclasses), *bases) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/metautils.py | metautils.py |
from collections import OrderedDict
from datetime import datetime
from warnings import (
catch_warnings,
filterwarnings,
)
import numpy as np
from numpy import (
broadcast,
busday_count,
datetime64,
diff,
dtype,
empty,
flatnonzero,
hstack,
isnan,
nan,
vectorize,
where
)
from numpy.lib.stride_tricks import as_strided
from toolz import flip
uint8_dtype = dtype('uint8')
bool_dtype = dtype('bool')
int64_dtype = dtype('int64')
float32_dtype = dtype('float32')
float64_dtype = dtype('float64')
complex128_dtype = dtype('complex128')
datetime64D_dtype = dtype('datetime64[D]')
datetime64ns_dtype = dtype('datetime64[ns]')
object_dtype = dtype('O')
# We use object arrays for strings.
categorical_dtype = object_dtype
make_datetime64ns = flip(datetime64, 'ns')
make_datetime64D = flip(datetime64, 'D')
NaTmap = {
dtype('datetime64[%s]' % unit): datetime64('NaT', unit)
for unit in ('ns', 'us', 'ms', 's', 'm', 'D')
}
def NaT_for_dtype(dtype):
"""Retrieve NaT with the same units as ``dtype``.
Parameters
----------
dtype : dtype-coercable
The dtype to lookup the NaT value for.
Returns
-------
NaT : dtype
The NaT value for the given dtype.
"""
return NaTmap[np.dtype(dtype)]
NaTns = NaT_for_dtype(datetime64ns_dtype)
NaTD = NaT_for_dtype(datetime64D_dtype)
_FILLVALUE_DEFAULTS = {
bool_dtype: False,
float32_dtype: nan,
float64_dtype: nan,
datetime64ns_dtype: NaTns,
object_dtype: None,
}
INT_DTYPES_BY_SIZE_BYTES = OrderedDict([
(1, dtype('int8')),
(2, dtype('int16')),
(4, dtype('int32')),
(8, dtype('int64')),
])
UNSIGNED_INT_DTYPES_BY_SIZE_BYTES = OrderedDict([
(1, dtype('uint8')),
(2, dtype('uint16')),
(4, dtype('uint32')),
(8, dtype('uint64')),
])
def int_dtype_with_size_in_bytes(size):
try:
return INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError("No integral dtype whose size is %d bytes." % size)
def unsigned_int_dtype_with_size_in_bytes(size):
try:
return UNSIGNED_INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError(
"No unsigned integral dtype whose size is %d bytes." % size
)
class NoDefaultMissingValue(Exception):
pass
def make_kind_check(python_types, numpy_kind):
"""
Make a function that checks whether a scalar or array is of a given kind
(e.g. float, int, datetime, timedelta).
"""
def check(value):
if hasattr(value, 'dtype'):
return value.dtype.kind == numpy_kind
return isinstance(value, python_types)
return check
is_float = make_kind_check(float, 'f')
is_int = make_kind_check(int, 'i')
is_datetime = make_kind_check(datetime, 'M')
is_object = make_kind_check(object, 'O')
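# Illustrative usage sketch (not part of the original module): the kind checks
# accept both python scalars and numpy arrays by inspecting ``dtype.kind``
# when it is available.
def _example_kind_checks():
    assert is_float(1.5) and is_float(np.array([1.0, 2.0]))
    assert is_int(3) and not is_int(np.array([1.0]))
    assert is_datetime(datetime(2014, 1, 1))
    return True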
def coerce_to_dtype(dtype, value):
"""
Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
"""
name = dtype.name
if name.startswith('datetime64'):
if name == 'datetime64[D]':
return make_datetime64D(value)
elif name == 'datetime64[ns]':
return make_datetime64ns(value)
else:
raise TypeError(
"Don't know how to coerce values of dtype %s" % dtype
)
return dtype.type(value)
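# Illustrative usage sketch (not part of the original module): coercing values
# to specific numpy dtypes, including the two supported datetime units.
def _example_coerce_to_dtype():
    assert coerce_to_dtype(int64_dtype, '3') == 3
    day = coerce_to_dtype(datetime64D_dtype, '2014-01-02')
    return day  # numpy.datetime64('2014-01-02')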
def default_missing_value_for_dtype(dtype):
"""
Get the default fill value for `dtype`.
"""
try:
return _FILLVALUE_DEFAULTS[dtype]
except KeyError:
raise NoDefaultMissingValue(
"No default value registered for dtype %s." % dtype
)
def repeat_first_axis(array, count):
"""
Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
    -----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, (count,) + array.shape, (0,) + array.strides)
def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
    -----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,))
def rolling_window(array, length):
"""
Restride an array of shape
(X_0, ... X_N)
into an array of shape
(length, X_0 - length + 1, ... X_N)
where each slice at index i along the first axis is equivalent to
result[i] = array[length * i:length * (i + 1)]
Parameters
----------
array : np.ndarray
The base array.
length : int
Length of the synthetic first axis to generate.
Returns
-------
out : np.ndarray
Example
-------
>>> from numpy import arange
>>> a = arange(25).reshape(5, 5)
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> rolling_window(a, 2)
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9]],
<BLANKLINE>
[[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
<BLANKLINE>
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
<BLANKLINE>
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]]])
"""
orig_shape = array.shape
if not orig_shape:
raise IndexError("Can't restride a scalar.")
elif orig_shape[0] <= length:
raise IndexError(
"Can't restride array of shape {shape} with"
" a window length of {len}".format(
shape=orig_shape,
len=length,
)
)
num_windows = (orig_shape[0] - length + 1)
new_shape = (num_windows, length) + orig_shape[1:]
new_strides = (array.strides[0],) + array.strides
return as_strided(array, new_shape, new_strides)
# Sentinel value that isn't NaT.
_notNaT = make_datetime64D(0)
iNaT = NaTns.view(int64_dtype)
assert iNaT == NaTD.view(int64_dtype), "iNaTns != iNaTD"
def isnat(obj):
"""
Check if a value is np.NaT.
"""
if obj.dtype.kind not in ('m', 'M'):
raise ValueError("%s is not a numpy datetime or timedelta")
return obj.view(int64_dtype) == iNaT
def is_missing(data, missing_value):
"""
Generic is_missing function that handles NaN and NaT.
"""
if is_float(data) and isnan(missing_value):
return isnan(data)
elif is_datetime(data) and isnat(missing_value):
return isnat(data)
return (data == missing_value)
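# Illustrative usage sketch (not part of the original module): ``is_missing``
# picks the right comparison for NaN, NaT, and ordinary sentinel values.
def _example_is_missing():
    floats = np.array([1.0, nan, 3.0])
    dates = np.array(['2014-01-01', 'NaT'], dtype='datetime64[ns]')
    assert list(is_missing(floats, nan)) == [False, True, False]
    assert list(is_missing(dates, NaTns)) == [False, True]
    return True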
def busday_count_mask_NaT(begindates, enddates, out=None):
"""
    Simple wrapper around numpy.busday_count that returns `float` arrays
    rather than int arrays, and handles `NaT`s by returning `NaN`s where the
    inputs were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count
"""
if out is None:
out = empty(broadcast(begindates, enddates).shape, dtype=float)
beginmask = isnat(begindates)
endmask = isnat(enddates)
out = busday_count(
# Temporarily fill in non-NaT values.
where(beginmask, _notNaT, begindates),
where(endmask, _notNaT, enddates),
out=out,
)
    # Fill in entries where either input was NaT with nan in the output.
out[beginmask | endmask] = nan
return out
class WarningContext(object):
"""
Re-usable contextmanager for contextually managing warnings.
"""
def __init__(self, *warning_specs):
self._warning_specs = warning_specs
self._catchers = []
def __enter__(self):
catcher = catch_warnings()
catcher.__enter__()
self._catchers.append(catcher)
for args, kwargs in self._warning_specs:
filterwarnings(*args, **kwargs)
return self
def __exit__(self, *exc_info):
catcher = self._catchers.pop()
return catcher.__exit__(*exc_info)
def ignore_nanwarnings():
"""
Helper for building a WarningContext that ignores warnings from numpy's
nanfunctions.
"""
return WarningContext(
(
('ignore',),
{'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'},
)
)
def vectorized_is_element(array, choices):
"""
Check if each element of ``array`` is in choices.
Parameters
----------
array : np.ndarray
choices : object
Object implementing __contains__.
Returns
-------
was_element : np.ndarray[bool]
Array indicating whether each element of ``array`` was in ``choices``.
"""
return vectorize(choices.__contains__, otypes=[bool])(array)
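# Illustrative usage sketch (not part of the original module): any object
# implementing ``__contains__`` can serve as ``choices``.
def _example_vectorized_is_element():
    return vectorized_is_element(np.array([1, 2, 3, 4]), {2, 4})
    # -> array([False, True, False, True])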
def as_column(a):
"""
Convert an array of shape (N,) into an array of shape (N, 1).
This is equivalent to `a[:, np.newaxis]`.
Parameters
----------
a : np.ndarray
Example
-------
>>> import numpy as np
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> as_column(a)
array([[0],
[1],
[2],
[3],
[4]])
>>> as_column(a).shape
(5, 1)
"""
if a.ndim != 1:
raise ValueError(
"as_column expected an 1-dimensional array, "
"but got an array of shape %s" % a.shape
)
return a[:, None]
def changed_locations(a, include_first):
"""
Compute indices of values in ``a`` that differ from the previous value.
Parameters
----------
a : np.ndarray
        The array in which to find the indices of change.
include_first : bool
Whether or not to consider the first index of the array as "changed".
Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4])
"""
if a.ndim > 1:
raise ValueError("indices_of_changed_values only supports 1D arrays.")
indices = flatnonzero(diff(a)) + 1
if not include_first:
return indices
return hstack([[0], indices]) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/numpy_utils.py | numpy_utils.py |
from collections import MutableMapping
import errno
import os
import pickle
from distutils import dir_util
from shutil import rmtree, move
from tempfile import mkdtemp, NamedTemporaryFile
import pandas as pd
from .context_tricks import nop_context
from .paths import ensure_directory
class Expired(Exception):
"""Marks that a :class:`CachedObject` has expired.
"""
class CachedObject(object):
"""
A simple struct for maintaining a cached object with an expiration date.
Parameters
----------
value : object
The object to cache.
expires : datetime-like
Expiration date of `value`. The cache is considered invalid for dates
**strictly greater** than `expires`.
Usage
-----
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> obj = CachedObject(1, expires)
>>> obj.unwrap(expires - Timedelta('1 minute'))
1
>>> obj.unwrap(expires)
1
>>> obj.unwrap(expires + Timedelta('1 minute'))
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Expired: 2014-01-01 00:00:00+00:00
"""
def __init__(self, value, expires):
self._value = value
self._expires = expires
def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
"""
if dt > self._expires:
raise Expired(self._expires)
return self._value
def _unsafe_get_value(self):
"""You almost certainly shouldn't use this."""
return self._value
class ExpiringCache(object):
"""
    A cache of multiple CachedObjects, which returns the wrapped value
or raises and deletes the CachedObject if the value has expired.
Parameters
----------
cache : dict-like, optional
An instance of a dict-like object which needs to support at least:
        `__delitem__`, `__getitem__`, `__setitem__`.
        If `None`, then a dict is used as a default.
Usage
-----
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> value = 1
>>> cache = ExpiringCache()
>>> cache.set('foo', value, expires)
>>> cache.get('foo', expires - Timedelta('1 minute'))
1
>>> cache.get('foo', expires + Timedelta('1 minute'))
Traceback (most recent call last):
...
KeyError: 'foo'
"""
def __init__(self, cache=None):
if cache is not None:
self._cache = cache
else:
self._cache = {}
def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
"""
try:
return self._cache[key].unwrap(dt)
except Expired:
del self._cache[key]
raise KeyError(key)
def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
"""
self._cache[key] = CachedObject(value, expiration_dt)
class dataframe_cache(MutableMapping):
"""A disk-backed cache for dataframes.
``dataframe_cache`` is a mutable mapping from string names to pandas
DataFrame objects.
This object may be used as a context manager to delete the cache directory
on exit.
Parameters
----------
path : str, optional
The directory path to the cache. Files will be written as
``path/<keyname>``.
lock : Lock, optional
Thread lock for multithreaded/multiprocessed access to the cache.
If not provided no locking will be used.
clean_on_failure : bool, optional
Should the directory be cleaned up if an exception is raised in the
context manager.
    serialization : {'msgpack', 'pickle:<n>'}, optional
How should the data be serialized. If ``'pickle'`` is passed, an
optional pickle protocol can be passed like: ``'pickle:3'`` which says
to use pickle protocol 3.
Notes
-----
The syntax ``cache[:]`` will load all key:value pairs into memory as a
dictionary.
The cache uses a temporary file format that is subject to change between
versions of zipline.
"""
def __init__(self,
path=None,
lock=None,
clean_on_failure=True,
serialization='msgpack'):
self.path = path if path is not None else mkdtemp()
self.lock = lock if lock is not None else nop_context
self.clean_on_failure = clean_on_failure
if serialization == 'msgpack':
self.serialize = pd.DataFrame.to_msgpack
self.deserialize = pd.read_msgpack
self._protocol = None
else:
s = serialization.split(':', 1)
if s[0] != 'pickle':
raise ValueError(
"'serialization' must be either 'msgpack' or 'pickle[:n]'",
)
self._protocol = int(s[1]) if len(s) == 2 else None
self.serialize = self._serialize_pickle
self.deserialize = pickle.load
ensure_directory(self.path)
def _serialize_pickle(self, df, path):
with open(path, 'wb') as f:
pickle.dump(df, f, protocol=self._protocol)
def _keypath(self, key):
return os.path.join(self.path, key)
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
if not (self.clean_on_failure or value is None):
# we are not cleaning up after a failure and there was an exception
return
with self.lock:
rmtree(self.path)
def __getitem__(self, key):
if key == slice(None):
return dict(self.items())
with self.lock:
try:
with open(self._keypath(key), 'rb') as f:
return self.deserialize(f)
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise KeyError(key)
def __setitem__(self, key, value):
with self.lock:
self.serialize(value, self._keypath(key))
def __delitem__(self, key):
with self.lock:
try:
os.remove(self._keypath(key))
except OSError as e:
if e.errno == errno.ENOENT:
# raise a keyerror if this directory did not exist
raise KeyError(key)
# reraise the actual oserror otherwise
raise
def __iter__(self):
return iter(os.listdir(self.path))
def __len__(self):
return len(os.listdir(self.path))
def __repr__(self):
return '<%s: keys={%s}>' % (
type(self).__name__,
', '.join(map(repr, sorted(self))),
)
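# Illustrative usage sketch (not part of the original module): using the cache
# as a context manager against a temporary directory. The frame and key below
# are hypothetical; the backing directory is removed when the block exits.
def _example_dataframe_cache():
    with dataframe_cache(serialization='pickle') as cache:
        cache['prices'] = pd.DataFrame({'close': [10.0, 10.5]})
        frame = cache['prices']
        keys = sorted(cache)
    return frame, keys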
class working_file(object):
"""A context manager for managing a temporary file that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to move the file when committing.
*args, **kwargs
Forwarded to NamedTemporaryFile.
Notes
-----
The file is moved on __exit__ if there are no exceptions.
``working_file`` uses :func:`shutil.move` to move the actual files,
    meaning its guarantees are as strong as those of :func:`shutil.move`.
"""
def __init__(self, final_path, *args, **kwargs):
self._tmpfile = NamedTemporaryFile(delete=False, *args, **kwargs)
self._final_path = final_path
@property
def path(self):
"""Alias for ``name`` to be consistent with
:class:`~zipline.utils.cache.working_dir`.
"""
return self._tmpfile.name
def _commit(self):
"""Sync the temporary file to the final path.
"""
move(self.path, self._final_path)
def __enter__(self):
self._tmpfile.__enter__()
return self
def __exit__(self, *exc_info):
self._tmpfile.__exit__(*exc_info)
if exc_info[0] is None:
self._commit()
class working_dir(object):
"""A context manager for managing a temporary directory that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to move the file when committing.
*args, **kwargs
Forwarded to tmp_dir.
Notes
-----
    The directory is moved on __exit__ if there are no exceptions.
    ``working_dir`` uses :func:`dir_util.copy_tree` to move the actual files,
    meaning its guarantees are as strong as those of :func:`dir_util.copy_tree`.
"""
def __init__(self, final_path, *args, **kwargs):
self.path = mkdtemp()
self._final_path = final_path
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path
def getpath(self, *path_parts):
"""Get a path relative to the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
return os.path.join(self.path, *path_parts)
def _commit(self):
"""Sync the temporary directory to the final path.
"""
dir_util.copy_tree(self.path, self._final_path)
def __enter__(self):
return self
def __exit__(self, *exc_info):
if exc_info[0] is None:
self._commit()
rmtree(self.path) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/cache.py | cache.py |
from contextlib import contextmanager
from itertools import product
import operator as op
import warnings
import pandas as pd
from distutils.version import StrictVersion
pandas_version = StrictVersion(pd.__version__)
def july_5th_holiday_observance(datetime_index):
return datetime_index[datetime_index.year != 2013]
def explode(df):
"""
Take a DataFrame and return a triple of
(df.index, df.columns, df.values)
"""
return df.index, df.columns, df.values
def _time_to_micros(time):
"""Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings.
"""
seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
return 1000000 * seconds + time.microsecond
_opmap = dict(zip(
product((True, False), repeat=3),
product((op.le, op.lt), (op.le, op.lt), (op.and_, op.or_)),
))
def mask_between_time(dts, start, end, include_start=True, include_end=True):
"""Return a mask of all of the datetimes in ``dts`` that are between
``start`` and ``end``.
Parameters
----------
dts : pd.DatetimeIndex
The index to mask.
start : time
Mask away times less than the start.
end : time
Mask away times greater than the end.
include_start : bool, optional
Inclusive on ``start``.
include_end : bool, optional
Inclusive on ``end``.
Returns
-------
mask : np.ndarray[bool]
A bool array masking ``dts``.
See Also
--------
:meth:`pandas.DatetimeIndex.indexer_between_time`
"""
# This function is adapted from
# `pandas.Datetime.Index.indexer_between_time` which was originally
# written by Wes McKinney, Chang She, and Grant Roch.
time_micros = dts._get_time_micros()
start_micros = _time_to_micros(start)
end_micros = _time_to_micros(end)
left_op, right_op, join_op = _opmap[
bool(include_start),
bool(include_end),
start_micros <= end_micros,
]
return join_op(
left_op(start_micros, time_micros),
right_op(time_micros, end_micros),
)
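# Illustrative usage sketch (not part of the original module): masking a
# minutely index down to a trading-hours window. The dates and times below are
# hypothetical (9:31-16:00 Eastern expressed in UTC for a date in EST).
def _example_mask_between_time():
    from datetime import time
    dts = pd.date_range('2014-01-02', periods=24 * 60, freq='T', tz='UTC')
    mask = mask_between_time(dts, time(14, 31), time(21, 0))
    return dts[mask]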
def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
    LookupError
If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
if dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix
def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
    When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value
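# Illustrative usage sketch (not part of the original module): bracketing a
# date that is present in the index. The dates below are hypothetical.
def _example_nearest_unequal_elements():
    dts = pd.to_datetime(['2014-01-02', '2014-01-03', '2014-01-06'])
    before, after = nearest_unequal_elements(dts, pd.Timestamp('2014-01-03'))
    return before, after  # Timestamp('2014-01-02'), Timestamp('2014-01-06')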
def timedelta_to_integral_seconds(delta):
"""
Convert a pd.Timedelta to a number of seconds as an int.
"""
return int(delta.total_seconds())
def timedelta_to_integral_minutes(delta):
"""
Convert a pd.Timedelta to a number of minutes as an int.
"""
return timedelta_to_integral_seconds(delta) // 60
@contextmanager
def ignore_pandas_nan_categorical_warning():
with warnings.catch_warnings():
        # Pandas >= 0.18 doesn't like null-ish values in categories, but
# avoiding that requires a broader change to how missing values are
# handled in pipeline, so for now just silence the warning.
warnings.filterwarnings(
'ignore',
category=FutureWarning,
)
yield
_INDEXER_NAMES = [
'_' + name for (name, _) in pd.core.indexing.get_indexers_list()
]
def clear_dataframe_indexer_caches(df):
"""
Clear cached attributes from a pandas DataFrame.
    By default pandas memoizes indexer objects (`iloc`, `loc`, `ix`, etc.) on
DataFrames, resulting in refcycles that can lead to unexpectedly long-lived
DataFrames. This function attempts to clear those cycles by deleting the
cached indexers from the frame.
Parameters
----------
df : pd.DataFrame
"""
for attr in _INDEXER_NAMES:
try:
delattr(df, attr)
except AttributeError:
pass | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/pandas_utils.py | pandas_utils.py |
from errno import EEXIST
import os
from os.path import exists, expanduser, join
import pandas as pd
def hidden(path):
"""Check if a path is hidden.
Parameters
----------
path : str
A filepath.
"""
return os.path.split(path)[1].startswith('.')
def ensure_directory(path):
"""
Ensure that a directory named "path" exists.
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(path):
return
raise
def ensure_directory_containing(path):
"""
Ensure that the directory containing `path` exists.
This is just a convenience wrapper for doing::
ensure_directory(os.path.dirname(path))
"""
ensure_directory(os.path.dirname(path))
def ensure_file(path):
"""
Ensure that a file exists. This will create any parent directories needed
    and create an empty file if it does not exist.
Parameters
----------
path : str
The file path to ensure exists.
"""
ensure_directory_containing(path)
open(path, 'a+').close() # touch the file
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def modified_since(path, dt):
"""
Check whether `path` was modified since `dt`.
Returns False if path doesn't exist.
Parameters
----------
path : str
Path to the file to be checked.
dt : pd.Timestamp
The date against which to compare last_modified_time(path).
Returns
-------
was_modified : bool
        Will be ``False`` if path doesn't exist, or if its last modified date
        is earlier than or equal to `dt`.
"""
return exists(path) and last_modified_time(path) > dt
def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root
def zipline_path(paths, environ=None):
"""
Get a path relative to the zipline root.
Parameters
----------
paths : list[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline root.
"""
return join(zipline_root(environ=environ), *paths)
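# Illustrative usage sketch (not part of the original module): the ``environ``
# argument makes the root easy to fake in tests. The path below is
# hypothetical.
def _example_zipline_path():
    fake_environ = {'ZIPLINE_ROOT': '/tmp/zipline-test-root'}
    return zipline_path(['data', 'example.sqlite'], environ=fake_environ)
    # -> '/tmp/zipline-test-root/data/example.sqlite'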
def default_extension(environ=None):
"""
Get the path to the default zipline extension file.
Parameters
----------
environ : dict, optional
        An environment dict to forward to zipline_root.
Returns
-------
default_extension_path : str
The file path to the default zipline extension file.
"""
return zipline_path(['extension.py'], environ=environ)
def data_root(environ=None):
"""
The root directory for zipline data files.
Parameters
----------
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
data_root : str
The zipline data root.
"""
return zipline_path(['data'], environ=environ)
def ensure_data_root(environ=None):
"""
Ensure that the data root exists.
"""
ensure_directory(data_root(environ=environ))
def data_path(paths, environ=None):
"""
Get a path relative to the zipline data directory.
Parameters
----------
paths : iterable[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline data root.
"""
return zipline_path(['data'] + list(paths), environ=environ)
def cache_root(environ=None):
"""
The root directory for zipline cache files.
Parameters
----------
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
cache_root : str
The zipline cache root.
"""
return zipline_path(['cache'], environ=environ)
def ensure_cache_root(environ=None):
"""
    Ensure that the cache root exists.
"""
ensure_directory(cache_root(environ=environ))
def cache_path(paths, environ=None):
"""
Get a path relative to the zipline cache directory.
Parameters
----------
paths : iterable[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline cache root.
"""
return zipline_path(['cache'] + list(paths), environ=environ) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/paths.py | paths.py |
from collections import namedtuple
import inspect
from itertools import chain
from six.moves import map, zip_longest
from zipline.errors import ZiplineError
Argspec = namedtuple('Argspec', ['args', 'starargs', 'kwargs'])
def singleton(cls):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
@singleton
class Ignore(object):
def __str__(self):
return 'Argument.ignore'
__repr__ = __str__
@singleton
class NoDefault(object):
def __str__(self):
return 'Argument.no_default'
__repr__ = __str__
@singleton
class AnyDefault(object):
def __str__(self):
return 'Argument.any_default'
__repr__ = __str__
class Argument(namedtuple('Argument', ['name', 'default'])):
"""
An argument to a function.
Argument.no_default is a value representing no default to the argument.
Argument.ignore is a value that says you should ignore the default value.
"""
no_default = NoDefault()
any_default = AnyDefault()
ignore = Ignore()
def __new__(cls, name=ignore, default=ignore):
return super(Argument, cls).__new__(cls, name, default)
def __str__(self):
if self.has_no_default(self) or self.ignore_default(self):
return str(self.name)
else:
return '='.join([str(self.name), str(self.default)])
def __repr__(self):
return 'Argument(%s, %s)' % (repr(self.name), repr(self.default))
def _defaults_match(self, arg):
return any(map(Argument.ignore_default, [self, arg])) \
or (self.default is Argument.any_default and
arg.default is not Argument.no_default) \
or (arg.default is Argument.any_default and
self.default is not Argument.no_default) \
or self.default == arg.default
def _names_match(self, arg):
return self.name == arg.name \
or self.name is Argument.ignore \
or arg.name is Argument.ignore
def matches(self, arg):
return self._names_match(arg) and self._defaults_match(arg)
__eq__ = matches
@staticmethod
def parse_argspec(callable_):
"""
Takes a callable and returns a tuple with the list of Argument objects,
the name of *args, and the name of **kwargs.
If *args or **kwargs is not present, it will be None.
This returns a namedtuple called Argspec that has three fields named:
args, starargs, and kwargs.
"""
args, varargs, keywords, defaults = inspect.getargspec(callable_)
defaults = list(defaults or [])
if getattr(callable_, '__self__', None) is not None:
# This is a bound method, drop the self param.
args = args[1:]
first_default = len(args) - len(defaults)
return Argspec(
[Argument(arg, Argument.no_default
if n < first_default else defaults[n - first_default])
for n, arg in enumerate(args)],
varargs,
keywords,
)
@staticmethod
def has_no_default(arg):
return arg.default is Argument.no_default
@staticmethod
def ignore_default(arg):
return arg.default is Argument.ignore
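# Illustrative sketch of ``Argument.parse_argspec`` on a hypothetical function:
#
#     def f(a, b=1, *args, **kwargs):
#         pass
#
#     Argument.parse_argspec(f)
#     # -> Argspec(args=[Argument('a', Argument.no_default), Argument('b', 1)],
#     #            starargs='args', kwargs='kwargs')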
def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
"""
    Checks for the presence of an extra to the argument list. Raises exceptions
    if this is unexpected or if it is missing and expected.
"""
if present:
if not expected:
raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
raise exc_missing(*exc_args)
def verify_callable_argspec(callable_,
expected_args=Argument.ignore,
expect_starargs=Argument.ignore,
expect_kwargs=Argument.ignore):
"""
Checks the callable_ to make sure that it satisfies the given
expectations.
expected_args should be an iterable of Arguments in the order you expect to
receive them.
expect_starargs means that the function should or should not take a *args
param. expect_kwargs says the callable should or should not take **kwargs
param.
If expected_args, expect_starargs, or expect_kwargs is Argument.ignore,
then the checks related to that argument will not occur.
Example usage:
callable_check(
f,
[Argument('a'), Argument('b', 1)],
expect_starargs=True,
expect_kwargs=Argument.ignore
)
"""
if not callable(callable_):
raise NotCallable(callable_)
expected_arg_list = list(
expected_args if expected_args is not Argument.ignore else []
)
args, starargs, kwargs = Argument.parse_argspec(callable_)
exc_args = callable_, args, starargs, kwargs
# Check the *args.
_expect_extra(
expect_starargs,
starargs,
UnexpectedStarargs,
NoStarargs,
exc_args,
)
# Check the **kwargs.
_expect_extra(
expect_kwargs,
kwargs,
UnexpectedKwargs,
NoKwargs,
exc_args,
)
if expected_args is Argument.ignore:
# Ignore the argument list checks.
return
if len(args) < len(expected_arg_list):
# One or more argument that we expected was not present.
raise NotEnoughArguments(
callable_,
args,
starargs,
kwargs,
[arg for arg in expected_arg_list if arg not in args],
)
elif len(args) > len(expected_arg_list):
raise TooManyArguments(
callable_, args, starargs, kwargs
)
# Empty argument that will not match with any actual arguments.
missing_arg = Argument(object(), object())
for expected, provided in zip_longest(expected_arg_list,
args,
fillvalue=missing_arg):
if not expected.matches(provided):
raise MismatchedArguments(
callable_, args, starargs, kwargs
)
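# Illustrative sketch: validating a hypothetical callback's signature. The first
# call passes; the second raises NotEnoughArguments because 'data' is missing.
#
#     def handler(context, data):
#         pass
#
#     verify_callable_argspec(
#         handler, expected_args=[Argument('context'), Argument('data')])
#
#     verify_callable_argspec(
#         lambda context: None,
#         expected_args=[Argument('context'), Argument('data')])  # raises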
class BadCallable(TypeError, AssertionError, ZiplineError):
"""
The given callable is not structured in the expected way.
"""
_lambda_name = (lambda: None).__name__
def __init__(self, callable_, args, starargs, kwargs):
self.callable_ = callable_
self.args = args
self.starargs = starargs
self.kwargsname = kwargs
self.kwargs = {}
def format_callable(self):
if self.callable_.__name__ == self._lambda_name:
fmt = '%s %s'
name = 'lambda'
else:
fmt = '%s(%s)'
name = self.callable_.__name__
return fmt % (
name,
', '.join(
chain(
(str(arg) for arg in self.args),
('*' + sa for sa in (self.starargs,) if sa is not None),
('**' + ka for ka in (self.kwargsname,) if ka is not None),
)
)
)
@property
def msg(self):
return str(self)
class NoStarargs(BadCallable):
def __str__(self):
return '%s does not allow for *args' % self.format_callable()
class UnexpectedStarargs(BadCallable):
def __str__(self):
return '%s should not allow for *args' % self.format_callable()
class NoKwargs(BadCallable):
def __str__(self):
return '%s does not allow for **kwargs' % self.format_callable()
class UnexpectedKwargs(BadCallable):
def __str__(self):
return '%s should not allow for **kwargs' % self.format_callable()
class NotCallable(BadCallable):
"""
The provided 'callable' is not actually a callable.
"""
def __init__(self, callable_):
self.callable_ = callable_
def __str__(self):
return '%s is not callable' % self.format_callable()
def format_callable(self):
try:
return self.callable_.__name__
except AttributeError:
return str(self.callable_)
class NotEnoughArguments(BadCallable):
"""
The callback does not accept enough arguments.
"""
def __init__(self, callable_, args, starargs, kwargs, missing_args):
super(NotEnoughArguments, self).__init__(
callable_, args, starargs, kwargs
)
self.missing_args = missing_args
def __str__(self):
missing_args = list(map(str, self.missing_args))
return '%s is missing argument%s: %s' % (
self.format_callable(),
's' if len(missing_args) > 1 else '',
', '.join(missing_args),
)
class TooManyArguments(BadCallable):
"""
The callback cannot be called by passing the expected number of arguments.
"""
def __str__(self):
return '%s accepts too many arguments' % self.format_callable()
class MismatchedArguments(BadCallable):
"""
    The argument lists are of the same length, but not in the correct order.
"""
def __str__(self):
return '%s accepts mismatched parameters' % self.format_callable() | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/argcheck.py | argcheck.py |
from six import BytesIO
import pickle
from functools import partial
from zipline.assets import AssetFinder
from zipline.finance.trading import TradingEnvironment
# Label for the serialization version field in the state returned by
# __getstate__.
VERSION_LABEL = '_stateversion_'
CHECKSUM_KEY = '__state_checksum'
def _persistent_id(obj):
if isinstance(obj, AssetFinder):
return AssetFinder.PERSISTENT_TOKEN
if isinstance(obj, TradingEnvironment):
return TradingEnvironment.PERSISTENT_TOKEN
return None
def _persistent_load(persid, env):
if persid == AssetFinder.PERSISTENT_TOKEN:
return env.asset_finder
if persid == TradingEnvironment.PERSISTENT_TOKEN:
return env
def dumps_with_persistent_ids(obj, protocol=None):
"""
Performs a pickle dumps on the given object, substituting all references to
a TradingEnvironment or AssetFinder with tokenized representations.
All arguments are passed to pickle.Pickler and are described therein.
"""
file = BytesIO()
pickler = pickle.Pickler(file, protocol)
pickler.persistent_id = _persistent_id
pickler.dump(obj)
return file.getvalue()
def loads_with_persistent_ids(str, env):
"""
Performs a pickle loads on the given string, substituting the given
    TradingEnvironment into any tokenized representations of a
TradingEnvironment or AssetFinder.
Parameters
----------
str : String
The string representation of the object to be unpickled.
env : TradingEnvironment
        The TradingEnvironment to be inserted into the unpickled object.
Returns
-------
obj
An unpickled object formed from the parameter 'str'.
"""
file = BytesIO(str)
unpickler = pickle.Unpickler(file)
unpickler.persistent_load = partial(_persistent_load, env=env)
return unpickler.load()
def load_context(state_file_path, context, checksum):
with open(state_file_path, 'rb') as f:
try:
loaded_state = pickle.load(f)
except (pickle.UnpicklingError, IndexError):
raise ValueError("Corrupt state file: {}".format(state_file_path))
else:
if CHECKSUM_KEY not in loaded_state or \
loaded_state[CHECKSUM_KEY] != checksum:
raise TypeError("Checksum mismatch during state load. "
"The given state file was not created "
"for the algorithm in use")
else:
del loaded_state[CHECKSUM_KEY]
for k, v in loaded_state.items():
setattr(context, k, v)
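# Illustrative round trip with ``store_context`` (defined just below); the file
# name, checksum, and context object here are hypothetical:
#
#     class Context(object):
#         pass
#
#     ctx = Context()
#     ctx.counter = 3
#     store_context('algo.state', ctx, checksum='abc123', exclude_list=[])
#
#     fresh = Context()
#     load_context('algo.state', fresh, checksum='abc123')
#     assert fresh.counter == 3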
def store_context(state_file_path, context, checksum, exclude_list):
state = {}
fields_to_store = list(set(context.__dict__.keys()) -
set(exclude_list))
for field in fields_to_store:
state[field] = getattr(context, field)
state[CHECKSUM_KEY] = checksum
with open(state_file_path, 'wb') as f:
# Forcing v2 protocol for compatibility between py2 and py3
pickle.dump(state, f, protocol=2) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/serialization_utils.py | serialization_utils.py |
import zipline.utils.factory as factory
from zipline.testing.core import create_data_portal_from_trade_history
from zipline.test_algorithms import TestAlgorithm
from zipline.utils.calendars import get_calendar
def create_test_zipline(**config):
"""
:param config: A configuration object that is a dict with:
- sid - an integer, which will be used as the asset ID.
- order_count - the number of orders the test algo will place,
defaults to 100
- order_amount - the number of shares per order, defaults to 100
- trade_count - the number of trades to simulate, defaults to 101
to ensure all orders are processed.
- algorithm - optional parameter providing an algorithm. defaults
to :py:class:`zipline.test.algorithms.TestAlgorithm`
- trade_source - optional parameter to specify trades, if present.
If not present :py:class:`zipline.sources.SpecificEquityTrades`
is the source, with daily frequency in trades.
- slippage: optional parameter that configures the
:py:class:`zipline.gens.tradingsimulation.TransactionSimulator`.
          Expects an object with a simulate method, such as
:py:class:`zipline.gens.tradingsimulation.FixedSlippage`.
:py:mod:`zipline.finance.trading`
"""
assert isinstance(config, dict)
try:
sid_list = config['sid_list']
except KeyError:
try:
sid_list = [config['sid']]
except KeyError:
raise Exception("simfactory create_test_zipline() requires "
"argument 'sid_list' or 'sid'")
concurrent_trades = config.get('concurrent_trades', False)
order_count = config.get('order_count', 100)
order_amount = config.get('order_amount', 100)
trading_calendar = config.get('trading_calendar', get_calendar("NYSE"))
# -------------------
# Create the Algo
# -------------------
if 'algorithm' in config:
test_algo = config['algorithm']
else:
test_algo = TestAlgorithm(
sid_list[0],
order_amount,
order_count,
sim_params=config.get('sim_params',
factory.create_simulation_parameters()),
trading_calendar=trading_calendar,
slippage=config.get('slippage'),
identifiers=sid_list
)
# -------------------
# Trade Source
# -------------------
if 'skip_data' not in config:
if 'trade_source' in config:
trade_source = config['trade_source']
else:
trade_source = factory.create_daily_trade_source(
sid_list,
test_algo.sim_params,
test_algo.trading_environment,
trading_calendar,
concurrent=concurrent_trades,
)
trades_by_sid = {}
for trade in trade_source:
if trade.sid not in trades_by_sid:
trades_by_sid[trade.sid] = []
trades_by_sid[trade.sid].append(trade)
data_portal = create_data_portal_from_trade_history(
config['env'].asset_finder,
trading_calendar,
config['tempdir'],
config['sim_params'],
trades_by_sid
)
test_algo.data_portal = data_portal
# -------------------
# Benchmark source
# -------------------
test_algo.benchmark_return_source = config.get('benchmark_source', None)
# ------------------
# generator/simulator
sim = test_algo.get_generator()
return sim | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/simfactory.py | simfactory.py |
@object.__new__
class nop_context(object):
"""A nop context manager.
"""
def __enter__(self):
pass
def __exit__(self, *excinfo):
pass
def _nop(*args, **kwargs):
pass
class CallbackManager(object):
"""Create a context manager from a pre-execution callback and a
post-execution callback.
Parameters
----------
pre : (...) -> any, optional
A pre-execution callback. This will be passed ``*args`` and
``**kwargs``.
post : (...) -> any, optional
A post-execution callback. This will be passed ``*args`` and
``**kwargs``.
Notes
-----
The enter value of this context manager will be the result of calling
``pre(*args, **kwargs)``
Examples
--------
>>> def pre(where):
... print('entering %s block' % where)
>>> def post(where):
... print('exiting %s block' % where)
>>> manager = CallbackManager(pre, post)
>>> with manager('example'):
... print('inside example block')
entering example block
inside example block
exiting example block
These are reusable with different args:
>>> with manager('another'):
... print('inside another block')
entering another block
inside another block
exiting another block
"""
def __init__(self, pre=None, post=None):
self.pre = pre if pre is not None else _nop
self.post = post if post is not None else _nop
def __call__(self, *args, **kwargs):
return _ManagedCallbackContext(self.pre, self.post, args, kwargs)
# special case, if no extra args are passed make this a context manager
# which forwards no args to pre and post
def __enter__(self):
return self.pre()
def __exit__(self, *excinfo):
self.post()
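# Illustrative sketch of the no-argument special case above: the manager itself
# acts as a context manager, forwarding nothing to ``pre`` and ``post``.
#
#     calls = []
#     manager = CallbackManager(lambda: calls.append('pre'),
#                               lambda: calls.append('post'))
#     with manager:
#         calls.append('body')
#     # calls == ['pre', 'body', 'post']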
class _ManagedCallbackContext(object):
def __init__(self, pre, post, args, kwargs):
self._pre = pre
self._post = post
self._args = args
self._kwargs = kwargs
def __enter__(self):
return self._pre(*self._args, **self._kwargs)
def __exit__(self, *excinfo):
self._post(*self._args, **self._kwargs) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/context_tricks.py | context_tricks.py |
import operator as op
from six import PY2
from toolz import peek
from zipline.utils.functional import foldr
if PY2:
class range(object):
"""Lazy range object with constant time containment check.
The arguments are the same as ``range``.
"""
__slots__ = 'start', 'stop', 'step'
def __init__(self, stop, *args):
if len(args) > 2:
raise TypeError(
'range takes at most 3 arguments (%d given)' % len(args)
)
if not args:
self.start = 0
self.stop = stop
self.step = 1
else:
self.start = stop
self.stop = args[0]
try:
self.step = args[1]
except IndexError:
self.step = 1
if self.step == 0:
raise ValueError('range step must not be zero')
def __iter__(self):
"""
Examples
--------
>>> list(range(1))
[0]
>>> list(range(5))
[0, 1, 2, 3, 4]
>>> list(range(1, 5))
[1, 2, 3, 4]
>>> list(range(0, 5, 2))
[0, 2, 4]
>>> list(range(5, 0, -1))
[5, 4, 3, 2, 1]
>>> list(range(5, 0, 1))
[]
"""
n = self.start
stop = self.stop
step = self.step
cmp_ = op.lt if step > 0 else op.gt
while cmp_(n, stop):
yield n
n += step
_ops = (
(op.gt, op.ge),
(op.le, op.lt),
)
def __contains__(self, other, _ops=_ops):
# Algorithm taken from CPython
# Objects/rangeobject.c:range_contains_long
start = self.start
step = self.step
cmp_start, cmp_stop = _ops[step > 0]
return (
cmp_start(start, other) and
cmp_stop(other, self.stop) and
(other - start) % step == 0
)
del _ops
def __len__(self):
"""
Examples
--------
>>> len(range(1))
1
>>> len(range(5))
5
>>> len(range(1, 5))
4
>>> len(range(0, 5, 2))
3
>>> len(range(5, 0, -1))
5
>>> len(range(5, 0, 1))
0
"""
# Algorithm taken from CPython
# rangeobject.c:compute_range_length
step = self.step
if step > 0:
low = self.start
high = self.stop
else:
low = self.stop
high = self.start
step = -step
if low >= high:
return 0
return (high - low - 1) // step + 1
def __repr__(self):
return '%s(%s, %s%s)' % (
type(self).__name__,
self.start,
self.stop,
(', ' + str(self.step)) if self.step != 1 else '',
)
def __hash__(self):
return hash((type(self), self.start, self.stop, self.step))
def __eq__(self, other):
"""
Examples
--------
>>> range(1) == range(1)
True
>>> range(0, 5, 2) == range(0, 5, 2)
True
>>> range(5, 0, -2) == range(5, 0, -2)
True
>>> range(1) == range(2)
False
>>> range(0, 5, 2) == range(0, 5, 3)
False
"""
return all(
getattr(self, attr) == getattr(other, attr)
for attr in self.__slots__
)
else:
range = range
def from_tuple(tup):
"""Convert a tuple into a range with error handling.
Parameters
----------
tup : tuple (len 2 or 3)
The tuple to turn into a range.
Returns
-------
range : range
The range from the tuple.
Raises
------
ValueError
Raised when the tuple length is not 2 or 3.
"""
if len(tup) not in (2, 3):
raise ValueError(
            'tuple must contain 2 or 3 elements, not: %d (%r)' % (
len(tup),
tup,
),
)
return range(*tup)
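# Illustrative sketch:
#
#     from_tuple((0, 5))      # -> range(0, 5)
#     from_tuple((0, 10, 2))  # -> range(0, 10, 2)
#     from_tuple((1,))        # raises ValueError (needs 2 or 3 elements)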
def maybe_from_tuple(tup_or_range):
"""Convert a tuple into a range but pass ranges through silently.
    This is useful to ensure that input is a range so that attributes may be
    accessed with `.start` and `.stop`, or so that containment checks are
    constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input to convert to a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
"""
if isinstance(tup_or_range, tuple):
return from_tuple(tup_or_range)
elif isinstance(tup_or_range, range):
return tup_or_range
raise ValueError(
'maybe_from_tuple expects a tuple or range, got %r: %r' % (
type(tup_or_range).__name__,
tup_or_range,
),
)
def _check_steps(a, b):
"""Check that the steps of ``a`` and ``b`` are both 1.
Parameters
----------
a : range
The first range to check.
b : range
The second range to check.
Raises
------
ValueError
Raised when either step is not 1.
"""
if a.step != 1:
raise ValueError('a.step must be equal to 1, got: %s' % a.step)
if b.step != 1:
raise ValueError('b.step must be equal to 1, got: %s' % b.step)
def overlap(a, b):
"""Check if two ranges overlap.
Parameters
----------
a : range
The first range.
b : range
The second range.
Returns
-------
overlaps : bool
Do these ranges overlap.
Notes
-----
This function does not support ranges with step != 1.
"""
_check_steps(a, b)
return a.stop >= b.start and b.stop >= a.start
def merge(a, b):
"""Merge two ranges with step == 1.
Parameters
----------
a : range
The first range.
b : range
The second range.
"""
_check_steps(a, b)
return range(min(a.start, b.start), max(a.stop, b.stop))
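# Illustrative sketch for ``overlap`` and ``merge`` (step-1 ranges only):
#
#     overlap(range(0, 5), range(4, 8))  # -> True, they share the point 4
#     overlap(range(0, 3), range(5, 8))  # -> False
#     merge(range(0, 5), range(4, 8))    # -> range(0, 8)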
def _combine(n, rs):
"""helper for ``_group_ranges``
"""
try:
r, rs = peek(rs)
except StopIteration:
yield n
return
if overlap(n, r):
yield merge(n, r)
next(rs)
for r in rs:
yield r
else:
yield n
for r in rs:
yield r
def group_ranges(ranges):
"""Group any overlapping ranges into a single range.
Parameters
----------
ranges : iterable[ranges]
A sorted sequence of ranges to group.
Returns
-------
grouped : iterable[ranges]
A sorted sequence of ranges with overlapping ranges merged together.
"""
return foldr(_combine, ranges, ())
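# Illustrative sketch: overlapping neighbors collapse, disjoint ranges pass
# through unchanged (the input is expected to be sorted by ``start``):
#
#     list(group_ranges([range(0, 3), range(2, 6), range(8, 10)]))
#     # -> [range(0, 6), range(8, 10)]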
def sorted_diff(rs, ss):
try:
r, rs = peek(rs)
except StopIteration:
return
try:
s, ss = peek(ss)
except StopIteration:
for r in rs:
yield r
return
rtup = (r.start, r.stop)
stup = (s.start, s.stop)
if rtup == stup:
next(rs)
next(ss)
elif rtup < stup:
yield next(rs)
else:
next(ss)
for t in sorted_diff(rs, ss):
yield t
def intersecting_ranges(ranges):
"""Return any ranges that intersect.
Parameters
----------
ranges : iterable[ranges]
A sequence of ranges to check for intersections.
Returns
-------
intersections : iterable[ranges]
A sequence of all of the ranges that intersected in ``ranges``.
Examples
--------
>>> ranges = [range(0, 1), range(2, 5), range(4, 7)]
>>> list(intersecting_ranges(ranges))
[range(2, 5), range(4, 7)]
>>> ranges = [range(0, 1), range(2, 3)]
>>> list(intersecting_ranges(ranges))
[]
>>> ranges = [range(0, 1), range(1, 2)]
>>> list(intersecting_ranges(ranges))
[range(0, 1), range(1, 2)]
"""
ranges = sorted(ranges, key=op.attrgetter('start'))
return sorted_diff(ranges, group_ranges(ranges)) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/range.py | range.py |
import os
import re
from runpy import run_path
import sys
import warnings
from functools import partial
import pandas as pd
import click
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
from toolz import valfilter, concatv
from zipline.algorithm import TradingAlgorithm
from zipline.algorithm_live import LiveTradingAlgorithm
from zipline.data.bundles.core import load
from zipline.data.data_portal import DataPortal
from zipline.data.data_portal_live import DataPortalLive
from zipline.finance.trading import TradingEnvironment
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.utils.calendars import get_calendar
from zipline.utils.factory import create_simulation_parameters
import zipline.utils.paths as pth
class _RunAlgoError(click.ClickException, ValueError):
"""Signal an error that should have a different message if invoked from
the cli.
Parameters
----------
pyfunc_msg : str
The message that will be shown when called as a python function.
cmdline_msg : str
The message that will be shown on the command line.
"""
exit_code = 1
def __init__(self, pyfunc_msg, cmdline_msg):
super(_RunAlgoError, self).__init__(cmdline_msg)
self.pyfunc_msg = pyfunc_msg
def __str__(self):
return self.pyfunc_msg
def _run(handle_data,
initialize,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
data,
bundle,
bundle_timestamp,
start,
end,
output,
print_algo,
local_namespace,
environ,
broker,
state_filename,
realtime_bar_target):
"""Run a backtest for the given algorithm.
This is shared between the cli and :func:`zipline.run_algo`.
"""
if algotext is not None:
if local_namespace:
ip = get_ipython() # noqa
namespace = ip.user_ns
else:
namespace = {}
for assign in defines:
try:
name, value = assign.split('=', 2)
except ValueError:
raise ValueError(
'invalid define %r, should be of the form name=value' %
assign,
)
try:
# evaluate in the same namespace so names may refer to
                # each other
namespace[name] = eval(value, namespace)
except Exception as e:
raise ValueError(
'failed to execute definition for name %r: %s' % (name, e),
)
elif defines:
raise _RunAlgoError(
'cannot pass define without `algotext`',
"cannot pass '-D' / '--define' without '-t' / '--algotext'",
)
else:
namespace = {}
if algofile is not None:
algotext = algofile.read()
if print_algo:
if PYGMENTS:
highlight(
algotext,
PythonLexer(),
TerminalFormatter(),
outfile=sys.stdout,
)
else:
click.echo(algotext)
if bundle is not None:
bundle_data = load(
bundle,
environ,
bundle_timestamp,
)
prefix, connstr = re.split(
r'sqlite:///',
str(bundle_data.asset_finder.engine.url),
maxsplit=1,
)
if prefix:
raise ValueError(
"invalid url %r, must begin with 'sqlite:///'" %
str(bundle_data.asset_finder.engine.url),
)
env = TradingEnvironment(asset_db_path=connstr, environ=environ)
first_trading_day =\
bundle_data.equity_minute_bar_reader.first_trading_day
DataPortalClass = (partial(DataPortalLive, broker)
if broker
else DataPortal)
data = DataPortalClass(
env.asset_finder, get_calendar("NYSE"),
first_trading_day=first_trading_day,
equity_minute_reader=bundle_data.equity_minute_bar_reader,
equity_daily_reader=bundle_data.equity_daily_bar_reader,
adjustment_reader=bundle_data.adjustment_reader
)
pipeline_loader = USEquityPricingLoader(
bundle_data.equity_daily_bar_reader,
bundle_data.adjustment_reader,
)
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
raise ValueError(
"No PipelineLoader registered for column %s." % column
)
else:
env = TradingEnvironment(environ=environ)
choose_loader = None
emission_rate = 'daily'
if broker:
emission_rate = 'minute'
start = pd.Timestamp.utcnow()
end = start + pd.Timedelta('2 day')
TradingAlgorithmClass = (partial(LiveTradingAlgorithm,
broker=broker,
state_filename=state_filename,
realtime_bar_target=realtime_bar_target)
if broker else TradingAlgorithm)
perf = TradingAlgorithmClass(
namespace=namespace,
env=env,
get_pipeline_loader=choose_loader,
sim_params=create_simulation_parameters(
start=start,
end=end,
capital_base=capital_base,
emission_rate=emission_rate,
data_frequency=data_frequency,
),
**{
'initialize': initialize,
'handle_data': handle_data,
'before_trading_start': before_trading_start,
'analyze': analyze,
} if algotext is None else {
'algo_filename': getattr(algofile, 'name', '<algorithm>'),
'script': algotext,
}
).run(
data,
overwrite_sim_params=False,
)
if output == '-':
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
# All of the loaded extensions. We don't want to load an extension twice.
_loaded_extensions = set()
def load_extensions(default, extensions, strict, environ, reload=False):
"""Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
        Load the default extension (~/.zipline/extension.py)?
    extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
Should failure to load an extension raise. If this is false it will
still warn.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
"""
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
            # load all of the zipline extensions
if ext.endswith('.py'):
run_path(ext, run_name='<extension>')
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
warnings.warn(
'Failed to load extension: %r\n%s' % (ext, e),
stacklevel=2
)
else:
_loaded_extensions.add(ext)
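# Illustrative sketch (the extension paths and module names are hypothetical):
#
#     load_extensions(
#         default=True,                # also load $ZIPLINE_ROOT/extension.py
#         extensions=['my_bundles.py', 'my_package.zipline_ext'],
#         strict=False,                # warn instead of raising on failure
#         environ=os.environ,
#     )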
def run_algorithm(start,
end,
initialize,
capital_base,
handle_data=None,
before_trading_start=None,
analyze=None,
data_frequency='daily',
data=None,
bundle=None,
bundle_timestamp=None,
default_extension=True,
extensions=(),
strict_extensions=True,
environ=os.environ,
live_trading=False,
tws_uri=None):
"""Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
        The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
        at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
data : pd.DataFrame, pd.Panel, or DataPortal, optional
The ohlcv data to run the backtest with.
This argument is mutually exclusive with:
``bundle``
``bundle_timestamp``
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
This argument is mutually exclusive with ``data``.
bundle_timestamp : datetime, optional
The datetime to lookup the bundle data for. This defaults to the
current time.
This argument is mutually exclusive with ``data``.
default_extension : bool, optional
Should the default zipline extension be loaded. This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load. If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles.
"""
load_extensions(default_extension, extensions, strict_extensions, environ)
non_none_data = valfilter(bool, {
'data': data is not None,
'bundle': bundle is not None,
})
if not non_none_data:
# if neither data nor bundle are passed use 'quantopian-quandl'
bundle = 'quantopian-quandl'
elif len(non_none_data) != 1:
raise ValueError(
            'must specify one of `data` or `bundle`,'
' got: %r' % non_none_data,
)
elif 'bundle' not in non_none_data and bundle_timestamp is not None:
raise ValueError(
'cannot specify `bundle_timestamp` without passing `bundle`',
)
return _run(
handle_data=handle_data,
initialize=initialize,
before_trading_start=before_trading_start,
analyze=analyze,
algofile=None,
algotext=None,
defines=(),
data_frequency=data_frequency,
capital_base=capital_base,
data=data,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=os.devnull,
print_algo=False,
local_namespace=False,
environ=environ,
broker=None,
state_filename=None,
realtime_bar_target=None
) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/run_algo.py | run_algo.py |
from datetime import tzinfo
from functools import partial, wraps
from operator import attrgetter
from numpy import dtype
import pandas as pd
from pytz import timezone
from six import iteritems, string_types, PY3
from toolz import valmap, complement, compose
import toolz.curried.operator as op
from zipline.utils.functional import getattrs
from zipline.utils.preprocess import call, preprocess
if PY3:
_qualified_name = attrgetter('__qualname__')
else:
def _qualified_name(obj):
"""
Return the fully-qualified name (ignoring inner classes) of a type.
"""
module = obj.__module__
if module in ('__builtin__', '__main__', 'builtins'):
return obj.__name__
return '.'.join([module, obj.__name__])
def verify_indices_all_unique(obj):
"""
Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If any axis has duplicate entries.
"""
axis_names = [
('index',), # Series
('index', 'columns'), # DataFrame
('items', 'major_axis', 'minor_axis') # Panel
][obj.ndim - 1] # ndim = 1 should go to entry 0,
for axis_name, index in zip(axis_names, obj.axes):
if index.is_unique:
continue
raise ValueError(
"Duplicate entries in {type}.{axis}: {dupes}.".format(
type=type(obj).__name__,
axis=axis_name,
dupes=sorted(index[index.duplicated()]),
)
)
return obj
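# Illustrative sketch:
#
#     verify_indices_all_unique(pd.Series([1, 2], index=['a', 'b']))  # unchanged
#     verify_indices_all_unique(pd.Series([1, 2], index=['a', 'a']))  # ValueError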
def optionally(preprocessor):
"""Modify a preprocessor to explicitly allow `None`.
Parameters
----------
preprocessor : callable[callable, str, any -> any]
A preprocessor to delegate to when `arg is not None`.
Returns
-------
optional_preprocessor : callable[callable, str, any -> any]
A preprocessor that delegates to `preprocessor` when `arg is not None`.
Usage
-----
>>> def preprocessor(func, argname, arg):
... if not isinstance(arg, int):
... raise TypeError('arg must be int')
... return arg
...
>>> @preprocess(a=optionally(preprocessor))
... def f(a):
... return a
...
>>> f(1) # call with int
1
>>> f('a') # call with not int
Traceback (most recent call last):
...
TypeError: arg must be int
>>> f(None) is None # call with explicit None
True
"""
@wraps(preprocessor)
def wrapper(func, argname, arg):
return arg if arg is None else preprocessor(func, argname, arg)
return wrapper
def ensure_upper_case(func, argname, arg):
if isinstance(arg, string_types):
return arg.upper()
else:
raise TypeError(
"{0}() expected argument '{1}' to"
" be a string, but got {2} instead.".format(
func.__name__,
argname,
arg,
),
)
def ensure_dtype(func, argname, arg):
"""
Argument preprocessor that converts the input into a numpy dtype.
Usage
-----
>>> import numpy as np
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(dtype=ensure_dtype)
... def foo(dtype):
... return dtype
...
>>> foo(float)
dtype('float64')
"""
try:
return dtype(arg)
except TypeError:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a numpy dtype.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
)
def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Usage
-----
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, string_types):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
)
def ensure_timestamp(func, argname, arg):
"""Argument preprocessor that converts the input into a pandas Timestamp
object.
Usage
-----
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(ts=ensure_timestamp)
... def foo(ts):
... return ts
>>> foo('2014-01-01')
Timestamp('2014-01-01 00:00:00')
"""
try:
return pd.Timestamp(arg)
except ValueError as e:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a pandas Timestamp.\n"
"Original error was: {t}: {e}".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
t=_qualified_name(type(e)),
e=e,
),
)
def expect_dtypes(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected numpy dtypes.
Usage
-----
>>> from numpy import dtype, arange, int8, float64
>>> @expect_dtypes(x=dtype(int8))
... def foo(x, y):
... return x, y
...
>>> foo(arange(3, dtype=int8), 'foo')
(array([0, 1, 2], dtype=int8), 'foo')
>>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
but got 'float64' instead.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (dtype, tuple)):
raise TypeError(
"expect_dtypes() expected a numpy dtype or tuple of dtypes"
" for argument {name!r}, but got {dtype} instead.".format(
                    name=name, dtype=type_,
)
)
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
@preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_dtype(dtypes):
"""
Factory for dtype-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# name. Otherwise just show the value.
try:
value_to_show = value.dtype.name
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a value with dtype {dtype_str} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=get_funcname(func),
dtype_str=' or '.join(repr(d.name) for d in dtypes),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattr(argvalue, 'dtype', object()) not in dtypes:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_dtype, named))
def expect_kinds(**named):
"""
Preprocessing decorator that verifies inputs have expected dtype kinds.
Usage
-----
>>> from numpy import int64, int32, float32
>>> @expect_kinds(x='i')
... def foo(x):
... return x
...
>>> foo(int64(2))
2
>>> foo(int32(2))
2
>>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
but got 'f' instead.
"""
for name, kind in iteritems(named):
if not isinstance(kind, (str, tuple)):
raise TypeError(
"expect_dtype_kinds() expected a string or tuple of strings"
" for argument {name!r}, but got {kind} instead.".format(
                    name=name, kind=kind,
)
)
@preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_kind(kinds):
"""
Factory for kind-checking functions that work the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# kind. Otherwise just show the value.
try:
value_to_show = value.dtype.kind
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a numpy object of kind {kinds} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=_qualified_name(func),
kinds=' or '.join(map(repr, kinds)),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_kind, named))
def expect_types(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Usage
-----
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value of type int for argument 'x',
but got float instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
"""
for name, type_ in iteritems(named):
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name, type_=type_,
)
)
def _expect_type(type_):
# Slightly different messages for type and tuple of types.
_template = (
"%(funcname)s() expected a value of type {type_or_types} "
"for argument '%(argname)s', but got %(actual)s instead."
)
if isinstance(type_, tuple):
template = _template.format(
type_or_types=' or '.join(map(_qualified_name, type_))
)
else:
template = _template.format(type_or_types=_qualified_name(type_))
return make_check(
exc_type=TypeError,
template=template,
pred=lambda v: not isinstance(v, type_),
actual=compose(_qualified_name, type),
funcname=__funcname,
)
return preprocess(**valmap(_expect_type, named))
def make_check(exc_type, template, pred, actual, funcname):
"""
Factory for making preprocessing functions that check a predicate on the
input value.
Parameters
----------
exc_type : Exception
The exception type to raise if the predicate fails.
template : str
A template string to use to create error messages.
Should have %-style named template parameters for 'funcname',
'argname', and 'actual'.
pred : function[object -> bool]
A function to call on the argument being preprocessed. If the
predicate returns `True`, we raise an instance of `exc_type`.
actual : function[object -> object]
A function to call on bad values to produce the value to display in the
error message.
funcname : str or callable
Name to use in error messages, or function to call on decorated
functions to produce a name. Passing an explicit name is useful when
creating checks for __init__ or __new__ methods when you want the error
to refer to the class name instead of the method name.
"""
if isinstance(funcname, str):
def get_funcname(_):
return funcname
else:
get_funcname = funcname
def _check(func, argname, argvalue):
if pred(argvalue):
raise exc_type(
template % {
'funcname': get_funcname(func),
'argname': argname,
'actual': actual(argvalue),
},
)
return argvalue
return _check
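# Illustrative sketch: building a custom non-negativity check from ``make_check``
# and wiring it in with ``@preprocess`` (the names below are hypothetical):
#
#     expect_non_negative = make_check(
#         exc_type=ValueError,
#         template="%(funcname)s() expected a non-negative value for argument"
#                  " '%(argname)s', but got %(actual)s instead.",
#         pred=lambda value: value < 0,
#         actual=repr,
#         funcname='example',
#     )
#
#     @preprocess(x=expect_non_negative)
#     def sqrt_ish(x):
#         return x ** 0.5
#
#     sqrt_ish(-1)  # raises ValueError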
def optional(type_):
"""
Helper for use with `expect_types` when an input can be `type_` or `None`.
Returns an object such that both `None` and instances of `type_` pass
checks of the form `isinstance(obj, optional(type_))`.
Parameters
----------
type_ : type
Type for which to produce an option.
Examples
--------
>>> isinstance({}, optional(dict))
True
>>> isinstance(None, optional(dict))
True
>>> isinstance(1, optional(dict))
False
"""
return (type_, type(None))
def expect_element(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs are elements of some
expected collection.
Usage
-----
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
but got 'c' instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol.
"""
def _expect_element(collection):
if isinstance(collection, (set, frozenset)):
# Special case the error message for set and frozen set to make it
# less verbose.
collection_for_error_message = tuple(sorted(collection))
else:
collection_for_error_message = collection
template = (
"%(funcname)s() expected a value in {collection} "
"for argument '%(argname)s', but got %(actual)s instead."
).format(collection=collection_for_error_message)
return make_check(
ValueError,
template,
complement(op.contains(collection)),
repr,
funcname=__funcname,
)
return preprocess(**valmap(_expect_element, named))
def expect_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Usage
-----
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value > upper
predicate_descr = "less than or equal to " + str(upper)
elif upper is None:
def should_fail(value):
return value < lower
predicate_descr = "greater than or equal to " + str(lower)
else:
def should_fail(value):
return not (lower <= value <= upper)
predicate_descr = "inclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
def expect_strictly_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall EXCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Usage
-----
>>> @expect_strictly_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(2)
3
>>> foo(4)
5
>>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value exclusively between 1 and 5 for
argument 'x', but got 5 instead.
>>> @expect_strictly_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(2) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value strictly greater than 2 for
argument 'x', but got 2 instead.
>>> @expect_strictly_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value strictly less than 5 for
argument 'x', but got 5 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value >= upper
predicate_descr = "strictly less than " + str(upper)
elif upper is None:
def should_fail(value):
return value <= lower
predicate_descr = "strictly greater than " + str(lower)
else:
def should_fail(value):
return not (lower < value < upper)
predicate_descr = "exclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
def _expect_bounded(make_bounded_check, __funcname, **named):
def valid_bounds(t):
return (
isinstance(t, tuple)
and len(t) == 2
and t != (None, None)
)
for name, bounds in iteritems(named):
if not valid_bounds(bounds):
raise TypeError(
"expect_bounded() expected a tuple of bounds for"
" argument '{name}', but got {bounds} instead.".format(
name=name,
bounds=bounds,
)
)
return preprocess(**valmap(make_bounded_check, named))
def expect_dimensions(__funcname=_qualified_name, **dimensions):
"""
Preprocessing decorator that verifies inputs are numpy arrays with a
specific dimensionality.
Usage
-----
>>> from numpy import array
>>> @expect_dimensions(x=1, y=2)
... def foo(x, y):
... return x[0] + y[0, 0]
...
>>> foo(array([1, 1]), array([[1, 1], [2, 2]]))
2
>>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a 2-D array for argument 'y',
but got a 1-D array instead.
"""
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
def _expect_dimension(expected_ndim):
def _check(func, argname, argvalue):
actual_ndim = argvalue.ndim
if actual_ndim != expected_ndim:
if actual_ndim == 0:
actual_repr = 'scalar'
else:
actual_repr = "%d-D array" % actual_ndim
raise ValueError(
"{func}() expected a {expected:d}-D array"
" for argument {argname!r}, but got a {actual}"
" instead.".format(
func=get_funcname(func),
expected=expected_ndim,
argname=argname,
actual=actual_repr,
)
)
return argvalue
return _check
return preprocess(**valmap(_expect_dimension, dimensions))
def coerce(from_, to, **to_kwargs):
"""
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
    from_ : type or tuple of types
        Input types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Usage
-----
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
"""
def preprocessor(func, argname, arg):
if isinstance(arg, from_):
return to(arg, **to_kwargs)
return arg
return preprocessor
def coerce_types(**kwargs):
"""
Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Usage
-----
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3')
"""
def _coerce(types):
return coerce(*types)
return preprocess(**valmap(_coerce, kwargs))
class error_keywords(object):
def __init__(self, *args, **kwargs):
self.messages = kwargs
def __call__(self, func):
def assert_keywords_and_call(*args, **kwargs):
for field, message in iteritems(self.messages):
if field in kwargs:
raise TypeError(message)
return func(*args, **kwargs)
return assert_keywords_and_call
coerce_string = partial(coerce, string_types) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/input_validation.py | input_validation.py |
from textwrap import dedent
from types import CodeType
from functools import wraps
from inspect import getargspec
from uuid import uuid4
from toolz.curried.operator import getitem
from six import viewkeys, exec_, PY3
_code_argorder = (
('co_argcount', 'co_kwonlyargcount') if PY3 else ('co_argcount',)
) + (
'co_nlocals',
'co_stacksize',
'co_flags',
'co_code',
'co_consts',
'co_names',
'co_varnames',
'co_filename',
'co_name',
'co_firstlineno',
'co_lnotab',
'co_freevars',
'co_cellvars',
)
NO_DEFAULT = object()
def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
        `func` is the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Usage
-----
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
    ...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
argset = set(args) | {varargs, varkw} - {None}
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" %
(argspec,)
)
# Ensure that all processors map to valid names.
bad_names = viewkeys(processors) - argset
if bad_names:
raise TypeError(
"Got processors for unknown arguments: %s." % bad_names
)
return _build_preprocessed_function(
f, processors, args_defaults, varargs, varkw,
)
return _decorator
def call(f):
"""
Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Usage
-----
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2
"""
@wraps(f)
def processor(func, argname, arg):
return f(arg)
return processor
def _build_preprocessed_function(func,
processors,
args_defaults,
varargs,
varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
    signature as `func`.
"""
format_kwargs = {'func_name': func.__name__}
def mangle(name):
return 'a' + uuid4().hex + name
format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, 'wraps': wraps}
defaults_seen = 0
default_name_template = 'a' + uuid4().hex + '_%d'
signature = []
call_args = []
assignments = []
star_map = {
varargs: '*',
varkw: '**',
}
def name_as_arg(arg):
return star_map.get(arg, '') + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append('='.join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle('_processor_' + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=', '.join(signature),
assignments='\n '.join(assignments),
wrapped_funcname=mangled_funcname,
call_args=', '.join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode='exec',
)
exec_locals = {}
exec_(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {
attr: getattr(code, attr)
for attr in dir(code)
if attr.startswith('co_')
}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args['co_firstlineno'] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/preprocess.py | preprocess.py |
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
import warnings
import datetime
import numpy as np
import pandas as pd
import pytz
from toolz import curry
from zipline.utils.input_validation import preprocess
from zipline.utils.memoize import lazyval
from zipline.utils.sentinel import sentinel
from .context_tricks import nop_context
__all__ = [
'EventManager',
'Event',
'EventRule',
'StatelessRule',
'ComposedRule',
'Always',
'Never',
'AfterOpen',
'BeforeClose',
'NotHalfDay',
'NthTradingDayOfWeek',
'NDaysBeforeLastTradingDayOfWeek',
'NthTradingDayOfMonth',
'NDaysBeforeLastTradingDayOfMonth',
'StatefulRule',
'OncePerDay',
# Factory API
'date_rules',
'time_rules',
'calendars',
'make_eventrule',
]
MAX_MONTH_RANGE = 23
MAX_WEEK_RANGE = 5
def naive_to_utc(ts):
"""
Converts a UTC tz-naive timestamp to a tz-aware timestamp.
"""
# Drop the nanoseconds field. warn=False suppresses the warning
# that we are losing the nanoseconds; however, this is intended.
return pd.Timestamp(ts.to_pydatetime(warn=False), tz='UTC')
def ensure_utc(time, tz='UTC'):
"""
Normalize a time. If the time is tz-naive, assume it is UTC.
"""
if not time.tzinfo:
time = time.replace(tzinfo=pytz.timezone(tz))
return time.replace(tzinfo=pytz.utc)
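# Illustrative sketch for the two helpers above:
#
#     naive_to_utc(pd.Timestamp('2016-01-04 09:31'))
#     # -> Timestamp('2016-01-04 09:31:00+0000', tz='UTC')
#     ensure_utc(datetime.time(9, 31))
#     # -> datetime.time(9, 31, tzinfo=<UTC>)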
def _out_of_range_error(a, b=None, var='offset'):
start = 0
if b is None:
end = a - 1
else:
start = a
end = b - 1
return ValueError(
'{var} must be in between {start} and {end} inclusive'.format(
var=var,
start=start,
end=end,
)
)
def _td_check(td):
seconds = td.total_seconds()
# 43200 seconds = 12 hours
if 60 <= seconds <= 43200:
return td
else:
raise ValueError('offset must be in between 1 minute and 12 hours, '
'inclusive.')
def _build_offset(offset, kwargs, default):
"""
Builds the offset argument for event rules.
"""
if offset is None:
if not kwargs:
return default # use the default.
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError('Cannot pass kwargs and an offset')
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords")
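# Illustrative sketch (commented) of how the offset helper above resolves its
# arguments; the values follow from the checks in _td_check and _build_offset:
#
#     _build_offset(None, {}, datetime.timedelta(minutes=1))
#     # -> timedelta(minutes=1)   (no offset, no kwargs: the default is used)
#     _build_offset(None, {'minutes': 30}, datetime.timedelta(minutes=1))
#     # -> timedelta(minutes=30)  (built from kwargs, range-checked)
#     _build_offset(datetime.timedelta(hours=1), {}, datetime.timedelta(minutes=1))
#     # -> timedelta(hours=1)     (explicit offset, range-checked)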
def _build_date(date, kwargs):
"""
Builds the date argument for event rules.
"""
if date is None:
if not kwargs:
raise ValueError('Must pass a date or kwargs')
else:
return datetime.date(**kwargs)
elif kwargs:
raise ValueError('Cannot pass kwargs and a date')
else:
return date
def _build_time(time, kwargs):
"""
Builds the time argument for event rules.
"""
tz = kwargs.pop('tz', 'UTC')
if time:
if kwargs:
raise ValueError('Cannot pass kwargs and a time')
else:
return ensure_utc(time, tz)
elif not kwargs:
raise ValueError('Must pass a time or kwargs')
else:
return datetime.time(**kwargs)
@curry
def lossless_float_to_int(funcname, func, argname, arg):
"""
A preprocessor that coerces integral floats to ints.
Receipt of non-integral floats raises a TypeError.
"""
if not isinstance(arg, float):
return arg
arg_as_int = int(arg)
if arg == arg_as_int:
warnings.warn(
"{f} expected an int for argument {name!r}, but got float {arg}."
" Coercing to int.".format(
f=funcname,
name=argname,
arg=arg,
),
)
return arg_as_int
raise TypeError(arg)
class EventManager(object):
"""Manages a list of Event objects.
This manages the logic for checking the rules and dispatching to the
handle_data function of the Events.
Parameters
----------
create_context : (BarData) -> context manager, optional
An optional callback to produce a context manager to wrap the calls
to handle_data. This will be passed the current BarData.
"""
def __init__(self, create_context=None):
self._events = []
self._create_context = (
create_context
if create_context is not None else
lambda *_: nop_context
)
def add_event(self, event, prepend=False):
"""
Adds an event to the manager.
"""
if prepend:
self._events.insert(0, event)
else:
self._events.append(event)
def handle_data(self, context, data, dt):
with self._create_context(data):
for event in self._events:
event.handle_data(
context,
data,
dt,
)
class Event(namedtuple('Event', ['rule', 'callback'])):
"""
An event is a pairing of an EventRule and a callable that will be invoked
with the current algorithm context, data, and datetime only when the rule
is triggered.
"""
def __new__(cls, rule=None, callback=None):
callback = callback or (lambda *args, **kwargs: None)
return super(cls, cls).__new__(cls, rule=rule, callback=callback)
def handle_data(self, context, data, dt):
"""
Calls the callable only when the rule is triggered.
"""
if self.rule.should_trigger(dt):
self.callback(context, data)
class EventRule(six.with_metaclass(ABCMeta)):
@abstractmethod
def should_trigger(self, dt):
"""
Checks if the rule should trigger with its current state.
This method should be pure and NOT mutate any state on the object.
"""
raise NotImplementedError('should_trigger')
class StatelessRule(EventRule):
"""
A stateless rule has no observable side effects.
This is reentrant and will always give the same result for the
same datetime.
Because these are pure, they can be composed to create new rules.
"""
def and_(self, rule):
"""
Logical and of two rules, triggers only when both rules trigger.
This follows the short circuiting rules for normal and.
"""
return ComposedRule(self, rule, ComposedRule.lazy_and)
__and__ = and_
class ComposedRule(StatelessRule):
"""
    A rule that composes the results of two rules with some composing function.
    The composer is called with the two rules' should_trigger functions and
    the datetime, i.e. composer(first.should_trigger, second.should_trigger,
    dt). Passing the functions rather than their results lets the composer
    decide whether to call should_trigger on each rule, which is useful if you
    don't always want to evaluate one of the rules. This is how the & operator
    gets the expected short-circuit behavior (see lazy_and below).
"""
def __init__(self, first, second, composer):
if not (isinstance(first, StatelessRule) and
isinstance(second, StatelessRule)):
raise ValueError('Only two StatelessRules can be composed')
self.first = first
self.second = second
self.composer = composer
def should_trigger(self, dt):
"""
Composes the two rules with a lazy composer.
"""
return self.composer(
self.first.should_trigger,
self.second.should_trigger,
dt
)
@staticmethod
def lazy_and(first_should_trigger, second_should_trigger, dt):
"""
Lazily ands the two rules. This will NOT call the should_trigger of the
second rule if the first one returns False.
"""
return first_should_trigger(dt) and second_should_trigger(dt)
class Always(StatelessRule):
"""
A rule that always triggers.
"""
@staticmethod
def always_trigger(dt):
"""
A should_trigger implementation that will always trigger.
"""
return True
should_trigger = always_trigger
class Never(StatelessRule):
"""
A rule that never triggers.
"""
@staticmethod
def never_trigger(dt):
"""
A should_trigger implementation that will never trigger.
"""
return False
should_trigger = never_trigger
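# Illustrative sketch (commented) of how the classes above fit together; the
# context/data/dt values are placeholders normally supplied by the simulation
# loop:
#
#     def rebalance(context, data):
#         pass  # trading logic goes here
#
#     manager = EventManager()
#     manager.add_event(Event(rule=Always(), callback=rebalance))
#     # Each bar, the simulation calls manager.handle_data(context, data, dt),
#     # which invokes rebalance(context, data) whenever the rule triggers.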
class AfterOpen(StatelessRule):
"""
A rule that triggers for some offset after the market opens.
    Example that triggers 30 minutes after the market opens:
>>> AfterOpen(minutes=30) # doctest: +ELLIPSIS
<zipline.utils.events.AfterOpen object at ...>
"""
def __init__(self, offset=None, **kwargs):
self.offset = _build_offset(
offset,
kwargs,
datetime.timedelta(minutes=1), # Defaults to the first minute.
)
self._period_start = None
self._period_end = None
self._period_close = None
self._one_minute = datetime.timedelta(minutes=1)
def calculate_dates(self, dt):
"""
Given a date, find that day's open and period end (open + offset).
"""
period_start, period_close = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)
# Align the market open and close times here with the execution times
# used by the simulation clock. This ensures that scheduled functions
# trigger at the correct times.
self._period_start = self.cal.execution_time_from_open(period_start)
self._period_close = self.cal.execution_time_from_close(period_close)
self._period_end = self._period_start + self.offset - self._one_minute
def should_trigger(self, dt):
# There are two reasons why we might want to recalculate the dates.
# One is the first time we ever call should_trigger, when
        # self._period_start is None. The second is when we're on a new day,
        # and need to recalculate the dates. For performance reasons, we rely
        # on the fact that our clock only ever ticks forward, since it's
        # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means
        # that we will NOT correctly recognize a new date if we go backwards
        # in time (which should never happen in a simulation, or in live
        # trading).
if (
self._period_start is None or
self._period_close <= dt
):
self.calculate_dates(dt)
return dt == self._period_end
class BeforeClose(StatelessRule):
"""
A rule that triggers for some offset time before the market closes.
Example that triggers for the last 30 minutes every day:
>>> BeforeClose(minutes=30) # doctest: +ELLIPSIS
<zipline.utils.events.BeforeClose object at ...>
"""
def __init__(self, offset=None, **kwargs):
self.offset = _build_offset(
offset,
kwargs,
datetime.timedelta(minutes=1), # Defaults to the last minute.
)
self._period_start = None
self._period_close = None
self._period_end = None
self._one_minute = datetime.timedelta(minutes=1)
def calculate_dates(self, dt):
"""
Given a dt, find that day's close and period start (close - offset).
"""
period_end = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)[1]
# Align the market close time here with the execution time used by the
# simulation clock. This ensures that scheduled functions trigger at
# the correct times.
self._period_end = self.cal.execution_time_from_close(period_end)
self._period_start = self._period_end - self.offset
self._period_close = self._period_end
def should_trigger(self, dt):
# There are two reasons why we might want to recalculate the dates.
# One is the first time we ever call should_trigger, when
        # self._period_start is None. The second is when we're on a new day,
        # and need to recalculate the dates. For performance reasons, we rely
        # on the fact that our clock only ever ticks forward, since it's
        # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means
        # that we will NOT correctly recognize a new date if we go backwards
        # in time (which should never happen in a simulation, or in live
        # trading).
if self._period_start is None or self._period_close <= dt:
self.calculate_dates(dt)
return self._period_start == dt
class NotHalfDay(StatelessRule):
"""
A rule that only triggers when it is not a half day.
"""
def should_trigger(self, dt):
return self.cal.minute_to_session_label(dt) \
not in self.cal.early_closes
class TradingDayOfWeekRule(six.with_metaclass(ABCMeta, StatelessRule)):
@preprocess(n=lossless_float_to_int('TradingDayOfWeekRule'))
def __init__(self, n, invert):
if not 0 <= n < MAX_WEEK_RANGE:
raise _out_of_range_error(MAX_WEEK_RANGE)
self.td_delta = (-n - 1) if invert else n
def should_trigger(self, dt):
# is this market minute's period in the list of execution periods?
val = self.cal.minute_to_session_label(dt, direction="none").value
return val in self.execution_period_values
@lazyval
def execution_period_values(self):
# calculate the list of periods that match the given criteria
sessions = self.cal.all_sessions
return set(
pd.Series(data=sessions)
.groupby([sessions.year, sessions.weekofyear])
.nth(self.td_delta)
.astype(np.int64)
)
class NthTradingDayOfWeek(TradingDayOfWeekRule):
"""
A rule that triggers on the nth trading day of the week.
This is zero-indexed, n=0 is the first trading day of the week.
"""
def __init__(self, n):
super(NthTradingDayOfWeek, self).__init__(n, invert=False)
class NDaysBeforeLastTradingDayOfWeek(TradingDayOfWeekRule):
"""
A rule that triggers n days before the last trading day of the week.
"""
def __init__(self, n):
super(NDaysBeforeLastTradingDayOfWeek, self).__init__(n, invert=True)
class TradingDayOfMonthRule(six.with_metaclass(ABCMeta, StatelessRule)):
@preprocess(n=lossless_float_to_int('TradingDayOfMonthRule'))
def __init__(self, n, invert):
if not 0 <= n < MAX_MONTH_RANGE:
raise _out_of_range_error(MAX_MONTH_RANGE)
if invert:
self.td_delta = -n - 1
else:
self.td_delta = n
def should_trigger(self, dt):
# is this market minute's period in the list of execution periods?
value = self.cal.minute_to_session_label(dt, direction="none").value
return value in self.execution_period_values
@lazyval
def execution_period_values(self):
# calculate the list of periods that match the given criteria
sessions = self.cal.all_sessions
return set(
pd.Series(data=sessions)
.groupby([sessions.year, sessions.month])
.nth(self.td_delta)
.astype(np.int64)
)
class NthTradingDayOfMonth(TradingDayOfMonthRule):
"""
A rule that triggers on the nth trading day of the month.
This is zero-indexed, n=0 is the first trading day of the month.
"""
def __init__(self, n):
super(NthTradingDayOfMonth, self).__init__(n, invert=False)
class NDaysBeforeLastTradingDayOfMonth(TradingDayOfMonthRule):
"""
A rule that triggers n days before the last trading day of the month.
"""
def __init__(self, n):
super(NDaysBeforeLastTradingDayOfMonth, self).__init__(n, invert=True)
# Stateful rules
class StatefulRule(EventRule):
"""
A stateful rule has state.
This rule will give different results for the same datetimes depending
on the internal state that this holds.
StatefulRules wrap other rules as state transformers.
"""
def __init__(self, rule=None):
self.rule = rule or Always()
def new_should_trigger(self, callable_):
"""
Replace the should trigger implementation for the current rule.
"""
self.should_trigger = callable_
class OncePerDay(StatefulRule):
def __init__(self, rule=None):
self.triggered = False
self.date = None
self.next_date = None
super(OncePerDay, self).__init__(rule)
def should_trigger(self, dt):
if self.date is None or dt >= self.next_date:
# initialize or reset for new date
self.triggered = False
self.date = dt
# record the timestamp for the next day, so that we can use it
# to know if we've moved to the next day
self.next_date = dt + pd.Timedelta(1, unit="d")
if not self.triggered and self.rule.should_trigger(dt):
self.triggered = True
return True
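# Illustrative sketch (commented): wrapping a stateless rule in OncePerDay
# makes it fire at most once per trading day, assuming the usual case where
# all of a session's minutes fall within 24 hours of each other:
#
#     rule = OncePerDay(rule=Always())
#     # should_trigger returns True for the first minute of each session it
#     # sees, and False for every later minute of that same day.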
# Factory API
class date_rules(object):
every_day = Always
@staticmethod
def month_start(days_offset=0):
return NthTradingDayOfMonth(n=days_offset)
@staticmethod
def month_end(days_offset=0):
return NDaysBeforeLastTradingDayOfMonth(n=days_offset)
@staticmethod
def week_start(days_offset=0):
return NthTradingDayOfWeek(n=days_offset)
@staticmethod
def week_end(days_offset=0):
return NDaysBeforeLastTradingDayOfWeek(n=days_offset)
class time_rules(object):
market_open = AfterOpen
market_close = BeforeClose
every_minute = Always
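# The factory classes above are thin aliases over the rule classes defined
# earlier in this module; an illustrative (commented) mapping:
#
#     date_rules.every_day()              # -> Always()
#     date_rules.week_start(1)            # -> NthTradingDayOfWeek(n=1)
#     date_rules.month_end()              # -> NDaysBeforeLastTradingDayOfMonth(n=0)
#     time_rules.market_open(minutes=30)  # -> AfterOpen(minutes=30)
#     time_rules.market_close(minutes=15) # -> BeforeClose(minutes=15)
#
# A (date_rule, time_rule) pair is typically combined into a single rule by
# make_eventrule, defined below.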
class calendars(object):
US_EQUITIES = sentinel('US_EQUITIES')
US_FUTURES = sentinel('US_FUTURES')
def make_eventrule(date_rule, time_rule, cal, half_days=True):
"""
Constructs an event rule from the factory api.
"""
    # Insert the calendar into the individual rules
date_rule.cal = cal
time_rule.cal = cal
if half_days:
inner_rule = date_rule & time_rule
else:
nhd_rule = NotHalfDay()
nhd_rule.cal = cal
inner_rule = date_rule & time_rule & nhd_rule
    return OncePerDay(rule=inner_rule)
# zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/events.py | events.py
from collections import OrderedDict, Sequence
from functools import wraps
from itertools import compress
from weakref import WeakKeyDictionary, ref
from six.moves._thread import allocate_lock as Lock
from toolz.sandbox import unzip
class lazyval(object):
"""Decorator that marks that an attribute of an instance should not be
computed until needed, and that the value should be memoized.
Example
-------
>>> from zipline.utils.memoize import lazyval
>>> class C(object):
... def __init__(self):
... self.count = 0
... @lazyval
... def val(self):
... self.count += 1
... return "val"
...
>>> c = C()
>>> c.count
0
>>> c.val, c.count
('val', 1)
>>> c.val, c.count
('val', 1)
>>> c.val = 'not_val'
Traceback (most recent call last):
...
AttributeError: Can't set read-only attribute.
>>> c.val
'val'
"""
def __init__(self, get):
self._get = get
self._cache = WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
self._cache[instance] = val = self._get(instance)
return val
def __set__(self, instance, value):
raise AttributeError("Can't set read-only attribute.")
def __delitem__(self, instance):
del self._cache[instance]
class classlazyval(lazyval):
""" Decorator that marks that an attribute of a class should not be
computed until needed, and that the value should be memoized.
Example
-------
>>> from zipline.utils.memoize import classlazyval
>>> class C(object):
... count = 0
... @classlazyval
... def val(cls):
... cls.count += 1
... return "val"
...
>>> C.count
0
>>> C.val, C.count
('val', 1)
>>> C.val, C.count
('val', 1)
"""
# We don't reassign the name on the class to implement the caching because
# then we would need to use a metaclass to track the name of the
# descriptor.
def __get__(self, instance, owner):
return super(classlazyval, self).__get__(owner, owner)
def _weak_lru_cache(maxsize=100):
"""
Users should only access the lru_cache through its public API:
cache_info, cache_clear
The internals of the lru_cache are encapsulated for thread safety and
to allow the implementation to change.
"""
def decorating_function(
user_function, tuple=tuple, sorted=sorted, len=len,
KeyError=KeyError):
hits, misses = [0], [0]
kwd_mark = (object(),) # separates positional and keyword args
lock = Lock() # needed because OrderedDict isn't threadsafe
if maxsize is None:
cache = _WeakArgsDict() # cache without ordering or size limit
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
try:
result = cache[key]
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
# ordered least recent to most recent
cache = _WeakArgsOrderedDict()
cache_popitem = cache.popitem
cache_renew = cache.move_to_end
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
with lock:
try:
result = cache[key]
cache_renew(key) # record recent use of this key
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
with lock:
cache[key] = result # record recent use of this key
misses[0] += 1
if len(cache) > maxsize:
# purge least recently used cache entry
cache_popitem(False)
return result
def cache_info():
"""Report cache statistics"""
with lock:
return hits[0], misses[0], maxsize, len(cache)
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
hits[0] = misses[0] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
class _WeakArgs(Sequence):
"""
Works with _WeakArgsDict to provide a weak cache for function args.
When any of those args are gc'd, the pair is removed from the cache.
"""
def __init__(self, items, dict_remove=None):
def remove(k, selfref=ref(self), dict_remove=dict_remove):
self = selfref()
if self is not None and dict_remove is not None:
dict_remove(self)
self._items, self._selectors = unzip(self._try_ref(item, remove)
for item in items)
self._items = tuple(self._items)
self._selectors = tuple(self._selectors)
def __getitem__(self, index):
return self._items[index]
def __len__(self):
return len(self._items)
@staticmethod
def _try_ref(item, callback):
try:
return ref(item, callback), True
except TypeError:
return item, False
@property
def alive(self):
return all(item() is not None
for item in compress(self._items, self._selectors))
def __eq__(self, other):
return self._items == other._items
def __hash__(self):
try:
return self.__hash
except AttributeError:
h = self.__hash = hash(self._items)
return h
class _WeakArgsDict(WeakKeyDictionary, object):
def __delitem__(self, key):
del self.data[_WeakArgs(key)]
def __getitem__(self, key):
return self.data[_WeakArgs(key)]
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.data)
def __setitem__(self, key, value):
self.data[_WeakArgs(key, self._remove)] = value
def __contains__(self, key):
try:
wr = _WeakArgs(key)
except TypeError:
return False
return wr in self.data
def pop(self, key, *args):
return self.data.pop(_WeakArgs(key), *args)
class _WeakArgsOrderedDict(_WeakArgsDict, object):
def __init__(self):
super(_WeakArgsOrderedDict, self).__init__()
self.data = OrderedDict()
def popitem(self, last=True):
while True:
key, value = self.data.popitem(last)
if key.alive:
return tuple(key), value
def move_to_end(self, key):
"""Move an existing element to the end.
Raises KeyError if the element does not exist.
"""
self[key] = self.pop(key)
def weak_lru_cache(maxsize=100):
"""Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
class desc(lazyval):
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
inst = ref(instance)
@_weak_lru_cache(maxsize)
@wraps(self._get)
def wrapper(*args, **kwargs):
return self._get(inst(), *args, **kwargs)
self._cache[instance] = wrapper
return wrapper
@_weak_lru_cache(maxsize)
def __call__(self, *args, **kwargs):
return self._get(*args, **kwargs)
return desc
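# Illustrative sketch (commented): weak_lru_cache is used as a method
# decorator; results are cached per instance and per argument tuple, and
# weak-referenceable arguments are held only weakly:
#
#     class Loader(object):
#         @weak_lru_cache(maxsize=8)
#         def load(self, key):
#             print('computing %s' % (key,))
#             return key * 2
#
#     loader = Loader()
#     loader.load(3)   # prints 'computing 3' and returns 6
#     loader.load(3)   # cache hit: returns 6 without recomputing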
remember_last = weak_lru_cache(1)
# zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/memoize.py | memoize.py
from functools import wraps
import zipline.api
from zipline.utils.algo_instance import get_algo_instance, set_algo_instance
class ZiplineAPI(object):
"""
Context manager for making an algorithm instance available to zipline API
functions within a scoped block.
"""
def __init__(self, algo_instance):
self.algo_instance = algo_instance
def __enter__(self):
"""
Set the given algo instance, storing any previously-existing instance.
"""
self.old_algo_instance = get_algo_instance()
set_algo_instance(self.algo_instance)
def __exit__(self, _type, _value, _tb):
"""
Restore the algo instance stored in __enter__.
"""
set_algo_instance(self.old_algo_instance)
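# Illustrative sketch (commented); `algo` stands for a TradingAlgorithm
# instance supplied by the caller:
#
#     with ZiplineAPI(algo):
#         ...  # zipline.api functions dispatch to `algo` inside this block
#     # On exit, the previously registered instance (if any) is restored.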
def api_method(f):
# Decorator that adds the decorated class method as a callable
# function (wrapped) to zipline.api
@wraps(f)
def wrapped(*args, **kwargs):
# Get the instance and call the method
algo_instance = get_algo_instance()
if algo_instance is None:
raise RuntimeError(
'zipline api method %s must be called during a simulation.'
% f.__name__
)
return getattr(algo_instance, f.__name__)(*args, **kwargs)
# Add functor to zipline.api
setattr(zipline.api, f.__name__, wrapped)
zipline.api.__all__.append(f.__name__)
f.is_api_method = True
return f
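# Illustrative sketch (commented): decorating a TradingAlgorithm method with
# api_method exposes it as a module-level zipline.api function. The class
# below is a simplified stand-in, not the real TradingAlgorithm:
#
#     class TradingAlgorithm(object):
#         @api_method
#         def record(self, **kwargs):
#             ...
#
#     # zipline.api.record(...) now forwards to get_algo_instance().record(...)
#     # and raises RuntimeError if no algorithm instance is registered.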
def require_not_initialized(exception):
"""
Decorator for API methods that should only be called during or before
TradingAlgorithm.initialize. `exception` will be raised if the method is
called after initialize.
Usage
-----
@require_not_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed during initialize.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if self.initialized:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator
def require_initialized(exception):
"""
Decorator for API methods that should only be called after
TradingAlgorithm.initialize. `exception` will be raised if the method is
called before initialize has completed.
Usage
-----
@require_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed after initialize.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if not self.initialized:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator
def disallowed_in_before_trading_start(exception):
"""
Decorator for API methods that cannot be called from within
TradingAlgorithm.before_trading_start. `exception` will be raised if the
method is called inside `before_trading_start`.
Usage
-----
@disallowed_in_before_trading_start(SomeException("Don't do that!"))
def method(self):
# Do stuff that is not allowed inside before_trading_start.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if self._in_before_trading_start:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator
def allowed_only_in_before_trading_start(exception):
"""
Decorator for API methods that can be called only from within
TradingAlgorithm.before_trading_start. `exception` will be raised if the
method is called outside `before_trading_start`.
Usage
-----
@allowed_only_in_before_trading_start(SomeException("Don't do that!"))
def method(self):
# Do stuff that is only allowed inside before_trading_start.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if not self._in_before_trading_start:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
    return decorator
# zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/api_support.py | api_support.py
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from zipline.assets import Asset
from zipline.finance.transaction import Transaction
from zipline.protocol import Event, DATASOURCE_TYPE
from zipline.sources import SpecificEquityTrades
from zipline.finance.trading import SimulationParameters
from zipline.sources.test_source import create_trade
from zipline.utils.calendars import get_calendar
from zipline.utils.input_validation import expect_types
def create_simulation_parameters(year=2006, start=None, end=None,
capital_base=float("1.0e5"),
num_days=None,
data_frequency='daily',
emission_rate='daily',
trading_calendar=None):
if not trading_calendar:
trading_calendar = get_calendar("NYSE")
if start is None:
start = pd.Timestamp("{0}-01-01".format(year), tz='UTC')
elif type(start) == datetime:
start = pd.Timestamp(start)
if end is None:
if num_days:
start_index = trading_calendar.all_sessions.searchsorted(start)
end = trading_calendar.all_sessions[start_index + num_days - 1]
else:
end = pd.Timestamp("{0}-12-31".format(year), tz='UTC')
elif type(end) == datetime:
end = pd.Timestamp(end)
sim_params = SimulationParameters(
start_session=start,
end_session=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
trading_calendar=trading_calendar,
)
return sim_params
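# Illustrative sketch (commented): a ten-session minutely simulation starting
# from the default 2006-01-01 anchor on the NYSE calendar:
#
#     sim_params = create_simulation_parameters(
#         year=2006,
#         num_days=10,
#         data_frequency='minute',
#         emission_rate='minute',
#     )
#     # `end` resolves to the tenth NYSE session on or after 2006-01-01.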
def get_next_trading_dt(current, interval, trading_calendar):
next_dt = pd.Timestamp(current).tz_convert(trading_calendar.tz)
while True:
        # Convert the timestamp to naive before adding the interval; otherwise
        # an extra hour is added when stepping over the EDT transition.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading_calendar.tz)
next_dt_utc = next_dt.tz_convert('UTC')
if trading_calendar.is_open_on_minute(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(trading_calendar.tz)
return next_dt_utc
def create_trade_history(sid, prices, amounts, interval, sim_params,
trading_calendar, source_id="test_factory"):
trades = []
current = sim_params.first_open
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval, trading_calendar)
assert len(trades) == len(prices)
return trades
def create_dividend(sid, payment, declared_date, ex_date, pay_date):
div = Event({
'sid': sid,
'gross_amount': payment,
'net_amount': payment,
'payment_sid': None,
'ratio': None,
'declared_date': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
return div
def create_stock_dividend(sid, payment_sid, ratio, declared_date,
ex_date, pay_date):
return Event({
'sid': sid,
'payment_sid': payment_sid,
'ratio': ratio,
'net_amount': None,
'gross_amount': None,
'dt': pd.tslib.normalize_date(declared_date),
'ex_date': pd.tslib.normalize_date(ex_date),
'pay_date': pd.tslib.normalize_date(pay_date),
'type': DATASOURCE_TYPE.DIVIDEND,
'source_id': 'MockDividendSource'
})
def create_split(sid, ratio, date):
return Event({
'sid': sid,
'ratio': ratio,
'dt': date.replace(hour=0, minute=0, second=0, microsecond=0),
'type': DATASOURCE_TYPE.SPLIT,
'source_id': 'MockSplitSource'
})
@expect_types(asset=Asset)
def create_txn(asset, price, amount, datetime, order_id):
return Transaction(
asset=asset,
price=price,
amount=amount,
dt=datetime,
order_id=order_id,
)
@expect_types(asset=Asset)
def create_txn_history(asset, priceList, amtList, interval, sim_params,
trading_calendar):
txns = []
current = sim_params.first_open
for price, amount in zip(priceList, amtList):
dt = get_next_trading_dt(current, interval, trading_calendar)
txns.append(create_txn(asset, price, amount, dt, None))
current = current + interval
return txns
def create_returns_from_range(sim_params):
return pd.Series(index=sim_params.sessions,
data=np.random.rand(len(sim_params.sessions)))
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.sessions[:len(returns)],
data=returns)
def create_daily_trade_source(sids, sim_params, env, trading_calendar,
concurrent=False):
"""
creates trade_count trades for each sid in sids list.
first trade will be on sim_params.start_session, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
env=env,
trading_calendar=trading_calendar,
concurrent=concurrent,
)
def create_trade_source(sids, trade_time_increment, sim_params, env,
trading_calendar, concurrent=False):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if trading_calendar.is_open_on_minute(sim_params.end_session):
end = sim_params.end_session
# Otherwise, the last_close after the end_session is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
'sids': sids,
'start': sim_params.first_open,
'end': end,
'delta': trade_time_increment,
'filter': sids,
'concurrent': concurrent,
'env': env,
'trading_calendar': trading_calendar,
}
source = SpecificEquityTrades(*args, **kwargs)
    return source
# zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/factory.py | factory.py
import click
import pandas as pd
from .context_tricks import CallbackManager
def maybe_show_progress(it, show_progress, **kwargs):
"""Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
...
"""
if show_progress:
return click.progressbar(it, **kwargs)
# context manager that just return `it` when we enter it
return CallbackManager(lambda it=it: it)
class _DatetimeParam(click.ParamType):
def __init__(self, tz=None):
self.tz = tz
def parser(self, value):
return pd.Timestamp(value, tz=self.tz)
@property
def name(self):
return type(self).__name__.upper()
def convert(self, value, param, ctx):
try:
return self.parser(value)
except ValueError:
self.fail(
'%s is not a valid %s' % (value, self.name.lower()),
param,
ctx,
)
class Timestamp(_DatetimeParam):
"""A click parameter that parses the value into pandas.Timestamp objects.
Parameters
----------
tz : timezone-coercable, optional
The timezone to parse the string as.
        By default the timezone will be inferred from the string or naive.
"""
class Date(_DatetimeParam):
"""A click parameter that parses the value into datetime.date objects.
Parameters
----------
tz : timezone-coercable, optional
The timezone to parse the string as.
        By default the timezone will be inferred from the string or naive.
as_timestamp : bool, optional
If True, return the value as a pd.Timestamp object normalized to
midnight.
"""
def __init__(self, tz=None, as_timestamp=False):
super(Date, self).__init__(tz=tz)
self.as_timestamp = as_timestamp
def parser(self, value):
ts = super(Date, self).parser(value)
return ts.normalize() if self.as_timestamp else ts.date()
class Time(_DatetimeParam):
"""A click parameter that parses the value into timetime.time objects.
Parameters
----------
tz : timezone-coercable, optional
The timezone to parse the string as.
By default the timezone will be infered from the string or naiive.
"""
def parser(self, value):
return super(Time, self).parser(value).time()
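# Illustrative sketch (commented): these classes are intended to be used as
# click option types; the option names below are made up for the example:
#
#     @click.command()
#     @click.option('--start', type=Date(tz='UTC', as_timestamp=True))
#     @click.option('--at', type=Time(tz='US/Eastern'))
#     def main(start, at):
#         # `start` arrives as a midnight-normalized pd.Timestamp and `at`
#         # as a datetime.time parsed in the given timezone.
#         ...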
class Timedelta(_DatetimeParam):
"""A click parameter that parses values into pd.Timedelta objects.
Parameters
----------
unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional
Denotes the unit of the input if the input is an integer.
"""
def __init__(self, unit='ns'):
self.unit = unit
def parser(self, value):
        return pd.Timedelta(value, unit=self.unit)
# zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/cli.py | cli.py
from abc import ABCMeta, abstractproperty
from lru import LRU
import warnings
from pandas.tseries.holiday import AbstractHolidayCalendar
from six import with_metaclass
from numpy import searchsorted
import numpy as np
import pandas as pd
from pandas import (
DataFrame,
date_range,
DatetimeIndex,
)
from pandas.tseries.offsets import CustomBusinessDay
from zipline.utils.calendars._calendar_helpers import (
next_divider_idx,
previous_divider_idx,
is_open,
minutes_to_session_labels,
)
from zipline.utils.input_validation import (
attrgetter,
coerce,
preprocess,
)
from zipline.utils.memoize import lazyval
start_default = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end_default = end_base + pd.Timedelta(days=365)
NANOS_IN_MINUTE = 60000000000
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = range(7)
class TradingCalendar(with_metaclass(ABCMeta)):
"""
    A TradingCalendar represents the timing information of a single market
exchange.
The timing information is made up of two parts: sessions, and opens/closes.
A session represents a contiguous set of minutes, and has a label that is
midnight UTC. It is important to note that a session label should not be
considered a specific point in time, and that midnight UTC is just being
used for convenience.
For each session, we store the open and close time in UTC time.
"""
def __init__(self, start=start_default, end=end_default):
# Midnight in UTC for each trading day.
# In pandas 0.18.1, pandas calls into its own code here in a way that
# fires a warning. The calling code in pandas tries to suppress the
# warning, but does so incorrectly, causing it to bubble out here.
# Actually catch and suppress the warning here:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
_all_days = date_range(start, end, freq=self.day, tz='UTC')
# `DatetimeIndex`s of standard opens/closes for each day.
self._opens = days_at_time(_all_days, self.open_time, self.tz,
self.open_offset)
self._closes = days_at_time(
_all_days, self.close_time, self.tz, self.close_offset
)
# `DatetimeIndex`s of nonstandard opens/closes
_special_opens = self._calculate_special_opens(start, end)
_special_closes = self._calculate_special_closes(start, end)
# Overwrite the special opens and closes on top of the standard ones.
_overwrite_special_dates(_all_days, self._opens, _special_opens)
_overwrite_special_dates(_all_days, self._closes, _special_closes)
# In pandas 0.16.1 _opens and _closes will lose their timezone
# information. This looks like it has been resolved in 0.17.1.
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
self.schedule = DataFrame(
index=_all_days,
columns=['market_open', 'market_close'],
data={
'market_open': self._opens,
'market_close': self._closes,
},
dtype='datetime64[ns]',
)
# Simple cache to avoid recalculating the same minute -> session in
# "next" mode. Analysis of current zipline code paths show that
# `minute_to_session_label` is often called consecutively with the same
# inputs.
self._minute_to_session_label_cache = LRU(1)
self.market_opens_nanos = self.schedule.market_open.values.\
astype(np.int64)
self.market_closes_nanos = self.schedule.market_close.values.\
astype(np.int64)
self._trading_minutes_nanos = self.all_minutes.values.\
astype(np.int64)
self.first_trading_session = _all_days[0]
self.last_trading_session = _all_days[-1]
self._early_closes = pd.DatetimeIndex(
_special_closes.map(self.minute_to_session_label)
)
@lazyval
def day(self):
return CustomBusinessDay(
holidays=self.adhoc_holidays,
calendar=self.regular_holidays,
)
@abstractproperty
def name(self):
raise NotImplementedError()
@abstractproperty
def tz(self):
raise NotImplementedError()
@abstractproperty
def open_time(self):
raise NotImplementedError()
@abstractproperty
def close_time(self):
raise NotImplementedError()
@property
def open_offset(self):
return 0
@property
def close_offset(self):
return 0
@lazyval
def _minutes_per_session(self):
diff = self.schedule.market_close - self.schedule.market_open
diff = diff.astype('timedelta64[m]')
return diff + 1
def minutes_count_for_sessions_in_range(self, start_session, end_session):
"""
Parameters
----------
start_session: pd.Timestamp
The first session.
end_session: pd.Timestamp
The last session.
Returns
-------
int: The total number of minutes for the contiguous chunk of sessions.
between start_session and end_session, inclusive.
"""
return int(self._minutes_per_session[start_session:end_session].sum())
@property
def regular_holidays(self):
"""
Returns
-------
pd.AbstractHolidayCalendar: a calendar containing the regular holidays
for this calendar
"""
return None
@property
def adhoc_holidays(self):
return []
@property
def special_opens(self):
"""
A list of special open times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_opens_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
@property
def special_closes(self):
"""
A list of special close times and corresponding HolidayCalendars.
Returns
-------
list: List of (time, AbstractHolidayCalendar) tuples
"""
return []
@property
def special_closes_adhoc(self):
"""
Returns
-------
list: List of (time, DatetimeIndex) tuples that represent special
closes that cannot be codified into rules.
"""
return []
# -----
@property
def opens(self):
return self.schedule.market_open
@property
def closes(self):
return self.schedule.market_close
@property
def early_closes(self):
return self._early_closes
def is_session(self, dt):
"""
Given a dt, returns whether it's a valid session label.
Parameters
----------
dt: pd.Timestamp
The dt that is being tested.
Returns
-------
bool
Whether the given dt is a valid session label.
"""
return dt in self.schedule.index
def is_open_on_minute(self, dt):
"""
Given a dt, return whether this exchange is open at the given dt.
Parameters
----------
dt: pd.Timestamp
The dt for which to check if this exchange is open.
Returns
-------
bool
Whether the exchange is open on this dt.
"""
return is_open(self.market_opens_nanos, self.market_closes_nanos,
dt.value)
def next_open(self, dt):
"""
Given a dt, returns the next open.
If the given dt happens to be a session open, the next session's open
will be returned.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next open.
Returns
-------
pd.Timestamp
The UTC timestamp of the next open.
"""
idx = next_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz='UTC')
def next_close(self, dt):
"""
Given a dt, returns the next close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next close.
Returns
-------
pd.Timestamp
The UTC timestamp of the next close.
"""
idx = next_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz='UTC')
def previous_open(self, dt):
"""
Given a dt, returns the previous open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous open.
Returns
-------
pd.Timestamp
            The UTC timestamp of the previous open.
"""
idx = previous_divider_idx(self.market_opens_nanos, dt.value)
return pd.Timestamp(self.market_opens_nanos[idx], tz='UTC')
def previous_close(self, dt):
"""
Given a dt, returns the previous close.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous close.
Returns
-------
pd.Timestamp
The UTC timestamp of the previous close.
"""
idx = previous_divider_idx(self.market_closes_nanos, dt.value)
return pd.Timestamp(self.market_closes_nanos[idx], tz='UTC')
def next_minute(self, dt):
"""
Given a dt, return the next exchange minute. If the given dt is not
an exchange minute, returns the next exchange open.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the next exchange minute.
Returns
-------
pd.Timestamp
The next exchange minute.
"""
idx = next_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
def previous_minute(self, dt):
"""
Given a dt, return the previous exchange minute.
Raises KeyError if the given timestamp is not an exchange minute.
Parameters
----------
dt: pd.Timestamp
The dt for which to get the previous exchange minute.
Returns
-------
pd.Timestamp
The previous exchange minute.
"""
idx = previous_divider_idx(self._trading_minutes_nanos, dt.value)
return self.all_minutes[idx]
def next_session_label(self, session_label):
"""
Given a session label, returns the label of the next session.
Parameters
----------
session_label: pd.Timestamp
A session whose next session is desired.
Returns
-------
pd.Timestamp
The next session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the last session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
try:
return self.schedule.index[idx + 1]
except IndexError:
if idx == len(self.schedule.index) - 1:
raise ValueError("There is no next session as this is the end"
" of the exchange calendar.")
else:
raise
def previous_session_label(self, session_label):
"""
Given a session label, returns the label of the previous session.
Parameters
----------
session_label: pd.Timestamp
A session whose previous session is desired.
Returns
-------
pd.Timestamp
The previous session label (midnight UTC).
Notes
-----
Raises ValueError if the given session is the first session in this
calendar.
"""
idx = self.schedule.index.get_loc(session_label)
if idx == 0:
raise ValueError("There is no previous session as this is the"
" beginning of the exchange calendar.")
return self.schedule.index[idx - 1]
def minutes_for_session(self, session_label):
"""
Given a session label, return the minutes for that session.
Parameters
----------
session_label: pd.Timestamp (midnight UTC)
A session label whose session's minutes are desired.
Returns
-------
pd.DateTimeIndex
All the minutes for the given session.
"""
return self.minutes_in_range(
start_minute=self.schedule.at[session_label, 'market_open'],
end_minute=self.schedule.at[session_label, 'market_close'],
)
    def minutes_window(self, start_dt, count):
        """
        Return a DatetimeIndex of count market minutes, starting at start_dt
        and walking forward if count is positive, or backward if count is
        negative.
        """
        start_dt_nanos = start_dt.value
all_minutes_nanos = self._trading_minutes_nanos
start_idx = all_minutes_nanos.searchsorted(start_dt_nanos)
# searchsorted finds the index of the minute **on or after** start_dt.
# If the latter, push back to the prior minute.
if all_minutes_nanos[start_idx] != start_dt_nanos:
start_idx -= 1
if start_idx < 0 or start_idx >= len(all_minutes_nanos):
raise KeyError("Can't start minute window at {}".format(start_dt))
end_idx = start_idx + count
if start_idx > end_idx:
return self.all_minutes[(end_idx + 1):(start_idx + 1)]
else:
return self.all_minutes[start_idx:end_idx]
def sessions_in_range(self, start_session_label, end_session_label):
"""
Given start and end session labels, return all the sessions in that
range, inclusive.
Parameters
----------
start_session_label: pd.Timestamp (midnight UTC)
The label representing the first session of the desired range.
end_session_label: pd.Timestamp (midnight UTC)
The label representing the last session of the desired range.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
return self.all_sessions[
self.all_sessions.slice_indexer(
start_session_label,
end_session_label
)
]
def sessions_window(self, session_label, count):
"""
Given a session label and a window size, returns a list of sessions
of size `count` + 1, that either starts with the given session
(if `count` is positive) or ends with the given session (if `count` is
negative).
Parameters
----------
session_label: pd.Timestamp
The label of the initial session.
count: int
Defines the length and the direction of the window.
Returns
-------
pd.DatetimeIndex
The desired sessions.
"""
start_idx = self.schedule.index.get_loc(session_label)
end_idx = start_idx + count
return self.all_sessions[
min(start_idx, end_idx):max(start_idx, end_idx) + 1
]
def session_distance(self, start_session_label, end_session_label):
"""
Given a start and end session label, returns the distance between
them. For example, for three consecutive sessions Mon., Tues., and
Wed, `session_distance(Mon, Wed)` would return 2.
Parameters
----------
start_session_label: pd.Timestamp
The label of the start session.
end_session_label: pd.Timestamp
The label of the ending session.
Returns
-------
int
The distance between the two sessions.
"""
start_idx = self.all_sessions.searchsorted(
self.minute_to_session_label(start_session_label)
)
end_idx = self.all_sessions.searchsorted(
self.minute_to_session_label(end_session_label)
)
return abs(end_idx - start_idx)
def minutes_in_range(self, start_minute, end_minute):
"""
Given start and end minutes, return all the calendar minutes
in that range, inclusive.
        The given minutes don't need to be calendar minutes.
Parameters
----------
start_minute: pd.Timestamp
The minute representing the start of the desired range.
end_minute: pd.Timestamp
The minute representing the end of the desired range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
start_idx = searchsorted(self._trading_minutes_nanos,
start_minute.value)
end_idx = searchsorted(self._trading_minutes_nanos,
end_minute.value)
if end_minute.value == self._trading_minutes_nanos[end_idx]:
# if the end minute is a market minute, increase by 1
end_idx += 1
return self.all_minutes[start_idx:end_idx]
def minutes_for_sessions_in_range(self, start_session_label,
end_session_label):
"""
Returns all the minutes for all the sessions from the given start
session label to the given end session label, inclusive.
Parameters
----------
start_session_label: pd.Timestamp
The label of the first session in the range.
end_session_label: pd.Timestamp
The label of the last session in the range.
Returns
-------
pd.DatetimeIndex
The minutes in the desired range.
"""
first_minute, _ = self.open_and_close_for_session(start_session_label)
_, last_minute = self.open_and_close_for_session(end_session_label)
return self.minutes_in_range(first_minute, last_minute)
def open_and_close_for_session(self, session_label):
"""
Returns a tuple of timestamps of the open and close of the session
represented by the given label.
Parameters
----------
session_label: pd.Timestamp
The session whose open and close are desired.
Returns
-------
(Timestamp, Timestamp)
The open and close for the given session.
"""
sched = self.schedule
# `market_open` and `market_close` should be timezone aware, but pandas
# 0.16.1 does not appear to support this:
# http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz # noqa
return (
sched.at[session_label, 'market_open'].tz_localize('UTC'),
sched.at[session_label, 'market_close'].tz_localize('UTC'),
)
def session_open(self, session_label):
return self.schedule.at[
session_label,
'market_open'
].tz_localize('UTC')
def session_close(self, session_label):
return self.schedule.at[
session_label,
'market_close'
].tz_localize('UTC')
def session_opens_in_range(self, start_session_label, end_session_label):
return self.schedule.loc[
start_session_label:end_session_label,
'market_open',
].dt.tz_localize('UTC')
def session_closes_in_range(self, start_session_label, end_session_label):
return self.schedule.loc[
start_session_label:end_session_label,
'market_close',
].dt.tz_localize('UTC')
@property
def all_sessions(self):
return self.schedule.index
@property
def first_session(self):
return self.all_sessions[0]
@property
def last_session(self):
return self.all_sessions[-1]
def execution_time_from_open(self, open_dates):
return open_dates
def execution_time_from_close(self, close_dates):
return close_dates
@lazyval
def all_minutes(self):
"""
Returns a DatetimeIndex representing all the minutes in this calendar.
"""
opens_in_ns = \
self._opens.values.astype('datetime64[ns]')
closes_in_ns = \
self._closes.values.astype('datetime64[ns]')
deltas = closes_in_ns - opens_in_ns
        # + 1 because we want 390 minutes per standard day, not 389
daily_sizes = (deltas / NANOS_IN_MINUTE) + 1
num_minutes = np.sum(daily_sizes).astype(np.int64)
# One allocation for the entire thing. This assumes that each day
# represents a contiguous block of minutes.
all_minutes = np.empty(num_minutes, dtype='datetime64[ns]')
idx = 0
for day_idx, size in enumerate(daily_sizes):
# lots of small allocations, but it's fast enough for now.
# size is a np.timedelta64, so we need to int it
size_int = int(size)
all_minutes[idx:(idx + size_int)] = \
np.arange(
opens_in_ns[day_idx],
closes_in_ns[day_idx] + NANOS_IN_MINUTE,
NANOS_IN_MINUTE
)
idx += size_int
return DatetimeIndex(all_minutes).tz_localize("UTC")
@preprocess(dt=coerce(pd.Timestamp, attrgetter('value')))
def minute_to_session_label(self, dt, direction="next"):
"""
Given a minute, get the label of its containing session.
Parameters
----------
dt : pd.Timestamp or nanosecond offset
The dt for which to get the containing session.
direction: str
"next" (default) means that if the given dt is not part of a
session, return the label of the next session.
"previous" means that if the given dt is not part of a session,
return the label of the previous session.
"none" means that a KeyError will be raised if the given
dt is not part of a session.
Returns
-------
pd.Timestamp (midnight UTC)
The label of the containing session.
"""
if direction == "next":
try:
return self._minute_to_session_label_cache[dt]
except KeyError:
pass
idx = searchsorted(self.market_closes_nanos, dt)
current_or_next_session = self.schedule.index[idx]
self._minute_to_session_label_cache[dt] = current_or_next_session
if direction == "next":
return current_or_next_session
elif direction == "previous":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, use the previous session
return self.schedule.index[idx - 1]
elif direction == "none":
if not is_open(self.market_opens_nanos, self.market_closes_nanos,
dt):
# if the exchange is closed, blow up
raise ValueError("The given dt is not an exchange minute!")
else:
# invalid direction
raise ValueError("Invalid direction parameter: "
"{0}".format(direction))
return current_or_next_session
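    # Illustrative sketch (commented), assuming `cal` is a concrete calendar
    # instance (e.g. get_calendar('NYSE')) and using a Saturday, which is not
    # a market minute:
    #
    #     dt = pd.Timestamp('2016-07-02 15:00', tz='UTC')        # Saturday
    #     cal.minute_to_session_label(dt)                        # 2016-07-05 session
    #     cal.minute_to_session_label(dt, direction='previous')  # 2016-07-01 session
    #     cal.minute_to_session_label(dt, direction='none')      # raises ValueError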
def minute_index_to_session_labels(self, index):
"""
Given a sorted DatetimeIndex of market minutes, return a
DatetimeIndex of the corresponding session labels.
Parameters
----------
index: pd.DatetimeIndex or pd.Series
The ordered list of market minutes we want session labels for.
Returns
-------
pd.DatetimeIndex (UTC)
The list of session labels corresponding to the given minutes.
"""
def minute_to_session_label_nanos(dt_nanos):
return self.minute_to_session_label(dt_nanos).value
return DatetimeIndex(minutes_to_session_labels(
index.values.astype(np.int64),
minute_to_session_label_nanos,
self.market_closes_nanos,
).astype('datetime64[ns]'), tz='UTC')
def _special_dates(self, calendars, ad_hoc_dates, start_date, end_date):
"""
Union an iterable of pairs of the form (time, calendar)
and an iterable of pairs of the form (time, [dates])
(This is shared logic for computing special opens and special closes.)
"""
_dates = DatetimeIndex([], tz='UTC').union_many(
[
holidays_at_time(calendar, start_date, end_date, time_,
self.tz)
for time_, calendar in calendars
] + [
days_at_time(datetimes, time_, self.tz)
for time_, datetimes in ad_hoc_dates
]
)
return _dates[(_dates >= start_date) & (_dates <= end_date)]
def _calculate_special_opens(self, start, end):
return self._special_dates(
self.special_opens,
self.special_opens_adhoc,
start,
end,
)
def _calculate_special_closes(self, start, end):
return self._special_dates(
self.special_closes,
self.special_closes_adhoc,
start,
end,
)
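# Illustrative sketch (commented) of typical queries against a concrete
# calendar, assuming get_calendar from zipline.utils.calendars:
#
#     from zipline.utils.calendars import get_calendar
#
#     cal = get_calendar('NYSE')
#     sessions = cal.sessions_in_range(
#         pd.Timestamp('2016-01-04', tz='UTC'),
#         pd.Timestamp('2016-01-08', tz='UTC'),
#     )                                                # five session labels
#     open_, close = cal.open_and_close_for_session(sessions[0])
#     minutes = cal.minutes_for_session(sessions[0])   # 390 minutes that day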
def days_at_time(days, t, tz, day_offset=0):
"""
Create an index of days at time ``t``, interpreted in timezone ``tz``.
The returned index is localized to UTC.
Parameters
----------
days : DatetimeIndex
An index of dates (represented as midnight).
t : datetime.time
The time to apply as an offset to each day in ``days``.
tz : pytz.timezone
The timezone to use to interpret ``t``.
day_offset : int
The number of days we want to offset @days by
Example
-------
In the example below, the times switch from 13:45 to 12:45 UTC because
March 13th is the daylight savings transition for US/Eastern. All the
times are still 8:45 when interpreted in US/Eastern.
>>> import pandas as pd; import datetime; import pprint
>>> dts = pd.date_range('2016-03-12', '2016-03-14')
>>> dts_at_845 = days_at_time(dts, datetime.time(8, 45), 'US/Eastern')
>>> pprint.pprint([str(dt) for dt in dts_at_845])
['2016-03-12 13:45:00+00:00',
'2016-03-13 12:45:00+00:00',
'2016-03-14 12:45:00+00:00']
"""
if len(days) == 0:
return days
# Offset days without tz to avoid timezone issues.
days = DatetimeIndex(days).tz_localize(None)
delta = pd.Timedelta(
days=day_offset,
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
return (days + delta).tz_localize(tz).tz_convert('UTC')
def holidays_at_time(calendar, start, end, time, tz):
return days_at_time(
calendar.holidays(start, end),
time,
tz=tz,
)
def _overwrite_special_dates(midnight_utcs,
opens_or_closes,
special_opens_or_closes):
"""
Overwrite dates in open_or_closes with corresponding dates in
special_opens_or_closes, using midnight_utcs for alignment.
"""
# Short circuit when nothing to apply.
if not len(special_opens_or_closes):
return
len_m, len_oc = len(midnight_utcs), len(opens_or_closes)
if len_m != len_oc:
raise ValueError(
"Found misaligned dates while building calendar.\n"
"Expected midnight_utcs to be the same length as open_or_closes,\n"
"but len(midnight_utcs)=%d, len(open_or_closes)=%d" % len_m, len_oc
)
# Find the array indices corresponding to each special date.
indexer = midnight_utcs.get_indexer(special_opens_or_closes.normalize())
# -1 indicates that no corresponding entry was found. If any -1s are
    # present, then we have special dates that don't correspond to any
# trading day.
if -1 in indexer:
bad_dates = list(special_opens_or_closes[indexer == -1])
raise ValueError("Special dates %s are not trading days." % bad_dates)
# NOTE: This is a slightly dirty hack. We're in-place overwriting the
# internal data of an Index, which is conceptually immutable. Since we're
# maintaining sorting, this should be ok, but this is a good place to
# sanity check if things start going haywire with calendar computations.
opens_or_closes.values[indexer] = special_opens_or_closes.values
class HolidayCalendar(AbstractHolidayCalendar):
def __init__(self, rules):
        super(HolidayCalendar, self).__init__(rules=rules)
# zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/trading_calendar.py | trading_calendar.py
from datetime import time
from pandas.tseries.holiday import (
Holiday,
Easter,
Day,
GoodFriday,
)
from pytz import timezone
from .trading_calendar import (
TradingCalendar,
FRIDAY,
HolidayCalendar)
# Universal Confraternization (new years day)
ConfUniversal = Holiday(
'Dia da Confraternizacao Universal',
month=1,
day=1,
)
# Sao Paulo city birthday
AniversarioSaoPaulo = Holiday(
'Aniversario de Sao Paulo',
month=1,
day=25,
)
# Carnival Monday
CarnavalSegunda = Holiday(
'Carnaval Segunda',
month=1,
day=1,
offset=[Easter(), Day(-48)]
)
# Carnival Tuesday
CarnavalTerca = Holiday(
'Carnaval Terca',
month=1,
day=1,
offset=[Easter(), Day(-47)]
)
# Ash Wednesday (short day)
QuartaCinzas = Holiday(
'Quarta Cinzas',
month=1,
day=1,
offset=[Easter(), Day(-46)]
)
# Good Friday
SextaPaixao = GoodFriday
# Feast of the Most Holy Body of Christ
CorpusChristi = Holiday(
'Corpus Christi',
month=1,
day=1,
offset=[Easter(), Day(60)]
)
# Tiradentes Memorial
Tiradentes = Holiday(
'Tiradentes',
month=4,
day=21,
)
# Labor Day
DiaTrabalho = Holiday(
'Dia Trabalho',
month=5,
day=1,
)
# Constitutionalist Revolution
Constitucionalista = Holiday(
'Constitucionalista',
month=7,
day=9,
start_date='1997-01-01'
)
# Independence Day
Independencia = Holiday(
'Independencia',
month=9,
day=7,
)
# Our Lady of Aparecida
Aparecida = Holiday(
'Nossa Senhora de Aparecida',
month=10,
day=12,
)
# All Souls' Day
Finados = Holiday(
'Dia dos Finados',
month=11,
day=2,
)
# Proclamation of the Republic
ProclamacaoRepublica = Holiday(
'Proclamacao da Republica',
month=11,
day=15,
)
# Day of Black Awareness
ConscienciaNegra = Holiday(
'Dia da Consciencia Negra',
month=11,
day=20,
start_date='2004-01-01'
)
# Christmas Eve
VesperaNatal = Holiday(
'Vespera Natal',
month=12,
day=24,
)
# Christmas
Natal = Holiday(
'Natal',
month=12,
day=25,
)
# New Year's Eve
AnoNovo = Holiday(
'Ano Novo',
month=12,
day=31,
)
# New Year's Eve falls on Saturday
AnoNovoSabado = Holiday(
'Ano Novo Sabado',
month=12,
day=30,
days_of_week=(FRIDAY,),
)
class BMFExchangeCalendar(TradingCalendar):
"""
Exchange calendar for BM&F BOVESPA
Open Time: 10:00 AM, Brazil/Sao Paulo
Close Time: 4:00 PM, Brazil/Sao Paulo
Regularly-Observed Holidays:
- Universal Confraternization (New year's day, Jan 1)
- Sao Paulo City Anniversary (Jan 25)
- Carnaval Monday (48 days before Easter)
- Carnaval Tuesday (47 days before Easter)
- Passion of the Christ (Good Friday, 2 days before Easter)
- Corpus Christi (60 days after Easter)
- Tiradentes (April 21)
- Labor day (May 1)
- Constitutionalist Revolution (July 9 after 1997)
- Independence Day (September 7)
- Our Lady of Aparecida Feast (October 12)
- All Souls' Day (November 2)
- Proclamation of the Republic (November 15)
- Day of Black Awareness (November 20 after 2004)
- Christmas (December 24 and 25)
- Day before New Year's Eve (December 30 if NYE falls on a Saturday)
- New Year's Eve (December 31)
"""
@property
def name(self):
return "BMF"
@property
def tz(self):
return timezone("America/Sao_Paulo")
@property
def open_time(self):
return time(10, 1)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
ConfUniversal,
AniversarioSaoPaulo,
CarnavalSegunda,
CarnavalTerca,
SextaPaixao,
CorpusChristi,
Tiradentes,
DiaTrabalho,
Constitucionalista,
Independencia,
Aparecida,
Finados,
ProclamacaoRepublica,
ConscienciaNegra,
VesperaNatal,
Natal,
AnoNovo,
AnoNovoSabado,
])
@property
def special_opens(self):
return [
(time(13, 1), HolidayCalendar([QuartaCinzas]))
] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/exchange_calendar_bmf.py | exchange_calendar_bmf.py |
from datetime import time
from itertools import chain
from pandas.tseries.holiday import (
GoodFriday,
USPresidentsDay,
USLaborDay,
USThanksgivingDay
)
from pandas.tslib import Timestamp
from pytz import timezone
from zipline.utils.calendars import TradingCalendar
from zipline.utils.calendars.trading_calendar import HolidayCalendar
from zipline.utils.calendars.us_holidays import (
USNewYearsDay,
Christmas,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay,
USNationalDaysofMourning)
class ICEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for ICE US.
Open Time: 8pm, US/Eastern
Close Time: 6pm, US/Eastern
https://www.theice.com/publicdocs/futures_us/ICE_Futures_US_Regular_Trading_Hours.pdf # noqa
"""
@property
def name(self):
return "ICE"
@property
def tz(self):
return timezone("US/Eastern")
@property
def open_time(self):
return time(20, 1)
@property
def close_time(self):
return time(18)
@property
def open_offset(self):
return -1
@property
def special_closes(self):
return [
(time(13), HolidayCalendar([
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay
]))
]
@property
def adhoc_holidays(self):
return list(chain(
USNationalDaysofMourning,
# ICE was only closed on the first day of the Hurricane Sandy
# closings (was not closed on 2012-10-30)
[Timestamp('2012-10-29', tz='UTC')]
))
@property
def regular_holidays(self):
# https://www.theice.com/publicdocs/futures_us/exchange_notices/NewExNot2016Holidays.pdf # noqa
return HolidayCalendar([
USNewYearsDay,
GoodFriday,
Christmas
]) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/exchange_calendar_ice.py | exchange_calendar_ice.py |
from datetime import time
from pandas.tseries.holiday import (
Holiday,
DateOffset,
MO,
weekend_to_monday,
GoodFriday,
EasterMonday,
)
from pytz import timezone
from .trading_calendar import (
TradingCalendar,
MONDAY,
TUESDAY,
HolidayCalendar)
# New Year's Day
LSENewYearsDay = Holiday(
"New Year's Day",
month=1,
day=1,
observance=weekend_to_monday,
)
# Early May bank holiday
MayBank = Holiday(
"Early May Bank Holiday",
month=5,
offset=DateOffset(weekday=MO(1)),
day=1,
)
# Spring bank holiday
SpringBank = Holiday(
"Spring Bank Holiday",
month=5,
day=31,
offset=DateOffset(weekday=MO(-1)),
)
# Summer bank holiday
SummerBank = Holiday(
"Summer Bank Holiday",
month=8,
day=31,
offset=DateOffset(weekday=MO(-1)),
)
# Christmas
Christmas = Holiday(
"Christmas",
month=12,
day=25,
)
# If Christmas day is a Saturday then Monday the 27th is a holiday
# If Christmas day is a Sunday then Tuesday the 27th is a holiday
WeekendChristmas = Holiday(
"Weekend Christmas",
month=12,
day=27,
days_of_week=(MONDAY, TUESDAY),
)
# Boxing day
BoxingDay = Holiday(
"Boxing Day",
month=12,
day=26,
)
# If Boxing Day is a Saturday then Monday the 28th is a holiday
# If Boxing Day is a Sunday then Tuesday the 28th is a holiday
WeekendBoxingDay = Holiday(
"Weekend Boxing Day",
month=12,
day=28,
days_of_week=(MONDAY, TUESDAY),
)
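# Illustrative sketch of the weekend rules above; the sample years are chosen
# only as examples. Christmas 2010 fell on a Saturday, so Monday the 27th is
# picked up; Christmas 2012 fell on a Tuesday, so no extra day results.
#
#   >>> WeekendChristmas.dates('2010-01-01', '2010-12-31')
#   # -> ['2010-12-27']
#   >>> WeekendChristmas.dates('2012-01-01', '2012-12-31')
#   # -> [] (empty)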
class LSEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for the London Stock Exchange
Open Time: 8:00 AM, GMT
Close Time: 4:30 PM, GMT
Regularly-Observed Holidays:
- New Years Day (observed on first business day on/after)
- Good Friday
- Easter Monday
- Early May Bank Holiday (first Monday in May)
- Spring Bank Holiday (last Monday in May)
- Summer Bank Holiday (last Monday in August)
- Christmas Day
- Dec. 27th (if Christmas is on a weekend)
- Boxing Day
- Dec. 28th (if Boxing Day is on a weekend)
"""
@property
def name(self):
return "LSE"
@property
def tz(self):
return timezone('Europe/London')
@property
def open_time(self):
return time(8, 1)
@property
def close_time(self):
return time(16, 30)
@property
def regular_holidays(self):
return HolidayCalendar([
LSENewYearsDay,
GoodFriday,
EasterMonday,
MayBank,
SpringBank,
SummerBank,
Christmas,
WeekendChristmas,
BoxingDay,
WeekendBoxingDay
]) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/exchange_calendar_lse.py | exchange_calendar_lse.py |
from zipline.errors import (
CalendarNameCollision,
CyclicCalendarAlias,
InvalidCalendarName,
)
from zipline.utils.calendars.exchange_calendar_cfe import CFEExchangeCalendar
from zipline.utils.calendars.exchange_calendar_ice import ICEExchangeCalendar
from zipline.utils.calendars.exchange_calendar_nyse import NYSEExchangeCalendar
from zipline.utils.calendars.exchange_calendar_cme import CMEExchangeCalendar
from zipline.utils.calendars.exchange_calendar_bmf import BMFExchangeCalendar
from zipline.utils.calendars.exchange_calendar_lse import LSEExchangeCalendar
from zipline.utils.calendars.exchange_calendar_tsx import TSXExchangeCalendar
from zipline.utils.calendars.us_futures_calendar import (
QuantopianUSFuturesCalendar,
)
_default_calendar_factories = {
'NYSE': NYSEExchangeCalendar,
'CME': CMEExchangeCalendar,
'ICE': ICEExchangeCalendar,
'CFE': CFEExchangeCalendar,
'BMF': BMFExchangeCalendar,
'LSE': LSEExchangeCalendar,
'TSX': TSXExchangeCalendar,
'us_futures': QuantopianUSFuturesCalendar,
}
_default_calendar_aliases = {
'SMART': 'NYSE',
'ARCA': 'NYSE',
'NASDAQ': 'NYSE',
'BATS': 'NYSE',
'CBOT': 'CME',
'COMEX': 'CME',
'NYMEX': 'CME',
'ICEUS': 'ICE',
'NYFE': 'ICE',
}
class TradingCalendarDispatcher(object):
"""
A class for dispatching and caching trading calendars.
Methods of a global instance of this class are provided by
zipline.utils.calendars.calendar_utils.
Parameters
----------
calendars : dict[str -> TradingCalendar]
Initial set of calendars.
calendar_factories : dict[str -> function]
Factories for lazy calendar creation.
aliases : dict[str -> str]
Calendar name aliases.
"""
def __init__(self, calendars, calendar_factories, aliases):
self._calendars = calendars
self._calendar_factories = calendar_factories
self._aliases = aliases
def get_calendar(self, name):
"""
Retrieves an instance of a TradingCalendar with the given name.
Parameters
----------
name : str
The name of the TradingCalendar to be retrieved.
Returns
-------
calendar : zipline.utils.calendars.TradingCalendar
The desired calendar.
"""
canonical_name = self.resolve_alias(name)
try:
return self._calendars[canonical_name]
except KeyError:
# We haven't loaded this calendar yet, so make a new one.
pass
try:
factory = self._calendar_factories[canonical_name]
except KeyError:
# We don't have a factory registered for this name. Barf.
raise InvalidCalendarName(calendar_name=name)
# Cache the calendar for future use.
calendar = self._calendars[canonical_name] = factory()
return calendar
def has_calendar(self, name):
"""
Do we have (or have the ability to make) a calendar with ``name``?
"""
return (
name in self._calendars
or name in self._calendar_factories
or name in self._aliases
)
def register_calendar(self, name, calendar, force=False):
"""
Registers a calendar for retrieval by the get_calendar method.
Parameters
----------
name: str
The key with which to register this calendar.
calendar: TradingCalendar
The calendar to be registered for retrieval.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendars[name] = calendar
def register_calendar_type(self, name, calendar_type, force=False):
"""
Registers a calendar by type.
This is useful for registering a new calendar to be lazily instantiated
at some future point in time.
Parameters
----------
name: str
The key with which to register this calendar.
calendar_type: type
The type of the calendar to register.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendar_factories[name] = calendar_type
def register_calendar_alias(self, alias, real_name, force=False):
"""
Register an alias for a calendar.
This is useful when multiple exchanges should share a calendar, or when
there are multiple ways to refer to the same exchange.
After calling ``register_calendar_alias('alias', 'real_name')``, subsequent
calls to ``get_calendar('alias')`` will return the same result as
``get_calendar('real_name')``.
Parameters
----------
alias : str
The name to be used to refer to a calendar.
real_name : str
The canonical name of the registered calendar.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
"""
if force:
self.deregister_calendar(alias)
if self.has_calendar(alias):
raise CalendarNameCollision(calendar_name=alias)
self._aliases[alias] = real_name
# Ensure that the new alias doesn't create a cycle, and back it out if
# it does.
try:
self.resolve_alias(alias)
except CyclicCalendarAlias:
del self._aliases[alias]
raise
def resolve_alias(self, name):
"""
Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return.
"""
# Track the aliases we've already seen so that we can report the full
# chain of aliases in the event of a cycle.
seen = []
while name in self._aliases:
seen.append(name)
name = self._aliases[name]
# This is O(N ** 2), but if there's an alias chain longer than 2,
# something strange has happened.
if name in seen:
seen.append(name)
raise CyclicCalendarAlias(
cycle=" -> ".join(repr(k) for k in seen)
)
return name
def deregister_calendar(self, name):
"""
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
name : str
The name of the calendar to be deregistered.
"""
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None)
def clear_calendars(self):
"""
Deregisters all current registered calendars
"""
self._calendars.clear()
self._calendar_factories.clear()
self._aliases.clear()
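# Illustrative sketch of the dispatcher's alias resolution and caching
# behavior; the factory mapping below is an example, any registered factory
# would do.
#
#   >>> dispatcher = TradingCalendarDispatcher(
#   ...     calendars={},
#   ...     calendar_factories={'NYSE': NYSEExchangeCalendar},
#   ...     aliases={'NASDAQ': 'NYSE'},
#   ... )
#   >>> dispatcher.resolve_alias('NASDAQ')
#   'NYSE'
#   >>> dispatcher.get_calendar('NASDAQ') is dispatcher.get_calendar('NYSE')
#   True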
# We maintain a global calendar dispatcher so that users can just do
# `register_calendar('my_calendar', calendar) and then use `get_calendar`
# without having to thread around a dispatcher.
global_calendar_dispatcher = TradingCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
get_calendar = global_calendar_dispatcher.get_calendar
clear_calendars = global_calendar_dispatcher.clear_calendars
deregister_calendar = global_calendar_dispatcher.deregister_calendar
register_calendar = global_calendar_dispatcher.register_calendar
register_calendar_type = global_calendar_dispatcher.register_calendar_type
register_calendar_alias = global_calendar_dispatcher.register_calendar_alias | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/calendar_utils.py | calendar_utils.py |
from datetime import time
from pandas.tseries.holiday import (
USPresidentsDay,
USLaborDay,
USThanksgivingDay,
GoodFriday
)
from pytz import timezone
# Useful resources for making changes to this file:
# http://www.cmegroup.com/tools-information/holiday-calendar.html
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
Christmas,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
USBlackFridayInOrAfter1993,
USNationalDaysofMourning,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay)
class CMEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for CME
Open Time: 5:00 PM, America/Chicago
Close Time: 5:00 PM, America/Chicago
Regularly-Observed Holidays:
- New Years Day
- Good Friday
- Christmas
"""
@property
def name(self):
return "CME"
@property
def tz(self):
return timezone('America/Chicago')
@property
def open_time(self):
return time(17, 1)
@property
def close_time(self):
return time(17)
@property
def open_offset(self):
return -1
@property
def regular_holidays(self):
# The CME has different holiday rules depending on the type of
# instrument. For example, http://www.cmegroup.com/tools-information/holiday-calendar/files/2016-4th-of-july-holiday-schedule.pdf # noqa
# shows that Equity, Interest Rate, FX, Energy, Metals & DME Products
# close at 1200 CT on July 4, 2016, while Grain, Oilseed & MGEX
# Products and Livestock, Dairy & Lumber products are completely
# closed.
# For now, we will treat the CME as having a single calendar, and just
# go with the most conservative hours - and treat July 4 as an early
# close at noon.
return HolidayCalendar([
USNewYearsDay,
GoodFriday,
Christmas,
])
@property
def adhoc_holidays(self):
return USNationalDaysofMourning
@property
def special_closes(self):
return [(
time(12),
HolidayCalendar([
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USLaborDay,
USIndependenceDay,
USThanksgivingDay,
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
])
)] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/exchange_calendar_cme.py | exchange_calendar_cme.py |
from datetime import time
from itertools import chain
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay,
Christmas,
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayBefore1993,
USBlackFridayInOrAfter1993,
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
)
# Useful resources for making changes to this file:
# http://www.nyse.com/pdfs/closings.pdf
# http://www.stevemorse.org/jcal/whendid.html
class NYSEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for NYSE
Open Time: 9:31 AM, US/Eastern
Close Time: 4:00 PM, US/Eastern
Regularly-Observed Holidays:
- New Years Day (observed on monday when Jan 1 is a Sunday)
- Martin Luther King Jr. Day (3rd Monday in January, only after 1998)
- Washington's Birthday (aka President's Day, 3rd Monday in February)
- Good Friday (two days before Easter Sunday)
- Memorial Day (last Monday in May)
- Independence Day (observed on the nearest weekday to July 4th)
- Labor Day (first Monday in September)
- Thanksgiving (fourth Thursday in November)
- Christmas (observed on nearest weekday to December 25)
NOTE: The NYSE does not observe the following US Federal Holidays:
- Columbus Day
- Veterans Day
Regularly-Observed Early Closes:
- July 3rd (Mondays, Tuesdays, and Thursdays, 1995 onward)
- July 5th (Fridays, 1995 onward, except 2013)
- Christmas Eve (except on Fridays, when the exchange is closed entirely)
- Day After Thanksgiving (aka Black Friday, observed from 1992 onward)
NOTE: Until 1993, the standard early close time for the NYSE was 2:00 PM.
From 1993 onward, it has been 1:00 PM.
Additional Irregularities:
- Closed from 9/11/2001 to 9/16/2001 due to terrorist attacks in NYC.
- Closed on 10/29/2012 and 10/30/2012 due to Hurricane Sandy.
- Closed on 4/27/1994 due to Richard Nixon's death.
- Closed on 6/11/2004 due to Ronald Reagan's death.
- Closed on 1/2/2007 due to Gerald Ford's death.
- Closed at 1:00 PM on Wednesday, July 3rd, 2013
- Closed at 1:00 PM on Friday, December 31, 1999
- Closed at 1:00 PM on Friday, December 26, 1997
- Closed at 1:00 PM on Friday, December 26, 2003
NOTE: The exchange was **not** closed early on Friday December 26, 2008,
nor was it closed on Friday December 26, 2014. The next Thursday Christmas
will be in 2025. If someone is still maintaining this code in 2025, then
we've done alright...and we should check if it's a half day.
"""
regular_early_close = time(13)
@property
def name(self):
return "NYSE"
@property
def tz(self):
return timezone('US/Eastern')
@property
def open_time(self):
return time(9, 31)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USPresidentsDay,
GoodFriday,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay,
Christmas,
])
@property
def adhoc_holidays(self):
return list(chain(
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
))
@property
def special_closes(self):
return [
(self.regular_early_close, HolidayCalendar([
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayInOrAfter1993,
ChristmasEveInOrAfter1993
])),
(time(14), HolidayCalendar([
ChristmasEveBefore1993,
USBlackFridayBefore1993,
])),
]
@property
def special_closes_adhoc(self):
return [
(self.regular_early_close, [
'1997-12-26',
'1999-12-31',
'2003-12-26',
'2013-07-03'
])
] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/exchange_calendar_nyse.py | exchange_calendar_nyse.py |
from datetime import time
from pandas import Timedelta, Timestamp
from pandas.tseries.holiday import GoodFriday
from pytz import timezone
from zipline.utils.calendars import TradingCalendar
from zipline.utils.calendars.trading_calendar import (
HolidayCalendar, end_default
)
from zipline.utils.calendars.us_holidays import (
USNewYearsDay,
Christmas
)
# Number of hours of offset between the open and close times dictated by this
# calendar versus the 6:31am to 5:00pm times over which we want to simulate
# futures algos.
FUTURES_OPEN_TIME_OFFSET = 12.5
FUTURES_CLOSE_TIME_OFFSET = -1
class QuantopianUSFuturesCalendar(TradingCalendar):
"""Synthetic calendar for trading US futures.
This calendar is a superset of all of the US futures exchange
calendars provided by Zipline (CFE, CME, ICE), and is intended for
trading across all of these exchanges.
Notes
-----
Open Time: 6:00 PM, US/Eastern
Close Time: 6:00 PM, US/Eastern
Regularly-Observed Holidays:
- New Years Day
- Good Friday
- Christmas
In order to align the hours of each session, we ignore the Sunday
CME Pre-Open hour (5-6pm).
"""
# XXX: Override the default TradingCalendar start and end dates with ones
# further in the future. This is a stopgap for memory issues caused by
# upgrading to pandas 0.18. This calendar is the most severely affected,
# since it has the most total minutes of any of the zipline calendars.
def __init__(self,
start=Timestamp('2000-01-01', tz='UTC'),
end=end_default):
super(QuantopianUSFuturesCalendar, self).__init__(start=start, end=end)
@property
def name(self):
return "us_futures"
@property
def tz(self):
return timezone('US/Eastern')
@property
def open_time(self):
return time(18, 1)
@property
def close_time(self):
return time(18)
@property
def open_offset(self):
return -1
def execution_time_from_open(self, open_dates):
return open_dates + Timedelta(hours=FUTURES_OPEN_TIME_OFFSET)
def execution_time_from_close(self, close_dates):
return close_dates + Timedelta(hours=FUTURES_CLOSE_TIME_OFFSET)
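# Illustrative sketch of the execution-time shift; the timestamp below is an
# assumed example. A session's 6:01 PM Eastern open on the prior calendar day
# is shifted forward 12.5 hours to the 6:31 AM simulated open described above.
#
#   >>> import pandas as pd
#   >>> open_ = pd.Timestamp('2016-03-13 22:01', tz='UTC')  # 6:01 PM EDT
#   >>> open_ + pd.Timedelta(hours=FUTURES_OPEN_TIME_OFFSET)
#   Timestamp('2016-03-14 10:31:00+0000', tz='UTC')         # 6:31 AM EDT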
@property
def regular_holidays(self):
return HolidayCalendar([
USNewYearsDay,
GoodFriday,
Christmas,
]) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/us_futures_calendar.py | us_futures_calendar.py |
from datetime import time
from pandas.tseries.holiday import (
Holiday,
DateOffset,
MO,
weekend_to_monday,
GoodFriday
)
from pytz import timezone
from zipline.utils.calendars.trading_calendar import TradingCalendar, \
HolidayCalendar
from zipline.utils.calendars.us_holidays import Christmas
from zipline.utils.calendars.exchange_calendar_lse import (
WeekendChristmas,
BoxingDay,
WeekendBoxingDay
)
# New Year's Day
TSXNewYearsDay = Holiday(
"New Year's Day",
month=1,
day=1,
observance=weekend_to_monday,
)
# Ontario Family Day
FamilyDay = Holiday(
"Family Day",
month=2,
day=1,
offset=DateOffset(weekday=MO(3)),
start_date='2008-01-01',
)
# Victoria Day
VictoriaDay = Holiday(
'Victoria Day',
month=5,
day=25,
offset=DateOffset(weekday=MO(-1)),
)
# Canada Day
CanadaDay = Holiday(
'Canada Day',
month=7,
day=1,
observance=weekend_to_monday,
)
# Civic Holiday
CivicHoliday = Holiday(
'Civic Holiday',
month=8,
day=1,
offset=DateOffset(weekday=MO(1)),
)
# Labor Day
LaborDay = Holiday(
'Labor Day',
month=9,
day=1,
offset=DateOffset(weekday=MO(1)),
)
# Thanksgiving
Thanksgiving = Holiday(
'Thanksgiving',
month=10,
day=1,
offset=DateOffset(weekday=MO(2)),
)
class TSXExchangeCalendar(TradingCalendar):
"""
Exchange calendar for the Toronto Stock Exchange
Open Time: 9:30 AM, EST
Close Time: 4:00 PM, EST
Regularly-Observed Holidays:
- New Years Day (observed on first business day on/after)
- Family Day (Third Monday in February after 2008)
- Good Friday
- Victoria Day (Monday before May 25th)
- Canada Day (July 1st, observed first business day after)
- Civic Holiday (First Monday in August)
- Labor Day (First Monday in September)
- Thanksgiving (Second Monday in October)
- Christmas Day
- Dec. 27th (if Christmas is on a weekend)
- Boxing Day
- Dec. 28th (if Boxing Day is on a weekend)
"""
@property
def name(self):
return "TSX"
@property
def tz(self):
return timezone('Canada/Atlantic')
@property
def open_time(self):
return time(9, 31)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
TSXNewYearsDay,
FamilyDay,
GoodFriday,
VictoriaDay,
CanadaDay,
CivicHoliday,
LaborDay,
Thanksgiving,
Christmas,
WeekendChristmas,
BoxingDay,
WeekendBoxingDay
]) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/exchange_calendar_tsx.py | exchange_calendar_tsx.py |
from pandas import (
Timestamp,
DateOffset,
date_range,
)
from pandas.tseries.holiday import (
Holiday,
sunday_to_monday,
nearest_workday,
)
from dateutil.relativedelta import (
MO,
TH
)
from pandas.tseries.offsets import Day
from zipline.utils.calendars.trading_calendar import (
MONDAY,
TUESDAY,
WEDNESDAY,
THURSDAY,
FRIDAY,
)
# These have the same definition, but are used in different places because the
# NYSE closed at 2:00 PM on Christmas Eve until 1993.
from zipline.utils.pandas_utils import july_5th_holiday_observance
ChristmasEveBefore1993 = Holiday(
'Christmas Eve',
month=12,
day=24,
end_date=Timestamp('1993-01-01'),
# When Christmas is a Saturday, the 24th is a full holiday.
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY),
)
ChristmasEveInOrAfter1993 = Holiday(
'Christmas Eve',
month=12,
day=24,
start_date=Timestamp('1993-01-01'),
# When Christmas is a Saturday, the 24th is a full holiday.
days_of_week=(MONDAY, TUESDAY, WEDNESDAY, THURSDAY),
)
USNewYearsDay = Holiday(
'New Years Day',
month=1,
day=1,
# When Jan 1 is a Sunday, US markets observe the subsequent Monday.
# When Jan 1 is a Saturday (as in 2005 and 2011), no holiday is observed.
observance=sunday_to_monday
)
USMartinLutherKingJrAfter1998 = Holiday(
'Dr. Martin Luther King Jr. Day',
month=1,
day=1,
# The US markets didn't observe MLK day as a holiday until 1998.
start_date=Timestamp('1998-01-01'),
offset=DateOffset(weekday=MO(3)),
)
USMemorialDay = Holiday(
# NOTE: The definition for Memorial Day is incorrect as of pandas 0.16.0.
# See https://github.com/pydata/pandas/issues/9760.
'Memorial Day',
month=5,
day=25,
offset=DateOffset(weekday=MO(1)),
)
USIndependenceDay = Holiday(
'July 4th',
month=7,
day=4,
observance=nearest_workday,
)
Christmas = Holiday(
'Christmas',
month=12,
day=25,
observance=nearest_workday,
)
MonTuesThursBeforeIndependenceDay = Holiday(
# When July 4th is a Tuesday, Wednesday, or Friday, the previous day is a
# half day.
'Mondays, Tuesdays, and Thursdays Before Independence Day',
month=7,
day=3,
days_of_week=(MONDAY, TUESDAY, THURSDAY),
start_date=Timestamp("1995-01-01"),
)
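# Illustrative sketch of the July 3rd half-day rule above; the sample years
# are chosen only as examples. July 4th fell on a Tuesday in 2017 and a
# Wednesday in 2018, so July 3rd was a Monday and a Tuesday respectively,
# and both qualify.
#
#   >>> MonTuesThursBeforeIndependenceDay.dates('2017-01-01', '2018-12-31')
#   # -> ['2017-07-03', '2018-07-03']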
FridayAfterIndependenceDayExcept2013 = Holiday(
# When July 4th is a Thursday, the next day is a half day (except in 2013,
# when, for no explicable reason, Wednesday was a half day instead).
"Fridays after Independence Day that aren't in 2013",
month=7,
day=5,
days_of_week=(FRIDAY,),
observance=july_5th_holiday_observance,
start_date=Timestamp("1995-01-01"),
)
USBlackFridayBefore1993 = Holiday(
'Black Friday',
month=11,
day=1,
# Black Friday was not observed until 1992.
start_date=Timestamp('1992-01-01'),
end_date=Timestamp('1993-01-01'),
offset=[DateOffset(weekday=TH(4)), Day(1)],
)
USBlackFridayInOrAfter1993 = Holiday(
'Black Friday',
month=11,
day=1,
start_date=Timestamp('1993-01-01'),
offset=[DateOffset(weekday=TH(4)), Day(1)],
)
BattleOfGettysburg = Holiday(
# All of the floor traders in Chicago were sent to PA
'Markets were closed during the battle of Gettysburg',
month=7,
day=(1, 2, 3),
start_date=Timestamp("1863-07-01"),
end_date=Timestamp("1863-07-03")
)
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
September11Closings = date_range('2001-09-11', '2001-09-16', tz='UTC')
# http://en.wikipedia.org/wiki/Hurricane_sandy
HurricaneSandyClosings = date_range(
'2012-10-29',
'2012-10-30',
tz='UTC'
)
# National Days of Mourning
# - President Richard Nixon - April 27, 1994
# - President Ronald W. Reagan - June 11, 2004
# - President Gerald R. Ford - Jan 2, 2007
USNationalDaysofMourning = [
Timestamp('1994-04-27', tz='UTC'),
Timestamp('2004-06-11', tz='UTC'),
Timestamp('2007-01-02', tz='UTC'),
] | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/utils/calendars/us_holidays.py | us_holidays.py |
import abc
import logbook
import pandas as pd
from six import with_metaclass
from zipline.errors import (
AccountControlViolation,
TradingControlViolation,
)
log = logbook.Logger('TradingControl')
class TradingControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, on_error, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.on_error = on_error
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Before any order is executed by TradingAlgorithm, this method should be
called *exactly once* on each registered TradingControl object.
If the specified asset and amount do not violate this TradingControl's
restraint given the information in `portfolio`, this method should
return None and have no externally-visible side-effects.
If the desired order violates this TradingControl's constraint, this
method should call self.fail(asset, amount).
"""
raise NotImplementedError
def _constraint_msg(self, metadata):
constraint = repr(self)
if metadata:
constraint = "{constraint} (Metadata: {metadata})".format(
constraint=constraint,
metadata=metadata
)
return constraint
def handle_violation(self, asset, amount, datetime, metadata=None):
"""
Handle a TradingControlViolation, either by raising it or by logging an
error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = self._constraint_msg(metadata)
if self.on_error == 'fail':
raise TradingControlViolation(
asset=asset,
amount=amount,
datetime=datetime,
constraint=constraint)
elif self.on_error == 'log':
log.error("Order for {amount} shares of {asset} at {dt} "
"violates trading constraint {constraint}",
amount=amount, asset=asset, dt=datetime,
constraint=constraint)
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
class MaxOrderCount(TradingControl):
"""
TradingControl representing a limit on the number of orders that can be
placed in a given trading day.
"""
def __init__(self, on_error, max_count):
super(MaxOrderCount, self).__init__(on_error, max_count=max_count)
self.orders_placed = 0
self.max_count = max_count
self.current_date = None
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.handle_violation(asset, amount, algo_datetime)
self.orders_placed += 1
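# Illustrative sketch of MaxOrderCount. The asset label and timestamp below
# are stand-ins (an algorithm would pass a zipline Asset); only
# `algo_datetime` is actually inspected by this control.
#
#   >>> import pandas as pd
#   >>> control = MaxOrderCount(on_error='log', max_count=1)
#   >>> dt = pd.Timestamp('2016-01-05 15:00', tz='UTC')
#   >>> control.validate('AAPL', 10, None, dt, None)   # first order: ok
#   >>> control.validate('AAPL', 10, None, dt, None)   # second order: violation
#   # With on_error='log' the violation is logged; with on_error='fail' a
#   # TradingControlViolation is raised instead. The counter resets on a new
#   # trading day.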
class RestrictedListOrder(TradingControl):
"""TradingControl representing a restricted list of assets that
cannot be ordered by the algorithm.
Parameters
----------
restrictions : zipline.finance.asset_restrictions.Restrictions
Object representing restrictions of a group of assets.
"""
def __init__(self, on_error, restrictions):
super(RestrictedListOrder, self).__init__(on_error)
self.restrictions = restrictions
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if self.restrictions.is_restricted(asset, algo_datetime):
self.handle_violation(asset, amount, algo_datetime)
class MaxOrderSize(TradingControl):
"""
TradingControl representing a limit on the magnitude of any single order
placed with the given asset. Can be specified by share or by dollar
value.
"""
def __init__(self, on_error, asset=None, max_shares=None,
max_notional=None):
super(MaxOrderSize, self).__init__(on_error,
asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.handle_violation(asset, amount, algo_datetime)
current_asset_price = algo_current_data.current(asset, "price")
order_value = amount * current_asset_price
too_much_value = (self.max_notional is not None and
abs(order_value) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime)
class MaxPositionSize(TradingControl):
"""
TradingControl representing a limit on the maximum position size that can
be held by an algo for a given asset.
"""
def __init__(self, on_error, asset=None, max_shares=None,
max_notional=None):
super(MaxPositionSize, self).__init__(on_error,
asset=asset,
max_shares=max_shares,
max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError(
"Must supply at least one of max_shares and max_notional"
)
if max_shares and max_shares < 0:
raise ValueError(
"max_shares cannot be negative."
)
if max_notional and max_notional < 0:
raise ValueError(
"max_notional must be positive."
)
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (self.max_shares is not None and
abs(shares_post_order) > self.max_shares)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (self.max_notional is not None and
abs(value_post_order) > self.max_notional)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime)
class LongOnly(TradingControl):
"""
TradingControl representing a prohibition against holding short positions.
"""
def __init__(self, on_error):
super(LongOnly, self).__init__(on_error)
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime)
class AssetDateBounds(TradingControl):
"""
TradingControl representing a prohibition against ordering an asset before
its start_date, or after its end_date.
"""
def __init__(self, on_error):
super(AssetDateBounds, self).__init__(on_error)
def validate(self,
asset,
amount,
portfolio,
algo_datetime,
algo_current_data):
"""
Fail if the algo has passed this Asset's end_date, or before the
Asset's start date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {
'asset_start_date': normalized_start
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {
'asset_end_date': normalized_end
}
self.handle_violation(
asset, amount, algo_datetime, metadata=metadata)
class AccountControl(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
On each call to handle data by TradingAlgorithm, this method should be
called *exactly once* on each registered AccountControl object.
If the check does not violate this AccountControl's restraint given
the information in `portfolio` and `account`, this method should
return None and have no externally-visible side-effects.
If the desired order violates this AccountControl's constraint, this
method should call self.fail().
"""
raise NotImplementedError
def fail(self):
"""
Raise an AccountControlViolation with information about the failure.
"""
raise AccountControlViolation(constraint=repr(self))
def __repr__(self):
return "{name}({attrs})".format(name=self.__class__.__name__,
attrs=self.__fail_args)
class MaxLeverage(AccountControl):
"""
AccountControl representing a limit on the maximum leverage allowed
by the algorithm.
"""
def __init__(self, max_leverage):
"""
max_leverage is the gross leverage in decimal form. For example,
2 limits an algorithm to trading at most double the account value.
"""
super(MaxLeverage, self).__init__(max_leverage=max_leverage)
self.max_leverage = max_leverage
if max_leverage is None:
raise ValueError(
"Must supply max_leverage"
)
if max_leverage < 0:
raise ValueError(
"max_leverage must be positive"
)
def validate(self,
_portfolio,
_account,
_algo_datetime,
_algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account.leverage > self.max_leverage:
self.fail() | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/finance/controls.py | controls.py |
TRADING_DAYS_IN_YEAR = 250
TRADING_HOURS_IN_DAY = 6.5
MINUTES_IN_HOUR = 60
ANNUALIZER = {'daily': TRADING_DAYS_IN_YEAR,
'hourly': TRADING_DAYS_IN_YEAR * TRADING_HOURS_IN_DAY,
'minute': TRADING_DAYS_IN_YEAR * TRADING_HOURS_IN_DAY *
MINUTES_IN_HOUR}
# NOTE: It may be worth revisiting how the keys for this dictionary are
# specified, for instance making them ContinuousFuture objects instead of
# static strings.
FUTURE_EXCHANGE_FEES_BY_SYMBOL = {
'AD': 1.60, # AUD
'AI': 0.96, # Bloomberg Commodity Index
'BD': 1.50, # Big Dow
'BO': 1.95, # Soybean Oil
'BP': 1.60, # GBP
'CD': 1.60, # CAD
'CL': 1.50, # Crude Oil
'CM': 1.03, # Corn e-mini
'CN': 1.95, # Corn
'DJ': 1.50, # Dow Jones
'EC': 1.60, # Euro FX
'ED': 1.25, # Eurodollar
'EE': 1.50, # Euro FX e-mini
'EI': 1.50, # MSCI Emerging Markets mini
'EL': 1.50, # Eurodollar NYSE LIFFE
'ER': 0.65, # Russell2000 e-mini
'ES': 1.18, # SP500 e-mini
'ET': 1.50, # Ethanol
'EU': 1.50, # Eurodollar e-micro
'FC': 2.03, # Feeder Cattle
'FF': 0.96, # 3-Day Federal Funds
'FI': 0.56, # Deliverable Interest Rate Swap 5y
'FS': 1.50, # Interest Rate Swap 5y
'FV': 0.65, # US 5y
'GC': 1.50, # Gold
'HG': 1.50, # Copper
'HO': 1.50, # Heating Oil
'HU': 1.50, # Unleaded Gasoline
'JE': 0.16, # JPY e-mini
'JY': 1.60, # JPY
'LB': 2.03, # Lumber
'LC': 2.03, # Live Cattle
'LH': 2.03, # Lean Hogs
'MB': 1.50, # Municipal Bonds
'MD': 1.50, # SP400 Midcap
'ME': 1.60, # MXN
'MG': 1.50, # MSCI EAFE mini
'MI': 1.18, # SP400 Midcap e-mini
'MS': 1.03, # Soybean e-mini
'MW': 1.03, # Wheat e-mini
'ND': 1.50, # Nasdaq100
'NG': 1.50, # Natural Gas
'NK': 2.15, # Nikkei225
'NQ': 1.18, # Nasdaq100 e-mini
'NZ': 1.60, # NZD
'OA': 1.95, # Oats
'PA': 1.50, # Palladium
'PB': 1.50, # Pork Bellies
'PL': 1.50, # Platinum
'QG': 0.50, # Natural Gas e-mini
'QM': 1.20, # Crude Oil e-mini
'RM': 1.50, # Russell1000 e-mini
'RR': 1.95, # Rough Rice
'SB': 2.10, # Sugar
'SF': 1.60, # CHF
'SM': 1.95, # Soybean Meal
'SP': 2.40, # SP500
'SV': 1.50, # Silver
'SY': 1.95, # Soybean
'TB': 1.50, # Treasury Bills
'TN': 0.56, # Deliverable Interest Rate Swap 10y
'TS': 1.50, # Interest Rate Swap 10y
'TU': 1.50, # US 2y
'TY': 0.75, # US 10y
'UB': 0.85, # Ultra Tbond
'US': 0.80, # US 30y
'VX': 1.50, # VIX
'WC': 1.95, # Wheat
'XB': 1.50, # RBOB Gasoline
'XG': 0.75, # Gold e-mini
'YM': 1.50, # Dow Jones e-mini
'YS': 0.75, # Silver e-mini
}
# See `zipline.finance.slippage.VolatilityVolumeShare` for more information on
# how these constants are used.
DEFAULT_ETA = 0.049018143225019836
ROOT_SYMBOL_TO_ETA = {
'AD': DEFAULT_ETA, # AUD
'AI': DEFAULT_ETA, # Bloomberg Commodity Index
'BD': 0.050346811117733474, # Big Dow
'BO': 0.054930995070046298, # Soybean Oil
'BP': 0.047841544238716338, # GBP
'CD': 0.051124420640250717, # CAD
'CL': 0.04852544628414196, # Crude Oil
'CM': 0.052683478163348625, # Corn e-mini
'CN': 0.053499718390037809, # Corn
'DJ': 0.02313009072076987, # Dow Jones
'EC': 0.04885131067661861, # Euro FX
'ED': 0.094184297090245755, # Eurodollar
'EE': 0.048713151357687556, # Euro FX e-mini
'EI': 0.031712708439692663, # MSCI Emerging Markets mini
'EL': 0.044207422018209361, # Eurodollar NYSE LIFFE
'ER': 0.045930567737711307, # Russell2000 e-mini
'ES': 0.047304418321993502, # SP500 e-mini
'ET': DEFAULT_ETA, # Ethanol
'EU': 0.049750396084029064, # Eurodollar e-micro
'FC': 0.058728734202178494, # Feeder Cattle
'FF': 0.048970591527624042, # 3-Day Federal Funds
'FI': 0.033477176738170772, # Deliverable Interest Rate Swap 5y
'FS': 0.034557788010453824, # Interest Rate Swap 5y
'FV': 0.046544427716056963, # US 5y
'GC': 0.048933313546125207, # Gold
'HG': 0.052238417524987799, # Copper
'HO': 0.045061318412156062, # Heating Oil
'HU': 0.017154313062463938, # Unleaded Gasoline
'JE': 0.013948949613401812, # JPY e-mini
'JY': DEFAULT_ETA, # JPY
'LB': 0.06146586386903994, # Lumber
'LC': 0.055853801862858619, # Live Cattle
'LH': 0.057557004630219781, # Lean Hogs
'MB': DEFAULT_ETA, # Municipal Bonds
'MD': DEFAULT_ETA, # SP400 Midcap
'ME': 0.030383767727818548, # MXN
'MG': 0.029579261656151684, # MSCI EAFE mini
'MI': 0.041026288873007355, # SP400 Midcap e-mini
'MS': DEFAULT_ETA, # Soybean e-mini
'MW': 0.052579919663880245, # Wheat e-mini
'ND': DEFAULT_ETA, # Nasdaq100
'NG': 0.047897809233755716, # Natural Gas
'NK': 0.044555435054791433, # Nikkei225
'NQ': 0.044772425085977945, # Nasdaq100 e-mini
'NZ': 0.049170418073872041, # NZD
'OA': 0.056973267232775522, # Oats
'PA': DEFAULT_ETA, # Palladium
'PB': DEFAULT_ETA, # Pork Bellies
'PL': 0.054579379665647493, # Platinum
'QG': DEFAULT_ETA, # Natural Gas e-mini
'QM': DEFAULT_ETA, # Crude Oil e-mini
'RM': 0.037425041244579654, # Russell1000 e-mini
'RR': DEFAULT_ETA, # Rough Rice
'SB': 0.057388160345668134, # Sugar
'SF': 0.047784825569615726, # CHF
'SM': 0.048552860559844223, # Soybean Meal
'SP': DEFAULT_ETA, # SP500
'SV': 0.052691435039931109, # Silver
'SY': 0.052041703657281613, # Soybean
'TB': DEFAULT_ETA, # Treasury Bills
'TN': 0.033363465365262503, # Deliverable Interest Rate Swap 10y
'TS': 0.032908878455069152, # Interest Rate Swap 10y
'TU': 0.063867646063840794, # US 2y
'TY': 0.050586988554700826, # US 10y
'UB': DEFAULT_ETA, # Ultra Tbond
'US': 0.047984179873590722, # US 30y
'VX': DEFAULT_ETA, # VIX
'WC': 0.052636542119329242, # Wheat
'XB': 0.044444916388854484, # RBOB Gasoline
'XG': DEFAULT_ETA, # Gold e-mini
'YM': DEFAULT_ETA, # Dow Jones e-mini
'YS': DEFAULT_ETA, # Silver e-mini
} | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/finance/constants.py | constants.py |
import abc
from numpy import vectorize
from functools import partial, reduce
import operator
import pandas as pd
from six import with_metaclass, iteritems
from collections import namedtuple
from toolz import groupby
from zipline.utils.enum import enum
from zipline.utils.numpy_utils import vectorized_is_element
from zipline.assets import Asset
Restriction = namedtuple(
'Restriction', ['asset', 'effective_date', 'state']
)
RESTRICTION_STATES = enum(
'ALLOWED',
'FROZEN',
)
class Restrictions(with_metaclass(abc.ABCMeta)):
"""
Abstract restricted list interface, representing a set of assets that an
algorithm is restricted from trading.
"""
@abc.abstractmethod
def is_restricted(self, assets, dt):
"""
Is the asset restricted (RestrictionStates.FROZEN) on the given dt?
Parameters
----------
assets : Asset or iterable of Assets
The asset(s) for which we are querying a restriction
dt : pd.Timestamp
The timestamp of the restriction query
Returns
-------
is_restricted : bool or pd.Series[bool] indexed by asset
Is the asset or assets restricted on this dt?
"""
raise NotImplementedError('is_restricted')
def __or__(self, other_restriction):
"""Base implementation for combining two restrictions.
"""
# If the right side is a _UnionRestrictions, defers to the
# _UnionRestrictions implementation of `|`, which intelligently
# flattens restricted lists
if isinstance(other_restriction, _UnionRestrictions):
return other_restriction | self
return _UnionRestrictions([self, other_restriction])
class _UnionRestrictions(Restrictions):
"""
A union of a number of sub restrictions.
Parameters
----------
sub_restrictions : iterable of Restrictions (but not _UnionRestrictions)
The Restrictions to be added together
Notes
-----
- Consumers should not construct instances of this class directly, but
instead use the `|` operator to combine restrictions
"""
def __new__(cls, sub_restrictions):
# Filter out NoRestrictions and deal with resulting cases involving
# one or zero sub_restrictions
sub_restrictions = [
r for r in sub_restrictions if not isinstance(r, NoRestrictions)
]
if len(sub_restrictions) == 0:
return NoRestrictions()
elif len(sub_restrictions) == 1:
return sub_restrictions[0]
new_instance = super(_UnionRestrictions, cls).__new__(cls)
new_instance.sub_restrictions = sub_restrictions
return new_instance
def __or__(self, other_restriction):
"""
Overrides the base implementation for combining two restrictions, of
which the left side is a _UnionRestrictions.
"""
# Flatten the underlying sub restrictions of _UnionRestrictions
if isinstance(other_restriction, _UnionRestrictions):
new_sub_restrictions = \
self.sub_restrictions + other_restriction.sub_restrictions
else:
new_sub_restrictions = self.sub_restrictions + [other_restriction]
return _UnionRestrictions(new_sub_restrictions)
def is_restricted(self, assets, dt):
if isinstance(assets, Asset):
return any(
r.is_restricted(assets, dt) for r in self.sub_restrictions
)
return reduce(
operator.or_,
(r.is_restricted(assets, dt) for r in self.sub_restrictions)
)
class NoRestrictions(Restrictions):
"""
A no-op restrictions that contains no restrictions.
"""
def is_restricted(self, assets, dt):
if isinstance(assets, Asset):
return False
return pd.Series(index=pd.Index(assets), data=False)
class StaticRestrictions(Restrictions):
"""
Static restrictions stored in memory that are constant regardless of dt
for each asset.
Parameters
----------
restricted_list : iterable of assets
The assets to be restricted
"""
def __init__(self, restricted_list):
self._restricted_set = frozenset(restricted_list)
def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set)
)
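# Illustrative sketch of combining restrictions with `|`, assuming `aapl` is
# some zipline Asset object (the name is a placeholder):
#
#   >>> import pandas as pd
#   >>> static = StaticRestrictions([aapl])
#   >>> combined = static | NoRestrictions()
#   >>> combined is static   # NoRestrictions terms are filtered out of unions
#   True
#   >>> static.is_restricted(aapl, pd.Timestamp('2016-01-05', tz='UTC'))
#   True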
class HistoricalRestrictions(Restrictions):
"""
Historical restrictions stored in memory with effective dates for each
asset.
Parameters
----------
restrictions : iterable of namedtuple Restriction
The restrictions, each defined by an asset, effective date and state
"""
def __init__(self, restrictions):
# A dict mapping each asset to its restrictions, which are sorted by
# ascending order of effective_date
self._restrictions_by_asset = {
asset: sorted(
restrictions_for_asset, key=lambda x: x.effective_date
)
for asset, restrictions_for_asset
in iteritems(groupby(lambda x: x.asset, restrictions))
}
def is_restricted(self, assets, dt):
"""
Returns whether or not an asset or iterable of assets is restricted
on a dt.
"""
if isinstance(assets, Asset):
return self._is_restricted_for_asset(assets, dt)
is_restricted = partial(self._is_restricted_for_asset, dt=dt)
return pd.Series(
index=pd.Index(assets),
data=vectorize(is_restricted, otypes=[bool])(assets)
)
def _is_restricted_for_asset(self, asset, dt):
state = RESTRICTION_STATES.ALLOWED
for r in self._restrictions_by_asset.get(asset, ()):
if r.effective_date > dt:
break
state = r.state
return state == RESTRICTION_STATES.FROZEN
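# Illustrative sketch of HistoricalRestrictions, assuming `aapl` is some
# zipline Asset object and using example dates: the most recent restriction
# whose effective_date is on or before the query dt determines the state.
#
#   >>> import pandas as pd
#   >>> restrictions = HistoricalRestrictions([
#   ...     Restriction(aapl, pd.Timestamp('2011-01-04', tz='UTC'),
#   ...                 RESTRICTION_STATES.FROZEN),
#   ...     Restriction(aapl, pd.Timestamp('2011-01-06', tz='UTC'),
#   ...                 RESTRICTION_STATES.ALLOWED),
#   ... ])
#   >>> restrictions.is_restricted(aapl, pd.Timestamp('2011-01-05', tz='UTC'))
#   True
#   >>> restrictions.is_restricted(aapl, pd.Timestamp('2011-01-07', tz='UTC'))
#   False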
class SecurityListRestrictions(Restrictions):
"""
Restrictions based on a security list.
Parameters
----------
restrictions : zipline.utils.security_list.SecurityList
The restrictions defined by a SecurityList
"""
def __init__(self, security_list_by_dt):
self.current_securities = security_list_by_dt.current_securities
def is_restricted(self, assets, dt):
securities_in_list = self.current_securities(dt)
if isinstance(assets, Asset):
return assets in securities_in_list
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, securities_in_list)
) | zipline-live | /zipline-live-1.1.0.5.tar.gz/zipline-live-1.1.0.5/zipline/finance/asset_restrictions.py | asset_restrictions.py |
from abc import abstractmethod
from collections import defaultdict
from six import with_metaclass
from toolz import merge
from zipline.assets import Equity, Future
from zipline.finance.constants import FUTURE_EXCHANGE_FEES_BY_SYMBOL
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.utils.dummy import DummyMapping
DEFAULT_PER_SHARE_COST = 0.0075 # 0.75 cents per share
DEFAULT_PER_CONTRACT_COST = 0.85 # $0.85 per future contract
DEFAULT_PER_DOLLAR_COST = 0.0015 # 0.15 cents per dollar
DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE = 1.0 # $1 per trade
DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE = 1.0 # $1 per trade
class CommissionModel(with_metaclass(FinancialModelMeta)):
"""
Abstract commission model interface.
Commission models are responsible for accepting order/transaction pairs and
calculating how much commission should be charged to an algorithm's account
on each transaction.
"""
# Asset types that are compatible with the given model.
allowed_asset_types = (Equity, Future)
@abstractmethod
def calculate(self, order, transaction):
"""
Calculate the amount of commission to charge on ``order`` as a result
of ``transaction``.
Parameters
----------
order : zipline.finance.order.Order
The order being processed.
The ``commission`` field of ``order`` is a float indicating the
amount of commission already charged on this order.
transaction : zipline.finance.transaction.Transaction
The transaction being processed. A single order may generate
multiple transactions if there isn't enough volume in a given bar
to fill the full amount requested in the order.
Returns
-------
amount_charged : float
The additional commission, in dollars, that we should attribute to
this order.
"""
raise NotImplementedError('calculate')
class EquityCommissionModel(with_metaclass(AllowedAssetMarker,
CommissionModel)):
"""
Base class for commission models which only support equities.
"""
allowed_asset_types = (Equity,)
class FutureCommissionModel(with_metaclass(AllowedAssetMarker,
CommissionModel)):
"""
Base class for commission models which only support futures.
"""
allowed_asset_types = (Future,)
def calculate_per_unit_commission(order,
transaction,
cost_per_unit,
initial_commission,
min_trade_cost):
"""
If there is a minimum commission:
If the order hasn't had a commission paid yet, pay the minimum
commission.
If the order has paid a commission, start paying additional
commission once the minimum commission has been reached.
If there is no minimum commission:
Pay commission based on number of units in the transaction.
"""
additional_commission = abs(transaction.amount * cost_per_unit)
if order.commission == 0:
# no commission paid yet, pay at least the minimum plus a one-time
# exchange fee.
return max(min_trade_cost, additional_commission + initial_commission)
else:
# we've already paid some commission, so figure out how much we
# would be paying if we only counted per unit.
per_unit_total = \
(order.filled * cost_per_unit) + \
additional_commission + \
initial_commission
if per_unit_total < min_trade_cost:
# if we haven't hit the minimum threshold yet, don't pay
# additional commission
return 0
else:
# we've exceeded the threshold, so pay more commission.
return per_unit_total - order.commission
class PerShare(EquityCommissionModel):
"""
Calculates a commission for a transaction based on a per share cost with
an optional minimum cost per trade.
Parameters
----------
cost : float, optional
The amount of commissions paid per share traded.
min_trade_cost : float, optional
The minimum amount of commissions paid per trade.
"""
def __init__(self,
cost=DEFAULT_PER_SHARE_COST,
min_trade_cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE):
self.cost_per_share = float(cost)
self.min_trade_cost = min_trade_cost or 0
def __repr__(self):
return (
'{class_name}(cost_per_share={cost_per_share}, '
'min_trade_cost={min_trade_cost})'
.format(
class_name=self.__class__.__name__,
cost_per_share=self.cost_per_share,
min_trade_cost=self.min_trade_cost,
)
)
def calculate(self, order, transaction):
return calculate_per_unit_commission(
order=order,
transaction=transaction,
cost_per_unit=self.cost_per_share,
initial_commission=0,
min_trade_cost=self.min_trade_cost,
)
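# Illustrative sketch of the minimum-commission mechanics implemented by
# calculate_per_unit_commission. The order/transaction stand-ins below only
# carry the fields the model actually reads; they are not real zipline
# objects.
#
#   >>> from collections import namedtuple
#   >>> FakeOrder = namedtuple('FakeOrder', ['commission', 'filled'])
#   >>> FakeTxn = namedtuple('FakeTxn', ['amount', 'price'])
#   >>> model = PerShare(cost=0.01, min_trade_cost=1.0)
#   >>> model.calculate(FakeOrder(commission=0, filled=0),
#   ...                 FakeTxn(amount=50, price=10.0))
#   1.0
#   >>> model.calculate(FakeOrder(commission=1.0, filled=50),
#   ...                 FakeTxn(amount=100, price=10.0))
#   0.5
#   # The first fill is charged the $1 minimum; once the cumulative per-share
#   # commission (150 shares * $0.01 = $1.50) exceeds what has been paid,
#   # only the difference ($0.50) is charged.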
class PerContract(FutureCommissionModel):
"""
Calculates a commission for a transaction based on a per contract cost with
an optional minimum cost per trade.
Parameters
----------
cost : float or dict
The amount of commissions paid per contract traded. If given a float,
the commission for all futures contracts is the same. If given a
dictionary, it must map root symbols to the commission cost for
contracts of that symbol.
exchange_fee : float or dict
A flat-rate fee charged by the exchange per trade. This value is a
constant, one-time charge no matter how many contracts are being
traded. If given a float, the fee for all contracts is the same. If
given a dictionary, it must map root symbols to the fee for contracts
of that symbol.
min_trade_cost : float, optional
The minimum amount of commissions paid per trade.
"""
def __init__(self,
cost,
exchange_fee,
min_trade_cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE):
# If 'cost' or 'exchange fee' are constants, use a dummy mapping to
# treat them as a dictionary that always returns the same value.
# NOTE: These dictionaries do not handle unknown root symbols, so it
# may be worth revisiting this behavior.
if isinstance(cost, (int, float)):
self._cost_per_contract = DummyMapping(float(cost))
else:
# Cost per contract is a dictionary. If the user's dictionary does
# not provide a commission cost for a certain contract, fall back
# on the pre-defined cost values per root symbol.
self._cost_per_contract = defaultdict(
lambda: DEFAULT_PER_CONTRACT_COST, **cost
)
if isinstance(exchange_fee, (int, float)):
self._exchange_fee = DummyMapping(float(exchange_fee))
else:
# Exchange fee is a dictionary. If the user's dictionary does not
# provide an exchange fee for a certain contract, fall back on the
# pre-defined exchange fees per root symbol.
self._exchange_fee = merge(
FUTURE_EXCHANGE_FEES_BY_SYMBOL, exchange_fee,
)
self.min_trade_cost = min_trade_cost or 0
def __repr__(self):
if isinstance(self._cost_per_contract, DummyMapping):
# Cost per contract is a constant, so extract it.
cost_per_contract = self._cost_per_contract['dummy key']
else:
cost_per_contract = '<varies>'
if isinstance(self._exchange_fee, DummyMapping):
# Exchange fee is a constant, so extract it.
exchange_fee = self._exchange_fee['dummy key']
else:
exchange_fee = '<varies>'
return (
'{class_name}(cost_per_contract={cost_per_contract}, '
'exchange_fee={exchange_fee}, min_trade_cost={min_trade_cost})'
.format(
class_name=self.__class__.__name__,
cost_per_contract=cost_per_contract,
exchange_fee=exchange_fee,
min_trade_cost=self.min_trade_cost,
)
)
def calculate(self, order, transaction):
root_symbol = order.asset.root_symbol
cost_per_contract = self._cost_per_contract[root_symbol]
exchange_fee = self._exchange_fee[root_symbol]
return calculate_per_unit_commission(
order=order,
transaction=transaction,
cost_per_unit=cost_per_contract,
initial_commission=exchange_fee,
min_trade_cost=self.min_trade_cost,
)
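# Illustrative sketch of the per-root-symbol fallbacks above (the symbols are
# examples, and the attributes inspected here are internal): costs not
# supplied by the user fall back to DEFAULT_PER_CONTRACT_COST, and exchange
# fees fall back to FUTURE_EXCHANGE_FEES_BY_SYMBOL.
#
#   >>> model = PerContract(cost={'CL': 1.00}, exchange_fee={})
#   >>> model._cost_per_contract['ES']   # not supplied -> default cost
#   0.85
#   >>> model._exchange_fee['ES']        # not supplied -> table value
#   1.18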
class PerTrade(CommissionModel):
"""
Calculates a commission for a transaction based on a per trade cost.
Parameters
----------
cost : float, optional
The flat amount of commissions paid per equity trade.
"""
def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE):
"""
Cost parameter is the cost of a trade, regardless of share count.
$5.00 per trade is fairly typical of discount brokers.
"""
# Cost needs to be floating point so that calculation using division
# logic does not floor to an integer.
self.cost = float(cost)
def __repr__(self):
return '{class_name}(cost_per_trade={cost})'.format(
class_name=self.__class__.__name__, cost=self.cost,
)
def calculate(self, order, transaction):
"""
If the order hasn't had a commission paid yet, pay the fixed
commission.
"""
if order.commission == 0:
# if the order hasn't had a commission attributed to it yet,
# that's what we need to pay.
return self.cost
else:
# order has already had commission attributed, so no more
# commission.
return 0.0
class PerFutureTrade(PerContract):
"""
Calculates a commission for a transaction based on a per trade cost.
Parameters
----------
cost : float or dict
The flat amount of commissions paid per trade, regardless of the number
of contracts being traded. If given a float, the commission for all
futures contracts is the same. If given a dictionary, it must map root
symbols to the commission cost for trading contracts of that symbol.
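    Examples
    --------
    Illustrative sketch; the root symbols and fees are assumptions for
    demonstration only.

    >>> from zipline.finance.commission import PerFutureTrade
    >>> # One flat fee per trade for every futures contract.
    >>> model = PerFutureTrade(cost=5.0)
    >>> # Or a different flat fee per root symbol.
    >>> model = PerFutureTrade(cost={'ES': 3.0, 'CL': 5.0})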
"""
def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE):
# The per-trade cost can be represented as the exchange fee in a
# per-contract model because the exchange fee is just a one time cost
# incurred on the first fill.
super(PerFutureTrade, self).__init__(
cost=0, exchange_fee=cost, min_trade_cost=0,
)
self._cost_per_trade = self._exchange_fee
def __repr__(self):
if isinstance(self._cost_per_trade, DummyMapping):
# Cost per trade is a constant, so extract it.
cost_per_trade = self._cost_per_trade['dummy key']
else:
cost_per_trade = '<varies>'
return '{class_name}(cost_per_trade={cost_per_trade})'.format(
class_name=self.__class__.__name__, cost_per_trade=cost_per_trade,
)
class PerDollar(EquityCommissionModel):
"""
Calculates a commission for a transaction based on a per dollar cost.
Parameters
----------
cost : float
The flat amount of commissions paid per dollar of equities traded.
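    Examples
    --------
    Illustrative sketch: at a rate of 0.0015 per dollar, a fill of 100
    shares at $100.00 (a $10,000 trade) incurs roughly $15.00 of
    commission (100 * 100 * 0.0015).

    >>> from zipline.finance.commission import PerDollar
    >>> model = PerDollar(cost=0.0015)
    >>> model.cost_per_dollar
    0.0015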
"""
def __init__(self, cost=DEFAULT_PER_DOLLAR_COST):
"""
Cost parameter is the cost of a trade per-dollar. 0.0015
on $1 million means $1,500 commission (=1M * 0.0015)
"""
self.cost_per_dollar = float(cost)
def __repr__(self):
return "{class_name}(cost_per_dollar={cost})".format(
class_name=self.__class__.__name__,
cost=self.cost_per_dollar)
def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
        return abs(transaction.amount) * cost_per_share
# end of zipline/finance/commission.py (zipline-live 1.1.0.5)
import abc
from sys import float_info
from six import with_metaclass
import zipline.utils.math_utils as zp_math
from numpy import isfinite
from zipline.errors import BadOrderParameters
class ExecutionStyle(with_metaclass(abc.ABCMeta)):
"""
Abstract base class representing a modification to a standard order.
"""
_exchange = None
@abc.abstractmethod
def get_limit_price(self, is_buy):
"""
Get the limit price for this order.
Returns either None or a numerical value >= 0.
"""
        raise NotImplementedError
@abc.abstractmethod
def get_stop_price(self, is_buy):
"""
Get the stop price for this order.
Returns either None or a numerical value >= 0.
"""
        raise NotImplementedError
@property
def exchange(self):
"""
The exchange to which this order should be routed.
"""
return self._exchange
class MarketOrder(ExecutionStyle):
"""
Class encapsulating an order to be placed at the current market price.
"""
def __init__(self, exchange=None):
self._exchange = exchange
def get_limit_price(self, _is_buy):
return None
def get_stop_price(self, _is_buy):
return None
class LimitOrder(ExecutionStyle):
"""
Execution style representing an order to be executed at a price equal to or
better than a specified limit price.
"""
def __init__(self, limit_price, exchange=None):
"""
Store the given price.
"""
check_stoplimit_prices(limit_price, 'limit')
self.limit_price = limit_price
self._exchange = exchange
def get_limit_price(self, is_buy):
return asymmetric_round_price_to_penny(self.limit_price, is_buy)
def get_stop_price(self, _is_buy):
return None
class StopOrder(ExecutionStyle):
"""
Execution style representing an order to be placed once the market price
reaches a specified stop price.
"""
def __init__(self, stop_price, exchange=None):
"""
Store the given price.
"""
check_stoplimit_prices(stop_price, 'stop')
self.stop_price = stop_price
self._exchange = exchange
def get_limit_price(self, _is_buy):
return None
def get_stop_price(self, is_buy):
return asymmetric_round_price_to_penny(self.stop_price, not is_buy)
class StopLimitOrder(ExecutionStyle):
"""
Execution style representing a limit order to be placed with a specified
limit price once the market reaches a specified stop price.
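    Examples
    --------
    Illustrative prices only:

    >>> from zipline.finance.execution import StopLimitOrder
    >>> style = StopLimitOrder(limit_price=10.50, stop_price=10.00)
    >>> # Effective prices are rounded asymmetrically toward the side that
    >>> # "improves" them for the order's direction (see
    >>> # asymmetric_round_price_to_penny below).
    >>> buy_limit = style.get_limit_price(is_buy=True)
    >>> buy_stop = style.get_stop_price(is_buy=True)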
"""
def __init__(self, limit_price, stop_price, exchange=None):
"""
Store the given prices
"""
check_stoplimit_prices(limit_price, 'limit')
check_stoplimit_prices(stop_price, 'stop')
self.limit_price = limit_price
self.stop_price = stop_price
self._exchange = exchange
def get_limit_price(self, is_buy):
return asymmetric_round_price_to_penny(self.limit_price, is_buy)
def get_stop_price(self, is_buy):
return asymmetric_round_price_to_penny(self.stop_price, not is_buy)
def asymmetric_round_price_to_penny(price, prefer_round_down,
diff=(0.0095 - .005)):
"""
Asymmetric rounding function for adjusting prices to two places in a way
that "improves" the price. For limit prices, this means preferring to
round down on buys and preferring to round up on sells. For stop prices,
it means the reverse.
If prefer_round_down == True:
When .05 below to .95 above a penny, use that penny.
If prefer_round_down == False:
When .95 below to .05 above a penny, use that penny.
In math-speak:
If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
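    A rough, illustrative example (the prices are arbitrary)::

        asymmetric_round_price_to_penny(3.0149, prefer_round_down=True)
        # -> 3.01, nudging a buy limit down to the nearest penny
        asymmetric_round_price_to_penny(3.0149, prefer_round_down=False)
        # -> 3.02, nudging a sell limit up to the nearest penny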
"""
# Subtracting an epsilon from diff to enforce the open-ness of the upper
# bound on buys and the lower bound on sells. Using the actual system
# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.
epsilon = float_info.epsilon * 10
diff = diff - epsilon
# relies on rounding half away from zero, unlike numpy's bankers' rounding
rounded = round(price - (diff if prefer_round_down else -diff), 2)
if zp_math.tolerant_equals(rounded, 0.0):
return 0.0
return rounded
def check_stoplimit_prices(price, label):
"""
Check to make sure the stop/limit prices are reasonable and raise
a BadOrderParameters exception if not.
"""
try:
if not isfinite(price):
raise BadOrderParameters(
msg="Attempted to place an order with a {} price "
"of {}.".format(label, price)
)
# This catches arbitrary objects
except TypeError:
raise BadOrderParameters(
msg="Attempted to place an order with a {} price "
"of {}.".format(label, type(price))
)
if price < 0:
raise BadOrderParameters(
msg="Can't place a {} order with a negative price.".format(label)
        )
# end of zipline/finance/execution.py (zipline-live 1.1.0.5)
from logbook import Logger
from collections import defaultdict
from copy import copy
from six import iteritems
from zipline.assets import Equity, Future, Asset
from zipline.finance.order import Order
from zipline.finance.slippage import (
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
VolatilityVolumeShare,
VolumeShareSlippage,
)
from zipline.finance.commission import (
DEFAULT_PER_CONTRACT_COST,
FUTURE_EXCHANGE_FEES_BY_SYMBOL,
PerContract,
PerShare,
)
from zipline.finance.cancel_policy import NeverCancel
from zipline.utils.input_validation import expect_types
log = Logger('Blotter')
warning_logger = Logger('AlgoWarning')
class Blotter(object):
def __init__(self, data_frequency, equity_slippage=None,
future_slippage=None, equity_commission=None,
future_commission=None, cancel_policy=None):
# these orders are aggregated by asset
self.open_orders = defaultdict(list)
# keep a dict of orders by their own id
self.orders = {}
# holding orders that have come in since the last event.
self.new_orders = []
self.current_dt = None
self.max_shares = int(1e+11)
self.slippage_models = {
Equity: equity_slippage or VolumeShareSlippage(),
Future: future_slippage or VolatilityVolumeShare(
volume_limit=DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
),
}
self.commission_models = {
Equity: equity_commission or PerShare(),
Future: future_commission or PerContract(
cost=DEFAULT_PER_CONTRACT_COST,
exchange_fee=FUTURE_EXCHANGE_FEES_BY_SYMBOL,
),
}
self.data_frequency = data_frequency
self.cancel_policy = cancel_policy if cancel_policy else NeverCancel()
def __repr__(self):
return """
{class_name}(
slippage_models={slippage_models},
commission_models={commission_models},
open_orders={open_orders},
orders={orders},
new_orders={new_orders},
current_dt={current_dt})
""".strip().format(class_name=self.__class__.__name__,
slippage_models=self.slippage_models,
commission_models=self.commission_models,
open_orders=self.open_orders,
orders=self.orders,
new_orders=self.new_orders,
current_dt=self.current_dt)
def set_date(self, dt):
self.current_dt = dt
@expect_types(asset=Asset)
def order(self, asset, amount, style, order_id=None):
"""Place an order.
Parameters
----------
asset : zipline.assets.Asset
The asset that this order is for.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
style : zipline.finance.execution.ExecutionStyle
The execution style for the order.
order_id : str, optional
The unique identifier for this order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
amount > 0 :: Buy/Cover
amount < 0 :: Sell/Short
Market order: order(asset, amount)
Limit order: order(asset, amount, style=LimitOrder(limit_price))
Stop order: order(asset, amount, style=StopOrder(stop_price))
StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price,
stop_price))
"""
# something could be done with amount to further divide
# between buy by share count OR buy shares up to a dollar amount
# numeric == share count AND "$dollar.cents" == cost amount
if amount == 0:
# Don't bother placing orders for 0 shares.
return None
elif amount > self.max_shares:
# Arbitrary limit of 100 billion (US) shares will never be
# exceeded except by a buggy algorithm.
raise OverflowError("Can't order more than %d shares" %
self.max_shares)
is_buy = (amount > 0)
order = Order(
dt=self.current_dt,
asset=asset,
amount=amount,
stop=style.get_stop_price(is_buy),
limit=style.get_limit_price(is_buy),
id=order_id
)
self.open_orders[order.asset].append(order)
self.orders[order.id] = order
self.new_orders.append(order)
return order.id
def batch_order(self, order_arg_lists):
"""Place a batch of orders.
Parameters
----------
order_arg_lists : iterable[tuple]
Tuples of args that `order` expects.
Returns
-------
order_ids : list[str or None]
The unique identifier (or None) for each of the orders placed
(or not placed).
Notes
-----
This is required for `Blotter` subclasses to be able to place a batch
of orders, instead of being passed the order requests one at a time.
"""
return [self.order(*order_args) for order_args in order_arg_lists]
def cancel(self, order_id, relay_status=True):
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.cancel()
cur_order.dt = self.current_dt
if relay_status:
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def cancel_all_orders_for_asset(self, asset, warn=False,
relay_status=True):
"""
Cancel all open orders for a given asset.
"""
# (sadly) open_orders is a defaultdict, so this will always succeed.
orders = self.open_orders[asset]
# We're making a copy here because `cancel` mutates the list of open
# orders in place. The right thing to do here would be to make
# self.open_orders no longer a defaultdict. If we do that, then we
# should just remove the orders once here and be done with the matter.
for order in orders[:]:
self.cancel(order.id, relay_status)
if warn:
# Message appropriately depending on whether there's
# been a partial fill or not.
if order.filled > 0:
warning_logger.warn(
'Your order for {order_amt} shares of '
'{order_sym} has been partially filled. '
'{order_filled} shares were successfully '
'purchased. {order_failed} shares were not '
'filled by the end of day and '
'were canceled.'.format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=order.filled,
order_failed=order.amount - order.filled,
)
)
elif order.filled < 0:
warning_logger.warn(
'Your order for {order_amt} shares of '
'{order_sym} has been partially filled. '
'{order_filled} shares were successfully '
'sold. {order_failed} shares were not '
'filled by the end of day and '
'were canceled.'.format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=-1 * order.filled,
order_failed=-1 * (order.amount - order.filled),
)
)
else:
warning_logger.warn(
'Your order for {order_amt} shares of '
'{order_sym} failed to fill by the end of day '
'and was canceled.'.format(
order_amt=order.amount,
order_sym=order.asset.symbol,
)
)
assert not orders
del self.open_orders[asset]
def execute_cancel_policy(self, event):
if self.cancel_policy.should_cancel(event):
warn = self.cancel_policy.warn_on_cancel
for asset in copy(self.open_orders):
self.cancel_all_orders_for_asset(asset, warn,
relay_status=False)
def reject(self, order_id, reason=''):
"""
Mark the given order as 'rejected', which is functionally similar to
cancelled. The distinction is that rejections are involuntary (and
usually include a message from a broker indicating why the order was
rejected) while cancels are typically user-driven.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.reject(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def hold(self, order_id, reason=''):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.hold(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def process_splits(self, splits):
"""
Processes a list of splits by modifying any open orders as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
None
"""
for asset, ratio in splits:
if asset not in self.open_orders:
continue
orders_to_modify = self.open_orders[asset]
for order in orders_to_modify:
order.handle_split(ratio)
def get_transactions(self, bar_data):
"""
Creates a list of transactions based on the current open orders,
slippage model, and commission model.
Parameters
----------
bar_data: zipline._protocol.BarData
Notes
-----
This method book-keeps the blotter's open_orders dictionary, so that
it is accurate by the time we're done processing open orders.
        Returns
        -------
        transactions_list : list
            Transactions resulting from the current open orders. If there
            were no open orders, an empty list is returned.
        commissions_list : list
            Commissions resulting from filling the open orders. Each
            commission is a dict with "asset", "order", and "cost" keys.
        closed_orders : list
            All orders that have been completely filled.
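        Examples
        --------
        Typical consumption pattern (a sketch; ``blotter`` and ``bar_data``
        are assumed to already exist in the surrounding simulation loop)::

            txns, commissions, closed = blotter.get_transactions(bar_data)
            blotter.prune_orders(closed)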
"""
closed_orders = []
transactions = []
commissions = []
if self.open_orders:
for asset, asset_orders in iteritems(self.open_orders):
slippage = self.slippage_models[type(asset)]
for order, txn in \
slippage.simulate(bar_data, asset, asset_orders):
commission = self.commission_models[type(asset)]
additional_commission = commission.calculate(order, txn)
if additional_commission > 0:
commissions.append({
"asset": order.asset,
"order": order,
"cost": additional_commission
})
order.filled += txn.amount
order.commission += additional_commission
order.dt = txn.dt
transactions.append(txn)
if not order.open:
closed_orders.append(order)
return transactions, commissions, closed_orders
def prune_orders(self, closed_orders):
"""
Removes all given orders from the blotter's open_orders list.
Parameters
----------
closed_orders: iterable of orders that are closed.
Returns
-------
None
"""
# remove all closed orders from our open_orders dict
for order in closed_orders:
asset = order.asset
asset_orders = self.open_orders[asset]
try:
asset_orders.remove(order)
except ValueError:
continue
# now clear out the assets from our open_orders dict that have
# zero open orders
for asset in list(self.open_orders.keys()):
if len(self.open_orders[asset]) == 0:
                del self.open_orders[asset]
# end of zipline/finance/blotter.py (zipline-live 1.1.0.5)
from functools import partial
import logbook
import pandas as pd
from pandas.tslib import normalize_date
from six import string_types
from sqlalchemy import create_engine
from zipline.assets import AssetDBWriter, AssetFinder
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.loader import load_market_data
from zipline.utils.calendars import get_calendar
from zipline.utils.memoize import remember_last
log = logbook.Logger('Trading')
DEFAULT_CAPITAL_BASE = 1e5
class TradingEnvironment(object):
"""
The financial simulations in zipline depend on information
about the benchmark index and the risk free rates of return.
The benchmark index defines the benchmark returns used in
the calculation of performance metrics such as alpha/beta. Many
components, including risk, performance, transforms, and
batch_transforms, need access to a calendar of trading days and
market hours. The TradingEnvironment maintains two time keeping
facilities:
- a DatetimeIndex of trading days for calendar calculations
- a timezone name, which should be local to the exchange
hosting the benchmark index. All dates are normalized to UTC
for serialization and storage, and the timezone is used to
ensure proper rollover through daylight savings and so on.
User code will not normally need to use TradingEnvironment
directly. If you are extending zipline's core financial
components and need to use the environment, you must import the module and
build a new TradingEnvironment object, then pass that TradingEnvironment as
the 'env' arg to your TradingAlgorithm.
Parameters
----------
load : callable, optional
The function that returns benchmark returns and treasury curves.
The treasury curves are expected to be a DataFrame with an index of
dates and columns of the curve names, e.g. '10year', '1month', etc.
bm_symbol : str, optional
The benchmark symbol
exchange_tz : tz-coercable, optional
The timezone of the exchange.
trading_calendar : TradingCalendar, optional
The trading calendar to work with in this environment.
asset_db_path : str or sa.engine.Engine, optional
The path to the assets db or sqlalchemy Engine object to use to
construct an AssetFinder.
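    Examples
    --------
    A minimal construction sketch that skips benchmark loading by passing
    the ``noop_load`` helper defined at the bottom of this module:

    >>> from zipline.finance.trading import TradingEnvironment, noop_load
    >>> env = TradingEnvironment(load=noop_load)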
"""
# Token used as a substitute for pickling objects that contain a
# reference to a TradingEnvironment
PERSISTENT_TOKEN = "<TradingEnvironment>"
def __init__(
self,
load=None,
bm_symbol='SPY',
exchange_tz="US/Eastern",
trading_calendar=None,
asset_db_path=':memory:',
future_chain_predicates=CHAIN_PREDICATES,
environ=None,
):
self.bm_symbol = bm_symbol
if not load:
load = partial(load_market_data, environ=environ)
if not trading_calendar:
trading_calendar = get_calendar("NYSE")
self.benchmark_returns, self.treasury_curves = load(
trading_calendar.day,
trading_calendar.schedule.index,
self.bm_symbol,
)
self.exchange_tz = exchange_tz
if isinstance(asset_db_path, string_types):
asset_db_path = 'sqlite:///' + asset_db_path
self.engine = engine = create_engine(asset_db_path)
else:
self.engine = engine = asset_db_path
if engine is not None:
AssetDBWriter(engine).init_db()
self.asset_finder = AssetFinder(
engine,
future_chain_predicates=future_chain_predicates)
else:
self.asset_finder = None
def write_data(self, **kwargs):
"""Write data into the asset_db.
Parameters
----------
**kwargs
Forwarded to AssetDBWriter.write
"""
AssetDBWriter(self.engine).write(**kwargs)
class SimulationParameters(object):
def __init__(self, start_session, end_session,
trading_calendar,
capital_base=DEFAULT_CAPITAL_BASE,
emission_rate='daily',
data_frequency='daily',
arena='backtest'):
assert type(start_session) == pd.Timestamp
assert type(end_session) == pd.Timestamp
assert trading_calendar is not None, \
"Must pass in trading calendar!"
assert start_session <= end_session, \
"Period start falls after period end."
assert start_session <= trading_calendar.last_trading_session, \
"Period start falls after the last known trading day."
assert end_session >= trading_calendar.first_trading_session, \
"Period end falls before the first known trading day."
# chop off any minutes or hours on the given start and end dates,
# as we only support session labels here (and we represent session
# labels as midnight UTC).
self._start_session = normalize_date(start_session)
self._end_session = normalize_date(end_session)
self._capital_base = capital_base
self._emission_rate = emission_rate
self._data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self._arena = arena
self._trading_calendar = trading_calendar
if not trading_calendar.is_session(self._start_session):
# if the start date is not a valid session in this calendar,
# push it forward to the first valid session
self._start_session = trading_calendar.minute_to_session_label(
self._start_session
)
if not trading_calendar.is_session(self._end_session):
# if the end date is not a valid session in this calendar,
# pull it backward to the last valid session before the given
# end date.
self._end_session = trading_calendar.minute_to_session_label(
self._end_session, direction="previous"
)
self._first_open = trading_calendar.open_and_close_for_session(
self._start_session
)[0]
self._last_close = trading_calendar.open_and_close_for_session(
self._end_session
)[1]
@property
def capital_base(self):
return self._capital_base
@property
def emission_rate(self):
return self._emission_rate
@property
def data_frequency(self):
return self._data_frequency
@data_frequency.setter
def data_frequency(self, val):
self._data_frequency = val
@property
def arena(self):
return self._arena
@arena.setter
def arena(self, val):
self._arena = val
@property
def start_session(self):
return self._start_session
@property
def end_session(self):
return self._end_session
@property
def first_open(self):
return self._first_open
@property
def last_close(self):
return self._last_close
@property
@remember_last
def sessions(self):
return self._trading_calendar.sessions_in_range(
self.start_session,
self.end_session
)
def create_new(self, start_session, end_session):
return SimulationParameters(
start_session,
end_session,
self._trading_calendar,
capital_base=self.capital_base,
emission_rate=self.emission_rate,
data_frequency=self.data_frequency,
arena=self.arena
)
def __repr__(self):
return """
{class_name}(
start_session={start_session},
end_session={end_session},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close})\
""".format(class_name=self.__class__.__name__,
start_session=self.start_session,
end_session=self.end_session,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close)
def noop_load(*args, **kwargs):
"""
A method that can be substituted in as the load method in a
TradingEnvironment to prevent it from loading benchmarks.
Accepts any arguments, but returns only a tuple of Nones regardless
of input.
"""
    return None, None
# end of zipline/finance/trading.py (zipline-live 1.1.0.5)
from __future__ import division
from abc import abstractmethod
import math
import numpy as np
from pandas import isnull
from six import with_metaclass
from toolz import merge
from zipline.assets import Equity, Future
from zipline.errors import HistoryWindowStartsBeforeData
from zipline.finance.constants import ROOT_SYMBOL_TO_ETA
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.finance.transaction import create_transaction
from zipline.utils.cache import ExpiringCache
from zipline.utils.dummy import DummyMapping
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
SQRT_252 = math.sqrt(252)
DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05
class LiquidityExceeded(Exception):
pass
def fill_price_worse_than_limit_price(fill_price, order):
"""
Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell).
"""
if order.limit:
# this is tricky! if an order with a limit price has reached
# the limit price, we will try to fill the order. do not fill
# these shares if the impacted price is worse than the limit
# price. return early to avoid creating the transaction.
# buy order is worse if the impacted price is greater than
# the limit price. sell order is worse if the impacted price
# is less than the limit price
if (order.direction > 0 and fill_price > order.limit) or \
(order.direction < 0 and fill_price < order.limit):
return True
return False
class SlippageModel(with_metaclass(FinancialModelMeta)):
"""Abstract interface for defining a slippage model.
"""
# Asset types that are compatible with the given model.
allowed_asset_types = (Equity, Future)
def __init__(self):
self._volume_for_bar = 0
@property
def volume_for_bar(self):
return self._volume_for_bar
@abstractmethod
def process_order(self, data, order):
"""Process how orders get filled.
Parameters
----------
data : BarData
The data for the given bar.
order : Order
The order to simulate.
Returns
-------
execution_price : float
The price to execute the trade at.
execution_volume : int
The number of shares that could be filled. This may not be all
the shares ordered in which case the order will be filled over
multiple bars.
"""
pass
def simulate(self, data, asset, orders_for_asset):
self._volume_for_bar = 0
volume = data.current(asset, "volume")
if volume == 0:
return
# can use the close price, since we verified there's volume in this
# bar.
price = data.current(asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
return
# END
dt = data.current_dt
for order in orders_for_asset:
if order.open_amount == 0:
continue
order.check_triggers(price, dt)
if not order.triggered:
continue
txn = None
try:
execution_price, execution_volume = \
self.process_order(data, order)
if execution_price is not None:
txn = create_transaction(
order,
data.current_dt,
execution_price,
execution_volume
)
except LiquidityExceeded:
break
if txn:
self._volume_for_bar += abs(txn.amount)
yield order, txn
def asdict(self):
return self.__dict__
class EquitySlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):
"""
Base class for slippage models which only support equities.
"""
allowed_asset_types = (Equity,)
class FutureSlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)):
"""
Base class for slippage models which only support futures.
"""
allowed_asset_types = (Future,)
class VolumeShareSlippage(SlippageModel):
"""
    Model slippage as a function of the fraction of a bar's total volume
    consumed by the order, with a quadratic price impact.
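    Parameters
    ----------
    volume_limit : float, optional
        Maximum fraction of a bar's total volume that can be filled against
        (defaults to 0.025 for equities).
    price_impact : float, optional
        Coefficient scaling the quadratic price-impact term (defaults to
        0.1).

    Examples
    --------
    Illustrative parameters only (these happen to be the defaults):

    >>> from zipline.finance.slippage import VolumeShareSlippage
    >>> model = VolumeShareSlippage(volume_limit=0.025, price_impact=0.1)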
"""
def __init__(self, volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT,
price_impact=0.1):
super(VolumeShareSlippage, self).__init__()
self.volume_limit = volume_limit
self.price_impact = price_impact
def __repr__(self):
return """
{class_name}(
volume_limit={volume_limit},
price_impact={price_impact})
""".strip().format(class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
price_impact=self.price_impact)
def process_order(self, data, order):
volume = data.current(order.asset, "volume")
max_volume = self.volume_limit * volume
# price impact accounts for the total volume of transactions
# created against the current minute bar
remaining_volume = max_volume - self.volume_for_bar
if remaining_volume < 1:
# we can't fill any more transactions
raise LiquidityExceeded()
# the current order amount will be the min of the
# volume available in the bar or the open amount.
cur_volume = int(min(remaining_volume, abs(order.open_amount)))
if cur_volume < 1:
return None, None
# tally the current amount into our total amount ordered.
# total amount will be used to calculate price impact
total_volume = self.volume_for_bar + cur_volume
volume_share = min(total_volume / volume,
self.volume_limit)
price = data.current(order.asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
            return None, None
# END
simulated_impact = volume_share ** 2 \
* math.copysign(self.price_impact, order.direction) \
* price
impacted_price = price + simulated_impact
if fill_price_worse_than_limit_price(impacted_price, order):
return None, None
return (
impacted_price,
math.copysign(cur_volume, order.direction)
)
class FixedSlippage(SlippageModel):
"""
Model slippage as a fixed spread.
Parameters
----------
spread : float, optional
spread / 2 will be added to buys and subtracted from sells.
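    Examples
    --------
    Illustrative spread only:

    >>> from zipline.finance.slippage import FixedSlippage
    >>> model = FixedSlippage(spread=0.02)
    >>> # Buys fill at price + 0.01 and sells at price - 0.01.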
"""
def __init__(self, spread=0.0):
super(FixedSlippage, self).__init__()
self.spread = spread
def __repr__(self):
return '{class_name}(spread={spread})'.format(
class_name=self.__class__.__name__, spread=self.spread,
)
def process_order(self, data, order):
price = data.current(order.asset, "close")
return (
price + (self.spread / 2.0 * order.direction),
order.amount
)
class MarketImpactBase(SlippageModel):
"""
Base class for slippage models which compute a simulated price impact
according to a history lookback.
"""
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000
def __init__(self):
super(MarketImpactBase, self).__init__()
self._window_data_cache = ExpiringCache()
@abstractmethod
def get_txn_volume(self, data, order):
"""
Return the number of shares we would like to order in this minute.
Parameters
----------
data : BarData
order : Order
Return
------
int : the number of shares
"""
raise NotImplementedError('get_txn_volume')
@abstractmethod
def get_simulated_impact(self,
order,
current_price,
current_volume,
txn_volume,
mean_volume,
volatility):
"""
Calculate simulated price impact.
Parameters
----------
order : The order being processed.
current_price : Current price of the asset being ordered.
current_volume : Volume of the asset being ordered for the current bar.
txn_volume : Number of shares/contracts being ordered.
mean_volume : Trailing ADV of the asset.
volatility : Annualized daily volatility of volume.
Return
------
int : impact on the current price.
"""
raise NotImplementedError('get_simulated_impact')
def process_order(self, data, order):
if order.open_amount == 0:
return None, None
minute_data = data.current(order.asset, ['volume', 'high', 'low'])
mean_volume, volatility = self._get_window_data(data, order.asset, 20)
        # Price to use is the average of the minute bar's high and low.
price = np.mean([minute_data['high'], minute_data['low']])
volume = minute_data['volume']
if not volume:
return None, None
txn_volume = int(
min(self.get_txn_volume(data, order), abs(order.open_amount))
)
        # If the computed transaction volume is zero, or is a fraction that
        # 'int' truncates down to zero, there is nothing to fill, so bail.
if txn_volume == 0:
return None, None
if mean_volume == 0 or np.isnan(volatility):
# If this is the first day the contract exists or there is no
# volume history, default to a conservative estimate of impact.
simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT
else:
simulated_impact = self.get_simulated_impact(
order=order,
current_price=price,
current_volume=volume,
txn_volume=txn_volume,
mean_volume=mean_volume,
volatility=volatility,
)
impacted_price = \
price + math.copysign(simulated_impact, order.direction)
if fill_price_worse_than_limit_price(impacted_price, order):
return None, None
return impacted_price, math.copysign(txn_volume, order.direction)
def _get_window_data(self, data, asset, window_length):
"""
Internal utility method to return the trailing mean volume over the
past 'window_length' days, and volatility of close prices for a
specific asset.
Parameters
----------
data : The BarData from which to fetch the daily windows.
asset : The Asset whose data we are fetching.
window_length : Number of days of history used to calculate the mean
volume and close price volatility.
Returns
-------
(mean volume, volatility)
"""
try:
values = self._window_data_cache.get(asset, data.current_session)
except KeyError:
try:
# Add a day because we want 'window_length' complete days,
# excluding the current day.
volume_history = data.history(
asset, 'volume', window_length + 1, '1d',
)
close_history = data.history(
asset, 'close', window_length + 1, '1d',
)
except HistoryWindowStartsBeforeData:
# If there is not enough data to do a full history call, return
# values as if there was no data.
return 0, np.NaN
# Exclude the first value of the percent change array because it is
# always just NaN.
close_volatility = close_history[:-1].pct_change()[1:].std(
skipna=False,
)
values = {
'volume': volume_history[:-1].mean(),
'close': close_volatility * SQRT_252,
}
self._window_data_cache.set(asset, values, data.current_session)
return values['volume'], values['close']
class VolatilityVolumeShare(MarketImpactBase):
"""
Model slippage for futures contracts according to the following formula:
new_price = price + (price * MI / 10000),
where 'MI' is market impact, which is defined as:
MI = eta * sigma * sqrt(psi)
Eta is a constant which varies by root symbol.
Sigma is 20-day annualized volatility.
Psi is the volume traded in the given bar divided by 20-day ADV.
Parameters
----------
volume_limit : float
Maximum percentage (as a decimal) of a bar's total volume that can be
traded.
eta : float or dict
Constant used in the market impact formula. If given a float, the eta
for all futures contracts is the same. If given a dictionary, it must
map root symbols to the eta for contracts of that symbol.
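    Examples
    --------
    Illustrative parameters; 0.05 is this module's default bar limit for
    futures, and the eta override below is an assumption for demonstration
    only.

    >>> from zipline.finance.slippage import (
    ...     DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
    ...     VolatilityVolumeShare,
    ... )
    >>> model = VolatilityVolumeShare(
    ...     volume_limit=DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
    ... )
    >>> # Or override eta for specific root symbols:
    >>> model = VolatilityVolumeShare(volume_limit=0.05, eta={'ES': 0.05})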
"""
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000
allowed_asset_types = (Future,)
def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA):
super(VolatilityVolumeShare, self).__init__()
self.volume_limit = volume_limit
# If 'eta' is a constant, use a dummy mapping to treat it as a
# dictionary that always returns the same value.
# NOTE: This dictionary does not handle unknown root symbols, so it may
# be worth revisiting this behavior.
if isinstance(eta, (int, float)):
self._eta = DummyMapping(float(eta))
else:
# Eta is a dictionary. If the user's dictionary does not provide a
# value for a certain contract, fall back on the pre-defined eta
# values per root symbol.
self._eta = merge(ROOT_SYMBOL_TO_ETA, eta)
def __repr__(self):
if isinstance(self._eta, DummyMapping):
# Eta is a constant, so extract it.
eta = self._eta['dummy key']
else:
eta = '<varies>'
return '{class_name}(volume_limit={volume_limit}, eta={eta})'.format(
class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
eta=eta,
)
def get_simulated_impact(self,
order,
current_price,
current_volume,
txn_volume,
mean_volume,
volatility):
eta = self._eta[order.asset.root_symbol]
psi = txn_volume / mean_volume
market_impact = eta * volatility * math.sqrt(psi)
# We divide by 10,000 because this model computes to basis points.
# To convert from bps to % we need to divide by 100, then again to
# convert from % to fraction.
return (current_price * market_impact) / 10000
def get_txn_volume(self, data, order):
volume = data.current(order.asset, 'volume')
        return volume * self.volume_limit
# end of zipline/finance/slippage.py (zipline-live 1.1.0.5)
import math
import uuid
from six import text_type
import zipline.protocol as zp
from zipline.assets import Asset
from zipline.utils.enum import enum
from zipline.utils.input_validation import expect_types
ORDER_STATUS = enum(
'OPEN',
'FILLED',
'CANCELLED',
'REJECTED',
'HELD',
)
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
ORDER_FIELDS_TO_IGNORE = {'type', 'direction', '_status', 'asset'}
class Order(object):
# using __slots__ to save on memory usage. Simulations can create many
# Order objects and we keep them all in memory, so it's worthwhile trying
# to cut down on the memory footprint of this object.
__slots__ = ["id", "dt", "reason", "created", "asset", "amount", "filled",
"commission", "_status", "stop", "limit", "stop_reached",
"limit_reached", "direction", "type", "broker_order_id"]
@expect_types(asset=Asset)
def __init__(self, dt, asset, amount, stop=None, limit=None, filled=0,
commission=0, id=None):
"""
@dt - datetime.datetime that the order was placed
@asset - asset for the order.
@amount - the number of shares to buy/sell
a positive sign indicates a buy
a negative sign indicates a sell
@filled - how many shares of the order have been filled so far
"""
# get a string representation of the uuid.
self.id = self.make_id() if id is None else id
self.dt = dt
self.reason = None
self.created = dt
self.asset = asset
self.amount = amount
self.filled = filled
self.commission = commission
self._status = ORDER_STATUS.OPEN
self.stop = stop
self.limit = limit
self.stop_reached = False
self.limit_reached = False
self.direction = math.copysign(1, self.amount)
self.type = zp.DATASOURCE_TYPE.ORDER
self.broker_order_id = None
def make_id(self):
return uuid.uuid4().hex
def to_dict(self):
dct = {name: getattr(self, name)
for name in self.__slots__
if name not in ORDER_FIELDS_TO_IGNORE}
if self.broker_order_id is None:
del dct['broker_order_id']
# Adding 'sid' for backwards compatibility with downstream consumers.
dct['sid'] = self.asset
dct['status'] = self.status
return dct
@property
def sid(self):
# For backwards compatibility because we pass this object to
# custom slippage models.
return self.asset
def to_api_obj(self):
pydict = self.to_dict()
obj = zp.Order(initial_values=pydict)
return obj
def check_triggers(self, price, dt):
"""
Update internal state based on price triggers and the
trade event's price.
"""
stop_reached, limit_reached, sl_stop_reached = \
self.check_order_triggers(price)
if (stop_reached, limit_reached) \
!= (self.stop_reached, self.limit_reached):
self.dt = dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
self.stop = None
def check_order_triggers(self, current_price):
"""
        Given the current trade price, return a tuple of
        (stop_reached, limit_reached, sl_stop_reached) for this order.
        For market orders, will return (False, False, False).
        For stop orders, limit_reached will always be False.
        For limit orders, stop_reached will always be False.
        For stop limit orders, sl_stop_reached flags that the stop price has
        been reached, so the order can be converted into a limit order.
        For orders that have already been triggered (price targets reached),
        the order's current values are returned.
"""
if self.triggered:
return (self.stop_reached, self.limit_reached, False)
stop_reached = False
limit_reached = False
sl_stop_reached = False
order_type = 0
if self.amount > 0:
order_type |= BUY
else:
order_type |= SELL
if self.stop is not None:
order_type |= STOP
if self.limit is not None:
order_type |= LIMIT
if order_type == BUY | STOP | LIMIT:
if current_price >= self.stop:
sl_stop_reached = True
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | STOP | LIMIT:
if current_price <= self.stop:
sl_stop_reached = True
if current_price >= self.limit:
limit_reached = True
elif order_type == BUY | STOP:
if current_price >= self.stop:
stop_reached = True
elif order_type == SELL | STOP:
if current_price <= self.stop:
stop_reached = True
elif order_type == BUY | LIMIT:
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | LIMIT:
# This is a SELL LIMIT order
if current_price >= self.limit:
limit_reached = True
return (stop_reached, limit_reached, sl_stop_reached)
def handle_split(self, ratio):
# update the amount, limit_price, and stop_price
# by the split's ratio
# info here: http://finra.complinet.com/en/display/display_plain.html?
# rbid=2403&element_id=8950&record_id=12208&print=1
# new_share_amount = old_share_amount / ratio
# new_price = old_price * ratio
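        # Illustrative example (assuming zipline's convention that a 2-for-1
        # split arrives as ratio=0.5): an open order for 100 shares with a
        # $10.00 limit becomes an order for 200 shares with a $5.00 limit.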
self.amount = int(self.amount / ratio)
if self.limit is not None:
self.limit = round(self.limit * ratio, 2)
if self.stop is not None:
self.stop = round(self.stop * ratio, 2)
@property
def status(self):
if not self.open_amount:
return ORDER_STATUS.FILLED
elif self._status == ORDER_STATUS.HELD and self.filled:
return ORDER_STATUS.OPEN
else:
return self._status
@status.setter
def status(self, status):
self._status = status
def cancel(self):
self.status = ORDER_STATUS.CANCELLED
def reject(self, reason=''):
self.status = ORDER_STATUS.REJECTED
self.reason = reason
def hold(self, reason=''):
self.status = ORDER_STATUS.HELD
self.reason = reason
@property
def open(self):
return self.status in [ORDER_STATUS.OPEN, ORDER_STATUS.HELD]
@property
def triggered(self):
"""
For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached.
"""
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
return True
@property
def open_amount(self):
return self.amount - self.filled
def __repr__(self):
"""
String representation for this object.
"""
return "Order(%s)" % self.to_dict().__repr__()
def __unicode__(self):
"""
Unicode representation for this object.
"""
        return text_type(repr(self))
# end of zipline/finance/order.py (zipline-live 1.1.0.5)
from logbook import Logger
from six import itervalues
from zipline.finance.blotter import Blotter
from zipline.utils.input_validation import expect_types
from zipline.assets import Asset
log = Logger('Blotter Live')
class BlotterLive(Blotter):
def __init__(self, data_frequency, broker):
self.broker = broker
self._processed_closed_orders = []
self._processed_transactions = []
self.data_frequency = data_frequency
self.new_orders = []
def __repr__(self):
return """
{class_name}(
open_orders={open_orders},
orders={orders},
    new_orders={new_orders})
""".strip().format(class_name=self.__class__.__name__,
open_orders=self.open_orders,
orders=self.orders,
new_orders=self.new_orders)
@property
def orders(self):
return self.broker.orders
@property
def open_orders(self):
assets = set([order.asset for order in itervalues(self.orders)
if order.open])
return {
asset: [order for order in itervalues(self.orders)
if order.asset == asset and order.open]
for asset in assets
}
@expect_types(asset=Asset)
def order(self, asset, amount, style, order_id=None):
assert order_id is None
order = self.broker.order(asset, amount, style)
self.new_orders.append(order)
return order.id
def cancel(self, order_id, relay_status=True):
return self.broker.cancel_order(order_id)
def execute_cancel_policy(self, event):
# Cancellation is handled at the broker
pass
def reject(self, order_id, reason=''):
log.warning("Unexpected reject request for {}: '{}'".format(
order_id, reason))
def hold(self, order_id, reason=''):
log.warning("Unexpected hold request for {}: '{}'".format(
order_id, reason))
def get_transactions(self, bar_data):
# All returned values from this function are delta between
# the previous and actual call.
def _list_delta(lst_a, lst_b):
return [elem for elem in lst_a if elem not in set(lst_b)]
all_transactions = list(self.broker.transactions.values())
new_transactions = _list_delta(all_transactions,
self._processed_transactions)
self._processed_transactions = all_transactions
new_commissions = [{'asset': tx.asset,
'cost': tx.commission,
'order': self.orders[tx.order_id]}
for tx in new_transactions]
all_closed_orders = [order
for order in itervalues(self.orders)
if not order.open]
new_closed_orders = _list_delta(all_closed_orders,
self._processed_closed_orders)
self._processed_closed_orders = all_closed_orders
return new_transactions, new_commissions, new_closed_orders
def prune_orders(self, closed_orders):
# Orders are handled at the broker
pass
def process_splits(self, splits):
# Splits are handled at the broker
        pass
# end of zipline/finance/blotter_live.py (zipline-live 1.1.0.5)