code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64)
---|---|---|---|---|---
assert site in ['Pro', 'HADAX']
assert _type in u.ORDER_TYPE
params = {
'account-id': acc_id,
'amount': amount,
'symbol': symbol,
'type': _type,
'source': 'api'
}
if price:
params['price'] = price
path = f'/v1{"/" if site == "Pro" else "/hadax/"}order/orders/place'
return api_key_post(params, path, _async=_async) | def send_order(self, acc_id, amount, symbol, _type, price=0, site='Pro', _async=False) | Create and execute an order
:param amount:
:param source: to trade with borrowed (margin) assets, set the request parameter source to 'margin-api' when placing the order
:param symbol:
:param _type: one of {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell, buy-ioc: IOC buy, sell-ioc: IOC sell}
:param price:
:return: | 3.628369 | 3.952368 | 0.918024 |
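A minimal usage sketch for the order-placement wrapper above; `api` is assumed to be an instance of the surrounding REST client with valid keys configured, and the account id, symbol, and amounts are illustrative:

resp = api.send_order(acc_id='12345', amount=0.01, symbol='btcusdt',
                      _type='buy-limit', price=30000)
# on success Huobi replies with {'status': 'ok', 'data': <new order id>}
if resp and resp.get('status') == 'ok':
    order_id = resp['data']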
params = {}
path = f'/v1/order/orders/{order_id}/submitcancel'
return api_key_post(params, path, _async=_async) | def cancel_order(self, order_id, _async=False) | Cancel an order
:param order_id:
:return: | 5.472083 | 6.940178 | 0.788464 |
assert isinstance(order_ids, list)
params = {'order-ids': order_ids}
path = f'/v1/order/orders/batchcancel'
return api_key_post(params, path, _async=_async) | def batchcancel_orders(self, order_ids: list, _async=False) | Batch-cancel orders
:param order_ids:
:return: | 4.021963 | 4.08676 | 0.984145 |
params = {}
path = '/v1/order/batchCancelOpenOrders'
params['account-id'] = acc_id
if symbol:
params['symbol'] = symbol
if side:
assert side in ['buy', 'sell']
params['side'] = side
if size:
params['size'] = size
return api_key_get(params, path, _async=_async) | def batchcancel_openOrders(self, acc_id, symbol=None, side=None, size=None, _async=False) | Batch-cancel open (unfilled) orders
:param acc_id: account id
:param symbol: trading pair
:param side: order side, 'buy' or 'sell'
:param size:
:param _async:
:return: | 2.462714 | 2.811112 | 0.876064 |
params = {}
path = f'/v1/order/orders/{order_id}'
return api_key_get(params, path, _async=_async) | def get_order_info(self, order_id, _async=False) | Query a single order
:param order_id:
:return: | 5.284648 | 6.308768 | 0.837667 |
params = {}
path = '/v1/order/openOrders'
if all([acc_id, symbol]):
params['account-id'] = acc_id
params['symbol'] = symbol
if side:
assert side in ['buy', 'sell']
params['side'] = side
if size:
params['size'] = size
return api_key_get(params, path, _async=_async) | def get_openOrders(self, acc_id=None, symbol=None, side=None, size=None, _async=False) | Query open (unfilled) orders
:param acc_id: account id
:param symbol: trading pair
:param side: order side, 'buy' or 'sell'
:param size: number of records, max 500
:return: | 2.556584 | 2.834715 | 0.901884 |
params = {}
path = f'/v1/order/orders/{order_id}/matchresults'
return api_key_get(params, path, _async=_async) | def get_order_matchresults(self, order_id, _async=False) | Query the match results (fill details) of an order
:param order_id:
:return: | 4.878521 | 5.709681 | 0.85443 |
states = ','.join(states)
params = {'symbol': symbol, 'states': states}
if types:
params['types'] = ','.join(types)
if start_date:
sd = parser.parse(start_date).date()
params['start-date'] = str(sd)
if end_date:
ed = parser.parse(end_date).date()
params['end-date'] = str(ed)
if _from:
params['from'] = _from
if direct:
assert direct in ['prev', 'next']
params['direct'] = direct
if size:
params['size'] = size
path = '/v1/order/orders'
return api_key_get(params, path, _async=_async) | def get_orders_info(self,
symbol,
states:list,
types:list=None,
start_date=None,
end_date=None,
_from=None,
direct=None,
size=None,
_async=False) | Query current and historical orders
:param symbol:
:param states: one of {pre-submitted: queued, submitted: submitted, partial-filled: partially filled, partial-canceled: partially filled then canceled, filled: fully filled, canceled: canceled}
:param types: one of {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param start_date:
:param end_date:
:param _from:
:param direct: paging direction, one of {prev: earlier records, next: later records}
:param size:
:return: | 2.020036 | 2.113863 | 0.955614 |
params = {
'address': address,
'amount': amount,
'currency': currency,
'fee': fee,
'addr-tag': addr_tag
}
path = '/v1/dw/withdraw/api/create'
return api_key_post(params, path, _async=_async) | def req_withdraw(self, address, amount, currency, fee=0, addr_tag="", _async=False) | Request a cryptocurrency withdrawal
:param address:
:param amount:
:param currency: btc, ltc, bcc, eth, etc ... (currencies supported by Huobi Pro)
:param fee:
:param addr_tag:
:return: {
"status": "ok",
"data": 700
} | 3.112602 | 3.267643 | 0.952553 |
params = {}
path = f'/v1/dw/withdraw-virtual/{address_id}/cancel'
return api_key_post(params, path, _async=_async) | def cancel_withdraw(self, address_id, _async=False) | Request cancellation of a cryptocurrency withdrawal
:param address_id:
:return: {
"status": "ok",
"data": 700
} | 8.159781 | 8.528395 | 0.956778 |
params = {
'account-id': acc_id,
'amount': amount,
'symbol': symbol,
'type': _type,
'source': 'margin-api'
}
if price:
params['price'] = price
path = '/v1/order/orders/place'
return api_key_post(params, path, _async=_async) | def send_margin_order(self, acc_id, amount, symbol, _type, price=0, _async=False) | Create and execute a margin order
:param amount:
:param symbol:
:param _type: one of {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param price:
:return: | 2.695175 | 3.094605 | 0.870927 |
params = {'symbol': symbol, 'currency': currency, 'amount': amount}
path = '/v1/dw/transfer-in/margin'
return api_key_post(params, path, _async=_async) | def exchange_to_margin(self, symbol, currency, amount, _async=False) | Transfer from the spot account into the margin account
:param amount:
:param currency:
:param symbol:
:return: | 4.604233 | 5.837614 | 0.788718 |
params = {'order-id': order_id, 'amount': amount}
path = f'/v1/margin/orders/{order_id}/repay'
return api_key_post(params, path, _async=_async) | def repay_loan(self, order_id, amount, _async=False) | Repay a margin loan
:param order_id:
:param amount:
:return: | 3.667282 | 4.352284 | 0.842611 |
params = {}
path = '/v1/margin/accounts/balance'
if symbol:
params['symbol'] = symbol
return api_key_get(params, path, _async=_async) | def get_margin_balance(self, symbol=None, _async=False) | Margin account details; supports querying a single currency
:param symbol:
:return: | 4.284053 | 4.692061 | 0.913043 |
params = {}
path = '/etf/swap/config'
params['etf_name'] = etf_name
return api_key_get(params, path, _async=_async) | def get_etf_config(self, etf_name, _async=False) | Query basic information of an ETF
:param etf_name: ETF fund name
:param _async:
:return: | 5.821456 | 7.164642 | 0.812526 |
params = {}
path = '/etf/swap/in'
params['etf_name'] = etf_name
params['amount'] = amount
return api_key_post(params, path, _async=_async) | def etf_swap_in(self, etf_name, amount, _async=False) | Swap into an ETF
:param etf_name: ETF fund name
:param amount: amount
:param _async:
:return: | 3.853176 | 4.231962 | 0.910494 |
params = {}
path = '/etf/list'
params['etf_name'] = etf_name
params['offset'] = offset
params['limit'] = limit
return api_key_get(params, path, _async=_async) | def get_etf_records(self, etf_name, offset, limit, _async=False) | Query ETF swap-in/swap-out records
:param etf_name: ETF fund name
:param offset: start offset; 0 is the most recent record
:param limit: number of records to return, in (0, 100]
:param _async:
:return: | 3.108302 | 3.729032 | 0.833541 |
params = {}
path = '/quotation/market/history/kline'
params['symbol'] = symbol
params['period'] = period
if limit:
params['limit'] = limit
return api_key_get(params, path, _async=_async) | def get_quotation_kline(self, symbol, period, limit=None, _async=False) | Get ETF net-value kline
:param symbol: ETF name
:param period: kline period type
:param limit: number of records to fetch
:param _async:
:return: | 3.218361 | 3.762825 | 0.855304 |
params = {}
path = '/v1/subuser/transfer'
params['sub-uid'] = sub_uid
params['currency'] = currency
params['amount'] = amount
params['type'] = transfer_type
return api_key_post(params, path, _async=_async) | def transfer(self, sub_uid, currency, amount, transfer_type, _async=False) | Transfer between parent account and sub-account (executed by the parent)
:param sub_uid: sub-account id
:param currency: currency
:param amount: transfer amount
:param transfer_type: transfer type: master-transfer-in (sub-account transfers currency to parent) / master-transfer-out (parent transfers currency to sub-account) / master-point-transfer-in (sub-account transfers point cards to parent) / master-point-transfer-out (parent transfers point cards to sub-account)
:param _async: whether to execute asynchronously
:return: | 2.91972 | 3.829283 | 0.762472 |
params = {}
path = '/v1/subuser/aggregate-balance'
return api_key_get(params, path, _async=_async) | def get_aggregate_balance(self, _async=False) | Query the aggregated balances of all sub-accounts
:param _async: whether to execute asynchronously
:return: | 9.6237 | 8.075408 | 1.191729 |
params = {}
params['sub-uid'] = sub_id
path = f'/v1/account/accounts/{sub_id}'  # interpolate the sub-account id; the original literal '{sub-uid}' was never substituted
return api_key_get(params, path, _async=_async) | def get_sub_balance(self, sub_id, _async=False) | Query a sub-account's balance for each currency
:param sub_id: sub-account id
:param _async:
:return: | 6.359965 | 5.94306 | 1.07015 |
params = {'symbol': symbol, 'period': period, 'size': size}
url = u.MARKET_URL + '/market/history/kline'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(http_get_request(url, params))
return handle
return _wrapper | def get_kline(self, symbol, period, size=150) | Get kline (candlestick) data
:param symbol
:param period: one of {1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year}
:param size: in [1, 2000]
:return: | 4.318429 | 4.850175 | 0.890366 |
params = {'symbol': symbol, 'type': _type}
url = u.MARKET_URL + '/market/depth'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(http_get_request(url, params))
return handle
return _wrapper | def get_last_depth(self, symbol, _type) | Get market depth
:param symbol
:param type: one of { percent10, step0, step1, step2, step3, step4, step5 }
:return: | 5.808034 | 5.964951 | 0.973693 |
params = {'symbol': symbol}
url = u.MARKET_URL + '/market/trade'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(http_get_request(url, params))
return handle
return _wrapper | def get_last_ticker(self, symbol) | Get the latest ticker
:param symbol
:return: | 6.987968 | 7.854821 | 0.889641 |
params = {}
url = u.MARKET_URL + '/market/tickers'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(http_get_request(url, params))
return handle
return _wrapper | def get_all_last_24h_kline(self) | Get all tickers (last 24h)
:param _async:
:return: | 7.832867 | 7.670788 | 1.021129 |
assert site in ['Pro', 'HADAX']
params = {}
path = f'/v1{"/" if site == "Pro" else "/hadax/"}common/symbols'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | def get_symbols(self, site='Pro') | Get supported trading pairs
:param site:
:return: | 8.704699 | 7.13408 | 1.220157 |
assert site in ['Pro', 'HADAX']
path = f'/v1{"/" if site == "Pro" else "/hadax/"}account/accounts/{acc_id}/balance'
# params = {'account-id': self.acct_id}
params = {}
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | def get_balance(self, acc_id, site='Pro') | Get current account balance
:return: | 6.034825 | 5.792897 | 1.041763 |
assert site in ['Pro', 'HADAX']
assert _type in u.ORDER_TYPE
params = {
'account-id': acc_id,
'amount': amount,
'symbol': symbol,
'type': _type,
'source': 'api'
}
if price:
params['price'] = price
path = f'/v1{"/" if site == "Pro" else "/hadax/"}order/orders/place'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def send_order(self, acc_id, amount, symbol, _type, price=0, site='Pro') | Create and execute an order
:param amount:
:param source: to trade with borrowed (margin) assets, set the request parameter source to 'margin-api' when placing the order
:param symbol:
:param _type: one of {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell, buy-ioc: IOC buy, sell-ioc: IOC sell}
:param price:
:return: | 4.148664 | 4.355212 | 0.952574 |
params = {}
path = f'/v1/order/orders/{order_id}/submitcancel'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def cancel_order(self, order_id) | Cancel an order
:param order_id:
:return: | 7.046802 | 7.757213 | 0.908419 |
assert isinstance(order_ids, list)
params = {'order-ids': order_ids}
path = f'/v1/order/orders/batchcancel'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def batchcancel_order(self, order_ids: list) | Batch-cancel orders
:param order_ids:
:return: | 5.494201 | 5.256148 | 1.04529 |
params = {}
path = f'/v1/order/orders/{order_id}'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | def get_order_info(self, order_id) | Query a single order
:param order_id:
:return: | 6.181999 | 6.78056 | 0.911724 |
params = {}
path = f'/v1/order/orders/{order_id}/matchresults'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | def get_order_matchresults(self, order_id) | Query the match results (fill details) of an order
:param order_id:
:return: | 6.153533 | 6.443613 | 0.954982 |
params = {'symbol': symbol, 'states': states}
if types:
params['types'] = types
if start_date:
sd = parser.parse(start_date).date()
params['start-date'] = str(sd)
if end_date:
ed = parser.parse(end_date).date()
params['end-date'] = str(ed)  # hyphenated key, consistent with 'start-date'
if _from:
params['from'] = _from
if direct:
assert direct in ['prev', 'next']
params['direct'] = direct
if size:
params['size'] = size
path = '/v1/order/orders'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | def get_orders_info(self,
symbol,
states,
types=None,
start_date=None,
end_date=None,
_from=None,
direct=None,
size=None) | Query current and historical orders
:param symbol:
:param states: one of {pre-submitted: queued, submitted: submitted, partial-filled: partially filled, partial-canceled: partially filled then canceled, filled: fully filled, canceled: canceled}
:param types: order types to filter by
:param start_date:
:param end_date:
:param _from:
:param direct: paging direction, one of {prev: earlier records, next: later records}
:param size:
:return: | 2.384838 | 2.539595 | 0.939062 |
params = {
'address': address,
'amount': amount,
'currency': currency,
'fee': fee,
'addr-tag': addr_tag
}
path = '/v1/dw/withdraw/api/create'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def req_withdraw(self, address, amount, currency, fee=0, addr_tag="") | Request a cryptocurrency withdrawal
:param address:
:param amount:
:param currency: btc, ltc, bcc, eth, etc ... (currencies supported by Huobi Pro)
:param fee:
:param addr_tag:
:return: {
"status": "ok",
"data": 700
} | 4.051655 | 3.969168 | 1.020782 |
params = {}
path = f'/v1/dw/withdraw-virtual/{address_id}/cancel'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def cancel_withdraw(self, address_id) | Request cancellation of a cryptocurrency withdrawal
:param address_id:
:return: {
"status": "ok",
"data": 700
} | 8.618542 | 8.678246 | 0.99312 |
params = {
'account-id': acc_id,
'amount': amount,
'symbol': symbol,
'type': _type,
'source': 'margin-api'
}
if price:
params['price'] = price
path = '/v1/order/orders/place'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def send_margin_order(self, acc_id, amount, symbol, _type, price=0) | Create and execute a margin order
:param amount:
:param symbol:
:param _type: one of {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell}
:param price:
:return: | 3.392229 | 3.672025 | 0.923803 |
params = {'symbol': symbol, 'currency': currency, 'amount': amount}
path = '/v1/dw/transfer-out/margin'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def margin_to_exchange(self, symbol, currency, amount) | Transfer from the margin account out to the spot account
:param amount:
:param currency:
:param symbol:
:return: | 6.197461 | 6.978409 | 0.888091 |
params = {'order-id': order_id, 'amount': amount}
path = f'/v1/margin/orders/{order_id}/repay'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | def repay_loan(self, order_id, amount) | Repay a margin loan
:param order_id:
:param amount:
:return: | 5.060484 | 5.638958 | 0.897415 |
params = {}
path = '/v1/margin/accounts/balance'
if symbol:
params['symbol'] = symbol
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_get(params, path))
return handle
return _wrapper | def get_margin_balance(self, symbol) | Margin account details; supports querying a single currency
:param symbol:
:return: | 5.265412 | 5.551386 | 0.948486 |
def wrapper(_callback):
callbackList = self._req_callbacks.setdefault(req, [])
callbackList.append(_callback)
return _callback
return wrapper | def register_onRsp(self, req) | Decorator that registers a response-callback handler
:param req: the specific topic, e.g.
:return: | 6.668943 | 5.497612 | 1.213062 |
handler_list = self._handle_funcs.get(topic, [])
for h in list(handler_list):  # iterate over a copy so removal does not skip elements
    if h is _handle_func_name or h.__name__ == _handle_func_name:
        handler_list.remove(h)
if self._handle_funcs.get(topic) == []:
    self._handle_funcs.pop(topic) | def unregister_handle_func(self, _handle_func_name, topic) | Unregister a handle_func | 2.110538 | 2.077637 | 1.015836 |
topic = msg.get('topic')
self.pub_socket.send_multipart(
[pickle.dumps(topic), pickle.dumps(msg)])
for h in self._handle_funcs.get(topic, []):
h(msg) | def pub_msg(self, msg) | Core dispatch function: handle_funcs are called directly, while handlers receive the message pushed onto their queues | 4.986199 | 4.221587 | 1.181119 |
if file:
import sys
file_path, file_name = os.path.split(file)
sys.path.append(file_path)
strategy_module = importlib.import_module(os.path.splitext(file_name)[0])
init = getattr(strategy_module, 'init', None)
handle_func = getattr(strategy_module, 'handle_func', None)
schedule = getattr(strategy_module, 'schedule', None)
else:
init, handle_func, schedule = None, None, None  # was 'scedule', a typo that broke the later 'if schedule:' check
setKey(access_key, secret_key)
url = kwargs.get('url')
hostname = 'api.huobi.br.com'
if url:
hostname = urlparse(url).hostname
setUrl('https://' + hostname, 'https://' + hostname)
reconn = kwargs.get('reconn', -1)
from huobitrade import HBWebsocket, HBRestAPI
from huobitrade.datatype import HBMarket, HBAccount, HBMargin
restapi = HBRestAPI(get_acc=True)
ws = HBWebsocket(host=hostname, reconn=reconn)
auth_ws = HBWebsocket(host=hostname, auth=True, reconn=reconn)
data = HBMarket()
account = HBAccount()
margin = HBMargin()
ws_open = False
ws_auth = False
@ws.after_open
def _open():
nonlocal ws_open
click.echo('Market data websocket connected')
ws_open = True
@auth_ws.after_auth
def _auth():
nonlocal ws_auth
click.echo('Authenticated websocket authorized')
ws_auth = True
ws.run()
auth_ws.run()
for i in range(10):
time.sleep(3)
click.echo(f'Connection attempt {i+1}')
if ws_open and ws_auth:
break
else:
ws.stop()
auth_ws.stop()
raise Exception('Connection failed')
if init:
init(restapi, ws, auth_ws)
if handle_func:
for k, v in handle_func.items():
if k.split('.')[0].lower() == 'market':
ws.register_handle_func(k)(v)
else:
auth_ws.register_handle_func(k)(v)
if schedule:
from huobitrade.handler import TimeHandler
interval = schedule.__kwdefaults__['interval']
timerhandler = TimeHandler('scheduler', interval)
timerhandler.handle = lambda msg: schedule(restapi, ws, auth_ws)
timerhandler.start()
while True:
try:
code = click.prompt('huobitrade>>')
if code == 'exit':
if click.confirm('Exit huobitrade?'):
break
else:
continue
else:
result = eval(code)
click.echo(result)
except Exception as e:
click.echo(traceback.format_exc())
ws.stop()
auth_ws.stop() | def run(file, access_key, secret_key, **kwargs) | Run huobitrade from the command line | 3.044605 | 2.954743 | 1.030413 |
sorted_params = sorted(pParams.items(), key=lambda d: d[0], reverse=False)
encode_params = urllib.parse.urlencode(sorted_params)
payload = [method, host_url, request_path, encode_params]
payload = '\n'.join(payload)
payload = payload.encode(encoding='UTF8')
secret_key = secret_key.encode(encoding='UTF8')
digest = hmac.new(secret_key, payload, digestmod=hashlib.sha256).digest()
signature = base64.b64encode(digest)
signature = signature.decode()
return signature | def createSign(pParams, method, host_url, request_path, secret_key) | From the Huobi demo: build the request signature
:param pParams:
:param method:
:param host_url:
:param request_path:
:param secret_key:
:return: | 1.750081 | 1.83384 | 0.954326 |
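A self-contained sketch of invoking the signing routine above; the keys and timestamp are dummy values, and `createSign` is assumed importable from this module:

params = {
    'AccessKeyId': 'dummy-access-key',
    'SignatureMethod': 'HmacSHA256',
    'SignatureVersion': '2',
    'Timestamp': '2019-01-01T00:00:00',
}
# HMAC-SHA256 over 'METHOD\nhost\npath\nsorted-urlencoded-params', base64-encoded
sig = createSign(params, 'GET', 'api.huobi.br.com', '/v1/account/accounts', 'dummy-secret')
params['Signature'] = sig  # appended to the query string of the signed request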
headers = {
'Content-type':
'application/x-www-form-urlencoded',
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
}
if add_to_headers:
headers.update(add_to_headers)
postdata = urllib.parse.urlencode(params)
if _async:
response = async_session.get(url, params=postdata, headers=headers, timeout=5)
return response
else:
response = requests.get(url, postdata, headers=headers, timeout=5)
try:
if response.status_code == 200:
return response.json()
else:
logger.debug(
f'<GET>error_code:{response.status_code} reason:{response.reason} detail:{response.text}')
return
except BaseException as e:
logger.exception(f'<GET>httpGet failed, detail is:{response.text},{e}')
return | def http_get_request(url, params, add_to_headers=None, _async=False) | From the Huobi demo: GET request method
:param url:
:param params:
:param add_to_headers:
:return: | 2.124963 | 2.142783 | 0.991684 |
headers = {
"Accept": "application/json",
'Content-Type': 'application/json'
}
if add_to_headers:
headers.update(add_to_headers)
postdata = json.dumps(params)
if _async:
response = async_session.post(url, postdata, headers=headers, timeout=10)
return response
else:
response = requests.post(url, postdata, headers=headers, timeout=10)
try:
if response.status_code == 200:
return response.json()
else:
logger.debug(f'<POST>error_code:{response.status_code} reason:{response.reason} detail:{response.text}')
return
except BaseException as e:
logger.exception(
f'<POST>httpPost failed, detail is:{response.text},{e}')
return | def http_post_request(url, params, add_to_headers=None, _async=False) | From the Huobi demo: POST request method
:param url:
:param params:
:param add_to_headers:
:return: | 2.660517 | 2.614487 | 1.017606 |
method = 'GET'
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
params.update({
'AccessKeyId': ACCESS_KEY,
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': timestamp
})
host_url = TRADE_URL
host_name = urllib.parse.urlparse(host_url).hostname
host_name = host_name.lower()
secret_sign = createSign(params, method, host_name, request_path,
SECRET_KEY)
params['Signature'] = secret_sign
if PRIVATE_KEY:
params['PrivateSignature'] = createPrivateSign(secret_sign, PRIVATE_KEY)
url = host_url + request_path
return http_get_request(url, params, _async=_async) | def api_key_get(params, request_path, _async=False) | From the Huobi demo: build a signed GET request and dispatch it via the GET method
:param params:
:param request_path:
:return: | 2.861414 | 2.715201 | 1.05385 |
method = 'POST'
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
params_to_sign = {
'AccessKeyId': ACCESS_KEY,
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'Timestamp': timestamp
}
host_url = TRADE_URL
host_name = urllib.parse.urlparse(host_url).hostname
host_name = host_name.lower()
secret_sign = createSign(params_to_sign, method, host_name,
request_path, SECRET_KEY)
params_to_sign['Signature'] = secret_sign
if PRIVATE_KEY:
params_to_sign['PrivateSignature'] = createPrivateSign(secret_sign, PRIVATE_KEY)
url = host_url + request_path + '?' + urllib.parse.urlencode(params_to_sign)
return http_post_request(url, params, _async=_async) | def api_key_post(params, request_path, _async=False) | From the Huobi demo: build a signed POST request and dispatch it via the POST method
:param params:
:param request_path:
:return: | 2.601106 | 2.500932 | 1.040055 |
if filename is None:
f = sys.stdout
else:
f = open(filename, 'w')
def _callfunc(handle):
@wraps(handle)
def func(self, topic, msg):
t0 = time.time()
handle(self, topic, msg)
t1 = time.time()
print(f'{self.name} handle run time: {t1 - t0}s', file=f)
return func
return _callfunc | def handler_profiler(filename=None) | Performance-profiling decorator for handlers
:param filename:
:return: | 3.492797 | 3.294523 | 1.060183 |
args = super(SatUtilsParser, self).parse_args(*args, **kwargs)
args = vars(args)
args = {k: v for k, v in args.items() if v is not None}
if args.get('command', None) is None:
self.print_help()
sys.exit(0)
# set logging level
if 'verbosity' in args:
logging.basicConfig(stream=sys.stdout, level=(50-args.pop('verbosity') * 10))
# set global configuration options
if 'url' in args:
config.API_URL = args.pop('url')
if 'datadir' in args:
config.DATADIR = args.pop('datadir')
if 'filename' in args:
config.FILENAME = args.pop('filename')
return args | def parse_args(self, *args, **kwargs) | Parse arguments | 2.564679 | 2.524444 | 1.015938 |
parser = cls(*args, **kwargs)
subparser = parser.add_subparsers(dest='command')
parents = [parser.pparser, parser.output_parser]
sparser = subparser.add_parser('search', help='Perform new search of items', parents=parents)
parser.search_group = sparser.add_argument_group('search options')
parser.search_group.add_argument('-c', '--collection', help='Name of collection', default=None)
h = 'One or more scene IDs from provided collection (ignores other parameters)'
parser.search_group.add_argument('--ids', help=h, nargs='*', default=None)
parser.search_group.add_argument('--bbox', help='Bounding box (min lon, min lat, max lon, max lat)', nargs=4)
parser.search_group.add_argument('--intersects', help='GeoJSON Feature (file or string)')
parser.search_group.add_argument('--datetime', help='Single date/time or begin and end date/time (e.g., 2017-01-01/2017-02-15)')
parser.search_group.add_argument('-p', '--property', nargs='*', help='Properties of form KEY=VALUE (<, >, <=, >=, = supported)')
parser.search_group.add_argument('--sort', help='Sort by fields', nargs='*')
h = 'Only output how many Items found'
parser.search_group.add_argument('--found', help=h, action='store_true', default=False)
parser.search_group.add_argument('--url', help='URL of the API', default=config.API_URL)
parents.append(parser.download_parser)
lparser = subparser.add_parser('load', help='Load items from previous search', parents=parents)
lparser.add_argument('items', help='GeoJSON file of Items')
return parser | def newbie(cls, *args, **kwargs) | Create a newbie class, with all the skills needed | 3.494726 | 3.572241 | 0.978301 |
if items is None:
# if there are no items then perform a search
search = Search.search(**kwargs)
if found:
num = search.found()
print('%s items found' % num)
return num
items = search.items()
else:
# otherwise, load a search from a file
items = Items.load(items)
print('%s items found' % len(items))
# print metadata
if printmd is not None:
print(items.summary(printmd))
# print calendar
if printcal:
print(items.calendar())
# save all metadata in JSON file
if save is not None:
items.save(filename=save)
# download files given `download` keys
if download is not None:
if 'ALL' in download:
# get complete set of assets
download = set([k for i in items for k in i.assets])
for key in download:
items.download(key=key, path=config.DATADIR, filename=config.FILENAME, requestor_pays=requestor_pays)
return items | def main(items=None, printmd=None, printcal=False, found=False,
save=None, download=None, requestor_pays=False, **kwargs) | Main function for performing a search | 3.811658 | 3.796368 | 1.004027 |
if 'ids' in self.kwargs:
cid = self.kwargs['query']['collection']['eq']
return len(self.items_by_id(self.kwargs['ids'], cid))
kwargs = {
'page': 1,
'limit': 0
}
kwargs.update(self.kwargs)
results = self.query(**kwargs)
return results['meta']['found'] | def found(self) | Small query to determine total number of hits | 5.627522 | 5.126013 | 1.097836 |
logger.debug('Query URL: %s, Body: %s' % (url, json.dumps(kwargs)))
response = requests.post(url, data=json.dumps(kwargs))
# API error
if response.status_code != 200:
raise SatSearchError(response.text)
return response.json() | def query(cls, url=urljoin(config.API_URL, 'stac/search'), **kwargs) | Get request | 3.124697 | 3.065279 | 1.019384 |
url = urljoin(config.API_URL, 'collections/%s' % cid)
return Collection(cls.query(url=url)) | def collection(cls, cid) | Get a Collection record | 6.26813 | 5.913825 | 1.059911 |
col = cls.collection(collection)
items = []
base_url = urljoin(config.API_URL, 'collections/%s/items' % collection)
for id in ids:
try:
items.append(Item(cls.query(urljoin(base_url, id))))
except SatSearchError:
    pass
return Items(items, collections=[col]) | def items_by_id(cls, ids, collection) | Return Items from collection with matching ids | 4.860236 | 4.604016 | 1.055651 |
_limit = 500
if 'ids' in self.kwargs:
col = self.kwargs.get('query', {}).get('collection', {}).get('eq', None)
if col is None:
raise SatSearchError('Collection required when searching by id')
return self.items_by_id(self.kwargs['ids'], col)
items = []
found = self.found()
if found > limit:
logger.warning('There are more items found (%s) than the limit (%s) provided.' % (found, limit))
maxitems = min(found, limit)
kwargs = {
'page': 1,
'limit': min(_limit, maxitems)
}
kwargs.update(self.kwargs)
while len(items) < maxitems:
items += [Item(i) for i in self.query(**kwargs)['features']]
kwargs['page'] += 1
# retrieve collections
collections = []
for c in set([item.properties['collection'] for item in items if 'collection' in item.properties]):
collections.append(self.collection(c))
return Items(items, collections=collections, search=self.kwargs) | def items(self, limit=10000) | Return all of the Items and Collections for this search | 3.783563 | 3.603893 | 1.049854 |
if start is not None: infile.seek(start)
while True:
to_read = min(bufsize, stop + 1 - infile.tell() if stop else bufsize)
buf = infile.read(to_read)
if not buf:
break
outfile.write(buf) | def copy_byte_range(infile, outfile, start=None, stop=None, bufsize=16*1024) | Like shutil.copyfileobj, but only copy a range of the streams.
Both start and stop are inclusive. | 3.185878 | 2.165422 | 1.47125 |
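A quick in-memory check of the byte-range copy above; both start and stop are inclusive:

import io

src = io.BytesIO(b'0123456789')
dst = io.BytesIO()
copy_byte_range(src, dst, start=2, stop=5)
assert dst.getvalue() == b'2345'  # bytes at offsets 2..5 inclusive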
if byte_range.strip() == '':
return None, None
m = BYTE_RANGE_RE.match(byte_range)
if not m:
raise ValueError('Invalid byte range %s' % byte_range)
first, last = [x and int(x) for x in m.groups()]
if last and last < first:
raise ValueError('Invalid byte range %s' % byte_range)
return first, last | def parse_byte_range(byte_range) | Returns the two numbers in 'bytes=123-456' or throws ValueError.
The last number or both numbers may be None. | 2.920655 | 1.908219 | 1.530566 |
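Example inputs and outputs for the parser above, assuming the module defines BYTE_RANGE_RE roughly as re.compile(r'bytes=(\d+)-(\d+)?$'):

parse_byte_range('bytes=123-456')  # -> (123, 456)
parse_byte_range('bytes=123-')     # -> (123, None)
parse_byte_range('')               # -> (None, None)
parse_byte_range('bytes=9-2')      # raises ValueError (last < first)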
init_py = readfile(os.path.join(package, '__init__.py'))
author = re.search("__author__ = u?['\"]([^'\"]+)['\"]", init_py).group(1)
return UltraMagicString(author) | def get_author(package) | Return package author as listed in `__author__` in `__init__.py`. | 3.941319 | 3.478014 | 1.13321 |
init_py = readfile(os.path.join(package, '__init__.py'))
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) | def get_version(package) | Return package version as listed in `__version__` in `init.py`. | 2.124436 | 1.938394 | 1.095977 |
decorator = _django_cache_page(*args, **kwargs)
def flavoured_decorator(func):
return vary_on_flavour_fetch(decorator(vary_on_flavour_update(func)))
return flavoured_decorator | def cache_page(*args, **kwargs) | Same as django's ``cache_page`` decorator, but wraps the view into
additional decorators before and after that. Makes it possible to serve multiple
flavours without getting into trouble with django's caching that doesn't
know about flavours. | 8.74091 | 2.79605 | 3.126164 |
xdg = os.path.join(os.path.expanduser('~'), '.config')
if 'XDG_CONFIG_DIR' in os.environ:
xdg = os.environ['XDG_CONFIG_DIR']
xdgfile = os.path.join(xdg, 'cmakelintrc')
if os.path.exists(xdgfile):
return xdgfile
return os.path.join(os.path.expanduser('~'), '.cmakelintrc') | def DefaultRC() | Check XDG_CONFIG_DIR before ~/.cmakelintrc | 2.17681 | 1.586957 | 1.371688 |
if line.find('#') == -1 and line.find('"') == -1:
if quote:
return '', quote
else:
return line, quote
# else have to check for comment
prior = []
prev = ''
for char in line:
try:
if char == '"':
if prev != '\\':
quote = not quote
prior.append(char)
continue
elif char == '#' and not quote:
break
if not quote:
prior.append(char)
finally:
prev = char
# rstrip removes trailing space between end of command and the comment # start
return ''.join(prior).rstrip(), quote | def CleanComments(line, quote=False) | quote means 'was in a quote starting this line' so that
quoted lines can be eaten/removed. | 4.729655 | 4.579609 | 1.032764 |
line = clean_lines.raw_lines[linenumber]
if len(line) > _lint_state.linelength:
return errors(
filename,
linenumber,
'linelength',
'Lines should be <= %d characters long' %
(_lint_state.linelength)) | def CheckLineLength(filename, linenumber, clean_lines, errors) | Check for lines longer than the recommended length | 5.061302 | 4.802652 | 1.053856 |
line = clean_lines.lines[linenumber]
if ContainsCommand(line):
command = GetCommand(line)
if IsCommandMixedCase(command):
return errors(
filename,
linenumber,
'readability/wonkycase',
'Do not use mixed case commands')
if clean_lines.have_seen_uppercase is None:
clean_lines.have_seen_uppercase = IsCommandUpperCase(command)
else:
is_upper = IsCommandUpperCase(command)
if is_upper != clean_lines.have_seen_uppercase:
return errors(
filename,
linenumber,
'readability/mixedcase',
'Do not mix upper and lower case commands') | def CheckUpperLowerCase(filename, linenumber, clean_lines, errors) | Check that commands are either lower case or upper case, but not both | 3.92378 | 3.596802 | 1.090908 |
line = clean_lines.lines[linenumber]
match = ContainsCommand(line)
if match and len(match.group(2)):
errors(filename, linenumber, 'whitespace/extra',
"Extra spaces between '%s' and its ()"%(match.group(1)))
if match:
spaces_after_open = len(_RE_COMMAND_START_SPACES.match(line).group(1))
initial_spaces = GetInitialSpaces(line)
initial_linenumber = linenumber
end = None
while True:
line = clean_lines.lines[linenumber]
end = _RE_COMMAND_END_SPACES.search(line)
if end:
break
linenumber += 1
if linenumber >= len(clean_lines.lines):
break
if linenumber == len(clean_lines.lines) and not end:
errors(filename, initial_linenumber, 'syntax',
'Unable to find the end of this command')
if end:
spaces_before_end = len(end.group(1))
initial_spaces = GetInitialSpaces(line)
if initial_linenumber != linenumber and spaces_before_end >= initial_spaces:
spaces_before_end -= initial_spaces
if spaces_after_open != spaces_before_end:
errors(filename, initial_linenumber, 'whitespace/mismatch',
'Mismatching spaces inside () after command') | def CheckCommandSpaces(filename, linenumber, clean_lines, errors) | No extra spaces between command and parenthesis | 3.385524 | 3.25518 | 1.040042 |
line = clean_lines.lines[linenumber]
for cmd in _logic_commands:
if re.search(r'\b%s\b'%cmd, line.lower()):
m = _RE_LOGIC_CHECK.search(line)
if m:
errors(filename, linenumber, 'readability/logic',
'Expression repeated inside %s; '
'better to use only %s()'%(cmd, m.group(1)))
break | def CheckRepeatLogic(filename, linenumber, clean_lines, errors) | Check for logic inside else, endif etc | 5.762486 | 5.528412 | 1.04234 |
CheckIndent(filename, linenumber, clean_lines, errors)
CheckCommandSpaces(filename, linenumber, clean_lines, errors)
line = clean_lines.raw_lines[linenumber]
if line.find('\t') != -1:
errors(filename, linenumber, 'whitespace/tabs', 'Tab found; please use spaces')
if line and line[-1].isspace():
errors(filename, linenumber, 'whitespace/eol', 'Line ends in whitespace')
CheckRepeatLogic(filename, linenumber, clean_lines, errors) | def CheckStyle(filename, linenumber, clean_lines, errors) | Check style issues. These are:
No extra spaces between command and parenthesis
Matching spaces between parenthesis and arguments
No repeated logic in else(), endif(), endmacro() | 3.550696 | 3.712842 | 0.956328 |
CheckLintPragma(filename, linenumber, clean_lines.raw_lines[linenumber], errors)
CheckLineLength(filename, linenumber, clean_lines, errors)
CheckUpperLowerCase(filename, linenumber, clean_lines, errors)
CheckStyle(filename, linenumber, clean_lines, errors)
if IsFindPackage(filename):
CheckFindPackage(filename, linenumber, clean_lines, errors) | def ProcessLine(filename, linenumber, clean_lines, errors) | Arguments:
filename the name of the file
linenumber the line number index
clean_lines CleansedLines instance
errors the error handling function | 3.534784 | 3.719772 | 0.950269 |
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
hs = [nolds.hurst_rs(np.random.random(size=10000), corrected=True) for _ in range(100)]
plt.hist(hs, bins=20)
plt.xlabel("esimated value of hurst exponent")
plt.ylabel("number of experiments")
plt.show() | def plot_hurst_hist() | Plots a histogram of values obtained for the hurst exponent of uniformly
distributed white noise.
This function requires the package ``matplotlib``. | 5.937996 | 5.433722 | 1.092805 |
import cProfile
n = 10000
data = np.cumsum(np.random.random(n) - 0.5)
cProfile.runctx('lyap_e(data)', {'lyap_e': nolds.lyap_e}, {'data': data}) | def profiling() | Runs a profiling test for the function ``lyap_e`` (mainly used for development)
This function requires the package ``cProfile``. | 4.601704 | 3.278932 | 1.403415 |
import matplotlib.pyplot as plt
data = np.asarray(data)
n_all = np.arange(2,len(data)+1)
dd_all = nolds.hurst_rs(data, nvals=n_all, debug_data=True, fit="poly")
dd_def = nolds.hurst_rs(data, debug_data=True, fit="poly")
n_def = np.round(np.exp(dd_def[1][0])).astype("int32")
n_div = n_all[np.where(len(data) % n_all[:-1] == 0)]
dd_div = nolds.hurst_rs(data, nvals=n_div, debug_data=True, fit="poly")
def corr(nvals):
return [np.log(nolds.expected_rs(n)) for n in nvals]
l_all = plt.plot(dd_all[1][0], dd_all[1][1] - corr(n_all), "o")
l_def = plt.plot(dd_def[1][0], dd_def[1][1] - corr(n_def), "o")
l_div = plt.plot(dd_div[1][0], dd_div[1][1] - corr(n_div), "o")
l_cst = []
t_cst = []
if nvals is not None:
dd_cst = nolds.hurst_rs(data, nvals=nvals, debug_data=True, fit="poly")
l_cst = plt.plot(dd_cst[1][0], dd_cst[1][1] - corr(nvals), "o")
t_cst = ["custom"]
plt.xlabel("log(n)")
plt.ylabel("log((R/S)_n - E[(R/S)_n])")
plt.legend(l_all + l_def + l_div + l_cst, ["all", "default", "divisors"] + t_cst)
labeled_data = zip([dd_all[0], dd_def[0], dd_div[0]], ["all", "def", "div"])
for data, label in labeled_data:
print("%s: %.3f" % (label, data))
if nvals is not None:
print("custom: %.3f" % dd_cst[0])
plt.show() | def hurst_compare_nvals(data, nvals=None) | Creates a plot that compares the results of different choices for nvals
for the function hurst_rs.
Args:
data (array-like of float):
the input data from which the hurst exponent should be estimated
Kwargs:
nvals (array of int):
a manually selected value for the nvals parameter that should be plotted
in comparison to the default choices | 2.644322 | 2.584857 | 1.023005 |
# TODO more detailed description of fbm
assert H > 0 and H < 1
def R(t, s):
twoH = 2 * H
return 0.5 * (s**twoH + t**twoH - np.abs(t - s)**twoH)
# form the matrix tau
gamma = R(*np.mgrid[0:n, 0:n]) # apply R to every element in matrix
w, P = np.linalg.eigh(gamma)
L = np.diag(w)
sigma = np.dot(np.dot(P, np.sqrt(L)), np.linalg.inv(P))
v = np.random.randn(n)
return np.dot(sigma, v) | def fbm(n, H=0.75) | Generates fractional brownian motions of desired length.
Author:
Christian Thomae
References:
.. [fbm_1] https://en.wikipedia.org/wiki/Fractional_Brownian_motion#Method_1_of_simulation
Args:
n (int):
length of sequence to generate
Kwargs:
H (float):
hurst parameter
Returns:
array of float:
simulated fractional brownian motion | 4.747005 | 5.069282 | 0.936425 |
import quantumrandom
return np.concatenate([
quantumrandom.get_data(data_type='uint16', array_length=1024)
for i in range(int(np.ceil(n/1024.0)))
])[:n] | def qrandom(n) | Creates an array of n true random numbers obtained from the quantum random
number generator at qrng.anu.edu.au
This function requires the package quantumrandom and an internet connection.
Args:
n (int):
length of the random array
Return:
array of ints:
array of truly random unsigned 16 bit int values | 5.674998 | 5.286608 | 1.073467 |
fname = "datasets/qrandom.npy"
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f) | def load_qrandom() | Loads a set of 10000 random numbers generated by qrandom.
This dataset can be used when you want to do some limited tests with "true"
random data without an internet connection.
Returns:
int array
the dataset | 4.340001 | 4.98739 | 0.870195 |
fname = "datasets/brown72.npy"
with pkg_resources.resource_stream(__name__, fname) as f:
return np.load(f) | def load_brown72() | Loads the dataset brown72 with a prescribed Hurst exponent of 0.72
Source: http://www.bearcave.com/misl/misl_tech/wavelets/hurst/
Returns:
float array:
the dataset | 4.251221 | 4.362529 | 0.974485 |
for _ in range(steps):
x = mu * x if x < 0.5 else mu * (1 - x)
yield x | def tent_map(x, steps, mu=2) | Generates a time series of the tent map.
Characteristics and Background:
The name of the tent map is derived from the fact that the plot of x_i vs
x_i+1 looks like a tent. For mu > 1 one application of the mapping function
can be viewed as stretching the surface on which the value is located and
then folding the area that is greater than one back towards the zero. This
corresponds nicely to the definition of chaos as expansion in one dimension
which is counteracted by a compression in another dimension.
Calculating the Lyapunov exponent:
The lyapunov exponent of the tent map can be easily calculated as due to
this stretching behavior a small difference delta between two neighboring
points will indeed grow exponentially by a factor of mu in each iteration.
We thus can assume that:
delta_n = delta_0 * mu^n
We now only have to change the basis to e to obtain the exact formula that
is used for the definition of the lyapunov exponent:
delta_n = delta_0 * e^(ln(mu) * n)
Therefore the lyapunov exponent of the tent map is:
lambda = ln(mu)
References:
.. [tm_1] https://en.wikipedia.org/wiki/Tent_map
Args:
x (float):
starting point
steps (int):
number of steps for which the generator should run
Kwargs:
mu (int):
parameter mu that controls the behavior of the map
Returns:
generator object:
the generator that creates the time series | 4.316345 | 6.020553 | 0.716935 |
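Since the docstring derives lambda = ln(mu), the estimate from nolds can be sanity-checked against the analytic value; a sketch assuming nolds.lyap_r is available (parameters are illustrative):

import numpy as np
import nolds

# mu slightly below 2: with mu=2 exactly, float doubling collapses the orbit to 0
data = np.fromiter(tent_map(0.1, 5000, mu=1.99), dtype=float)
est = nolds.lyap_r(data, emb_dim=2)
print(est, np.log(1.99))  # estimate should be close to the analytic ln(mu)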
for _ in range(steps):
    x = r * x * (1 - x)
    yield x | def logistic_map(x, steps, r=4) | Generates a time series of the logistic map.
Characteristics and Background:
The logistic map is among the simplest examples for a time series that can
exhibit chaotic behavior depending on the parameter r. For r between 2 and
3, the series quickly becomes static. At r=3 the first bifurcation point is
reached after which the series starts to oscillate. Beginning with r = 3.6
it shows chaotic behavior with a few islands of stability until perfect
chaos is achieved at r = 4.
Calculating the Lyapunov exponent:
To calculate the "true" Lyapunov exponent of the logistic map, we first
have to make a few observations for maps in general that are repeated
applications of a function to a starting value.
If we have two starting values that differ by some infinitesimal
:math:`delta_0` then according to the definition of the lyapunov exponent
we will have an exponential divergence:
.. math::
|\delta_n| = |\delta_0| e^{\lambda n}
We can now write that:
.. math::
e^{\lambda n} = \lim_{\delta_0 -> 0} |\frac{\delta_n}{\delta_0}|
This is the definition of the derivative :math:`\frac{dx_n}{dx_0}` of a
point :math:`x_n` in the time series with respect to the starting point
:math:`x_0` (or rather the absolute value of that derivative). Now we can
use the fact that due to the definition of our map as repetitive
application of some f we have:
.. math::
f^{n\prime}(x) = f(f(f(...f(x_0)...))) = f'(x_n-1) \cdot f'(x_n-2)
\cdot ... \cdot f'(x_0)
with
.. math::
e^{\lambda n} = |f^{n\prime}(x)|
we now have
.. math::
e^{\lambda n} &= |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\
\Leftrightarrow \\
\lambda n &= \ln |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\
\Leftrightarrow \\
\lambda &= \frac{1}{n} \ln |f'(x_n-1) \cdot f'(x_n-2) \cdot ... \cdot f'(x_0)| \\
&= \frac{1}{n} \sum_{k=0}^{n-1} \ln |f'(x_k)|
With this sum we can now calculate the lyapunov exponent for any map.
For the logistic map we simply have to calculate :math:`f'(x)` and as we
have
.. math::
f(x) = r x (1-x) = rx - rx²
we now get
.. math::
f'(x) = r - 2 rx
References:
.. [lm_1] https://en.wikipedia.org/wiki/Tent_map
.. [lm_2] https://blog.abhranil.net/2015/05/15/lyapunov-exponent-of-the-logistic-map-mathematica-code/
Args:
x (float):
starting point
steps (int):
number of steps for which the generator should run
Kwargs:
r (int):
parameter r that controls the behavior of the map
Returns:
generator object:
the generator that creates the time series | 6.754282 | 6.508815 | 1.037713 |
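The derivative-sum formula from the docstring, lambda = (1/n) * sum_k ln|f'(x_k)| with f'(x) = r - 2rx, can be evaluated directly on a generated series:

import numpy as np

r = 4
xs = np.fromiter(logistic_map(0.1, 10000, r=r), dtype=float)
lam = np.mean(np.log(np.abs(r - 2 * r * xs)))
print(lam)  # approaches ln(2) ~ 0.693, the known Lyapunov exponent for r = 4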
data = np.asarray(data)
min_len = (emb_dim - 1) * lag + 1
if len(data) < min_len:
msg = "cannot embed data of length {} with embedding dimension {} " \
+ "and lag {}, minimum required length is {}"
raise ValueError(msg.format(len(data), emb_dim, lag, min_len))
m = len(data) - min_len + 1
indices = np.repeat([np.arange(emb_dim) * lag], m, axis=0)
indices += np.arange(m).reshape((m, 1))
return data[indices] | def delay_embedding(data, emb_dim, lag=1) | Perform a time-delay embedding of a time series
Args:
data (array-like):
the data that should be embedded
emb_dim (int):
the embedding dimension
Kwargs:
lag (int):
the lag between elements in the embedded vectors
Returns:
emb_dim x m array:
matrix of embedded vectors of the form
[data[i], data[i+lag], data[i+2*lag], ... data[i+(emb_dim-1)*lag]]
for i in 0 to m-1 (m = len(data)-(emb_dim-1)*lag) | 2.720991 | 2.66954 | 1.019273 |
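A worked example of the embedding above on a short series:

import numpy as np

delay_embedding(np.arange(6), emb_dim=3, lag=1)
# array([[0, 1, 2],
#        [1, 2, 3],
#        [2, 3, 4],
#        [3, 4, 5]])
delay_embedding(np.arange(6), emb_dim=3, lag=2)
# array([[0, 2, 4],
#        [1, 3, 5]])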
# minimum length required to find single orbit vector
min_len = (kwargs['emb_dim'] - 1) * kwargs['lag'] + 1
# we need trajectory_len orbit vectors to follow a complete trajectory
min_len += kwargs['trajectory_len'] - 1
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2 + 1
return min_len | def lyap_r_len(**kwargs) | Helper function that calculates the minimum number of data points required
to use lyap_r.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_r (required: emb_dim, lag, trajectory_len and
min_tsep)
Returns:
minimum number of data points required to call lyap_r with the given
parameters | 7.971834 | 5.261237 | 1.515202 |
m = (kwargs['emb_dim'] - 1) // (kwargs['matrix_dim'] - 1)
# minimum length required to find single orbit vector
min_len = kwargs['emb_dim']
# we need to follow each starting point of an orbit vector for m more steps
min_len += m
# we need min_tsep * 2 + 1 orbit vectors to find neighbors for each
min_len += kwargs['min_tsep'] * 2
# we need at least min_nb neighbors for each orbit vector
min_len += kwargs['min_nb']
return min_len | def lyap_e_len(**kwargs) | Helper function that calculates the minimum number of data points required
to use lyap_e.
Note that none of the required parameters may be set to None.
Kwargs:
kwargs(dict):
arguments used for lyap_e (required: emb_dim, matrix_dim, min_nb
and min_tsep)
Returns:
minimum number of data points required to call lyap_e with the given
parameters | 8.057635 | 5.478141 | 1.47087 |
max_exp = np.log2(1.0 * total_N / min_n)
max_exp = int(np.floor(max_exp))
return [int(np.floor(1.0 * total_N / (2**i))) for i in range(1, max_exp + 1)] | def binary_n(total_N, min_n=50) | Creates a list of values by successively halving the total length total_N
until the resulting value is less than min_n.
Non-integer results are rounded down.
Args:
total_N (int):
total length
Kwargs:
min_n (int):
minimal length after division
Returns:
list of integers:
total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n | 2.880173 | 3.065529 | 0.939536 |
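For example, successively halving a length-1000 series stops before a subsequence length would drop below min_n = 50:

binary_n(1000, min_n=50)  # -> [500, 250, 125, 62]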
assert max_n > min_n
assert factor > 1
# stop condition: min * f^x = max
# => f^x = max/min
# => x = log(max/min) / log(f)
max_i = int(np.floor(np.log(1.0 * max_n / min_n) / np.log(factor)))
ns = [min_n]
for i in range(max_i + 1):
n = int(np.floor(min_n * (factor ** i)))
if n > ns[-1]:
ns.append(n)
return ns | def logarithmic_n(min_n, max_n, factor) | Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.
Non-integer results are rounded down.
Args:
min_n (float):
minimum value (must be < max_n)
max_n (float):
maximum value (must be > min_n)
factor (float):
factor used to increase min_n (must be > 1)
Returns:
list of integers:
min_n, min_n * factor, min_n * factor^2, ... min_n * factor^i < max_n
without duplicates | 2.948823 | 3.046676 | 0.967882 |
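A worked example of the logarithmic spacing above; values that round down to a previous entry are skipped, so the list stays strictly increasing:

logarithmic_n(4, 100, 1.5)  # -> [4, 6, 9, 13, 20, 30, 45, 68]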
l = np.log(max_n)
span = l * ratio
start = l * (1 - ratio) * 0.5
midrange = start + 1.0*np.arange(nsteps)/nsteps*span
nvals = np.round(np.exp(midrange)).astype("int32")
return np.unique(nvals) | def logmid_n(max_n, ratio=1/4.0, nsteps=15) | Creates an array of integers that lie evenly spaced in the "middle" of the
logarithmic scale from 0 to log(max_n).
If max_n is very small and/or nsteps is very large, this may lead to
duplicate values which will be removed from the output.
This function has benefits in hurst_rs, because it cuts away both very small
and very large n, which both can cause problems, and still produces a
logarithmically spaced sequence.
Args:
max_n (int):
largest possible output value (should be the sequence length when used in
hurst_rs)
Kwargs:
ratio (float):
width of the "middle" of the logarithmic interval relative to log(max_n).
For example, for ratio=1/2.0 the logarithm of the resulting values will
lie between 0.25 * log(max_n) and 0.75 * log(max_n).
nsteps (float):
(maximum) number of values to take from the specified range
Returns:
array of int:
a logarithmically spaced sequence of at most nsteps values (may be less,
because only unique values are returned) | 4.105708 | 4.785077 | 0.858023 |
assert max_n > min_n
assert factor > 1
max_i = int(np.floor(np.log(1.0 * max_n / min_n) / np.log(factor)))
return [min_n * (factor ** i) for i in range(max_i + 1)] | def logarithmic_r(min_n, max_n, factor) | Creates a list of values by successively multiplying a minimum value min_n by
a factor > 1 until a maximum value max_n is reached.
Args:
min_n (float):
minimum value (must be < max_n)
max_n (float):
maximum value (must be > min_n)
factor (float):
factor used to increase min_n (must be > 1)
Returns:
list of floats:
min_n, min_n * factor, min_n * factor^2, ... min_n * factor^i < max_n | 2.442999 | 2.512253 | 0.972434 |
front = (n - 0.5) / n
i = np.arange(1,n)
back = np.sum(np.sqrt((n - i) / i))
if n <= 340:
middle = math.gamma((n-1) * 0.5) / math.sqrt(math.pi) / math.gamma(n * 0.5)
else:
middle = 1.0 / math.sqrt(n * math.pi * 0.5)
return front * middle * back | def expected_rs(n) | Calculates the expected (R/S)_n for white noise for a given n.
This is used as a correction factor in the function hurst_rs. It uses the
formula of Anis-Lloyd-Peters (see [h_3]_).
Args:
n (int):
the value of n for which the expected (R/S)_n should be calculated
Returns:
float:
expected (R/S)_n for white noise | 3.824755 | 4.209919 | 0.90851 |
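Reading off the implementation above, the Anis-Lloyd-Peters expectation it computes is

E[(R/S)_n] = \frac{n - \frac{1}{2}}{n} \cdot \frac{\Gamma((n-1)/2)}{\sqrt{\pi}\,\Gamma(n/2)} \cdot \sum_{i=1}^{n-1} \sqrt{\frac{n-i}{i}}

with the Gamma ratio replaced by 1/\sqrt{n\pi/2} for n > 340 to avoid overflow of the gamma function.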
rsvals = [expected_rs(n) for n in nvals]
poly = poly_fit(np.log(nvals), np.log(rsvals), 1, fit=fit)
return poly[0] | def expected_h(nvals, fit="RANSAC") | Uses expected_rs to calculate the expected value for the Hurst exponent h
based on the values of n used for the calculation.
Args:
nvals (iterable of int):
the values of n used to calculate the individual (R/S)_n
KWargs:
fit (str):
the fitting method to use for the line fit, either 'poly' for normal
least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which
is more robust to outliers
Returns:
float:
expected h for white noise | 4.557558 | 4.698161 | 0.970073 |
data = np.asarray(data)
total_N = len(data)
m = total_N // n # number of sequences
# cut values at the end of data to make the array divisible by n
data = data[:total_N - (total_N % n)]
# split remaining data into subsequences of length n
seqs = np.reshape(data, (m, n))
# calculate means of subsequences
means = np.mean(seqs, axis=1)
# normalize subsequences by substracting mean
y = seqs - means.reshape((m, 1))
# build cumulative sum of subsequences
y = np.cumsum(y, axis=1)
# find ranges
r = np.max(y, axis=1) - np.min(y, axis=1)
# find standard deviation
# we should use the unbiased estimator, since we do not know the true mean
s = np.std(seqs, axis=1, ddof=1 if unbiased else 0)
# some ranges may be zero and have to be excluded from the analysis
idx = np.where(r != 0)
r = r[idx]
s = s[idx]
# it may happen that all ranges are zero (if all values in data are equal)
if len(r) == 0:
return np.nan
else:
# return mean of r/s along subsequence index
return np.mean(r / s) | def rs(data, n, unbiased=True) | Calculates an individual R/S value in the rescaled range approach for
a given n.
Note: This is just a helper function for hurst_rs and should not be called
directly.
Args:
data (array-like of float):
time series
n (float):
size of the subseries in which data should be split
Kwargs:
unbiased (boolean):
if True, the standard deviation based on the unbiased variance
(1/(N-1) instead of 1/N) will be used. This should be the default choice,
since the true mean of the sequences is not known. This parameter should
only be changed to recreate results of other implementations.
Returns:
float:
(R/S)_n | 3.583447 | 3.449047 | 1.038967 |
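A tiny worked example of the computation above, using a single subsequence (n equal to the series length):

rs([1, 2, 3, 4], 4)
# mean = 2.5, cumulative deviations y = [-1.5, -2.0, -1.5, 0.0]
# range R = 0.0 - (-2.0) = 2.0, unbiased std S ~ 1.291
# so (R/S)_4 ~ 2.0 / 1.291 ~ 1.549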
# local import to avoid dependency for non-debug use
import matplotlib.pyplot as plt
plt.plot(xvals, yvals, "bo", label=data_label)
if not (poly is None):
plt.plot(xvals, np.polyval(poly, xvals), "r-", label=reg_label)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(loc="best")
if fname is None:
plt.show()
else:
plt.savefig(fname)
plt.close() | def plot_reg(xvals, yvals, poly, x_label="x", y_label="y", data_label="data",
reg_label="regression line", fname=None) | Helper function to plot trend lines for line-fitting approaches. This
function will show a plot through ``plt.show()`` and close it after the window
has been closed by the user.
Args:
xvals (list/array of float):
list of x-values
yvals (list/array of float):
list of y-values
poly (list/array of float):
polynomial parameters as accepted by ``np.polyval``
Kwargs:
x_label (str):
label of the x-axis
y_label (str):
label of the y-axis
data_label (str):
label of the data
reg_label(str):
label of the regression line
fname (str):
file name (if not None, the plot will be saved to disc instead of
showing it though ``plt.show()``) | 2.10595 | 2.148595 | 0.980152 |
if status in expected:
return
msg = ('Expect status %r from Google Storage. But got status %d.\n'
'Path: %r.\n'
'Request headers: %r.\n'
'Response headers: %r.\n'
'Body: %r.\n'
'Extra info: %r.\n' %
(expected, status, path, headers, resp_headers, body, extras))
if status == httplib.UNAUTHORIZED:
raise AuthorizationError(msg)
elif status == httplib.FORBIDDEN:
raise ForbiddenError(msg)
elif status == httplib.NOT_FOUND:
raise NotFoundError(msg)
elif status == httplib.REQUEST_TIMEOUT:
raise TimeoutError(msg)
elif status == httplib.REQUESTED_RANGE_NOT_SATISFIABLE:
raise InvalidRange(msg)
elif (status == httplib.OK and 308 in expected and
httplib.OK not in expected):
raise FileClosedError(msg)
elif status >= 500:
raise ServerError(msg)
else:
raise FatalError(msg) | def check_status(status, expected, path, headers=None,
resp_headers=None, body=None, extras=None) | Check HTTP response status is expected.
Args:
status: HTTP response status. int.
expected: a list of expected statuses. A list of ints.
path: filename or a path prefix.
headers: HTTP request headers.
resp_headers: HTTP response headers.
body: HTTP response body.
extras: extra info to be logged verbatim if error occurs.
Raises:
AuthorizationError: if authorization failed.
NotFoundError: if an object that's expected to exist doesn't.
TimeoutError: if HTTP request timed out.
ServerError: if server experienced some errors.
FatalError: if any other unexpected errors occurred. | 2.453718 | 2.345084 | 1.046324 |
default = getattr(_thread_local_settings, 'default_retry_params', None)
if default is None or not default.belong_to_current_request():
return RetryParams()
else:
return copy.copy(default) | def _get_default_retry_params() | Get default RetryParams for current request and current thread.
Returns:
A new instance of the default RetryParams. | 5.913784 | 5.451463 | 1.084807 |
return (resp.status_code == httplib.REQUEST_TIMEOUT or
(resp.status_code >= 500 and
resp.status_code < 600)) | def _should_retry(resp) | Given a urlfetch response, decide whether to retry that request. | 2.999351 | 2.493363 | 1.202934 |
@utils.wrapping(tasklet)
def eager_wrapper(*args, **kwds):
fut = tasklet(*args, **kwds)
_run_until_rpc()
return fut
return eager_wrapper | def _eager_tasklet(tasklet) | Decorator to turn tasklet to run eagerly. | 8.18343 | 6.946976 | 1.177985 |
start_time = time.time()
n = 1
while True:
e = None
result = None
got_result = False
try:
result = yield tasklet(**kwds)
got_result = True
if not self.should_retry(result):
raise ndb.Return(result)
except runtime.DeadlineExceededError:
logging.debug(
'Tasklet has exceeded request deadline after %s seconds total',
time.time() - start_time)
raise
except self.retriable_exceptions as e:
pass
if n == 1:
logging.debug('Tasklet is %r', tasklet)
delay = self.retry_params.delay(n, start_time)
if delay <= 0:
logging.debug(
'Tasklet failed after %s attempts and %s seconds in total',
n, time.time() - start_time)
if got_result:
raise ndb.Return(result)
elif e is not None:
raise e
else:
assert False, 'Should never reach here.'
if got_result:
logging.debug(
'Got result %r from tasklet.', result)
else:
logging.debug(
'Got exception "%r" from tasklet.', e)
logging.debug('Retry in %s seconds.', delay)
n += 1
yield tasklets.sleep(delay) | def run(self, tasklet, **kwds) | Run a tasklet with retry.
The retry should be transparent to the caller: if no results
are successful, the exception or result from the last retry is returned
to the caller.
Args:
tasklet: the tasklet to run.
**kwds: keywords arguments to run the tasklet.
Raises:
The exception from running the tasklet.
Returns:
The result from running the tasklet. | 3.198468 | 3.082051 | 1.037772 |
valid_types = [val_type]
if val_type is float:
valid_types.append(int)
if type(val) not in valid_types:
raise TypeError(
'Expect type %s for parameter %s' % (val_type.__name__, name))
if val < 0:
raise ValueError(
'Value for parameter %s has to be greater than 0' % name)
if not can_be_zero and val == 0:
raise ValueError(
'Value for parameter %s can not be 0' % name)
return val | def _check(cls, name, val, can_be_zero=False, val_type=float) | Check init arguments.
Args:
name: name of the argument. For logging purpose.
val: value. Value has to be non negative number.
can_be_zero: whether value can be zero.
val_type: Python type of the value.
Returns:
The value.
Raises:
ValueError: when invalid value is passed in.
TypeError: when invalid value type is passed in. | 2.268747 | 2.471967 | 0.91779 |
if (n > self.max_retries or
(n > self.min_retries and
time.time() - start_time > self.max_retry_period)):
return -1
return min(
math.pow(self.backoff_factor, n-1) * self.initial_delay,
self.max_delay) | def delay(self, n, start_time) | Calculate delay before the next retry.
Args:
n: the number of current attempt. The first attempt should be 1.
start_time: the time when retry started in unix time.
Returns:
Number of seconds to wait before next retry. -1 if retry should give up. | 3.761515 | 3.206568 | 1.173066 |
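A numeric illustration of the backoff schedule above; the constructor keyword names are assumed to match the attributes used in delay(), and the values are illustrative rather than library defaults:

import time

params = RetryParams(initial_delay=0.5, backoff_factor=2.0, max_delay=4.0,
                     min_retries=2, max_retries=5, max_retry_period=60.0)
start = time.time()
[params.delay(n, start) for n in range(1, 7)]
# -> [0.5, 1.0, 2.0, 4.0, 4.0, -1]: delays double, cap at max_delay,
#    and become -1 (give up) once n exceeds max_retries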
common.validate_file_path(filename)
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
filename = api_utils._quote_filename(filename)
if mode == 'w':
common.validate_options(options)
return storage_api.StreamingBuffer(api, filename, content_type, options)
elif mode == 'r':
if content_type or options:
raise ValueError('Options and content_type can only be specified '
'for writing mode.')
return storage_api.ReadBuffer(api,
filename,
buffer_size=read_buffer_size,
offset=offset)
else:
raise ValueError('Invalid mode %s.' % mode) | def open(filename,
mode='r',
content_type=None,
options=None,
read_buffer_size=storage_api.ReadBuffer.DEFAULT_BUFFER_SIZE,
retry_params=None,
_account_id=None,
offset=0) | Opens a Google Cloud Storage file and returns it as a File-like object.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
mode: 'r' for reading mode. 'w' for writing mode.
In reading mode, the file must exist. In writing mode, a file will
be created or be overrode.
content_type: The MIME type of the file. str. Only valid in writing mode.
options: A str->basestring dict to specify additional headers to pass to
GCS e.g. {'x-goog-acl': 'private', 'x-goog-meta-foo': 'foo'}.
Supported options are x-goog-acl, x-goog-meta-, cache-control,
content-disposition, and content-encoding.
Only valid in writing mode.
See https://developers.google.com/storage/docs/reference-headers
for details.
read_buffer_size: The buffer size for read. Read keeps a buffer
and prefetches another one. To minimize blocking for large files,
always read by buffer size. To minimize number of RPC requests for
small files, set a large buffer size. Max is 30MB.
retry_params: An instance of api_utils.RetryParams for subsequent calls
to GCS from this file handle. If None, the default one is used.
_account_id: Internal-use only.
offset: Number of bytes to skip at the start of the file. If None, 0 is
used.
Returns:
A reading or writing buffer that supports File-like interface. Buffer
must be closed after operations are done.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if an object that's expected to exist doesn't.
ValueError: invalid open mode or if content_type or options are specified
in reading mode. | 3.006832 | 3.196664 | 0.940616 |
api = storage_api._get_storage_api(retry_params=retry_params,
account_id=_account_id)
common.validate_file_path(filename)
filename = api_utils._quote_filename(filename)
status, resp_headers, content = api.delete_object(filename)
errors.check_status(status, [204], filename, resp_headers=resp_headers,
body=content) | def delete(filename, retry_params=None, _account_id=None) | Delete a Google Cloud Storage file.
Args:
filename: A Google Cloud Storage filename of form '/bucket/filename'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Raises:
errors.NotFoundError: if the file doesn't exist prior to deletion. | 4.197329 | 4.469117 | 0.939185 |
return _get_bucket_attribute(bucket,
'location',
'LocationConstraint',
retry_params=retry_params,
_account_id=_account_id) | def get_location(bucket, retry_params=None, _account_id=None) | Returns the location for the given bucket.
https://cloud.google.com/storage/docs/bucket-locations
Args:
bucket: A Google Cloud Storage bucket of form '/bucket'.
retry_params: An api_utils.RetryParams for this call to GCS. If None,
the default one is used.
_account_id: Internal-use only.
Returns:
The location as a string.
Raises:
errors.AuthorizationError: if authorization failed.
errors.NotFoundError: if the bucket does not exist. | 3.759411 | 5.246476 | 0.716559 |