| code (stringlengths 75-104k) | code_sememe (stringlengths 47-309k) | token_type (stringlengths 215-214k) | code_dependency (stringlengths 75-155k) |
|---|---|---|---|
def elements(self):
"""Return the identifier's elements as tuple."""
offset = self.EXTRA_DIGITS
if offset:
return (self._id[:offset], self.company_prefix, self._reference,
self.check_digit)
else:
return (self.company_prefix, self._reference, self.check_digit) | def function[elements, parameter[self]]:
constant[Return the identifier's elements as a tuple.]
variable[offset] assign[=] name[self].EXTRA_DIGITS
if name[offset] begin[:]
return[tuple[[<ast.Subscript object at 0x7da18f810760>, <ast.Attribute object at 0x7da18f810730>, <ast.Attribute object at 0x7da18f810850>, <ast.Attribute object at 0x7da18f812080>]]] | keyword[def] identifier[elements] ( identifier[self] ):
literal[string]
identifier[offset] = identifier[self] . identifier[EXTRA_DIGITS]
keyword[if] identifier[offset] :
keyword[return] ( identifier[self] . identifier[_id] [: identifier[offset] ], identifier[self] . identifier[company_prefix] , identifier[self] . identifier[_reference] ,
identifier[self] . identifier[check_digit] )
keyword[else] :
keyword[return] ( identifier[self] . identifier[company_prefix] , identifier[self] . identifier[_reference] , identifier[self] . identifier[check_digit] ) | def elements(self):
"""Return the identifier's elements as tuple."""
offset = self.EXTRA_DIGITS
if offset:
return (self._id[:offset], self.company_prefix, self._reference, self.check_digit) # depends on [control=['if'], data=[]]
else:
return (self.company_prefix, self._reference, self.check_digit) |
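A minimal sketch of how `EXTRA_DIGITS` shapes the returned tuple; the `Ident` class and its field values are hypothetical stand-ins for the real identifier class:

```python
# Hypothetical stand-in mirroring the elements() pattern above.
class Ident:
    EXTRA_DIGITS = 1  # e.g. one indicator digit before the company prefix

    def __init__(self, _id, company_prefix, reference, check_digit):
        self._id = _id
        self.company_prefix = company_prefix
        self._reference = reference
        self.check_digit = check_digit

    def elements(self):
        offset = self.EXTRA_DIGITS
        if offset:
            return (self._id[:offset], self.company_prefix,
                    self._reference, self.check_digit)
        return (self.company_prefix, self._reference, self.check_digit)

print(Ident('14012345678901', '4012345', '678901', '1').elements())
# ('1', '4012345', '678901', '1')
```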
def step1(self, pin):
"""First pairing step."""
context = SRPContext(
'Pair-Setup', str(pin),
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512)
self._session = SRPClientSession(
context, binascii.hexlify(self._auth_private).decode()) | def function[step1, parameter[self, pin]]:
constant[First pairing step.]
variable[context] assign[=] call[name[SRPContext], parameter[constant[Pair-Setup], call[name[str], parameter[name[pin]]]]]
name[self]._session assign[=] call[name[SRPClientSession], parameter[name[context], call[call[name[binascii].hexlify, parameter[name[self]._auth_private]].decode, parameter[]]]] | keyword[def] identifier[step1] ( identifier[self] , identifier[pin] ):
literal[string]
identifier[context] = identifier[SRPContext] (
literal[string] , identifier[str] ( identifier[pin] ),
identifier[prime] = identifier[constants] . identifier[PRIME_3072] ,
identifier[generator] = identifier[constants] . identifier[PRIME_3072_GEN] ,
identifier[hash_func] = identifier[hashlib] . identifier[sha512] )
identifier[self] . identifier[_session] = identifier[SRPClientSession] (
identifier[context] , identifier[binascii] . identifier[hexlify] ( identifier[self] . identifier[_auth_private] ). identifier[decode] ()) | def step1(self, pin):
"""First pairing step."""
context = SRPContext('Pair-Setup', str(pin), prime=constants.PRIME_3072, generator=constants.PRIME_3072_GEN, hash_func=hashlib.sha512)
self._session = SRPClientSession(context, binascii.hexlify(self._auth_private).decode()) |
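A standalone sketch of the same SRP setup outside the class, assuming `SRPContext`, `SRPClientSession`, and `constants` come from the `srptools` package as used above; the PIN and private key below are made-up stand-ins:

```python
import binascii
import hashlib
import os

from srptools import SRPContext, SRPClientSession, constants

pin = 1234                      # stand-in for the pairing PIN
auth_private = os.urandom(32)   # stand-in for self._auth_private
context = SRPContext(
    'Pair-Setup', str(pin),
    prime=constants.PRIME_3072,
    generator=constants.PRIME_3072_GEN,
    hash_func=hashlib.sha512)
session = SRPClientSession(
    context, binascii.hexlify(auth_private).decode())
```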
def add_info_to_uncommon_items(filtered_items, uncommon_items):
"""
Add extra info to the uncommon items.
"""
result = uncommon_items
url_prefix = '/prestacao-contas/analisar/comprovante'
for _, item in filtered_items.iterrows():
item_id = item['idPlanilhaItens']
item_name = uncommon_items[item_id]
result[item_id] = {
'name': item_name,
'salic_url': get_salic_url(item, url_prefix),
'has_receipt': has_receipt(item)
}
return result | def function[add_info_to_uncommon_items, parameter[filtered_items, uncommon_items]]:
constant[
Add extra info to the uncommon items.
]
variable[result] assign[=] name[uncommon_items]
variable[url_prefix] assign[=] constant[/prestacao-contas/analisar/comprovante]
for taget[tuple[[<ast.Name object at 0x7da1b26ac940>, <ast.Name object at 0x7da1b26ae7d0>]]] in starred[call[name[filtered_items].iterrows, parameter[]]] begin[:]
variable[item_id] assign[=] call[name[item]][constant[idPlanilhaItens]]
variable[item_name] assign[=] call[name[uncommon_items]][name[item_id]]
call[name[result]][name[item_id]] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ac340>, <ast.Constant object at 0x7da1b26ae5f0>, <ast.Constant object at 0x7da1b26ad9c0>], [<ast.Name object at 0x7da1b26adae0>, <ast.Call object at 0x7da1b26ae320>, <ast.Call object at 0x7da1b05386d0>]]
return[name[result]] | keyword[def] identifier[add_info_to_uncommon_items] ( identifier[filtered_items] , identifier[uncommon_items] ):
literal[string]
identifier[result] = identifier[uncommon_items]
identifier[url_prefix] = literal[string]
keyword[for] identifier[_] , identifier[item] keyword[in] identifier[filtered_items] . identifier[iterrows] ():
identifier[item_id] = identifier[item] [ literal[string] ]
identifier[item_name] = identifier[uncommon_items] [ identifier[item_id] ]
identifier[result] [ identifier[item_id] ]={
literal[string] : identifier[item_name] ,
literal[string] : identifier[get_salic_url] ( identifier[item] , identifier[url_prefix] ),
literal[string] : identifier[has_receipt] ( identifier[item] )
}
keyword[return] identifier[result] | def add_info_to_uncommon_items(filtered_items, uncommon_items):
"""
Add extra info to the uncommon items.
"""
result = uncommon_items
url_prefix = '/prestacao-contas/analisar/comprovante'
for (_, item) in filtered_items.iterrows():
item_id = item['idPlanilhaItens']
item_name = uncommon_items[item_id]
result[item_id] = {'name': item_name, 'salic_url': get_salic_url(item, url_prefix), 'has_receipt': has_receipt(item)} # depends on [control=['for'], data=[]]
return result |
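A hedged usage sketch: `get_salic_url` and `has_receipt` are module helpers not shown here, so the stubs below are hypothetical, as is the sample row:

```python
import pandas as pd

def get_salic_url(item, url_prefix):  # hypothetical stub
    return '{}/{}'.format(url_prefix, item['idPlanilhaItens'])

def has_receipt(item):  # hypothetical stub
    return True

filtered_items = pd.DataFrame([{'idPlanilhaItens': 42}])
uncommon_items = {42: 'Projetor multimidia'}
print(add_info_to_uncommon_items(filtered_items, uncommon_items))
# {42: {'name': 'Projetor multimidia',
#       'salic_url': '/prestacao-contas/analisar/comprovante/42',
#       'has_receipt': True}}
```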
def input_validate_aead(aead, name='aead', expected_len=None, max_aead_len = pyhsm.defines.YSM_AEAD_MAX_SIZE):
""" Input validation for YHSM_GeneratedAEAD or string. """
if isinstance(aead, pyhsm.aead_cmd.YHSM_GeneratedAEAD):
aead = aead.data
if expected_len != None:
return input_validate_str(aead, name, exact_len = expected_len)
else:
return input_validate_str(aead, name, max_len=max_aead_len) | def function[input_validate_aead, parameter[aead, name, expected_len, max_aead_len]]:
constant[ Input validation for YHSM_GeneratedAEAD or string. ]
if call[name[isinstance], parameter[name[aead], name[pyhsm].aead_cmd.YHSM_GeneratedAEAD]] begin[:]
variable[aead] assign[=] name[aead].data
if compare[name[expected_len] not_equal[!=] constant[None]] begin[:]
return[call[name[input_validate_str], parameter[name[aead], name[name]]]] | keyword[def] identifier[input_validate_aead] ( identifier[aead] , identifier[name] = literal[string] , identifier[expected_len] = keyword[None] , identifier[max_aead_len] = identifier[pyhsm] . identifier[defines] . identifier[YSM_AEAD_MAX_SIZE] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[aead] , identifier[pyhsm] . identifier[aead_cmd] . identifier[YHSM_GeneratedAEAD] ):
identifier[aead] = identifier[aead] . identifier[data]
keyword[if] identifier[expected_len] != keyword[None] :
keyword[return] identifier[input_validate_str] ( identifier[aead] , identifier[name] , identifier[exact_len] = identifier[expected_len] )
keyword[else] :
keyword[return] identifier[input_validate_str] ( identifier[aead] , identifier[name] , identifier[max_len] = identifier[max_aead_len] ) | def input_validate_aead(aead, name='aead', expected_len=None, max_aead_len=pyhsm.defines.YSM_AEAD_MAX_SIZE):
""" Input validation for YHSM_GeneratedAEAD or string. """
if isinstance(aead, pyhsm.aead_cmd.YHSM_GeneratedAEAD):
aead = aead.data # depends on [control=['if'], data=[]]
if expected_len != None:
return input_validate_str(aead, name, exact_len=expected_len) # depends on [control=['if'], data=['expected_len']]
else:
return input_validate_str(aead, name, max_len=max_aead_len) |
def addNode(self, cls=None, point=None):
"""
Creates a new node instance in the scene. If the optional \
cls parameter is not supplied, then the default node class \
will be used when creating the node. If a point is \
supplied, then the node will be created at that position, \
otherwise the node will be created under the cursor.
:param cls subclass of <XNode>
:param point <QPointF>
:return <XNode> || None
"""
# make sure we have a valid class
if not cls:
cls = self.defaultNodeClass()
if not cls:
return None
# create the new node
node = cls(self)
node.setLayer(self.currentLayer())
node.rebuild()
self.addItem(node)
if point == 'center':
x = self.sceneRect().width() / 2.0
y = self.sceneRect().height() / 2.0
x -= (node.rect().width() / 2.0)
y -= (node.rect().height() / 2.0)
node.setPos(x, y)
elif not point:
pos = self._mainView.mapFromGlobal(QCursor.pos())
point = self._mainView.mapToScene(pos)
if ( point ):
x = point.x() - node.rect().width() / 2.0
y = point.y() - node.rect().height() / 2.0
node.setPos(x, y)
else:
x = float(point.x())
y = float(point.y())
node.setPos(x, y)
return node | def function[addNode, parameter[self, cls, point]]:
constant[
Creates a new node instance in the scene. If the optional cls parameter is not supplied, then the default node class will be used when creating the node. If a point is supplied, then the node will be created at that position, otherwise the node will be created under the cursor.
:param cls subclass of <XNode>
:param point <QPointF>
:return <XNode> || None
]
if <ast.UnaryOp object at 0x7da18f58faf0> begin[:]
variable[cls] assign[=] call[name[self].defaultNodeClass, parameter[]]
if <ast.UnaryOp object at 0x7da18f58f370> begin[:]
return[constant[None]]
variable[node] assign[=] call[name[cls], parameter[name[self]]]
call[name[node].setLayer, parameter[call[name[self].currentLayer, parameter[]]]]
call[name[node].rebuild, parameter[]]
call[name[self].addItem, parameter[name[node]]]
if compare[name[point] equal[==] constant[center]] begin[:]
variable[x] assign[=] binary_operation[call[call[name[self].sceneRect, parameter[]].width, parameter[]] / constant[2.0]]
variable[y] assign[=] binary_operation[call[call[name[self].sceneRect, parameter[]].height, parameter[]] / constant[2.0]]
<ast.AugAssign object at 0x7da20cabc5e0>
<ast.AugAssign object at 0x7da20cabf5e0>
call[name[node].setPos, parameter[name[x], name[y]]]
return[name[node]] | keyword[def] identifier[addNode] ( identifier[self] , identifier[cls] = keyword[None] , identifier[point] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[cls] :
identifier[cls] = identifier[self] . identifier[defaultNodeClass] ()
keyword[if] keyword[not] identifier[cls] :
keyword[return] keyword[None]
identifier[node] = identifier[cls] ( identifier[self] )
identifier[node] . identifier[setLayer] ( identifier[self] . identifier[currentLayer] ())
identifier[node] . identifier[rebuild] ()
identifier[self] . identifier[addItem] ( identifier[node] )
keyword[if] identifier[point] == literal[string] :
identifier[x] = identifier[self] . identifier[sceneRect] (). identifier[width] ()/ literal[int]
identifier[y] = identifier[self] . identifier[sceneRect] (). identifier[height] ()/ literal[int]
identifier[x] -=( identifier[node] . identifier[rect] (). identifier[width] ()/ literal[int] )
identifier[y] -=( identifier[node] . identifier[rect] (). identifier[height] ()/ literal[int] )
identifier[node] . identifier[setPos] ( identifier[x] , identifier[y] )
keyword[elif] keyword[not] identifier[point] :
identifier[pos] = identifier[self] . identifier[_mainView] . identifier[mapFromGlobal] ( identifier[QCursor] . identifier[pos] ())
identifier[point] = identifier[self] . identifier[_mainView] . identifier[mapToScene] ( identifier[pos] )
keyword[if] ( identifier[point] ):
identifier[x] = identifier[point] . identifier[x] ()- identifier[node] . identifier[rect] (). identifier[width] ()/ literal[int]
identifier[y] = identifier[point] . identifier[y] ()- identifier[node] . identifier[rect] (). identifier[height] ()/ literal[int]
identifier[node] . identifier[setPos] ( identifier[x] , identifier[y] )
keyword[else] :
identifier[x] = identifier[float] ( identifier[point] . identifier[x] ())
identifier[y] = identifier[float] ( identifier[point] . identifier[y] ())
identifier[node] . identifier[setPos] ( identifier[x] , identifier[y] )
keyword[return] identifier[node] | def addNode(self, cls=None, point=None):
"""
Creates a new node instance in the scene. If the optional cls parameter is not supplied, then the default node class will be used when creating the node. If a point is supplied, then the node will be created at that position, otherwise the node will be created under the cursor.
:param cls subclass of <XNode>
:param point <QPointF>
:return <XNode> || None
"""
# make sure we have a valid class
if not cls:
cls = self.defaultNodeClass() # depends on [control=['if'], data=[]]
if not cls:
return None # depends on [control=['if'], data=[]]
# create the new node
node = cls(self)
node.setLayer(self.currentLayer())
node.rebuild()
self.addItem(node)
if point == 'center':
x = self.sceneRect().width() / 2.0
y = self.sceneRect().height() / 2.0
x -= node.rect().width() / 2.0
y -= node.rect().height() / 2.0
node.setPos(x, y) # depends on [control=['if'], data=[]]
elif not point:
pos = self._mainView.mapFromGlobal(QCursor.pos())
point = self._mainView.mapToScene(pos)
if point:
x = point.x() - node.rect().width() / 2.0
y = point.y() - node.rect().height() / 2.0
node.setPos(x, y) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
x = float(point.x())
y = float(point.y())
node.setPos(x, y)
return node |
def adapt_sum(line, cfg, filter_obj):
"""Determine best filter by sum of all row values"""
lines = filter_obj.filter_all(line)
res_s = [sum(it) for it in lines]
r = res_s.index(min(res_s))
return lines[r] | def function[adapt_sum, parameter[line, cfg, filter_obj]]:
constant[Determine best filter by sum of all row values]
variable[lines] assign[=] call[name[filter_obj].filter_all, parameter[name[line]]]
variable[res_s] assign[=] <ast.ListComp object at 0x7da18f58e7a0>
variable[r] assign[=] call[name[res_s].index, parameter[call[name[min], parameter[name[res_s]]]]]
return[call[name[lines]][name[r]]] | keyword[def] identifier[adapt_sum] ( identifier[line] , identifier[cfg] , identifier[filter_obj] ):
literal[string]
identifier[lines] = identifier[filter_obj] . identifier[filter_all] ( identifier[line] )
identifier[res_s] =[ identifier[sum] ( identifier[it] ) keyword[for] identifier[it] keyword[in] identifier[lines] ]
identifier[r] = identifier[res_s] . identifier[index] ( identifier[min] ( identifier[res_s] ))
keyword[return] identifier[lines] [ identifier[r] ] | def adapt_sum(line, cfg, filter_obj):
"""Determine best filter by sum of all row values"""
lines = filter_obj.filter_all(line)
res_s = [sum(it) for it in lines]
r = res_s.index(min(res_s))
return lines[r] |
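A small demonstration with a hypothetical two-filter bank (identity plus a delta filter, in the spirit of PNG row filtering); `cfg` is unused by `adapt_sum`, so `None` is passed:

```python
class FilterBank:  # hypothetical stand-in for filter_obj
    def filter_all(self, line):
        identity = list(line)
        delta = [line[0]] + [b - a for a, b in zip(line, line[1:])]
        return [identity, delta]

print(adapt_sum([10, 11, 12, 13], None, FilterBank()))
# [10, 1, 1, 1]  -- the delta row wins: sum 13 < 46
```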
def CI_calc(mean, SE, CV=1.96):
"""
Calculate confidence interval.
:param mean: mean of data
:type mean: float
:param SE: standard error of data
:type SE: float
:param CV: critical value
:type CV: float
:return: confidence interval as a tuple
"""
try:
CI_down = mean - CV * SE
CI_up = mean + CV * SE
return (CI_down, CI_up)
except Exception:
return ("None", "None") | def function[CI_calc, parameter[mean, SE, CV]]:
constant[
Calculate confidence interval.
:param mean: mean of data
:type mean: float
:param SE: standard error of data
:type SE: float
:param CV: critical value
:type CV: float
:return: confidence interval as a tuple
]
<ast.Try object at 0x7da1b16202b0> | keyword[def] identifier[CI_calc] ( identifier[mean] , identifier[SE] , identifier[CV] = literal[int] ):
literal[string]
keyword[try] :
identifier[CI_down] = identifier[mean] - identifier[CV] * identifier[SE]
identifier[CI_up] = identifier[mean] + identifier[CV] * identifier[SE]
keyword[return] ( identifier[CI_down] , identifier[CI_up] )
keyword[except] identifier[Exception] :
keyword[return] ( literal[string] , literal[string] ) | def CI_calc(mean, SE, CV=1.96):
"""
Calculate confidence interval.
:param mean: mean of data
:type mean: float
:param SE: standard error of data
:type SE: float
:param CV: critical value
:type CV: float
:return: confidence interval as a tuple
"""
try:
CI_down = mean - CV * SE
CI_up = mean + CV * SE
return (CI_down, CI_up) # depends on [control=['try'], data=[]]
except Exception:
return ('None', 'None') # depends on [control=['except'], data=[]] |
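For example, a 95% confidence interval for a small sample mean (the data values are illustrative):

```python
from math import sqrt

data = [2.1, 2.4, 1.9, 2.2, 2.0]
mean = sum(data) / len(data)                                    # 2.12
sd = sqrt(sum((x - mean) ** 2 for x in data) / (len(data) - 1)) # sample SD
se = sd / sqrt(len(data))                                       # standard error
print(CI_calc(mean, se))            # default CV=1.96 -> ~95% interval
print(CI_calc(mean, se, CV=2.58))   # wider ~99% interval
```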
def _object_to_json_dict(obj: Any) -> Union[bool, int, float, str, Dict[str, Any]]:
"""Convert an object to a dictionary suitable for the JSON output.
"""
if isinstance(obj, Enum):
# Properly serialize Enums (such as OpenSslVersionEnum)
result = obj.name
elif isinstance(obj, ObjectIdentifier):
# Use dotted string representation for OIDs
result = obj.dotted_string
elif isinstance(obj, x509._Certificate):
# Properly serialize certificates
certificate = obj
result = { # type: ignore
# Add general info
'as_pem': obj.public_bytes(Encoding.PEM).decode('ascii'),
'hpkp_pin': CertificateUtils.get_hpkp_pin(obj),
# Add some of the fields of the cert
'subject': CertificateUtils.get_name_as_text(certificate.subject),
'issuer': CertificateUtils.get_name_as_text(certificate.issuer),
'serialNumber': str(certificate.serial_number),
'notBefore': certificate.not_valid_before.strftime("%Y-%m-%d %H:%M:%S"),
'notAfter': certificate.not_valid_after.strftime("%Y-%m-%d %H:%M:%S"),
'signatureAlgorithm': certificate.signature_hash_algorithm.name,
'publicKey': {
'algorithm': CertificateUtils.get_public_key_type(certificate)
},
}
dns_alt_names = CertificateUtils.get_dns_subject_alternative_names(certificate)
if dns_alt_names:
result['subjectAlternativeName'] = {'DNS': dns_alt_names} # type: ignore
# Add some info about the public key
public_key = certificate.public_key()
if isinstance(public_key, EllipticCurvePublicKey):
result['publicKey']['size'] = str(public_key.curve.key_size) # type: ignore
result['publicKey']['curve'] = public_key.curve.name # type: ignore
else:
result['publicKey']['size'] = str(public_key.key_size)
result['publicKey']['exponent'] = str(public_key.public_numbers().e)
elif isinstance(obj, object):
# Some objects (like str) don't have a __dict__
if hasattr(obj, '__dict__'):
result = {}
for key, value in obj.__dict__.items():
# Remove private attributes
if key.startswith('_'):
continue
result[key] = _object_to_json_dict(value)
else:
# Simple object like a bool
result = obj
else:
raise TypeError('Unknown type: {}'.format(repr(obj)))
return result | def function[_object_to_json_dict, parameter[obj]]:
constant[Convert an object to a dictionary suitable for the JSON output.
]
if call[name[isinstance], parameter[name[obj], name[Enum]]] begin[:]
variable[result] assign[=] name[obj].name
return[name[result]] | keyword[def] identifier[_object_to_json_dict] ( identifier[obj] : identifier[Any] )-> identifier[Union] [ identifier[bool] , identifier[int] , identifier[float] , identifier[str] , identifier[Dict] [ identifier[str] , identifier[Any] ]]:
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Enum] ):
identifier[result] = identifier[obj] . identifier[name]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[ObjectIdentifier] ):
identifier[result] = identifier[obj] . identifier[dotted_string]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[x509] . identifier[_Certificate] ):
identifier[certificate] = identifier[obj]
identifier[result] ={
literal[string] : identifier[obj] . identifier[public_bytes] ( identifier[Encoding] . identifier[PEM] ). identifier[decode] ( literal[string] ),
literal[string] : identifier[CertificateUtils] . identifier[get_hpkp_pin] ( identifier[obj] ),
literal[string] : identifier[CertificateUtils] . identifier[get_name_as_text] ( identifier[certificate] . identifier[subject] ),
literal[string] : identifier[CertificateUtils] . identifier[get_name_as_text] ( identifier[certificate] . identifier[issuer] ),
literal[string] : identifier[str] ( identifier[certificate] . identifier[serial_number] ),
literal[string] : identifier[certificate] . identifier[not_valid_before] . identifier[strftime] ( literal[string] ),
literal[string] : identifier[certificate] . identifier[not_valid_after] . identifier[strftime] ( literal[string] ),
literal[string] : identifier[certificate] . identifier[signature_hash_algorithm] . identifier[name] ,
literal[string] :{
literal[string] : identifier[CertificateUtils] . identifier[get_public_key_type] ( identifier[certificate] )
},
}
identifier[dns_alt_names] = identifier[CertificateUtils] . identifier[get_dns_subject_alternative_names] ( identifier[certificate] )
keyword[if] identifier[dns_alt_names] :
identifier[result] [ literal[string] ]={ literal[string] : identifier[dns_alt_names] }
identifier[public_key] = identifier[certificate] . identifier[public_key] ()
keyword[if] identifier[isinstance] ( identifier[public_key] , identifier[EllipticCurvePublicKey] ):
identifier[result] [ literal[string] ][ literal[string] ]= identifier[str] ( identifier[public_key] . identifier[curve] . identifier[key_size] )
identifier[result] [ literal[string] ][ literal[string] ]= identifier[public_key] . identifier[curve] . identifier[name]
keyword[else] :
identifier[result] [ literal[string] ][ literal[string] ]= identifier[str] ( identifier[public_key] . identifier[key_size] )
identifier[result] [ literal[string] ][ literal[string] ]= identifier[str] ( identifier[public_key] . identifier[public_numbers] (). identifier[e] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[object] ):
keyword[if] identifier[hasattr] ( identifier[obj] , literal[string] ):
identifier[result] ={}
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[obj] . identifier[__dict__] . identifier[items] ():
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[result] [ identifier[key] ]= identifier[_object_to_json_dict] ( identifier[value] )
keyword[else] :
identifier[result] = identifier[obj]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[obj] )))
keyword[return] identifier[result] | def _object_to_json_dict(obj: Any) -> Union[bool, int, float, str, Dict[str, Any]]:
"""Convert an object to a dictionary suitable for the JSON output.
"""
if isinstance(obj, Enum):
# Properly serialize Enums (such as OpenSslVersionEnum)
result = obj.name # depends on [control=['if'], data=[]]
elif isinstance(obj, ObjectIdentifier):
# Use dotted string representation for OIDs
result = obj.dotted_string # depends on [control=['if'], data=[]]
elif isinstance(obj, x509._Certificate):
# Properly serialize certificates
certificate = obj # type: ignore
# Add general info
# Add some of the fields of the cert
result = {'as_pem': obj.public_bytes(Encoding.PEM).decode('ascii'), 'hpkp_pin': CertificateUtils.get_hpkp_pin(obj), 'subject': CertificateUtils.get_name_as_text(certificate.subject), 'issuer': CertificateUtils.get_name_as_text(certificate.issuer), 'serialNumber': str(certificate.serial_number), 'notBefore': certificate.not_valid_before.strftime('%Y-%m-%d %H:%M:%S'), 'notAfter': certificate.not_valid_after.strftime('%Y-%m-%d %H:%M:%S'), 'signatureAlgorithm': certificate.signature_hash_algorithm.name, 'publicKey': {'algorithm': CertificateUtils.get_public_key_type(certificate)}}
dns_alt_names = CertificateUtils.get_dns_subject_alternative_names(certificate)
if dns_alt_names:
result['subjectAlternativeName'] = {'DNS': dns_alt_names} # type: ignore # depends on [control=['if'], data=[]]
# Add some info about the public key
public_key = certificate.public_key()
if isinstance(public_key, EllipticCurvePublicKey):
result['publicKey']['size'] = str(public_key.curve.key_size) # type: ignore
result['publicKey']['curve'] = public_key.curve.name # type: ignore # depends on [control=['if'], data=[]]
else:
result['publicKey']['size'] = str(public_key.key_size)
result['publicKey']['exponent'] = str(public_key.public_numbers().e) # depends on [control=['if'], data=[]]
elif isinstance(obj, object):
# Some objects (like str) don't have a __dict__
if hasattr(obj, '__dict__'):
result = {}
for (key, value) in obj.__dict__.items():
# Remove private attributes
if key.startswith('_'):
continue # depends on [control=['if'], data=[]]
result[key] = _object_to_json_dict(value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# Simple object like a bool
result = obj # depends on [control=['if'], data=[]]
else:
raise TypeError('Unknown type: {}'.format(repr(obj)))
return result |
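For plain objects the walker recurses over `__dict__`, drops private attributes, and turns Enums into their names. A sketch with hypothetical classes; it assumes it runs in the module's namespace, since the isinstance chain references `ObjectIdentifier` and `x509`:

```python
from enum import Enum

class TlsVersion(Enum):  # hypothetical enum
    TLSV1_2 = 3

class ScanResult:  # hypothetical plain object
    def __init__(self):
        self.version = TlsVersion.TLSV1_2
        self.is_vulnerable = False
        self._raw = b'...'  # private attribute, will be dropped

print(_object_to_json_dict(ScanResult()))
# {'version': 'TLSV1_2', 'is_vulnerable': False}
```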
def _set_wmi_setting(wmi_class_name, setting, value, server):
'''
Set the value of the setting for the provided class.
'''
with salt.utils.winapi.Com():
try:
connection = wmi.WMI(namespace=_WMI_NAMESPACE)
wmi_class = getattr(connection, wmi_class_name)
objs = wmi_class(Name=server)[0]
except wmi.x_wmi as error:
_LOG.error('Encountered WMI error: %s', error.com_error)
except (AttributeError, IndexError) as error:
_LOG.error('Error getting %s: %s', wmi_class_name, error)
try:
setattr(objs, setting, value)
return True
except wmi.x_wmi as error:
_LOG.error('Encountered WMI error: %s', error.com_error)
except AttributeError as error:
_LOG.error('Error setting %s: %s', setting, error)
return False | def function[_set_wmi_setting, parameter[wmi_class_name, setting, value, server]]:
constant[
Set the value of the setting for the provided class.
]
with call[name[salt].utils.winapi.Com, parameter[]] begin[:]
<ast.Try object at 0x7da1b26af340>
<ast.Try object at 0x7da1b26afc10>
return[constant[False]] | keyword[def] identifier[_set_wmi_setting] ( identifier[wmi_class_name] , identifier[setting] , identifier[value] , identifier[server] ):
literal[string]
keyword[with] identifier[salt] . identifier[utils] . identifier[winapi] . identifier[Com] ():
keyword[try] :
identifier[connection] = identifier[wmi] . identifier[WMI] ( identifier[namespace] = identifier[_WMI_NAMESPACE] )
identifier[wmi_class] = identifier[getattr] ( identifier[connection] , identifier[wmi_class_name] )
identifier[objs] = identifier[wmi_class] ( identifier[Name] = identifier[server] )[ literal[int] ]
keyword[except] identifier[wmi] . identifier[x_wmi] keyword[as] identifier[error] :
identifier[_LOG] . identifier[error] ( literal[string] , identifier[error] . identifier[com_error] )
keyword[except] ( identifier[AttributeError] , identifier[IndexError] ) keyword[as] identifier[error] :
identifier[_LOG] . identifier[error] ( literal[string] , identifier[wmi_class_name] , identifier[error] )
keyword[try] :
identifier[setattr] ( identifier[objs] , identifier[setting] , identifier[value] )
keyword[return] keyword[True]
keyword[except] identifier[wmi] . identifier[x_wmi] keyword[as] identifier[error] :
identifier[_LOG] . identifier[error] ( literal[string] , identifier[error] . identifier[com_error] )
keyword[except] identifier[AttributeError] keyword[as] identifier[error] :
identifier[_LOG] . identifier[error] ( literal[string] , identifier[setting] , identifier[error] )
keyword[return] keyword[False] | def _set_wmi_setting(wmi_class_name, setting, value, server):
"""
Set the value of the setting for the provided class.
"""
with salt.utils.winapi.Com():
try:
connection = wmi.WMI(namespace=_WMI_NAMESPACE)
wmi_class = getattr(connection, wmi_class_name)
objs = wmi_class(Name=server)[0] # depends on [control=['try'], data=[]]
except wmi.x_wmi as error:
_LOG.error('Encountered WMI error: %s', error.com_error) # depends on [control=['except'], data=['error']]
except (AttributeError, IndexError) as error:
_LOG.error('Error getting %s: %s', wmi_class_name, error) # depends on [control=['except'], data=['error']]
try:
setattr(objs, setting, value)
return True # depends on [control=['try'], data=[]]
except wmi.x_wmi as error:
_LOG.error('Encountered WMI error: %s', error.com_error) # depends on [control=['except'], data=['error']]
except AttributeError as error:
_LOG.error('Error setting %s: %s', setting, error) # depends on [control=['except'], data=['error']] # depends on [control=['with'], data=[]]
return False |
def _runMainLoop(self, rootJob):
"""
Runs the main loop with the given job.
:param toil.job.Job rootJob: The root job for the workflow.
:rtype: Any
"""
logProcessContext(self.config)
with RealtimeLogger(self._batchSystem,
level=self.options.logLevel if self.options.realTimeLogging else None):
# FIXME: common should not import from leader
from toil.leader import Leader
return Leader(config=self.config,
batchSystem=self._batchSystem,
provisioner=self._provisioner,
jobStore=self._jobStore,
rootJob=rootJob,
jobCache=self._jobCache).run() | def function[_runMainLoop, parameter[self, rootJob]]:
constant[
Runs the main loop with the given job.
:param toil.job.Job rootJob: The root job for the workflow.
:rtype: Any
]
call[name[logProcessContext], parameter[name[self].config]]
with call[name[RealtimeLogger], parameter[name[self]._batchSystem]] begin[:]
from relative_module[toil.leader] import module[Leader]
return[call[call[name[Leader], parameter[]].run, parameter[]]] | keyword[def] identifier[_runMainLoop] ( identifier[self] , identifier[rootJob] ):
literal[string]
identifier[logProcessContext] ( identifier[self] . identifier[config] )
keyword[with] identifier[RealtimeLogger] ( identifier[self] . identifier[_batchSystem] ,
identifier[level] = identifier[self] . identifier[options] . identifier[logLevel] keyword[if] identifier[self] . identifier[options] . identifier[realTimeLogging] keyword[else] keyword[None] ):
keyword[from] identifier[toil] . identifier[leader] keyword[import] identifier[Leader]
keyword[return] identifier[Leader] ( identifier[config] = identifier[self] . identifier[config] ,
identifier[batchSystem] = identifier[self] . identifier[_batchSystem] ,
identifier[provisioner] = identifier[self] . identifier[_provisioner] ,
identifier[jobStore] = identifier[self] . identifier[_jobStore] ,
identifier[rootJob] = identifier[rootJob] ,
identifier[jobCache] = identifier[self] . identifier[_jobCache] ). identifier[run] () | def _runMainLoop(self, rootJob):
"""
Runs the main loop with the given job.
:param toil.job.Job rootJob: The root job for the workflow.
:rtype: Any
"""
logProcessContext(self.config)
with RealtimeLogger(self._batchSystem, level=self.options.logLevel if self.options.realTimeLogging else None):
# FIXME: common should not import from leader
from toil.leader import Leader
return Leader(config=self.config, batchSystem=self._batchSystem, provisioner=self._provisioner, jobStore=self._jobStore, rootJob=rootJob, jobCache=self._jobCache).run() # depends on [control=['with'], data=[]] |
def print_vcf(data):
"""Print vcf line following rules."""
id_name = "."
qual = "."
chrom = data['chrom']
pos = data['pre_pos']
nt_ref = data['nt'][1]
nt_snp = data['nt'][0]
flt = "PASS"
info = "ID=%s" % data['mature']
frmt = "GT:NR:NS"
gntp = "%s:%s:%s" % (_genotype(data), data["counts"], data["diff"])
print("\t".join(map(str, [chrom, pos, id_name, nt_ref, nt_snp, qual, flt, info, frmt, gntp])), file=STDOUT, end="") | def function[print_vcf, parameter[data]]:
constant[Print a VCF-formatted variant line to STDOUT.]
variable[id_name] assign[=] constant[.]
variable[qual] assign[=] constant[.]
variable[chrom] assign[=] call[name[data]][constant[chrom]]
variable[pos] assign[=] call[name[data]][constant[pre_pos]]
variable[nt_ref] assign[=] call[call[name[data]][constant[nt]]][constant[1]]
variable[nt_snp] assign[=] call[call[name[data]][constant[nt]]][constant[0]]
variable[flt] assign[=] constant[PASS]
variable[info] assign[=] binary_operation[constant[ID=%s] <ast.Mod object at 0x7da2590d6920> call[name[data]][constant[mature]]]
variable[frmt] assign[=] constant[GT:NR:NS]
variable[gntp] assign[=] binary_operation[constant[%s:%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da18ede71f0>, <ast.Subscript object at 0x7da18ede4970>, <ast.Subscript object at 0x7da18ede6e90>]]]
call[name[print], parameter[call[constant[ ].join, parameter[call[name[map], parameter[name[str], list[[<ast.Name object at 0x7da18ede4670>, <ast.Name object at 0x7da18ede7eb0>, <ast.Name object at 0x7da18ede51e0>, <ast.Name object at 0x7da18ede4c70>, <ast.Name object at 0x7da18ede5840>, <ast.Name object at 0x7da18ede6c80>, <ast.Name object at 0x7da18ede7370>, <ast.Name object at 0x7da18ede51b0>, <ast.Name object at 0x7da18ede5510>, <ast.Name object at 0x7da18ede53f0>]]]]]]]] | keyword[def] identifier[print_vcf] ( identifier[data] ):
literal[string]
identifier[id_name] = literal[string]
identifier[qual] = literal[string]
identifier[chrom] = identifier[data] [ literal[string] ]
identifier[pos] = identifier[data] [ literal[string] ]
identifier[nt_ref] = identifier[data] [ literal[string] ][ literal[int] ]
identifier[nt_snp] = identifier[data] [ literal[string] ][ literal[int] ]
identifier[flt] = literal[string]
identifier[info] = literal[string] % identifier[data] [ literal[string] ]
identifier[frmt] = literal[string]
identifier[gntp] = literal[string] %( identifier[_genotype] ( identifier[data] ), identifier[data] [ literal[string] ], identifier[data] [ literal[string] ])
identifier[print] ( literal[string] . identifier[join] ( identifier[map] ( identifier[str] ,[ identifier[chrom] , identifier[pos] , identifier[id_name] , identifier[nt_ref] , identifier[nt_snp] , identifier[qual] , identifier[flt] , identifier[info] , identifier[frmt] , identifier[gntp] ])), identifier[file] = identifier[STDOUT] , identifier[end] = literal[string] ) | def print_vcf(data):
"""Print vcf line following rules."""
id_name = '.'
qual = '.'
chrom = data['chrom']
pos = data['pre_pos']
nt_ref = data['nt'][1]
nt_snp = data['nt'][0]
flt = 'PASS'
info = 'ID=%s' % data['mature']
frmt = 'GT:NR:NS'
gntp = '%s:%s:%s' % (_genotype(data), data['counts'], data['diff'])
print('\t'.join(map(str, [chrom, pos, id_name, nt_ref, nt_snp, qual, flt, info, frmt, gntp])), file=STDOUT, end='') |
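A runnable sketch with stand-ins for the module globals `STDOUT` and `_genotype` (both hypothetical here), showing the tab-separated record that comes out; note `data['nt']` is ordered (alt, ref):

```python
import sys

STDOUT = sys.stdout          # stand-in for the module-level handle
def _genotype(data):         # hypothetical stub
    return '1/1'

data = {'chrom': 'chr1', 'pre_pos': 154, 'nt': ['T', 'A'],
        'mature': 'hsa-miR-21-5p', 'counts': 18, 'diff': 2}
print_vcf(data)
# chr1  154  .  A  T  .  PASS  ID=hsa-miR-21-5p  GT:NR:NS  1/1:18:2   (tab-separated)
```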
def get_artist_hotttnesss(self, cache=True):
"""Get our numerical description of how hottt a song's artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song('SOOLGAZ127F3E1B87C')
>>> s.artist_hotttnesss
0.45645633000000002
>>> s.get_artist_hotttnesss()
0.45645633000000002
>>>
"""
if not (cache and ('artist_hotttnesss' in self.cache)):
response = self.get_attribute('profile', bucket='artist_hotttnesss')
self.cache['artist_hotttnesss'] = response['songs'][0]['artist_hotttnesss']
return self.cache['artist_hotttnesss'] | def function[get_artist_hotttnesss, parameter[self, cache]]:
constant[Get our numerical description of how hottt a song's artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song('SOOLGAZ127F3E1B87C')
>>> s.artist_hotttnesss
0.45645633000000002
>>> s.get_artist_hotttnesss()
0.45645633000000002
>>>
]
if <ast.UnaryOp object at 0x7da1b040ea10> begin[:]
variable[response] assign[=] call[name[self].get_attribute, parameter[constant[profile]]]
call[name[self].cache][constant[artist_hotttnesss]] assign[=] call[call[call[name[response]][constant[songs]]][constant[0]]][constant[artist_hotttnesss]]
return[call[name[self].cache][constant[artist_hotttnesss]]] | keyword[def] identifier[get_artist_hotttnesss] ( identifier[self] , identifier[cache] = keyword[True] ):
literal[string]
keyword[if] keyword[not] ( identifier[cache] keyword[and] ( literal[string] keyword[in] identifier[self] . identifier[cache] )):
identifier[response] = identifier[self] . identifier[get_attribute] ( literal[string] , identifier[bucket] = literal[string] )
identifier[self] . identifier[cache] [ literal[string] ]= identifier[response] [ literal[string] ][ literal[int] ][ literal[string] ]
keyword[return] identifier[self] . identifier[cache] [ literal[string] ] | def get_artist_hotttnesss(self, cache=True):
"""Get our numerical description of how hottt a song's artist currently is
Args:
Kwargs:
cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True.
Returns:
A float representing hotttnesss.
Example:
>>> s = song.Song('SOOLGAZ127F3E1B87C')
>>> s.artist_hotttnesss
0.45645633000000002
>>> s.get_artist_hotttnesss()
0.45645633000000002
>>>
"""
if not (cache and 'artist_hotttnesss' in self.cache):
response = self.get_attribute('profile', bucket='artist_hotttnesss')
self.cache['artist_hotttnesss'] = response['songs'][0]['artist_hotttnesss'] # depends on [control=['if'], data=[]]
return self.cache['artist_hotttnesss'] |
def decode_data_items(self, concatenated_str):
"""Decodes a concatenated string into a list of integers and strings.
Example:
``decode_data_items('abc|~B7|xyz')`` returns ``['abc', 123, 'xyz']``
"""
data_items = []
str_list = concatenated_str.split(self.SEPARATOR)
for str in str_list:
# '~base-64-strings' are decoded into integers.
if len(str)>=1 and str[0]==self.INTEGER_PREFIX:
item = self.decode_int(str[1:])
# Strings are decoded as-is.
else:
item = str
data_items.append(item)
# Return list of data items
return data_items | def function[decode_data_items, parameter[self, concatenated_str]]:
constant[Decodes a concatenated string into a list of integers and strings.
Example:
``decode_data_items('abc|~B7|xyz')`` returns ``['abc', 123, 'xyz']``
]
variable[data_items] assign[=] list[[]]
variable[str_list] assign[=] call[name[concatenated_str].split, parameter[name[self].SEPARATOR]]
for taget[name[str]] in starred[name[str_list]] begin[:]
if <ast.BoolOp object at 0x7da1b1d9a9e0> begin[:]
variable[item] assign[=] call[name[self].decode_int, parameter[call[name[str]][<ast.Slice object at 0x7da1b1d997e0>]]]
call[name[data_items].append, parameter[name[item]]]
return[name[data_items]] | keyword[def] identifier[decode_data_items] ( identifier[self] , identifier[concatenated_str] ):
literal[string]
identifier[data_items] =[]
identifier[str_list] = identifier[concatenated_str] . identifier[split] ( identifier[self] . identifier[SEPARATOR] )
keyword[for] identifier[str] keyword[in] identifier[str_list] :
keyword[if] identifier[len] ( identifier[str] )>= literal[int] keyword[and] identifier[str] [ literal[int] ]== identifier[self] . identifier[INTEGER_PREFIX] :
identifier[item] = identifier[self] . identifier[decode_int] ( identifier[str] [ literal[int] :])
keyword[else] :
identifier[item] = identifier[str]
identifier[data_items] . identifier[append] ( identifier[item] )
keyword[return] identifier[data_items] | def decode_data_items(self, concatenated_str):
"""Decodes a concatenated string into a list of integers and strings.
Example:
``decode_data_items('abc|~B7|xyz')`` returns ``['abc', 123, 'xyz']``
"""
data_items = []
str_list = concatenated_str.split(self.SEPARATOR)
for str in str_list:
# '~base-64-strings' are decoded into integers.
if len(str) >= 1 and str[0] == self.INTEGER_PREFIX:
item = self.decode_int(str[1:]) # depends on [control=['if'], data=[]]
else:
# Strings are decoded as-is.
item = str
data_items.append(item) # depends on [control=['for'], data=['str']]
# Return list of data items
return data_items |
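To reproduce the docstring example end to end, a host class must supply `SEPARATOR`, `INTEGER_PREFIX`, and `decode_int`; the base64-style digit alphabet below is an assumption that happens to satisfy `'~B7' -> 123` (1*64 + 59). The function is called with an explicit `self`:

```python
class Codec:  # hypothetical host for the method above
    SEPARATOR = '|'
    INTEGER_PREFIX = '~'
    _ALPHABET = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz0123456789+/')

    def decode_int(self, encoded):
        # Positional base-64 decode: 'B7' -> 1 * 64 + 59 = 123.
        value = 0
        for ch in encoded:
            value = value * 64 + self._ALPHABET.index(ch)
        return value

print(decode_data_items(Codec(), 'abc|~B7|xyz'))  # ['abc', 123, 'xyz']
```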
def get_user_role_model():
"""
Returns the UserRole model that is active in this project.
"""
app_model = getattr(settings, "ARCTIC_USER_ROLE_MODEL", "arctic.UserRole")
try:
return django_apps.get_model(app_model)
except ValueError:
raise ImproperlyConfigured(
"ARCTIC_USER_ROLE_MODEL must be of the "
"form 'app_label.model_name'"
)
except LookupError:
raise ImproperlyConfigured(
"ARCTIC_USER_ROLE_MODEL refers to model '%s' that has not been "
"installed" % settings.ARCTIC_USER_ROLE_MODEL
) | def function[get_user_role_model, parameter[]]:
constant[
Returns the UserRole model that is active in this project.
]
variable[app_model] assign[=] call[name[getattr], parameter[name[settings], constant[ARCTIC_USER_ROLE_MODEL], constant[arctic.UserRole]]]
<ast.Try object at 0x7da1b04ecd30> | keyword[def] identifier[get_user_role_model] ():
literal[string]
identifier[app_model] = identifier[getattr] ( identifier[settings] , literal[string] , literal[string] )
keyword[try] :
keyword[return] identifier[django_apps] . identifier[get_model] ( identifier[app_model] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ImproperlyConfigured] (
literal[string]
literal[string]
)
keyword[except] identifier[LookupError] :
keyword[raise] identifier[ImproperlyConfigured] (
literal[string]
literal[string] % identifier[settings] . identifier[ARCTIC_USER_ROLE_MODEL]
) | def get_user_role_model():
"""
Returns the UserRole model that is active in this project.
"""
app_model = getattr(settings, 'ARCTIC_USER_ROLE_MODEL', 'arctic.UserRole')
try:
return django_apps.get_model(app_model) # depends on [control=['try'], data=[]]
except ValueError:
raise ImproperlyConfigured("ARCTIC_USER_ROLE_MODEL must be of the form 'app_label.model_name'") # depends on [control=['except'], data=[]]
except LookupError:
raise ImproperlyConfigured("ARCTIC_USER_ROLE_MODEL refers to model '%s' that has not been installed" % settings.ARCTIC_USER_ROLE_MODEL) # depends on [control=['except'], data=[]] |
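The lookup follows Django's swappable-model convention: point the setting at `'app_label.ModelName'` and call the helper wherever the role model is needed. A sketch (the app label is hypothetical):

```python
# settings.py -- point Arctic at a custom role model (hypothetical app).
ARCTIC_USER_ROLE_MODEL = 'accounts.UserRole'

# elsewhere, after django.setup()
UserRole = get_user_role_model()
roles = UserRole.objects.filter(user__is_active=True)
```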
def boxify(message, border_color=None):
"""Put a message inside a box.
Args:
message (unicode): message to decorate.
border_color (unicode): name of the color to outline the box with.
"""
lines = message.split("\n")
max_width = max(_visual_width(line) for line in lines)
padding_horizontal = 5
padding_vertical = 1
box_size_horizontal = max_width + (padding_horizontal * 2)
chars = {"corner": "+", "horizontal": "-", "vertical": "|", "empty": " "}
margin = "{corner}{line}{corner}\n".format(
corner=chars["corner"], line=chars["horizontal"] * box_size_horizontal
)
padding_lines = [
"{border}{space}{border}\n".format(
border=colorize(chars["vertical"], color=border_color),
space=chars["empty"] * box_size_horizontal,
)
* padding_vertical
]
content_lines = [
"{border}{space}{content}{space}{border}\n".format(
border=colorize(chars["vertical"], color=border_color),
space=chars["empty"] * padding_horizontal,
content=_visual_center(line, max_width),
)
for line in lines
]
box_str = "{margin}{padding}{content}{padding}{margin}".format(
margin=colorize(margin, color=border_color),
padding="".join(padding_lines),
content="".join(content_lines),
)
return box_str | def function[boxify, parameter[message, border_color]]:
constant[Put a message inside a box.
Args:
message (unicode): message to decorate.
border_color (unicode): name of the color to outline the box with.
]
variable[lines] assign[=] call[name[message].split, parameter[constant[
]]]
variable[max_width] assign[=] call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b1f60610>]]
variable[padding_horizontal] assign[=] constant[5]
variable[padding_vertical] assign[=] constant[1]
variable[box_size_horizontal] assign[=] binary_operation[name[max_width] + binary_operation[name[padding_horizontal] * constant[2]]]
variable[chars] assign[=] dictionary[[<ast.Constant object at 0x7da1b1f627a0>, <ast.Constant object at 0x7da1b1f63970>, <ast.Constant object at 0x7da1b1f61810>, <ast.Constant object at 0x7da1b1f60eb0>], [<ast.Constant object at 0x7da1b1f60100>, <ast.Constant object at 0x7da1b1f630d0>, <ast.Constant object at 0x7da1b1f60bb0>, <ast.Constant object at 0x7da1b1f60640>]]
variable[margin] assign[=] call[constant[{corner}{line}{corner}
].format, parameter[]]
variable[padding_lines] assign[=] list[[<ast.BinOp object at 0x7da1b1f63730>]]
variable[content_lines] assign[=] <ast.ListComp object at 0x7da1b1f626b0>
variable[box_str] assign[=] call[constant[{margin}{padding}{content}{padding}{margin}].format, parameter[]]
return[name[box_str]] | keyword[def] identifier[boxify] ( identifier[message] , identifier[border_color] = keyword[None] ):
literal[string]
identifier[lines] = identifier[message] . identifier[split] ( literal[string] )
identifier[max_width] = identifier[max] ( identifier[_visual_width] ( identifier[line] ) keyword[for] identifier[line] keyword[in] identifier[lines] )
identifier[padding_horizontal] = literal[int]
identifier[padding_vertical] = literal[int]
identifier[box_size_horizontal] = identifier[max_width] +( identifier[padding_horizontal] * literal[int] )
identifier[chars] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] }
identifier[margin] = literal[string] . identifier[format] (
identifier[corner] = identifier[chars] [ literal[string] ], identifier[line] = identifier[chars] [ literal[string] ]* identifier[box_size_horizontal]
)
identifier[padding_lines] =[
literal[string] . identifier[format] (
identifier[border] = identifier[colorize] ( identifier[chars] [ literal[string] ], identifier[color] = identifier[border_color] ),
identifier[space] = identifier[chars] [ literal[string] ]* identifier[box_size_horizontal] ,
)
* identifier[padding_vertical]
]
identifier[content_lines] =[
literal[string] . identifier[format] (
identifier[border] = identifier[colorize] ( identifier[chars] [ literal[string] ], identifier[color] = identifier[border_color] ),
identifier[space] = identifier[chars] [ literal[string] ]* identifier[padding_horizontal] ,
identifier[content] = identifier[_visual_center] ( identifier[line] , identifier[max_width] ),
)
keyword[for] identifier[line] keyword[in] identifier[lines]
]
identifier[box_str] = literal[string] . identifier[format] (
identifier[margin] = identifier[colorize] ( identifier[margin] , identifier[color] = identifier[border_color] ),
identifier[padding] = literal[string] . identifier[join] ( identifier[padding_lines] ),
identifier[content] = literal[string] . identifier[join] ( identifier[content_lines] ),
)
keyword[return] identifier[box_str] | def boxify(message, border_color=None):
"""Put a message inside a box.
Args:
message (unicode): message to decorate.
border_color (unicode): name of the color to outline the box with.
"""
lines = message.split('\n')
max_width = max((_visual_width(line) for line in lines))
padding_horizontal = 5
padding_vertical = 1
box_size_horizontal = max_width + padding_horizontal * 2
chars = {'corner': '+', 'horizontal': '-', 'vertical': '|', 'empty': ' '}
margin = '{corner}{line}{corner}\n'.format(corner=chars['corner'], line=chars['horizontal'] * box_size_horizontal)
padding_lines = ['{border}{space}{border}\n'.format(border=colorize(chars['vertical'], color=border_color), space=chars['empty'] * box_size_horizontal) * padding_vertical]
content_lines = ['{border}{space}{content}{space}{border}\n'.format(border=colorize(chars['vertical'], color=border_color), space=chars['empty'] * padding_horizontal, content=_visual_center(line, max_width)) for line in lines]
box_str = '{margin}{padding}{content}{padding}{margin}'.format(margin=colorize(margin, color=border_color), padding=''.join(padding_lines), content=''.join(content_lines))
return box_str |
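With plain-ASCII stand-ins for the color and width helpers (ignoring ANSI codes and wide characters), the layout looks like this:

```python
def colorize(text, color=None):   # stand-in: no ANSI colors
    return text

def _visual_width(line):          # stand-in: assume one cell per char
    return len(line)

def _visual_center(line, width):
    return line.center(width)

print(boxify('DVC is up to date'))
# +---------------------------+
# |                           |
# |     DVC is up to date     |
# |                           |
# +---------------------------+
```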
def _validate_jp2_colr(self, boxes):
"""
Validate JP2 requirements on colour specification boxes.
"""
lst = [box for box in boxes if box.box_id == 'jp2h']
jp2h = lst[0]
for colr in [box for box in jp2h.box if box.box_id == 'colr']:
if colr.approximation != 0:
msg = ("A JP2 colr box cannot have a non-zero approximation "
"field.")
raise IOError(msg) | def function[_validate_jp2_colr, parameter[self, boxes]]:
constant[
Validate JP2 requirements on colour specification boxes.
]
variable[lst] assign[=] <ast.ListComp object at 0x7da1b26add80>
variable[jp2h] assign[=] call[name[lst]][constant[0]]
for taget[name[colr]] in starred[<ast.ListComp object at 0x7da1b26ac220>] begin[:]
if compare[name[colr].approximation not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] constant[A JP2 colr box cannot have a non-zero approximation field.]
<ast.Raise object at 0x7da204621b40> | keyword[def] identifier[_validate_jp2_colr] ( identifier[self] , identifier[boxes] ):
literal[string]
identifier[lst] =[ identifier[box] keyword[for] identifier[box] keyword[in] identifier[boxes] keyword[if] identifier[box] . identifier[box_id] == literal[string] ]
identifier[jp2h] = identifier[lst] [ literal[int] ]
keyword[for] identifier[colr] keyword[in] [ identifier[box] keyword[for] identifier[box] keyword[in] identifier[jp2h] . identifier[box] keyword[if] identifier[box] . identifier[box_id] == literal[string] ]:
keyword[if] identifier[colr] . identifier[approximation] != literal[int] :
identifier[msg] =( literal[string]
literal[string] )
keyword[raise] identifier[IOError] ( identifier[msg] ) | def _validate_jp2_colr(self, boxes):
"""
Validate JP2 requirements on colour specification boxes.
"""
lst = [box for box in boxes if box.box_id == 'jp2h']
jp2h = lst[0]
for colr in [box for box in jp2h.box if box.box_id == 'colr']:
if colr.approximation != 0:
msg = 'A JP2 colr box cannot have a non-zero approximation field.'
raise IOError(msg) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['colr']] |
def exists(self, storagemodel: object, modeldefinition=None) -> bool:
""" delete the blob from storage """
exists = False
blobservice = modeldefinition['blobservice']
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
blobs = self.list(storagemodel, modeldefinition, where=storagemodel.name)
if len(blobs) == 1:
storagemodel.__mergeblob__(blobs[0])
exists = True
except Exception as e:
msg = 'cannot retrieve blob {} from storage because {!s}'.format(blob_name, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return exists | def function[exists, parameter[self, storagemodel, modeldefinition]]:
constant[ check whether the blob exists in storage ]
variable[exists] assign[=] constant[False]
variable[blobservice] assign[=] call[name[modeldefinition]][constant[blobservice]]
variable[container_name] assign[=] call[name[modeldefinition]][constant[container]]
variable[blob_name] assign[=] name[storagemodel].name
<ast.Try object at 0x7da1b0a608e0>
return[name[exists]] | keyword[def] identifier[exists] ( identifier[self] , identifier[storagemodel] : identifier[object] , identifier[modeldefinition] = keyword[None] )-> identifier[bool] :
literal[string]
identifier[exists] = keyword[False]
identifier[blobservice] = identifier[modeldefinition] [ literal[string] ]
identifier[container_name] = identifier[modeldefinition] [ literal[string] ]
identifier[blob_name] = identifier[storagemodel] . identifier[name]
keyword[try] :
identifier[blobs] = identifier[self] . identifier[list] ( identifier[storagemodel] , identifier[modeldefinition] , identifier[where] = identifier[storagemodel] . identifier[name] )
keyword[if] identifier[len] ( identifier[blobs] )== literal[int] :
identifier[storagemodel] . identifier[__mergeblob__] ( identifier[blobs] [ literal[int] ])
identifier[exists] = keyword[True]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[msg] = literal[string] . identifier[format] ( identifier[blob_name] , identifier[e] )
keyword[raise] identifier[AzureStorageWrapException] ( identifier[storagemodel] , identifier[msg] = identifier[msg] )
keyword[return] identifier[exists] | def exists(self, storagemodel: object, modeldefinition=None) -> bool:
""" delete the blob from storage """
exists = False
blobservice = modeldefinition['blobservice']
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
blobs = self.list(storagemodel, modeldefinition, where=storagemodel.name)
if len(blobs) == 1:
storagemodel.__mergeblob__(blobs[0])
exists = True # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
msg = 'cannot retrieve blob {} from storage because {!s}'.format(blob_name, e)
raise AzureStorageWrapException(storagemodel, msg=msg) # depends on [control=['except'], data=['e']]
return exists |
def get(self, name, mask=None):
"""
Issue a GET command
Return a Deferred which fires with the size of the name being requested
"""
mypeer = self.transport.getQ2QPeer()
tl = self.nexus.transloads[name]
peerz = tl.peers
if mypeer in peerz:
peerk = peerz[mypeer]
else:
# all turned on initially; we aren't going to send them anything.
peerk = PeerKnowledge(bits.BitArray(size=len(tl.mask), default=1))
peerz[mypeer] = peerk
peerk.sentGet = True
return self.callRemote(
Get, name=name, mask=mask).addCallback(lambda r: r['size']) | def function[get, parameter[self, name, mask]]:
constant[
Issue a GET command
Return a Deferred which fires with the size of the name being requested
]
variable[mypeer] assign[=] call[name[self].transport.getQ2QPeer, parameter[]]
variable[tl] assign[=] call[name[self].nexus.transloads][name[name]]
variable[peerz] assign[=] name[tl].peers
if compare[name[mypeer] in name[peerz]] begin[:]
variable[peerk] assign[=] call[name[peerz]][name[mypeer]]
name[peerk].sentGet assign[=] constant[True]
return[call[call[name[self].callRemote, parameter[name[Get]]].addCallback, parameter[<ast.Lambda object at 0x7da18fe90a30>]]] | keyword[def] identifier[get] ( identifier[self] , identifier[name] , identifier[mask] = keyword[None] ):
literal[string]
identifier[mypeer] = identifier[self] . identifier[transport] . identifier[getQ2QPeer] ()
identifier[tl] = identifier[self] . identifier[nexus] . identifier[transloads] [ identifier[name] ]
identifier[peerz] = identifier[tl] . identifier[peers]
keyword[if] identifier[mypeer] keyword[in] identifier[peerz] :
identifier[peerk] = identifier[peerz] [ identifier[mypeer] ]
keyword[else] :
identifier[peerk] = identifier[PeerKnowledge] ( identifier[bits] . identifier[BitArray] ( identifier[size] = identifier[len] ( identifier[tl] . identifier[mask] ), identifier[default] = literal[int] ))
identifier[peerz] [ identifier[mypeer] ]= identifier[peerk]
identifier[peerk] . identifier[sentGet] = keyword[True]
keyword[return] identifier[self] . identifier[callRemote] (
identifier[Get] , identifier[name] = identifier[name] , identifier[mask] = identifier[mask] ). identifier[addCallback] ( keyword[lambda] identifier[r] : identifier[r] [ literal[string] ]) | def get(self, name, mask=None):
"""
Issue a GET command
Return a Deferred which fires with the size of the name being requested
"""
mypeer = self.transport.getQ2QPeer()
tl = self.nexus.transloads[name]
peerz = tl.peers
if mypeer in peerz:
peerk = peerz[mypeer] # depends on [control=['if'], data=['mypeer', 'peerz']]
else:
# all turned on initially; we aren't going to send them anything.
peerk = PeerKnowledge(bits.BitArray(size=len(tl.mask), default=1))
peerz[mypeer] = peerk
peerk.sentGet = True
return self.callRemote(Get, name=name, mask=mask).addCallback(lambda r: r['size']) |
def walk_rows(self, mapping=identity):
"""Iterate over rows.
:return: an iterator over :class:`rows <RowsInGrid>`
:param mapping: function to map the result, see
:meth:`walk_instructions` for an example usage
"""
row_in_grid = self._walk.row_in_grid
return map(lambda row: mapping(row_in_grid(row)), self._rows) | def function[walk_rows, parameter[self, mapping]]:
constant[Iterate over rows.
:return: an iterator over :class:`rows <RowsInGrid>`
:param mapping: function to map the result, see
:meth:`walk_instructions` for an example usage
]
variable[row_in_grid] assign[=] name[self]._walk.row_in_grid
return[call[name[map], parameter[<ast.Lambda object at 0x7da1affc10c0>, name[self]._rows]]] | keyword[def] identifier[walk_rows] ( identifier[self] , identifier[mapping] = identifier[identity] ):
literal[string]
identifier[row_in_grid] = identifier[self] . identifier[_walk] . identifier[row_in_grid]
keyword[return] identifier[map] ( keyword[lambda] identifier[row] : identifier[mapping] ( identifier[row_in_grid] ( identifier[row] )), identifier[self] . identifier[_rows] ) | def walk_rows(self, mapping=identity):
"""Iterate over rows.
:return: an iterator over :class:`rows <RowsInGrid>`
:param mapping: function to map the result, see
:meth:`walk_instructions` for an example usage
"""
row_in_grid = self._walk.row_in_grid
return map(lambda row: mapping(row_in_grid(row)), self._rows) |
def create_y_axis(self, name, label=None, format=None, custom_format=False):
"""
Create Y-axis
"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# Add new axis to list of axis
self.axislist[name] = axis | def function[create_y_axis, parameter[self, name, label, format, custom_format]]:
constant[
Create Y-axis
]
variable[axis] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b03fad70> begin[:]
call[name[axis]][constant[tickFormat]] assign[=] name[format]
if name[label] begin[:]
call[name[axis]][constant[axisLabel]] assign[=] binary_operation[binary_operation[constant['] + name[label]] + constant[']]
call[name[self].axislist][name[name]] assign[=] name[axis] | keyword[def] identifier[create_y_axis] ( identifier[self] , identifier[name] , identifier[label] = keyword[None] , identifier[format] = keyword[None] , identifier[custom_format] = keyword[False] ):
literal[string]
identifier[axis] ={}
keyword[if] identifier[custom_format] keyword[and] identifier[format] :
identifier[axis] [ literal[string] ]= identifier[format]
keyword[elif] identifier[format] :
identifier[axis] [ literal[string] ]= literal[string] % identifier[format]
keyword[if] identifier[label] :
identifier[axis] [ literal[string] ]= literal[string] + identifier[label] + literal[string]
identifier[self] . identifier[axislist] [ identifier[name] ]= identifier[axis] | def create_y_axis(self, name, label=None, format=None, custom_format=False):
"""
Create Y-axis
"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format # depends on [control=['if'], data=[]]
elif format:
axis['tickFormat'] = "d3.format(',%s')" % format # depends on [control=['if'], data=[]]
if label:
axis['axisLabel'] = "'" + label + "'" # depends on [control=['if'], data=[]]
# Add new axis to list of axis
self.axislist[name] = axis |
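A usage sketch: the method only needs a host object exposing an axislist dict. Chart here is a hypothetical minimal host, not the real nvd3 class:
class Chart:
    def __init__(self):
        self.axislist = {}
    def create_y_axis(self, name, label=None, format=None, custom_format=False):
        axis = {}
        if custom_format and format:
            axis['tickFormat'] = format
        elif format:
            axis['tickFormat'] = "d3.format(',%s')" % format
        if label:
            axis['axisLabel'] = "'" + label + "'"
        self.axislist[name] = axis

c = Chart()
c.create_y_axis('y', label='Count', format='.2f')
print(c.axislist['y'])  # {'tickFormat': "d3.format(',.2f')", 'axisLabel': "'Count'"}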
def hpai_body(self):
""" Create a body with HPAI information.
This is used for disconnect and connection state requests.
"""
body = []
# ============ IP Body ==========
body.extend([self.channel]) # Communication Channel Id
body.extend([0x00]) # Reserved
# =========== Client HPAI ===========
body.extend([0x08]) # HPAI Length
body.extend([0x01]) # Host Protocol
# Tunnel Client Socket IP
body.extend(ip_to_array(self.control_socket.getsockname()[0]))
# Tunnel Client Socket Port
body.extend(int_to_array(self.control_socket.getsockname()[1]))
return body | def function[hpai_body, parameter[self]]:
constant[ Create a body with HPAI information.
This is used for disconnect and connection state requests.
]
variable[body] assign[=] list[[]]
call[name[body].extend, parameter[list[[<ast.Attribute object at 0x7da18f812e60>]]]]
call[name[body].extend, parameter[list[[<ast.Constant object at 0x7da18f8103d0>]]]]
call[name[body].extend, parameter[list[[<ast.Constant object at 0x7da18f8112a0>]]]]
call[name[body].extend, parameter[list[[<ast.Constant object at 0x7da18f8107f0>]]]]
call[name[body].extend, parameter[call[name[ip_to_array], parameter[call[call[name[self].control_socket.getsockname, parameter[]]][constant[0]]]]]]
call[name[body].extend, parameter[call[name[int_to_array], parameter[call[call[name[self].control_socket.getsockname, parameter[]]][constant[1]]]]]]
return[name[body]] | keyword[def] identifier[hpai_body] ( identifier[self] ):
literal[string]
identifier[body] =[]
identifier[body] . identifier[extend] ([ identifier[self] . identifier[channel] ])
identifier[body] . identifier[extend] ([ literal[int] ])
identifier[body] . identifier[extend] ([ literal[int] ])
identifier[body] . identifier[extend] ([ literal[int] ])
identifier[body] . identifier[extend] ( identifier[ip_to_array] ( identifier[self] . identifier[control_socket] . identifier[getsockname] ()[ literal[int] ]))
identifier[body] . identifier[extend] ( identifier[int_to_array] ( identifier[self] . identifier[control_socket] . identifier[getsockname] ()[ literal[int] ]))
keyword[return] identifier[body] | def hpai_body(self):
""" Create a body with HPAI information.
This is used for disconnect and connection state requests.
"""
body = []
# ============ IP Body ==========
body.extend([self.channel]) # Communication Channel Id
body.extend([0]) # Reserved
# =========== Client HPAI ===========
body.extend([8]) # HPAI Length
body.extend([1]) # Host Protocol
# Tunnel Client Socket IP
body.extend(ip_to_array(self.control_socket.getsockname()[0]))
# Tunnel Client Socket Port
body.extend(int_to_array(self.control_socket.getsockname()[1]))
return body |
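The ip_to_array and int_to_array helpers are not shown in this row; the sketch below is an assumption, consistent with the big-endian byte framing the body expects:
def ip_to_array(ip):
    # '192.168.0.1' -> [192, 168, 0, 1]
    return [int(part) for part in ip.split('.')]

def int_to_array(value):
    # 16-bit value -> two big-endian bytes, e.g. the KNX port 3671 -> [14, 87]
    return [(value >> 8) & 0xFF, value & 0xFF]

print(ip_to_array('192.168.0.1'))  # [192, 168, 0, 1]
print(int_to_array(3671))          # [14, 87]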
def stop(self):
"""Stop watching the socket."""
if self.closed:
raise ConnectionClosed()
if self.read_watcher.active:
self.read_watcher.stop()
if self.write_watcher.active:
self.write_watcher.stop() | def function[stop, parameter[self]]:
constant[Stop watching the socket.]
if name[self].closed begin[:]
<ast.Raise object at 0x7da207f9bdf0>
if name[self].read_watcher.active begin[:]
call[name[self].read_watcher.stop, parameter[]]
if name[self].write_watcher.active begin[:]
call[name[self].write_watcher.stop, parameter[]] | keyword[def] identifier[stop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[closed] :
keyword[raise] identifier[ConnectionClosed] ()
keyword[if] identifier[self] . identifier[read_watcher] . identifier[active] :
identifier[self] . identifier[read_watcher] . identifier[stop] ()
keyword[if] identifier[self] . identifier[write_watcher] . identifier[active] :
identifier[self] . identifier[write_watcher] . identifier[stop] () | def stop(self):
"""Stop watching the socket."""
if self.closed:
raise ConnectionClosed() # depends on [control=['if'], data=[]]
if self.read_watcher.active:
self.read_watcher.stop() # depends on [control=['if'], data=[]]
if self.write_watcher.active:
self.write_watcher.stop() # depends on [control=['if'], data=[]] |
def MK(T, Tc, omega):
r'''Calculates enthalpy of vaporization at arbitrary temperatures using
the work of [1]_; requires a chemical's critical temperature and
acentric factor.
The enthalpy of vaporization is given by:
.. math::
\Delta H_{vap} = \Delta H_{vap}^{(0)} + \omega \Delta H_{vap}^{(1)} + \omega^2 \Delta H_{vap}^{(2)}
\frac{\Delta H_{vap}^{(i)}}{RT_c} = b_1^{(j)} \tau^{1/3} + b_2^{(j)} \tau^{5/6}
+ b_3^{(j)} \tau^{1.2083} + b_4^{(j)}\tau + b_5^{(j)} \tau^2 + b_6^{(j)} \tau^3
\tau = 1-T/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor [-]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
The original article has been reviewed. A total of 18 coefficients are used.
WARNING: The correlation has been implemented as described in the article,
but its results seem different and carry some error.
Its results do, however, match those of other functions.
Has poor behavior for low-temperature use.
Examples
--------
Problem in article for SMK function.
>>> MK(553.15, 751.35, 0.302)
38727.993546377205
References
----------
.. [1] Morgan, David L., and Riki Kobayashi. "Extension of Pitzer CSP
Models for Vapor Pressures and Heats of Vaporization to Long-Chain
Hydrocarbons." Fluid Phase Equilibria 94 (March 15, 1994): 51-87.
doi:10.1016/0378-3812(94)87051-9.
'''
bs = [[5.2804, 0.080022, 7.2543],
[12.8650, 273.23, -346.45],
[1.1710, 465.08, -610.48],
[-13.1160, -638.51, 839.89],
[0.4858, -145.12, 160.05],
[-1.0880, 74.049, -50.711]]
tau = 1. - T/Tc
H0 = (bs[0][0]*tau**(0.3333) + bs[1][0]*tau**(0.8333) + bs[2][0]*tau**(1.2083) +
bs[3][0]*tau + bs[4][0]*tau**(2) + bs[5][0]*tau**(3))*R*Tc
H1 = (bs[0][1]*tau**(0.3333) + bs[1][1]*tau**(0.8333) + bs[2][1]*tau**(1.2083) +
bs[3][1]*tau + bs[4][1]*tau**(2) + bs[5][1]*tau**(3))*R*Tc
H2 = (bs[0][2]*tau**(0.3333) + bs[1][2]*tau**(0.8333) + bs[2][2]*tau**(1.2083) +
bs[3][2]*tau + bs[4][2]*tau**(2) + bs[5][2]*tau**(3))*R*Tc
return H0 + omega*H1 + omega**2*H2 | def function[MK, parameter[T, Tc, omega]]:
constant[Calculates enthalpy of vaporization at arbitrary temperatures using
the work of [1]_; requires a chemical's critical temperature and
acentric factor.
The enthalpy of vaporization is given by:
.. math::
\Delta H_{vap} = \Delta H_{vap}^{(0)} + \omega \Delta H_{vap}^{(1)} + \omega^2 \Delta H_{vap}^{(2)}
\frac{\Delta H_{vap}^{(i)}}{RT_c} = b_1^{(j)} \tau^{1/3} + b_2^{(j)} \tau^{5/6}
+ b_3^{(j)} \tau^{1.2083} + b_4^{(j)}\tau + b_5^{(j)} \tau^2 + b_6^{(j)} \tau^3
\tau = 1-T/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor [-]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
The original article has been reviewed. A total of 18 coefficients are used.
WARNING: The correlation has been implemented as described in the article,
but its results seem different and carry some error.
Its results do, however, match those of other functions.
Has poor behavior for low-temperature use.
Examples
--------
Problem in article for SMK function.
>>> MK(553.15, 751.35, 0.302)
38727.993546377205
References
----------
.. [1] Morgan, David L., and Riki Kobayashi. "Extension of Pitzer CSP
Models for Vapor Pressures and Heats of Vaporization to Long-Chain
Hydrocarbons." Fluid Phase Equilibria 94 (March 15, 1994): 51-87.
doi:10.1016/0378-3812(94)87051-9.
]
variable[bs] assign[=] list[[<ast.List object at 0x7da18c4cc610>, <ast.List object at 0x7da18c4cd090>, <ast.List object at 0x7da18c4cef20>, <ast.List object at 0x7da18c4cce80>, <ast.List object at 0x7da18c4cec80>, <ast.List object at 0x7da18c4cd000>]]
variable[tau] assign[=] binary_operation[constant[1.0] - binary_operation[name[T] / name[Tc]]]
variable[H0] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[call[name[bs]][constant[0]]][constant[0]] * binary_operation[name[tau] ** constant[0.3333]]] + binary_operation[call[call[name[bs]][constant[1]]][constant[0]] * binary_operation[name[tau] ** constant[0.8333]]]] + binary_operation[call[call[name[bs]][constant[2]]][constant[0]] * binary_operation[name[tau] ** constant[1.2083]]]] + binary_operation[call[call[name[bs]][constant[3]]][constant[0]] * name[tau]]] + binary_operation[call[call[name[bs]][constant[4]]][constant[0]] * binary_operation[name[tau] ** constant[2]]]] + binary_operation[call[call[name[bs]][constant[5]]][constant[0]] * binary_operation[name[tau] ** constant[3]]]] * name[R]] * name[Tc]]
variable[H1] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[call[name[bs]][constant[0]]][constant[1]] * binary_operation[name[tau] ** constant[0.3333]]] + binary_operation[call[call[name[bs]][constant[1]]][constant[1]] * binary_operation[name[tau] ** constant[0.8333]]]] + binary_operation[call[call[name[bs]][constant[2]]][constant[1]] * binary_operation[name[tau] ** constant[1.2083]]]] + binary_operation[call[call[name[bs]][constant[3]]][constant[1]] * name[tau]]] + binary_operation[call[call[name[bs]][constant[4]]][constant[1]] * binary_operation[name[tau] ** constant[2]]]] + binary_operation[call[call[name[bs]][constant[5]]][constant[1]] * binary_operation[name[tau] ** constant[3]]]] * name[R]] * name[Tc]]
variable[H2] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[call[name[bs]][constant[0]]][constant[2]] * binary_operation[name[tau] ** constant[0.3333]]] + binary_operation[call[call[name[bs]][constant[1]]][constant[2]] * binary_operation[name[tau] ** constant[0.8333]]]] + binary_operation[call[call[name[bs]][constant[2]]][constant[2]] * binary_operation[name[tau] ** constant[1.2083]]]] + binary_operation[call[call[name[bs]][constant[3]]][constant[2]] * name[tau]]] + binary_operation[call[call[name[bs]][constant[4]]][constant[2]] * binary_operation[name[tau] ** constant[2]]]] + binary_operation[call[call[name[bs]][constant[5]]][constant[2]] * binary_operation[name[tau] ** constant[3]]]] * name[R]] * name[Tc]]
return[binary_operation[binary_operation[name[H0] + binary_operation[name[omega] * name[H1]]] + binary_operation[binary_operation[name[omega] ** constant[2]] * name[H2]]]] | keyword[def] identifier[MK] ( identifier[T] , identifier[Tc] , identifier[omega] ):
literal[string]
identifier[bs] =[[ literal[int] , literal[int] , literal[int] ],
[ literal[int] , literal[int] ,- literal[int] ],
[ literal[int] , literal[int] ,- literal[int] ],
[- literal[int] ,- literal[int] , literal[int] ],
[ literal[int] ,- literal[int] , literal[int] ],
[- literal[int] , literal[int] ,- literal[int] ]]
identifier[tau] = literal[int] - identifier[T] / identifier[Tc]
identifier[H0] =( identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+
identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] + identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] ))* identifier[R] * identifier[Tc]
identifier[H1] =( identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+
identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] + identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] ))* identifier[R] * identifier[Tc]
identifier[H2] =( identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+
identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] + identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] )+ identifier[bs] [ literal[int] ][ literal[int] ]* identifier[tau] **( literal[int] ))* identifier[R] * identifier[Tc]
keyword[return] identifier[H0] + identifier[omega] * identifier[H1] + identifier[omega] ** literal[int] * identifier[H2] | def MK(T, Tc, omega):
"""Calculates enthalpy of vaporization at arbitrary temperatures using a
the work of [1]_; requires a chemical's critical temperature and
acentric factor.
The enthalpy of vaporization is given by:
.. math::
\\Delta H_{vap} = \\Delta H_{vap}^{(0)} + \\omega \\Delta H_{vap}^{(1)} + \\omega^2 \\Delta H_{vap}^{(2)}
\\frac{\\Delta H_{vap}^{(i)}}{RT_c} = b_1^{(j)} \\tau^{1/3} + b_2^{(j)} \\tau^{5/6}
+ b_3^{(j)} \\tau^{1.2083} + b_4^{(j)}\\tau + b_5^{(j)} \\tau^2 + b_6^{(j)} \\tau^3
\\tau = 1-T/T_c
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
omega : float
Acentric factor [-]
Returns
-------
Hvap : float
Enthalpy of vaporization, [J/mol]
Notes
-----
The original article has been reviewed. A total of 18 coefficients are used.
WARNING: The correlation has been implemented as described in the article,
but its results seem different and carry some error.
Its results do, however, match those of other functions.
Has poor behavior for low-temperature use.
Examples
--------
Problem in article for SMK function.
>>> MK(553.15, 751.35, 0.302)
38727.993546377205
References
----------
.. [1] Morgan, David L., and Riki Kobayashi. "Extension of Pitzer CSP
Models for Vapor Pressures and Heats of Vaporization to Long-Chain
Hydrocarbons." Fluid Phase Equilibria 94 (March 15, 1994): 51-87.
doi:10.1016/0378-3812(94)87051-9.
"""
bs = [[5.2804, 0.080022, 7.2543], [12.865, 273.23, -346.45], [1.171, 465.08, -610.48], [-13.116, -638.51, 839.89], [0.4858, -145.12, 160.05], [-1.088, 74.049, -50.711]]
tau = 1.0 - T / Tc
H0 = (bs[0][0] * tau ** 0.3333 + bs[1][0] * tau ** 0.8333 + bs[2][0] * tau ** 1.2083 + bs[3][0] * tau + bs[4][0] * tau ** 2 + bs[5][0] * tau ** 3) * R * Tc
H1 = (bs[0][1] * tau ** 0.3333 + bs[1][1] * tau ** 0.8333 + bs[2][1] * tau ** 1.2083 + bs[3][1] * tau + bs[4][1] * tau ** 2 + bs[5][1] * tau ** 3) * R * Tc
H2 = (bs[0][2] * tau ** 0.3333 + bs[1][2] * tau ** 0.8333 + bs[2][2] * tau ** 1.2083 + bs[3][2] * tau + bs[4][2] * tau ** 2 + bs[5][2] * tau ** 3) * R * Tc
return H0 + omega * H1 + omega ** 2 * H2 |
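A quick worked check of the reduced-temperature complement for the docstring example; the final Hvap also depends on the module-level gas constant R, which this row does not define:
T, Tc = 553.15, 751.35
tau = 1.0 - T / Tc            # the tau fed into all three b-coefficient sums
print(round(tau, 6))          # 0.263792
# Hvap = H0 + omega*H1 + omega**2*H2 -> ~38728 J/mol per the docstring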
def turn_on(self, time):
"""(Helper) Turn on an output"""
self._elk.send(cn_encode(self._index, time)) | def function[turn_on, parameter[self, time]]:
constant[(Helper) Turn on an output]
call[name[self]._elk.send, parameter[call[name[cn_encode], parameter[name[self]._index, name[time]]]]] | keyword[def] identifier[turn_on] ( identifier[self] , identifier[time] ):
literal[string]
identifier[self] . identifier[_elk] . identifier[send] ( identifier[cn_encode] ( identifier[self] . identifier[_index] , identifier[time] )) | def turn_on(self, time):
"""(Helper) Turn on an output"""
self._elk.send(cn_encode(self._index, time)) |
def urlopen(self, method, url, body=None, headers=None, **kwargs):
"""Implementation of urllib3's urlopen."""
# pylint: disable=arguments-differ
# We use kwargs to collect additional args that we don't need to
# introspect here. However, we do explicitly collect the two
# positional arguments.
# Use a kwarg for this instead of an attribute to maintain
# thread-safety.
_credential_refresh_attempt = kwargs.pop(
'_credential_refresh_attempt', 0)
if headers is None:
headers = self.headers
# Make a copy of the headers. They will be modified by the credentials
# and we want to pass the original headers if we recurse.
request_headers = headers.copy()
self.credentials.before_request(
self._request, method, url, request_headers)
response = self.http.urlopen(
method, url, body=body, headers=request_headers, **kwargs)
# If the response indicated that the credentials needed to be
# refreshed, then refresh the credentials and re-attempt the
# request.
# A stored token may expire between the time it is retrieved and
# the time the request is made, so we may need to try twice.
# The reason urllib3's retries aren't used is that they
# don't allow you to modify the request headers. :/
if (response.status in self._refresh_status_codes
and _credential_refresh_attempt < self._max_refresh_attempts):
_LOGGER.info(
'Refreshing credentials due to a %s response. Attempt %s/%s.',
response.status, _credential_refresh_attempt + 1,
self._max_refresh_attempts)
self.credentials.refresh(self._request)
# Recurse. Pass in the original headers, not our modified set.
return self.urlopen(
method, url, body=body, headers=headers,
_credential_refresh_attempt=_credential_refresh_attempt + 1,
**kwargs)
return response | def function[urlopen, parameter[self, method, url, body, headers]]:
constant[Implementation of urllib3's urlopen.]
variable[_credential_refresh_attempt] assign[=] call[name[kwargs].pop, parameter[constant[_credential_refresh_attempt], constant[0]]]
if compare[name[headers] is constant[None]] begin[:]
variable[headers] assign[=] name[self].headers
variable[request_headers] assign[=] call[name[headers].copy, parameter[]]
call[name[self].credentials.before_request, parameter[name[self]._request, name[method], name[url], name[request_headers]]]
variable[response] assign[=] call[name[self].http.urlopen, parameter[name[method], name[url]]]
if <ast.BoolOp object at 0x7da2041da890> begin[:]
call[name[_LOGGER].info, parameter[constant[Refreshing credentials due to a %s response. Attempt %s/%s.], name[response].status, binary_operation[name[_credential_refresh_attempt] + constant[1]], name[self]._max_refresh_attempts]]
call[name[self].credentials.refresh, parameter[name[self]._request]]
return[call[name[self].urlopen, parameter[name[method], name[url]]]]
return[name[response]] | keyword[def] identifier[urlopen] ( identifier[self] , identifier[method] , identifier[url] , identifier[body] = keyword[None] , identifier[headers] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[_credential_refresh_attempt] = identifier[kwargs] . identifier[pop] (
literal[string] , literal[int] )
keyword[if] identifier[headers] keyword[is] keyword[None] :
identifier[headers] = identifier[self] . identifier[headers]
identifier[request_headers] = identifier[headers] . identifier[copy] ()
identifier[self] . identifier[credentials] . identifier[before_request] (
identifier[self] . identifier[_request] , identifier[method] , identifier[url] , identifier[request_headers] )
identifier[response] = identifier[self] . identifier[http] . identifier[urlopen] (
identifier[method] , identifier[url] , identifier[body] = identifier[body] , identifier[headers] = identifier[request_headers] ,** identifier[kwargs] )
keyword[if] ( identifier[response] . identifier[status] keyword[in] identifier[self] . identifier[_refresh_status_codes]
keyword[and] identifier[_credential_refresh_attempt] < identifier[self] . identifier[_max_refresh_attempts] ):
identifier[_LOGGER] . identifier[info] (
literal[string] ,
identifier[response] . identifier[status] , identifier[_credential_refresh_attempt] + literal[int] ,
identifier[self] . identifier[_max_refresh_attempts] )
identifier[self] . identifier[credentials] . identifier[refresh] ( identifier[self] . identifier[_request] )
keyword[return] identifier[self] . identifier[urlopen] (
identifier[method] , identifier[url] , identifier[body] = identifier[body] , identifier[headers] = identifier[headers] ,
identifier[_credential_refresh_attempt] = identifier[_credential_refresh_attempt] + literal[int] ,
** identifier[kwargs] )
keyword[return] identifier[response] | def urlopen(self, method, url, body=None, headers=None, **kwargs):
"""Implementation of urllib3's urlopen."""
# pylint: disable=arguments-differ
# We use kwargs to collect additional args that we don't need to
# introspect here. However, we do explicitly collect the two
# positional arguments.
# Use a kwarg for this instead of an attribute to maintain
# thread-safety.
_credential_refresh_attempt = kwargs.pop('_credential_refresh_attempt', 0)
if headers is None:
headers = self.headers # depends on [control=['if'], data=['headers']]
# Make a copy of the headers. They will be modified by the credentials
# and we want to pass the original headers if we recurse.
request_headers = headers.copy()
self.credentials.before_request(self._request, method, url, request_headers)
response = self.http.urlopen(method, url, body=body, headers=request_headers, **kwargs)
# If the response indicated that the credentials needed to be
# refreshed, then refresh the credentials and re-attempt the
# request.
# A stored token may expire between the time it is retrieved and
# the time the request is made, so we may need to try twice.
# The reason urllib3's retries aren't used is that they
# don't allow you to modify the request headers. :/
if response.status in self._refresh_status_codes and _credential_refresh_attempt < self._max_refresh_attempts:
_LOGGER.info('Refreshing credentials due to a %s response. Attempt %s/%s.', response.status, _credential_refresh_attempt + 1, self._max_refresh_attempts)
self.credentials.refresh(self._request)
# Recurse. Pass in the original headers, not our modified set.
return self.urlopen(method, url, body=body, headers=headers, _credential_refresh_attempt=_credential_refresh_attempt + 1, **kwargs) # depends on [control=['if'], data=[]]
return response |
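A minimal model of the bounded refresh-and-retry recursion; the names are hypothetical, and the real class threads the counter through kwargs so concurrent requests cannot clobber each other:
def fetch(attempt=0, max_attempts=2):
    status = 401  # pretend every token is rejected
    if status == 401 and attempt < max_attempts:
        # 'refresh credentials' would happen here, then retry
        return fetch(attempt=attempt + 1, max_attempts=max_attempts)
    return status

print(fetch())  # 401 -- the recursion stops after max_attempts refreshes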
async def close(self, exception: BaseException = None) -> None:
"""
Close this context and call any necessary resource teardown callbacks.
If a teardown callback returns an awaitable, the return value is awaited on before calling
any further teardown callbacks.
All callbacks will be processed, even if some of them raise exceptions. If at least one
callback raised an error, this method will raise a :exc:`~.TeardownError` at the end.
After this method has been called, resources can no longer be requested or published on
this context.
:param exception: the exception, if any, that caused this context to be closed
:raises .TeardownError: if one or more teardown callbacks raise an exception
"""
self._check_closed()
self._closed = True
exceptions = []
for callback, pass_exception in reversed(self._teardown_callbacks):
try:
retval = callback(exception) if pass_exception else callback()
if isawaitable(retval):
await retval
except Exception as e:
exceptions.append(e)
del self._teardown_callbacks
if exceptions:
raise TeardownError(exceptions) | <ast.AsyncFunctionDef object at 0x7da1b0569b40> | keyword[async] keyword[def] identifier[close] ( identifier[self] , identifier[exception] : identifier[BaseException] = keyword[None] )-> keyword[None] :
literal[string]
identifier[self] . identifier[_check_closed] ()
identifier[self] . identifier[_closed] = keyword[True]
identifier[exceptions] =[]
keyword[for] identifier[callback] , identifier[pass_exception] keyword[in] identifier[reversed] ( identifier[self] . identifier[_teardown_callbacks] ):
keyword[try] :
identifier[retval] = identifier[callback] ( identifier[exception] ) keyword[if] identifier[pass_exception] keyword[else] identifier[callback] ()
keyword[if] identifier[isawaitable] ( identifier[retval] ):
keyword[await] identifier[retval]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[exceptions] . identifier[append] ( identifier[e] )
keyword[del] identifier[self] . identifier[_teardown_callbacks]
keyword[if] identifier[exceptions] :
keyword[raise] identifier[TeardownError] ( identifier[exceptions] ) | async def close(self, exception: BaseException=None) -> None:
"""
Close this context and call any necessary resource teardown callbacks.
If a teardown callback returns an awaitable, the return value is awaited on before calling
any further teardown callbacks.
All callbacks will be processed, even if some of them raise exceptions. If at least one
callback raised an error, this method will raise a :exc:`~.TeardownError` at the end.
After this method has been called, resources can no longer be requested or published on
this context.
:param exception: the exception, if any, that caused this context to be closed
:raises .TeardownError: if one or more teardown callbacks raise an exception
"""
self._check_closed()
self._closed = True
exceptions = []
for (callback, pass_exception) in reversed(self._teardown_callbacks):
try:
retval = callback(exception) if pass_exception else callback()
if isawaitable(retval):
await retval # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception as e:
exceptions.append(e) # depends on [control=['except'], data=['e']] # depends on [control=['for'], data=[]]
del self._teardown_callbacks
if exceptions:
raise TeardownError(exceptions) # depends on [control=['if'], data=[]] |
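A hedged reduction of the teardown protocol: callbacks run in reverse registration order and awaitable return values are awaited. MiniContext is a toy model, not the real context class:
import asyncio
from inspect import isawaitable

class MiniContext:
    def __init__(self):
        self._teardown_callbacks = []
    def add_teardown_callback(self, cb, pass_exception=False):
        self._teardown_callbacks.append((cb, pass_exception))
    async def close(self, exception=None):
        for cb, pass_exception in reversed(self._teardown_callbacks):
            ret = cb(exception) if pass_exception else cb()
            if isawaitable(ret):
                await ret

async def main():
    ctx = MiniContext()
    ctx.add_teardown_callback(lambda: print('sync teardown'))
    async def async_teardown():
        print('async teardown')
    ctx.add_teardown_callback(async_teardown)
    await ctx.close()  # prints 'async teardown' first, then 'sync teardown'

asyncio.run(main())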
def add_boolean_proxy_for(self, label: str, shape: Collection[int] = None) -> Vertex:
"""
Creates a proxy vertex for the given label and adds it to the sequence item
"""
if shape is None:
return Vertex._from_java_vertex(self.unwrap().addBooleanProxyFor(_VertexLabel(label).unwrap()))
else:
return Vertex._from_java_vertex(self.unwrap().addBooleanProxyFor(_VertexLabel(label).unwrap(), shape)) | def function[add_boolean_proxy_for, parameter[self, label, shape]]:
constant[
Creates a proxy vertex for the given label and adds it to the sequence item
]
if compare[name[shape] is constant[None]] begin[:]
return[call[name[Vertex]._from_java_vertex, parameter[call[call[name[self].unwrap, parameter[]].addBooleanProxyFor, parameter[call[call[name[_VertexLabel], parameter[name[label]]].unwrap, parameter[]]]]]]] | keyword[def] identifier[add_boolean_proxy_for] ( identifier[self] , identifier[label] : identifier[str] , identifier[shape] : identifier[Collection] [ identifier[int] ]= keyword[None] )-> identifier[Vertex] :
literal[string]
keyword[if] identifier[shape] keyword[is] keyword[None] :
keyword[return] identifier[Vertex] . identifier[_from_java_vertex] ( identifier[self] . identifier[unwrap] (). identifier[addBooleanProxyFor] ( identifier[_VertexLabel] ( identifier[label] ). identifier[unwrap] ()))
keyword[else] :
keyword[return] identifier[Vertex] . identifier[_from_java_vertex] ( identifier[self] . identifier[unwrap] (). identifier[addBooleanProxyFor] ( identifier[_VertexLabel] ( identifier[label] ). identifier[unwrap] (), identifier[shape] )) | def add_boolean_proxy_for(self, label: str, shape: Collection[int]=None) -> Vertex:
"""
Creates a proxy vertex for the given label and adds it to the sequence item
"""
if shape is None:
return Vertex._from_java_vertex(self.unwrap().addBooleanProxyFor(_VertexLabel(label).unwrap())) # depends on [control=['if'], data=[]]
else:
return Vertex._from_java_vertex(self.unwrap().addBooleanProxyFor(_VertexLabel(label).unwrap(), shape)) |
def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
) | def function[fetch, parameter[self]]:
constant[
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
]
variable[params] assign[=] call[name[values].of, parameter[dictionary[[], []]]]
variable[payload] assign[=] call[name[self]._version.fetch, parameter[constant[GET], name[self]._uri]]
return[call[name[SampleInstance], parameter[name[self]._version, name[payload]]]] | keyword[def] identifier[fetch] ( identifier[self] ):
literal[string]
identifier[params] = identifier[values] . identifier[of] ({})
identifier[payload] = identifier[self] . identifier[_version] . identifier[fetch] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[params] = identifier[params] ,
)
keyword[return] identifier[SampleInstance] (
identifier[self] . identifier[_version] ,
identifier[payload] ,
identifier[assistant_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[task_sid] = identifier[self] . identifier[_solution] [ literal[string] ],
identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ],
) | def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.sample.SampleInstance
"""
params = values.of({})
payload = self._version.fetch('GET', self._uri, params=params)
return SampleInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], task_sid=self._solution['task_sid'], sid=self._solution['sid']) |
def get_name_levels(node):
"""Return a list of ``(name, level)`` tuples for assigned names
The `level` is `None` for simple assignments and is a list of
numbers for tuple assignments for example in::
a, (b, c) = x
The level for `a` is ``[0]``, for `b` is ``[1, 0]``, and for
`c` is ``[1, 1]``.
"""
visitor = _NodeNameCollector()
ast.walk(node, visitor)
return visitor.names | def function[get_name_levels, parameter[node]]:
constant[Return a list of ``(name, level)`` tuples for assigned names
The `level` is `None` for simple assignments and is a list of
numbers for tuple assignments for example in::
a, (b, c) = x
The level for `a` is ``[0]``, for `b` is ``[1, 0]``, and for
`c` is ``[1, 1]``.
]
variable[visitor] assign[=] call[name[_NodeNameCollector], parameter[]]
call[name[ast].walk, parameter[name[node], name[visitor]]]
return[name[visitor].names] | keyword[def] identifier[get_name_levels] ( identifier[node] ):
literal[string]
identifier[visitor] = identifier[_NodeNameCollector] ()
identifier[ast] . identifier[walk] ( identifier[node] , identifier[visitor] )
keyword[return] identifier[visitor] . identifier[names] | def get_name_levels(node):
"""Return a list of ``(name, level)`` tuples for assigned names
The `level` is `None` for simple assignments and is a list of
numbers for tuple assignments for example in::
a, (b, c) = x
The level for `a` is ``[0]``, for `b` is ``[1, 0]``, and for
`c` is ``[1, 1]``.
"""
visitor = _NodeNameCollector()
ast.walk(node, visitor)
return visitor.names |
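A stdlib-ast sketch of the level computation; the function above relies on rope's ast wrapper and its visitor-based walk, so only the nesting idea is reproduced here:
import ast

def levels(target, prefix=()):
    if isinstance(target, ast.Name):
        yield target.id, list(prefix) or None   # None for simple assignments
    elif isinstance(target, (ast.Tuple, ast.List)):
        for i, elt in enumerate(target.elts):
            yield from levels(elt, prefix + (i,))

assign = ast.parse('a, (b, c) = x').body[0]
print(list(levels(assign.targets[0])))
# [('a', [0]), ('b', [1, 0]), ('c', [1, 1])]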
def to_json(self):
"""Return a JSON-serializable representation."""
return {
'network': self.network,
'state': self.state,
'nodes': self.node_indices,
'cut': self.cut,
} | def function[to_json, parameter[self]]:
constant[Return a JSON-serializable representation.]
return[dictionary[[<ast.Constant object at 0x7da1b2344ca0>, <ast.Constant object at 0x7da1b2346fb0>, <ast.Constant object at 0x7da1b23471c0>, <ast.Constant object at 0x7da1b2344820>], [<ast.Attribute object at 0x7da1b2347ee0>, <ast.Attribute object at 0x7da1b2346380>, <ast.Attribute object at 0x7da1b23442b0>, <ast.Attribute object at 0x7da1b23466b0>]]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[network] ,
literal[string] : identifier[self] . identifier[state] ,
literal[string] : identifier[self] . identifier[node_indices] ,
literal[string] : identifier[self] . identifier[cut] ,
} | def to_json(self):
"""Return a JSON-serializable representation."""
return {'network': self.network, 'state': self.state, 'nodes': self.node_indices, 'cut': self.cut} |
def request(self, method, url, **params):
"""Constructs and sends a request to a remote server.
It returns a :class:`.Future` which results in a
:class:`.HttpResponse` object.
:param method: request method for the :class:`HttpRequest`.
:param url: URL for the :class:`HttpRequest`.
:param params: optional parameters for the :class:`HttpRequest`
initialisation.
:rtype: a coroutine
"""
response = self._request(method, url, **params)
if not self._loop.is_running():
return self._loop.run_until_complete(response)
else:
return response | def function[request, parameter[self, method, url]]:
constant[Constructs and sends a request to a remote server.
It returns a :class:`.Future` which results in a
:class:`.HttpResponse` object.
:param method: request method for the :class:`HttpRequest`.
:param url: URL for the :class:`HttpRequest`.
:param params: optional parameters for the :class:`HttpRequest`
initialisation.
:rtype: a coroutine
]
variable[response] assign[=] call[name[self]._request, parameter[name[method], name[url]]]
if <ast.UnaryOp object at 0x7da18bc719c0> begin[:]
return[call[name[self]._loop.run_until_complete, parameter[name[response]]]] | keyword[def] identifier[request] ( identifier[self] , identifier[method] , identifier[url] ,** identifier[params] ):
literal[string]
identifier[response] = identifier[self] . identifier[_request] ( identifier[method] , identifier[url] ,** identifier[params] )
keyword[if] keyword[not] identifier[self] . identifier[_loop] . identifier[is_running] ():
keyword[return] identifier[self] . identifier[_loop] . identifier[run_until_complete] ( identifier[response] )
keyword[else] :
keyword[return] identifier[response] | def request(self, method, url, **params):
"""Constructs and sends a request to a remote server.
It returns a :class:`.Future` which results in a
:class:`.HttpResponse` object.
:param method: request method for the :class:`HttpRequest`.
:param url: URL for the :class:`HttpRequest`.
:param params: optional parameters for the :class:`HttpRequest`
initialisation.
:rtype: a coroutine
"""
response = self._request(method, url, **params)
if not self._loop.is_running():
return self._loop.run_until_complete(response) # depends on [control=['if'], data=[]]
else:
return response |
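A minimal model of the sync/async facade this branch implements: outside a running loop the coroutine is driven to completion, inside one it is handed back to be awaited. Client is hypothetical:
import asyncio

class Client:
    def __init__(self, loop):
        self._loop = loop
    async def _request(self, method, url):
        return (method, url)
    def request(self, method, url):
        response = self._request(method, url)
        if not self._loop.is_running():
            return self._loop.run_until_complete(response)
        return response

loop = asyncio.new_event_loop()
print(Client(loop).request('GET', '/'))  # ('GET', '/') -- ran synchronously
loop.close()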
def subclasses(cls):
"""Return a set of all Ent subclasses, recursively."""
seen = set()
queue = set([cls])
while queue:
c = queue.pop()
seen.add(c)
sc = c.__subclasses__()
for c in sc:
if c not in seen:
queue.add(c)
seen.remove(cls)
return seen | def function[subclasses, parameter[cls]]:
constant[Return a set of all Ent subclasses, recursively.]
variable[seen] assign[=] call[name[set], parameter[]]
variable[queue] assign[=] call[name[set], parameter[list[[<ast.Name object at 0x7da20c6c6dd0>]]]]
while name[queue] begin[:]
variable[c] assign[=] call[name[queue].pop, parameter[]]
call[name[seen].add, parameter[name[c]]]
variable[sc] assign[=] call[name[c].__subclasses__, parameter[]]
for taget[name[c]] in starred[name[sc]] begin[:]
if compare[name[c] <ast.NotIn object at 0x7da2590d7190> name[seen]] begin[:]
call[name[queue].add, parameter[name[c]]]
call[name[seen].remove, parameter[name[cls]]]
return[name[seen]] | keyword[def] identifier[subclasses] ( identifier[cls] ):
literal[string]
identifier[seen] = identifier[set] ()
identifier[queue] = identifier[set] ([ identifier[cls] ])
keyword[while] identifier[queue] :
identifier[c] = identifier[queue] . identifier[pop] ()
identifier[seen] . identifier[add] ( identifier[c] )
identifier[sc] = identifier[c] . identifier[__subclasses__] ()
keyword[for] identifier[c] keyword[in] identifier[sc] :
keyword[if] identifier[c] keyword[not] keyword[in] identifier[seen] :
identifier[queue] . identifier[add] ( identifier[c] )
identifier[seen] . identifier[remove] ( identifier[cls] )
keyword[return] identifier[seen] | def subclasses(cls):
"""Return a set of all Ent subclasses, recursively."""
seen = set()
queue = set([cls])
while queue:
c = queue.pop()
seen.add(c)
sc = c.__subclasses__()
for c in sc:
if c not in seen:
queue.add(c) # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['c']] # depends on [control=['while'], data=[]]
seen.remove(cls)
return seen |
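Assuming the subclasses function above is in scope, a quick demonstration that the walk is transitive and excludes the root:
class B: pass
class C(B): pass
class D(C): pass

print(subclasses(B) == {C, D})  # True -- D reached via C, B itself excluded
print(subclasses(C) == {D})     # True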
def add_blacklisted_filepaths(self, filepaths, remove_from_stored=True):
"""
Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
internal state will be automatically removed.
"""
self.file_manager.add_blacklisted_filepaths(filepaths,
remove_from_stored) | def function[add_blacklisted_filepaths, parameter[self, filepaths, remove_from_stored]]:
constant[
Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
internal state will be automatically removed.
]
call[name[self].file_manager.add_blacklisted_filepaths, parameter[name[filepaths], name[remove_from_stored]]] | keyword[def] identifier[add_blacklisted_filepaths] ( identifier[self] , identifier[filepaths] , identifier[remove_from_stored] = keyword[True] ):
literal[string]
identifier[self] . identifier[file_manager] . identifier[add_blacklisted_filepaths] ( identifier[filepaths] ,
identifier[remove_from_stored] ) | def add_blacklisted_filepaths(self, filepaths, remove_from_stored=True):
"""
Add `filepaths` to blacklisted filepaths.
If `remove_from_stored` is `True`, any `filepaths` in
internal state will be automatically removed.
"""
self.file_manager.add_blacklisted_filepaths(filepaths, remove_from_stored) |
def to_native(self, value, context=None):
""" Schematics deserializer override
:return: ToOne instance
"""
if isinstance(value, ToOne):
return value
value = self._cast_rid(value)
return ToOne(self.rtype, self.field, rid=value) | def function[to_native, parameter[self, value, context]]:
constant[ Schematics deserializer override
:return: ToOne instance
]
if call[name[isinstance], parameter[name[value], name[ToOne]]] begin[:]
return[name[value]]
variable[value] assign[=] call[name[self]._cast_rid, parameter[name[value]]]
return[call[name[ToOne], parameter[name[self].rtype, name[self].field]]] | keyword[def] identifier[to_native] ( identifier[self] , identifier[value] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[ToOne] ):
keyword[return] identifier[value]
identifier[value] = identifier[self] . identifier[_cast_rid] ( identifier[value] )
keyword[return] identifier[ToOne] ( identifier[self] . identifier[rtype] , identifier[self] . identifier[field] , identifier[rid] = identifier[value] ) | def to_native(self, value, context=None):
""" Schematics deserializer override
:return: ToOne instance
"""
if isinstance(value, ToOne):
return value # depends on [control=['if'], data=[]]
value = self._cast_rid(value)
return ToOne(self.rtype, self.field, rid=value) |
def character(prompt=None, empty=False):
"""Prompt a single character.
Parameters
----------
prompt : str, optional
Use an alternative prompt.
empty : bool, optional
Allow an empty response.
Returns
-------
str or None
A str if the user entered a single-character, non-empty string.
None if the user pressed only Enter and ``empty`` was True.
"""
s = _prompt_input(prompt)
if empty and not s:
return None
elif len(s) == 1:
return s
else:
return character(prompt=prompt, empty=empty) | def function[character, parameter[prompt, empty]]:
constant[Prompt a single character.
Parameters
----------
prompt : str, optional
Use an alternative prompt.
empty : bool, optional
Allow an empty response.
Returns
-------
str or None
A str if the user entered a single-character, non-empty string.
None if the user pressed only Enter and ``empty`` was True.
]
variable[s] assign[=] call[name[_prompt_input], parameter[name[prompt]]]
if <ast.BoolOp object at 0x7da20c6c4040> begin[:]
return[constant[None]] | keyword[def] identifier[character] ( identifier[prompt] = keyword[None] , identifier[empty] = keyword[False] ):
literal[string]
identifier[s] = identifier[_prompt_input] ( identifier[prompt] )
keyword[if] identifier[empty] keyword[and] keyword[not] identifier[s] :
keyword[return] keyword[None]
keyword[elif] identifier[len] ( identifier[s] )== literal[int] :
keyword[return] identifier[s]
keyword[else] :
keyword[return] identifier[character] ( identifier[prompt] = identifier[prompt] , identifier[empty] = identifier[empty] ) | def character(prompt=None, empty=False):
"""Prompt a single character.
Parameters
----------
prompt : str, optional
Use an alternative prompt.
empty : bool, optional
Allow an empty response.
Returns
-------
str or None
A str if the user entered a single-character, non-empty string.
None if the user pressed only Enter and ``empty`` was True.
"""
s = _prompt_input(prompt)
if empty and (not s):
return None # depends on [control=['if'], data=[]]
elif len(s) == 1:
return s # depends on [control=['if'], data=[]]
else:
return character(prompt=prompt, empty=empty) |
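A testable model of the re-prompt recursion; the _input parameter is added here purely so a scripted source can stand in for _prompt_input:
def character(prompt=None, empty=False, _input=input):
    s = _input(prompt or 'char> ')
    if empty and not s:
        return None
    if len(s) == 1:
        return s
    return character(prompt=prompt, empty=empty, _input=_input)

answers = iter(['too long', '', 'x'])
print(character(_input=lambda _prompt: next(answers)))  # 'x'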
def write_multi(self, frames, encoded_frames=None):
"""Writes multiple video frames."""
if encoded_frames is None:
# Infinite iterator.
encoded_frames = iter(lambda: None, 1)
for (frame, encoded_frame) in zip(frames, encoded_frames):
self.write(frame, encoded_frame) | def function[write_multi, parameter[self, frames, encoded_frames]]:
constant[Writes multiple video frames.]
if compare[name[encoded_frames] is constant[None]] begin[:]
variable[encoded_frames] assign[=] call[name[iter], parameter[<ast.Lambda object at 0x7da1b208b190>, constant[1]]]
for taget[tuple[[<ast.Name object at 0x7da1b208a9b0>, <ast.Name object at 0x7da1b208a0b0>]]] in starred[call[name[zip], parameter[name[frames], name[encoded_frames]]]] begin[:]
call[name[self].write, parameter[name[frame], name[encoded_frame]]] | keyword[def] identifier[write_multi] ( identifier[self] , identifier[frames] , identifier[encoded_frames] = keyword[None] ):
literal[string]
keyword[if] identifier[encoded_frames] keyword[is] keyword[None] :
identifier[encoded_frames] = identifier[iter] ( keyword[lambda] : keyword[None] , literal[int] )
keyword[for] ( identifier[frame] , identifier[encoded_frame] ) keyword[in] identifier[zip] ( identifier[frames] , identifier[encoded_frames] ):
identifier[self] . identifier[write] ( identifier[frame] , identifier[encoded_frame] ) | def write_multi(self, frames, encoded_frames=None):
"""Writes multiple video frames."""
if encoded_frames is None:
# Infinite iterator.
encoded_frames = iter(lambda : None, 1) # depends on [control=['if'], data=['encoded_frames']]
for (frame, encoded_frame) in zip(frames, encoded_frames):
self.write(frame, encoded_frame) # depends on [control=['for'], data=[]] |
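The two-argument iter(callable, sentinel) form explains the 'infinite iterator' comment: the lambda never returns the sentinel 1, so it yields None forever and zip() pairs every frame with None:
from itertools import islice

infinite_none = iter(lambda: None, 1)
print(list(islice(infinite_none, 3)))                  # [None, None, None]
print(list(zip(['f0', 'f1'], iter(lambda: None, 1))))  # [('f0', None), ('f1', None)]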
def chart(symbol, timeframe='1m', date=None, token='', version=''):
'''Historical price/volume data, daily and intraday
https://iexcloud.io/docs/api/#historical-prices
Data Schedule
1d: -9:30-4pm ET Mon-Fri on regular market trading days
-9:30-1pm ET on early close trading days
All others:
-Prior trading day available after 4am ET Tue-Sat
Args:
symbol (string); Ticker to request
timeframe (string); Timeframe to request e.g. 1m
date (datetime): date, if requesting intraday
token (string); Access token
version (string); API version
Returns:
dict: result
'''
_raiseIfNotStr(symbol)
if timeframe is not None and timeframe != '1d':
if timeframe not in _TIMEFRAME_CHART:
raise PyEXception('Range must be in %s' % str(_TIMEFRAME_CHART))
return _getJson('stock/' + symbol + '/chart' + '/' + timeframe, token, version)
if date:
date = _strOrDate(date)
return _getJson('stock/' + symbol + '/chart' + '/date/' + date, token, version)
return _getJson('stock/' + symbol + '/chart', token, version) | def function[chart, parameter[symbol, timeframe, date, token, version]]:
constant[Historical price/volume data, daily and intraday
https://iexcloud.io/docs/api/#historical-prices
Data Schedule
1d: -9:30-4pm ET Mon-Fri on regular market trading days
-9:30-1pm ET on early close trading days
All others:
-Prior trading day available after 4am ET Tue-Sat
Args:
symbol (string); Ticker to request
timeframe (string); Timeframe to request e.g. 1m
date (datetime): date, if requesting intraday
token (string); Access token
version (string); API version
Returns:
dict: result
]
call[name[_raiseIfNotStr], parameter[name[symbol]]]
if <ast.BoolOp object at 0x7da1b0150730> begin[:]
if compare[name[timeframe] <ast.NotIn object at 0x7da2590d7190> name[_TIMEFRAME_CHART]] begin[:]
<ast.Raise object at 0x7da1b01500a0>
return[call[name[_getJson], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[stock/] + name[symbol]] + constant[/chart]] + constant[/]] + name[timeframe]], name[token], name[version]]]]
if name[date] begin[:]
variable[date] assign[=] call[name[_strOrDate], parameter[name[date]]]
return[call[name[_getJson], parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[stock/] + name[symbol]] + constant[/chart]] + constant[/date/]] + name[date]], name[token], name[version]]]]
return[call[name[_getJson], parameter[binary_operation[binary_operation[constant[stock/] + name[symbol]] + constant[/chart]], name[token], name[version]]]] | keyword[def] identifier[chart] ( identifier[symbol] , identifier[timeframe] = literal[string] , identifier[date] = keyword[None] , identifier[token] = literal[string] , identifier[version] = literal[string] ):
literal[string]
identifier[_raiseIfNotStr] ( identifier[symbol] )
keyword[if] identifier[timeframe] keyword[is] keyword[not] keyword[None] keyword[and] identifier[timeframe] != literal[string] :
keyword[if] identifier[timeframe] keyword[not] keyword[in] identifier[_TIMEFRAME_CHART] :
keyword[raise] identifier[PyEXception] ( literal[string] % identifier[str] ( identifier[_TIMEFRAME_CHART] ))
keyword[return] identifier[_getJson] ( literal[string] + identifier[symbol] + literal[string] + literal[string] + identifier[timeframe] , identifier[token] , identifier[version] )
keyword[if] identifier[date] :
identifier[date] = identifier[_strOrDate] ( identifier[date] )
keyword[return] identifier[_getJson] ( literal[string] + identifier[symbol] + literal[string] + literal[string] + identifier[date] , identifier[token] , identifier[version] )
keyword[return] identifier[_getJson] ( literal[string] + identifier[symbol] + literal[string] , identifier[token] , identifier[version] ) | def chart(symbol, timeframe='1m', date=None, token='', version=''):
"""Historical price/volume data, daily and intraday
https://iexcloud.io/docs/api/#historical-prices
Data Schedule
1d: -9:30-4pm ET Mon-Fri on regular market trading days
-9:30-1pm ET on early close trading days
All others:
-Prior trading day available after 4am ET Tue-Sat
Args:
symbol (string); Ticker to request
timeframe (string); Timeframe to request e.g. 1m
date (datetime): date, if requesting intraday
token (string); Access token
version (string); API version
Returns:
dict: result
"""
_raiseIfNotStr(symbol)
if timeframe is not None and timeframe != '1d':
if timeframe not in _TIMEFRAME_CHART:
raise PyEXception('Range must be in %s' % str(_TIMEFRAME_CHART)) # depends on [control=['if'], data=['_TIMEFRAME_CHART']]
return _getJson('stock/' + symbol + '/chart' + '/' + timeframe, token, version) # depends on [control=['if'], data=[]]
if date:
date = _strOrDate(date)
return _getJson('stock/' + symbol + '/chart' + '/date/' + date, token, version) # depends on [control=['if'], data=[]]
return _getJson('stock/' + symbol + '/chart', token, version) |
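A pure, network-free sketch of the endpoint paths built by the three branches; chart_path is a hypothetical helper mirroring only the string logic:
def chart_path(symbol, timeframe='1m', date=None):
    if timeframe is not None and timeframe != '1d':
        return 'stock/' + symbol + '/chart' + '/' + timeframe
    if date:
        return 'stock/' + symbol + '/chart' + '/date/' + str(date)
    return 'stock/' + symbol + '/chart'

print(chart_path('AAPL'))                    # stock/AAPL/chart/1m
print(chart_path('AAPL', '1d', '20210104'))  # stock/AAPL/chart/date/20210104
print(chart_path('AAPL', '1d'))              # stock/AAPL/chart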
def search(self, initial_ids, initial_cache):
"""Beam search for sequences with highest scores."""
state, state_shapes = self._create_initial_state(initial_ids, initial_cache)
finished_state = tf.while_loop(
self._continue_search, self._search_step, loop_vars=[state],
shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
finished_state = finished_state[0]
alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
# Account for corner case where there are no finished sequences for a
# particular batch item. In that case, return alive sequences for that batch
# item.
finished_seq = tf.where(
tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(
tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return finished_seq, finished_scores | def function[search, parameter[self, initial_ids, initial_cache]]:
constant[Beam search for sequences with highest scores.]
<ast.Tuple object at 0x7da18f811ba0> assign[=] call[name[self]._create_initial_state, parameter[name[initial_ids], name[initial_cache]]]
variable[finished_state] assign[=] call[name[tf].while_loop, parameter[name[self]._continue_search, name[self]._search_step]]
variable[finished_state] assign[=] call[name[finished_state]][constant[0]]
variable[alive_seq] assign[=] call[name[finished_state]][name[_StateKeys].ALIVE_SEQ]
variable[alive_log_probs] assign[=] call[name[finished_state]][name[_StateKeys].ALIVE_LOG_PROBS]
variable[finished_seq] assign[=] call[name[finished_state]][name[_StateKeys].FINISHED_SEQ]
variable[finished_scores] assign[=] call[name[finished_state]][name[_StateKeys].FINISHED_SCORES]
variable[finished_flags] assign[=] call[name[finished_state]][name[_StateKeys].FINISHED_FLAGS]
variable[finished_seq] assign[=] call[name[tf].where, parameter[call[name[tf].reduce_any, parameter[name[finished_flags], constant[1]]], name[finished_seq], name[alive_seq]]]
variable[finished_scores] assign[=] call[name[tf].where, parameter[call[name[tf].reduce_any, parameter[name[finished_flags], constant[1]]], name[finished_scores], name[alive_log_probs]]]
return[tuple[[<ast.Name object at 0x7da1b21a1900>, <ast.Name object at 0x7da1b21a3790>]]] | keyword[def] identifier[search] ( identifier[self] , identifier[initial_ids] , identifier[initial_cache] ):
literal[string]
identifier[state] , identifier[state_shapes] = identifier[self] . identifier[_create_initial_state] ( identifier[initial_ids] , identifier[initial_cache] )
identifier[finished_state] = identifier[tf] . identifier[while_loop] (
identifier[self] . identifier[_continue_search] , identifier[self] . identifier[_search_step] , identifier[loop_vars] =[ identifier[state] ],
identifier[shape_invariants] =[ identifier[state_shapes] ], identifier[parallel_iterations] = literal[int] , identifier[back_prop] = keyword[False] )
identifier[finished_state] = identifier[finished_state] [ literal[int] ]
identifier[alive_seq] = identifier[finished_state] [ identifier[_StateKeys] . identifier[ALIVE_SEQ] ]
identifier[alive_log_probs] = identifier[finished_state] [ identifier[_StateKeys] . identifier[ALIVE_LOG_PROBS] ]
identifier[finished_seq] = identifier[finished_state] [ identifier[_StateKeys] . identifier[FINISHED_SEQ] ]
identifier[finished_scores] = identifier[finished_state] [ identifier[_StateKeys] . identifier[FINISHED_SCORES] ]
identifier[finished_flags] = identifier[finished_state] [ identifier[_StateKeys] . identifier[FINISHED_FLAGS] ]
identifier[finished_seq] = identifier[tf] . identifier[where] (
identifier[tf] . identifier[reduce_any] ( identifier[finished_flags] , literal[int] ), identifier[finished_seq] , identifier[alive_seq] )
identifier[finished_scores] = identifier[tf] . identifier[where] (
identifier[tf] . identifier[reduce_any] ( identifier[finished_flags] , literal[int] ), identifier[finished_scores] , identifier[alive_log_probs] )
keyword[return] identifier[finished_seq] , identifier[finished_scores] | def search(self, initial_ids, initial_cache):
"""Beam search for sequences with highest scores."""
(state, state_shapes) = self._create_initial_state(initial_ids, initial_cache)
finished_state = tf.while_loop(self._continue_search, self._search_step, loop_vars=[state], shape_invariants=[state_shapes], parallel_iterations=1, back_prop=False)
finished_state = finished_state[0]
alive_seq = finished_state[_StateKeys.ALIVE_SEQ]
alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]
finished_seq = finished_state[_StateKeys.FINISHED_SEQ]
finished_scores = finished_state[_StateKeys.FINISHED_SCORES]
finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]
# Account for corner case where there are no finished sequences for a
# particular batch item. In that case, return alive sequences for that batch
# item.
finished_seq = tf.where(tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)
finished_scores = tf.where(tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)
return (finished_seq, finished_scores) |
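A NumPy rendering of the corner case tf.where handles at the end (an approximation of the graph ops, not TF code): batch items with no finished beam fall back to their alive beam:
import numpy as np

finished_flags = np.array([[True, False],
                           [False, False]])        # (batch, beam)
any_finished = finished_flags.any(axis=1)          # [ True, False]
finished_scores = np.array([[0.9, -np.inf],
                            [-np.inf, -np.inf]])
alive_log_probs = np.array([[0.5, 0.4],
                            [0.7, 0.6]])
scores = np.where(any_finished[:, None], finished_scores, alive_log_probs)
print(scores)  # row 0 keeps finished scores; row 1 falls back to alive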
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None,
stop=None, step=None, windows=None,
fill=np.nan, max_allele=None):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Weir and Cockerham (1984).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
size : int
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variant
a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)
# define the statistic to compute within each window
def average_fst(wa, wb, wc):
return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc))
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(a, b, c),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts | def function[windowed_weir_cockerham_fst, parameter[pos, g, subpops, size, start, stop, step, windows, fill, max_allele]]:
constant[Estimate average Fst in windows over a single chromosome/contig,
following the method of Weir and Cockerham (1984).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
size : int
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
]
<ast.Tuple object at 0x7da1b2345270> assign[=] call[name[weir_cockerham_fst], parameter[name[g], name[subpops]]]
def function[average_fst, parameter[wa, wb, wc]]:
return[binary_operation[call[name[np].nansum, parameter[name[wa]]] / binary_operation[binary_operation[call[name[np].nansum, parameter[name[wa]]] + call[name[np].nansum, parameter[name[wb]]]] + call[name[np].nansum, parameter[name[wc]]]]]]
<ast.Tuple object at 0x7da1b2347880> assign[=] call[name[windowed_statistic], parameter[name[pos]]]
return[tuple[[<ast.Name object at 0x7da2041d94b0>, <ast.Name object at 0x7da2041d8a30>, <ast.Name object at 0x7da2041da350>]]] | keyword[def] identifier[windowed_weir_cockerham_fst] ( identifier[pos] , identifier[g] , identifier[subpops] , identifier[size] = keyword[None] , identifier[start] = keyword[None] ,
identifier[stop] = keyword[None] , identifier[step] = keyword[None] , identifier[windows] = keyword[None] ,
identifier[fill] = identifier[np] . identifier[nan] , identifier[max_allele] = keyword[None] ):
literal[string]
identifier[a] , identifier[b] , identifier[c] = identifier[weir_cockerham_fst] ( identifier[g] , identifier[subpops] , identifier[max_allele] = identifier[max_allele] )
keyword[def] identifier[average_fst] ( identifier[wa] , identifier[wb] , identifier[wc] ):
keyword[return] identifier[np] . identifier[nansum] ( identifier[wa] )/( identifier[np] . identifier[nansum] ( identifier[wa] )+ identifier[np] . identifier[nansum] ( identifier[wb] )+ identifier[np] . identifier[nansum] ( identifier[wc] ))
identifier[fst] , identifier[windows] , identifier[counts] = identifier[windowed_statistic] ( identifier[pos] , identifier[values] =( identifier[a] , identifier[b] , identifier[c] ),
identifier[statistic] = identifier[average_fst] ,
identifier[size] = identifier[size] , identifier[start] = identifier[start] ,
identifier[stop] = identifier[stop] , identifier[step] = identifier[step] ,
identifier[windows] = identifier[windows] , identifier[fill] = identifier[fill] )
keyword[return] identifier[fst] , identifier[windows] , identifier[counts] | def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, max_allele=None):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Weir and Cockerham (1984).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
size : int
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variant
(a, b, c) = weir_cockerham_fst(g, subpops, max_allele=max_allele)
# define the statistic to compute within each window
def average_fst(wa, wb, wc):
return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc))
# calculate average Fst in windows
(fst, windows, counts) = windowed_statistic(pos, values=(a, b, c), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill)
return (fst, windows, counts) |
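
A minimal usage sketch for the function above, assuming it is exposed via the scikit-allel package; the toy genotypes, positions, and two-subpopulation split are illustrative only:

    import numpy as np
    import allel

    # 5 variants x 4 diploid samples; samples 0-1 form one subpopulation, 2-3 the other.
    g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
                             [[0, 1], [0, 1], [1, 1], [0, 0]],
                             [[0, 0], [0, 1], [1, 1], [1, 1]],
                             [[0, 1], [1, 1], [0, 0], [0, 0]],
                             [[1, 1], [0, 0], [0, 1], [0, 1]]])
    pos = np.array([100, 250, 400, 750, 900])
    subpops = [[0, 1], [2, 3]]
    fst, windows, counts = allel.windowed_weir_cockerham_fst(pos, g, subpops, size=500)
    # fst holds one estimate per 500 bp window; counts holds the variants per window.
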
def load(cls, data, promote=False):
"""Create a new ent from an existing value. The value must either
be an instance of Ent, or must be an instance of SAFE_TYPES. If
the value is a base type (bool, int, string, etc), it will just be
returned. Iterable types will be loaded recursively, transforming
dictionaries into Ent instances, but otherwise maintaining the
hierarchy of the input data."""
t = type(data)
if t == cls:
# same class, create new copy
return cls({k: cls.load(v, promote)
for k, v in data.__dict__.items()})
elif isinstance(data, cls):
# child class, always use directly
return data.copy()
elif isinstance(data, Ent):
# parent class, promote or preserve
if promote:
return cls({k: cls.load(v, promote)
for k, v in data.__dict__.items()})
else:
return data.copy()
elif t not in SAFE_TYPES:
return None
elif t in (tuple, list, set):
return t(cls.load(i) for i in data)
elif t == dict:
return cls({k: cls.load(v) for k, v in data.items()})
else:
return data | def function[load, parameter[cls, data, promote]]:
constant[Create a new ent from an existing value. The value must either
be an instance of Ent, or must be an instance of SAFE_TYPES. If
the value is a base type (bool, int, string, etc), it will just be
returned. Iterable types will be loaded recursively, transforming
dictionaries into Ent instances, but otherwise maintaining the
hierarchy of the input data.]
variable[t] assign[=] call[name[type], parameter[name[data]]]
if compare[name[t] equal[==] name[cls]] begin[:]
return[call[name[cls], parameter[<ast.DictComp object at 0x7da20c6e7940>]]] | keyword[def] identifier[load] ( identifier[cls] , identifier[data] , identifier[promote] = keyword[False] ):
literal[string]
identifier[t] = identifier[type] ( identifier[data] )
keyword[if] identifier[t] == identifier[cls] :
keyword[return] identifier[cls] ({ identifier[k] : identifier[cls] . identifier[load] ( identifier[v] , identifier[promote] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[__dict__] . identifier[items] ()})
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[cls] ):
keyword[return] identifier[data] . identifier[copy] ()
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[Ent] ):
keyword[if] identifier[promote] :
keyword[return] identifier[cls] ({ identifier[k] : identifier[cls] . identifier[load] ( identifier[v] , identifier[promote] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[__dict__] . identifier[items] ()})
keyword[else] :
keyword[return] identifier[data] . identifier[copy] ()
keyword[elif] identifier[t] keyword[not] keyword[in] identifier[SAFE_TYPES] :
keyword[return] keyword[None]
keyword[elif] identifier[t] keyword[in] ( identifier[tuple] , identifier[list] , identifier[set] ):
keyword[return] identifier[t] ( identifier[cls] . identifier[load] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[data] )
keyword[elif] identifier[t] == identifier[dict] :
keyword[return] identifier[cls] ({ identifier[k] : identifier[cls] . identifier[load] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[data] . identifier[items] ()})
keyword[else] :
keyword[return] identifier[data] | def load(cls, data, promote=False):
"""Create a new ent from an existing value. The value must either
be an instance of Ent, or must be an instance of SAFE_TYPES. If
the value is a base type (bool, int, string, etc), it will just be
returned. Iterable types will be loaded recursively, transforming
dictionaries into Ent instances, but otherwise maintaining the
hierarchy of the input data."""
t = type(data)
if t == cls:
# same class, create new copy
return cls({k: cls.load(v, promote) for (k, v) in data.__dict__.items()}) # depends on [control=['if'], data=['cls']]
elif isinstance(data, cls):
# child class, always use directly
return data.copy() # depends on [control=['if'], data=[]]
elif isinstance(data, Ent):
# parent class, promote or preserve
if promote:
return cls({k: cls.load(v, promote) for (k, v) in data.__dict__.items()}) # depends on [control=['if'], data=[]]
else:
return data.copy() # depends on [control=['if'], data=[]]
elif t not in SAFE_TYPES:
return None # depends on [control=['if'], data=[]]
elif t in (tuple, list, set):
return t((cls.load(i) for i in data)) # depends on [control=['if'], data=['t']]
elif t == dict:
return cls({k: cls.load(v) for (k, v) in data.items()}) # depends on [control=['if'], data=[]]
else:
return data |
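
A hypothetical round trip through Ent.load; the Ent class and SAFE_TYPES are defined outside this excerpt, and attribute access over the wrapped dict is assumed:

    config = Ent.load({'name': 'server',
                       'limits': {'cpu': 2, 'mem': 512},
                       'tags': ['a', 'b']})
    print(config.limits.cpu)  # nested dicts become Ent instances -> 2
    print(config.tags)        # lists keep their shape -> ['a', 'b']
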
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing.")
if not self.supports_chunked_reads():
raise BodyNotHttplibCompatible(
"Body should be httplib.HTTPResponse like. "
"It should have have an fp attribute which returns raw chunks.")
with self._error_catcher():
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
# If a response is already read and closed
# then return immediately.
if self._fp.fp is None:
return
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(chunk, decode_content=decode_content,
flush_decoder=False)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b'\r\n':
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close() | def function[read_chunked, parameter[self, amt, decode_content]]:
constant[
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
]
call[name[self]._init_decoder, parameter[]]
if <ast.UnaryOp object at 0x7da2041d8910> begin[:]
<ast.Raise object at 0x7da2041d9480>
if <ast.UnaryOp object at 0x7da18eb572b0> begin[:]
<ast.Raise object at 0x7da20c6c7880>
with call[name[self]._error_catcher, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da20c6c6620> begin[:]
call[name[self]._original_response.close, parameter[]]
return[None]
if compare[name[self]._fp.fp is constant[None]] begin[:]
return[None]
while constant[True] begin[:]
call[name[self]._update_chunk_length, parameter[]]
if compare[name[self].chunk_left equal[==] constant[0]] begin[:]
break
variable[chunk] assign[=] call[name[self]._handle_chunk, parameter[name[amt]]]
variable[decoded] assign[=] call[name[self]._decode, parameter[name[chunk]]]
if name[decoded] begin[:]
<ast.Yield object at 0x7da20c6c58a0>
if name[decode_content] begin[:]
variable[decoded] assign[=] call[name[self]._flush_decoder, parameter[]]
if name[decoded] begin[:]
<ast.Yield object at 0x7da20c6c6320>
while constant[True] begin[:]
variable[line] assign[=] call[name[self]._fp.fp.readline, parameter[]]
if <ast.UnaryOp object at 0x7da20c6c7010> begin[:]
break
if compare[name[line] equal[==] constant[b'\r\n']] begin[:]
break
if name[self]._original_response begin[:]
call[name[self]._original_response.close, parameter[]] | keyword[def] identifier[read_chunked] ( identifier[self] , identifier[amt] = keyword[None] , identifier[decode_content] = keyword[None] ):
literal[string]
identifier[self] . identifier[_init_decoder] ()
keyword[if] keyword[not] identifier[self] . identifier[chunked] :
keyword[raise] identifier[ResponseNotChunked] (
literal[string]
literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[supports_chunked_reads] ():
keyword[raise] identifier[BodyNotHttplibCompatible] (
literal[string]
literal[string] )
keyword[with] identifier[self] . identifier[_error_catcher] ():
keyword[if] identifier[self] . identifier[_original_response] keyword[and] identifier[is_response_to_head] ( identifier[self] . identifier[_original_response] ):
identifier[self] . identifier[_original_response] . identifier[close] ()
keyword[return]
keyword[if] identifier[self] . identifier[_fp] . identifier[fp] keyword[is] keyword[None] :
keyword[return]
keyword[while] keyword[True] :
identifier[self] . identifier[_update_chunk_length] ()
keyword[if] identifier[self] . identifier[chunk_left] == literal[int] :
keyword[break]
identifier[chunk] = identifier[self] . identifier[_handle_chunk] ( identifier[amt] )
identifier[decoded] = identifier[self] . identifier[_decode] ( identifier[chunk] , identifier[decode_content] = identifier[decode_content] ,
identifier[flush_decoder] = keyword[False] )
keyword[if] identifier[decoded] :
keyword[yield] identifier[decoded]
keyword[if] identifier[decode_content] :
identifier[decoded] = identifier[self] . identifier[_flush_decoder] ()
keyword[if] identifier[decoded] :
keyword[yield] identifier[decoded]
keyword[while] keyword[True] :
identifier[line] = identifier[self] . identifier[_fp] . identifier[fp] . identifier[readline] ()
keyword[if] keyword[not] identifier[line] :
keyword[break]
keyword[if] identifier[line] == literal[string] :
keyword[break]
keyword[if] identifier[self] . identifier[_original_response] :
identifier[self] . identifier[_original_response] . identifier[close] () | def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked("Response is not chunked. Header 'transfer-encoding: chunked' is missing.") # depends on [control=['if'], data=[]]
if not self.supports_chunked_reads():
        raise BodyNotHttplibCompatible('Body should be httplib.HTTPResponse like. It should have an fp attribute which returns raw chunks.') # depends on [control=['if'], data=[]]
with self._error_catcher():
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return # depends on [control=['if'], data=[]]
# If a response is already read and closed
# then return immediately.
if self._fp.fp is None:
return # depends on [control=['if'], data=[]]
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break # depends on [control=['if'], data=[]]
chunk = self._handle_chunk(amt)
decoded = self._decode(chunk, decode_content=decode_content, flush_decoder=False)
if decoded:
yield decoded # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break # depends on [control=['if'], data=[]]
if line == b'\r\n':
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
# We read everything; close the "file".
if self._original_response:
self._original_response.close() # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]] |
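
Callers normally consume read_chunked by iterating it; a sketch against urllib3, assuming the server actually responds with Transfer-Encoding: chunked (otherwise ResponseNotChunked is raised as shown above):

    import urllib3

    http = urllib3.PoolManager()
    # preload_content=False leaves the body unread so it can be streamed here.
    r = http.request('GET', 'https://example.com/stream', preload_content=False)
    for chunk in r.read_chunked(decode_content=True):
        print(len(chunk), 'bytes received')
    r.release_conn()
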
def community_topic_subscription_create(self, topic_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#create-topic-subscription"
api_path = "/api/v2/community/topics/{topic_id}/subscriptions.json"
api_path = api_path.format(topic_id=topic_id)
return self.call(api_path, method="POST", data=data, **kwargs) | def function[community_topic_subscription_create, parameter[self, topic_id, data]]:
constant[https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#create-topic-subscription]
variable[api_path] assign[=] constant[/api/v2/community/topics/{topic_id}/subscriptions.json]
variable[api_path] assign[=] call[name[api_path].format, parameter[]]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[community_topic_subscription_create] ( identifier[self] , identifier[topic_id] , identifier[data] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
identifier[api_path] = identifier[api_path] . identifier[format] ( identifier[topic_id] = identifier[topic_id] )
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] , identifier[data] = identifier[data] ,** identifier[kwargs] ) | def community_topic_subscription_create(self, topic_id, data, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#create-topic-subscription"""
api_path = '/api/v2/community/topics/{topic_id}/subscriptions.json'
api_path = api_path.format(topic_id=topic_id)
return self.call(api_path, method='POST', data=data, **kwargs) |
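
A usage sketch in the zdesk client style; the constructor arguments and the payload shape are assumptions (the exact payload is documented at the Zendesk URL in the docstring):

    from zdesk import Zendesk

    client = Zendesk('https://example.zendesk.com', 'agent@example.com',
                     'API_TOKEN', True)  # True -> treat the secret as an API token
    payload = {'subscription': {'user_id': 12345}}
    result = client.community_topic_subscription_create(topic_id=67890, data=payload)
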
def _from_dict(cls, _dict):
"""Initialize a RuntimeIntent object from a json dictionary."""
args = {}
xtra = _dict.copy()
if 'intent' in _dict:
args['intent'] = _dict.get('intent')
del xtra['intent']
else:
raise ValueError(
'Required property \'intent\' not present in RuntimeIntent JSON'
)
if 'confidence' in _dict:
args['confidence'] = _dict.get('confidence')
del xtra['confidence']
else:
raise ValueError(
'Required property \'confidence\' not present in RuntimeIntent JSON'
)
args.update(xtra)
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a RuntimeIntent object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
variable[xtra] assign[=] call[name[_dict].copy, parameter[]]
if compare[constant[intent] in name[_dict]] begin[:]
call[name[args]][constant[intent]] assign[=] call[name[_dict].get, parameter[constant[intent]]]
<ast.Delete object at 0x7da20c76db10>
if compare[constant[confidence] in name[_dict]] begin[:]
call[name[args]][constant[confidence]] assign[=] call[name[_dict].get, parameter[constant[confidence]]]
<ast.Delete object at 0x7da20c76e7d0>
call[name[args].update, parameter[name[xtra]]]
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
identifier[xtra] = identifier[_dict] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[del] identifier[xtra] [ literal[string] ]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[del] identifier[xtra] [ literal[string] ]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
identifier[args] . identifier[update] ( identifier[xtra] )
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a RuntimeIntent object from a json dictionary."""
args = {}
xtra = _dict.copy()
if 'intent' in _dict:
args['intent'] = _dict.get('intent')
del xtra['intent'] # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'intent' not present in RuntimeIntent JSON")
if 'confidence' in _dict:
args['confidence'] = _dict.get('confidence')
del xtra['confidence'] # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'confidence' not present in RuntimeIntent JSON")
args.update(xtra)
return cls(**args) |
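
A quick sketch of the classmethod's contract, assuming RuntimeIntent accepts intent/confidence keyword arguments; note that extra keys survive via args.update(xtra):

    intent = RuntimeIntent._from_dict({'intent': 'greeting',
                                       'confidence': 0.98,
                                       'custom_field': 'kept'})  # extras pass through
    try:
        RuntimeIntent._from_dict({'intent': 'greeting'})  # no 'confidence'
    except ValueError as err:
        print(err)  # "Required property 'confidence' not present in RuntimeIntent JSON"
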
def reverse(self, point, language=None, sensor=False):
'''Reverse geocode a point.
        Please refer to the Google Maps Web API for the details of the parameters
'''
params = {
'latlng': point,
'sensor': str(sensor).lower()
}
if language:
params['language'] = language
if not self.premier:
url = self.get_url(params)
else:
url = self.get_signed_url(params)
return self.GetService_url(url) | def function[reverse, parameter[self, point, language, sensor]]:
constant[Reverse geocode a point.
Pls refer to the Google Maps Web API for the details of the parameters
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da2054a6710>, <ast.Constant object at 0x7da2054a4b80>], [<ast.Name object at 0x7da2054a5720>, <ast.Call object at 0x7da2054a4f40>]]
if name[language] begin[:]
call[name[params]][constant[language]] assign[=] name[language]
if <ast.UnaryOp object at 0x7da2054a7580> begin[:]
variable[url] assign[=] call[name[self].get_url, parameter[name[params]]]
return[call[name[self].GetService_url, parameter[name[url]]]] | keyword[def] identifier[reverse] ( identifier[self] , identifier[point] , identifier[language] = keyword[None] , identifier[sensor] = keyword[False] ):
literal[string]
identifier[params] ={
literal[string] : identifier[point] ,
literal[string] : identifier[str] ( identifier[sensor] ). identifier[lower] ()
}
keyword[if] identifier[language] :
identifier[params] [ literal[string] ]= identifier[language]
keyword[if] keyword[not] identifier[self] . identifier[premier] :
identifier[url] = identifier[self] . identifier[get_url] ( identifier[params] )
keyword[else] :
identifier[url] = identifier[self] . identifier[get_signed_url] ( identifier[params] )
keyword[return] identifier[self] . identifier[GetService_url] ( identifier[url] ) | def reverse(self, point, language=None, sensor=False):
"""Reverse geocode a point.
    Please refer to the Google Maps Web API for the details of the parameters
"""
params = {'latlng': point, 'sensor': str(sensor).lower()}
if language:
params['language'] = language # depends on [control=['if'], data=[]]
if not self.premier:
url = self.get_url(params) # depends on [control=['if'], data=[]]
else:
url = self.get_signed_url(params)
return self.GetService_url(url) |
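
How a call might look, assuming the enclosing class (named Geocoder here purely for illustration) is constructed with whatever credentials self.premier and the URL helpers expect, and that point is a 'lat,lng' string as the Google API requires:

    geocoder = Geocoder(...)  # hypothetical construction; see the class definition
    result = geocoder.reverse('52.5200,13.4050', language='en')
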
def private_config_content(self, private_config):
"""
Update the private config
:param private_config: content of the private configuration file
"""
try:
private_config_path = os.path.join(self.working_dir, "private-config.cfg")
if private_config is None:
private_config = ''
# We disallow erasing the private config file
if len(private_config) == 0 and os.path.exists(private_config_path):
return
with open(private_config_path, 'w+', encoding='utf-8') as f:
if len(private_config) == 0:
f.write('')
else:
private_config = private_config.replace("%h", self._name)
f.write(private_config)
except OSError as e:
raise IOUError("Can't write private-config file '{}': {}".format(private_config_path, e)) | def function[private_config_content, parameter[self, private_config]]:
constant[
Update the private config
:param private_config: content of the private configuration file
]
<ast.Try object at 0x7da2044c2410> | keyword[def] identifier[private_config_content] ( identifier[self] , identifier[private_config] ):
literal[string]
keyword[try] :
identifier[private_config_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[working_dir] , literal[string] )
keyword[if] identifier[private_config] keyword[is] keyword[None] :
identifier[private_config] = literal[string]
keyword[if] identifier[len] ( identifier[private_config] )== literal[int] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[private_config_path] ):
keyword[return]
keyword[with] identifier[open] ( identifier[private_config_path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
keyword[if] identifier[len] ( identifier[private_config] )== literal[int] :
identifier[f] . identifier[write] ( literal[string] )
keyword[else] :
identifier[private_config] = identifier[private_config] . identifier[replace] ( literal[string] , identifier[self] . identifier[_name] )
identifier[f] . identifier[write] ( identifier[private_config] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
keyword[raise] identifier[IOUError] ( literal[string] . identifier[format] ( identifier[private_config_path] , identifier[e] )) | def private_config_content(self, private_config):
"""
Update the private config
:param private_config: content of the private configuration file
"""
try:
private_config_path = os.path.join(self.working_dir, 'private-config.cfg')
if private_config is None:
private_config = '' # depends on [control=['if'], data=['private_config']]
# We disallow erasing the private config file
if len(private_config) == 0 and os.path.exists(private_config_path):
return # depends on [control=['if'], data=[]]
with open(private_config_path, 'w+', encoding='utf-8') as f:
if len(private_config) == 0:
f.write('') # depends on [control=['if'], data=[]]
else:
private_config = private_config.replace('%h', self._name)
f.write(private_config) # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except OSError as e:
raise IOUError("Can't write private-config file '{}': {}".format(private_config_path, e)) # depends on [control=['except'], data=['e']] |
def ui_clear_clicked_image(self, value):
"""
Setter for **self.__ui_clear_clicked_image** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format(
"ui_clear_clicked_image", value)
            assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exist!".format(
"ui_clear_clicked_image", value)
self.__ui_clear_clicked_image = value | def function[ui_clear_clicked_image, parameter[self, value]]:
constant[
Setter for **self.__ui_clear_clicked_image** attribute.
:param value: Attribute value.
:type value: unicode
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[unicode]]]
assert[call[name[os].path.exists, parameter[name[value]]]]
name[self].__ui_clear_clicked_image assign[=] name[value] | keyword[def] identifier[ui_clear_clicked_image] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[unicode] , literal[string] . identifier[format] (
literal[string] , identifier[value] )
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[value] ), literal[string] . identifier[format] (
literal[string] , identifier[value] )
identifier[self] . identifier[__ui_clear_clicked_image] = identifier[value] | def ui_clear_clicked_image(self, value):
"""
Setter for **self.__ui_clear_clicked_image** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format('ui_clear_clicked_image', value)
        assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exist!".format('ui_clear_clicked_image', value) # depends on [control=['if'], data=['value']]
self.__ui_clear_clicked_image = value |
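
Again assuming a property setter around this method, valid assignments are None or a unicode path to an image that exists on disk:

    widget.ui_clear_clicked_image = None                     # allowed: unsets the image
    widget.ui_clear_clicked_image = u'/path/to/clicked.png'  # must exist on disk
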
def visit_module(self, node):
"""
        An interface that will be called when visiting a module.
@param node: The module node to check.
"""
recorder = PyCodeStyleWarningRecorder(node.file)
self._outputMessages(recorder.warnings, node) | def function[visit_module, parameter[self, node]]:
constant[
A interface will be called when visiting a module.
@param node: The module node to check.
]
variable[recorder] assign[=] call[name[PyCodeStyleWarningRecorder], parameter[name[node].file]]
call[name[self]._outputMessages, parameter[name[recorder].warnings, name[node]]] | keyword[def] identifier[visit_module] ( identifier[self] , identifier[node] ):
literal[string]
identifier[recorder] = identifier[PyCodeStyleWarningRecorder] ( identifier[node] . identifier[file] )
identifier[self] . identifier[_outputMessages] ( identifier[recorder] . identifier[warnings] , identifier[node] ) | def visit_module(self, node):
"""
    An interface that will be called when visiting a module.
@param node: The module node to check.
"""
recorder = PyCodeStyleWarningRecorder(node.file)
self._outputMessages(recorder.warnings, node) |
def contributor_director(**kwargs):
"""Define the expanded qualifier name."""
if kwargs.get('qualifier') in ETD_MS_CONTRIBUTOR_EXPANSION:
# Return the element object.
return ETD_MSContributor(
role=ETD_MS_CONTRIBUTOR_EXPANSION[kwargs.get('qualifier')],
**kwargs
)
else:
return None | def function[contributor_director, parameter[]]:
constant[Define the expanded qualifier name.]
if compare[call[name[kwargs].get, parameter[constant[qualifier]]] in name[ETD_MS_CONTRIBUTOR_EXPANSION]] begin[:]
return[call[name[ETD_MSContributor], parameter[]]] | keyword[def] identifier[contributor_director] (** identifier[kwargs] ):
literal[string]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ) keyword[in] identifier[ETD_MS_CONTRIBUTOR_EXPANSION] :
keyword[return] identifier[ETD_MSContributor] (
identifier[role] = identifier[ETD_MS_CONTRIBUTOR_EXPANSION] [ identifier[kwargs] . identifier[get] ( literal[string] )],
** identifier[kwargs]
)
keyword[else] :
keyword[return] keyword[None] | def contributor_director(**kwargs):
"""Define the expanded qualifier name."""
if kwargs.get('qualifier') in ETD_MS_CONTRIBUTOR_EXPANSION:
# Return the element object.
return ETD_MSContributor(role=ETD_MS_CONTRIBUTOR_EXPANSION[kwargs.get('qualifier')], **kwargs) # depends on [control=['if'], data=['ETD_MS_CONTRIBUTOR_EXPANSION']]
else:
return None |
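
The branching above means unknown qualifiers yield None; a sketch, where 'cha' standing for a chair role is only a guess at the expansion table's contents:

    contributor_director(qualifier='unknown', content='Doe, Jane')  # -> None
    contributor_director(qualifier='cha', content='Doe, Jane')      # -> ETD_MSContributor with the expanded role
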
def _call(self, method, params=None, request_id=None):
""" Calls the JSON-RPC endpoint. """
params = params or []
        # Determines which 'id' value to use and increments the counter associated with the current
# client instance if applicable.
rid = request_id or self._id_counter
if request_id is None:
self._id_counter += 1
# Prepares the payload and the headers that will be used to forge the request.
payload = {'jsonrpc': '2.0', 'method': method, 'params': params, 'id': rid}
headers = {'Content-Type': 'application/json'}
scheme = 'https' if self.tls else 'http'
url = '{}://{}:{}'.format(scheme, self.host, self.port)
# Calls the JSON-RPC endpoint!
try:
response = self.session.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()
except HTTPError:
raise TransportError(
'Got unsuccessful response from server (status code: {})'.format(
response.status_code),
response=response)
# Ensures the response body can be deserialized to JSON.
try:
response_data = response.json()
except ValueError as e:
raise ProtocolError(
'Unable to deserialize response body: {}'.format(e), response=response)
# Properly handles potential errors.
if response_data.get('error'):
code = response_data['error'].get('code', '')
message = response_data['error'].get('message', '')
raise ProtocolError(
'Error[{}] {}'.format(code, message), response=response, data=response_data)
elif 'result' not in response_data:
raise ProtocolError(
'Response is empty (result field is missing)', response=response,
data=response_data)
return response_data['result'] | def function[_call, parameter[self, method, params, request_id]]:
constant[ Calls the JSON-RPC endpoint. ]
variable[params] assign[=] <ast.BoolOp object at 0x7da1b0217430>
variable[rid] assign[=] <ast.BoolOp object at 0x7da1b0217a60>
if compare[name[request_id] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0214fd0>
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b0217af0>, <ast.Constant object at 0x7da1b02146d0>, <ast.Constant object at 0x7da1b0214a00>, <ast.Constant object at 0x7da1b02149d0>], [<ast.Constant object at 0x7da1b02178b0>, <ast.Name object at 0x7da1b0217a90>, <ast.Name object at 0x7da1b0217df0>, <ast.Name object at 0x7da1b0217dc0>]]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b0217820>], [<ast.Constant object at 0x7da1b0214a90>]]
variable[scheme] assign[=] <ast.IfExp object at 0x7da1b0214940>
variable[url] assign[=] call[constant[{}://{}:{}].format, parameter[name[scheme], name[self].host, name[self].port]]
<ast.Try object at 0x7da1b0215e40>
<ast.Try object at 0x7da1b0215a20>
if call[name[response_data].get, parameter[constant[error]]] begin[:]
variable[code] assign[=] call[call[name[response_data]][constant[error]].get, parameter[constant[code], constant[]]]
variable[message] assign[=] call[call[name[response_data]][constant[error]].get, parameter[constant[message], constant[]]]
<ast.Raise object at 0x7da1b02159c0>
return[call[name[response_data]][constant[result]]] | keyword[def] identifier[_call] ( identifier[self] , identifier[method] , identifier[params] = keyword[None] , identifier[request_id] = keyword[None] ):
literal[string]
identifier[params] = identifier[params] keyword[or] []
identifier[rid] = identifier[request_id] keyword[or] identifier[self] . identifier[_id_counter]
keyword[if] identifier[request_id] keyword[is] keyword[None] :
identifier[self] . identifier[_id_counter] += literal[int]
identifier[payload] ={ literal[string] : literal[string] , literal[string] : identifier[method] , literal[string] : identifier[params] , literal[string] : identifier[rid] }
identifier[headers] ={ literal[string] : literal[string] }
identifier[scheme] = literal[string] keyword[if] identifier[self] . identifier[tls] keyword[else] literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[scheme] , identifier[self] . identifier[host] , identifier[self] . identifier[port] )
keyword[try] :
identifier[response] = identifier[self] . identifier[session] . identifier[post] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[data] = identifier[json] . identifier[dumps] ( identifier[payload] ))
identifier[response] . identifier[raise_for_status] ()
keyword[except] identifier[HTTPError] :
keyword[raise] identifier[TransportError] (
literal[string] . identifier[format] (
identifier[response] . identifier[status_code] ),
identifier[response] = identifier[response] )
keyword[try] :
identifier[response_data] = identifier[response] . identifier[json] ()
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[raise] identifier[ProtocolError] (
literal[string] . identifier[format] ( identifier[e] ), identifier[response] = identifier[response] )
keyword[if] identifier[response_data] . identifier[get] ( literal[string] ):
identifier[code] = identifier[response_data] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] )
identifier[message] = identifier[response_data] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] )
keyword[raise] identifier[ProtocolError] (
literal[string] . identifier[format] ( identifier[code] , identifier[message] ), identifier[response] = identifier[response] , identifier[data] = identifier[response_data] )
keyword[elif] literal[string] keyword[not] keyword[in] identifier[response_data] :
keyword[raise] identifier[ProtocolError] (
literal[string] , identifier[response] = identifier[response] ,
identifier[data] = identifier[response_data] )
keyword[return] identifier[response_data] [ literal[string] ] | def _call(self, method, params=None, request_id=None):
""" Calls the JSON-RPC endpoint. """
params = params or []
# Determines which 'id' value to use and increment the counter associated with the current
# client instance if applicable.
rid = request_id or self._id_counter
if request_id is None:
self._id_counter += 1 # depends on [control=['if'], data=[]]
# Prepares the payload and the headers that will be used to forge the request.
payload = {'jsonrpc': '2.0', 'method': method, 'params': params, 'id': rid}
headers = {'Content-Type': 'application/json'}
scheme = 'https' if self.tls else 'http'
url = '{}://{}:{}'.format(scheme, self.host, self.port)
# Calls the JSON-RPC endpoint!
try:
response = self.session.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status() # depends on [control=['try'], data=[]]
except HTTPError:
raise TransportError('Got unsuccessful response from server (status code: {})'.format(response.status_code), response=response) # depends on [control=['except'], data=[]]
# Ensures the response body can be deserialized to JSON.
try:
response_data = response.json() # depends on [control=['try'], data=[]]
except ValueError as e:
raise ProtocolError('Unable to deserialize response body: {}'.format(e), response=response) # depends on [control=['except'], data=['e']]
# Properly handles potential errors.
if response_data.get('error'):
code = response_data['error'].get('code', '')
message = response_data['error'].get('message', '')
raise ProtocolError('Error[{}] {}'.format(code, message), response=response, data=response_data) # depends on [control=['if'], data=[]]
elif 'result' not in response_data:
raise ProtocolError('Response is empty (result field is missing)', response=response, data=response_data) # depends on [control=['if'], data=['response_data']]
return response_data['result'] |
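
A sketch of calling the transport, assuming the enclosing client class (RPCClient is a hypothetical name) takes host/port/tls and owns a requests session; the method names below are bitcoind-style RPCs used purely as examples:

    client = RPCClient(host='127.0.0.1', port=8332, tls=False)
    height = client._call('getblockcount')                         # id auto-increments
    balance = client._call('getbalance', ['*', 6], request_id=42)  # explicit id
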
def offset_gaussian(data):
"""Fit a gaussian model to `data` and return its center"""
nbins = 2 * int(np.ceil(np.sqrt(data.size)))
mind, maxd = data.min(), data.max()
drange = (mind - (maxd - mind) / 2, maxd + (maxd - mind) / 2)
histo = np.histogram(data, nbins, density=True, range=drange)
dx = abs(histo[1][1] - histo[1][2]) / 2
hx = histo[1][1:] - dx
hy = histo[0]
# fit gaussian
gauss = lmfit.models.GaussianModel()
pars = gauss.guess(hy, x=hx)
out = gauss.fit(hy, pars, x=hx)
return out.params["center"] | def function[offset_gaussian, parameter[data]]:
constant[Fit a gaussian model to `data` and return its center]
variable[nbins] assign[=] binary_operation[constant[2] * call[name[int], parameter[call[name[np].ceil, parameter[call[name[np].sqrt, parameter[name[data].size]]]]]]]
<ast.Tuple object at 0x7da1b11979d0> assign[=] tuple[[<ast.Call object at 0x7da1b1196890>, <ast.Call object at 0x7da1b1197bb0>]]
variable[drange] assign[=] tuple[[<ast.BinOp object at 0x7da1b1196ce0>, <ast.BinOp object at 0x7da1b1196ef0>]]
variable[histo] assign[=] call[name[np].histogram, parameter[name[data], name[nbins]]]
variable[dx] assign[=] binary_operation[call[name[abs], parameter[binary_operation[call[call[name[histo]][constant[1]]][constant[1]] - call[call[name[histo]][constant[1]]][constant[2]]]]] / constant[2]]
variable[hx] assign[=] binary_operation[call[call[name[histo]][constant[1]]][<ast.Slice object at 0x7da1b1197940>] - name[dx]]
variable[hy] assign[=] call[name[histo]][constant[0]]
variable[gauss] assign[=] call[name[lmfit].models.GaussianModel, parameter[]]
variable[pars] assign[=] call[name[gauss].guess, parameter[name[hy]]]
variable[out] assign[=] call[name[gauss].fit, parameter[name[hy], name[pars]]]
return[call[name[out].params][constant[center]]] | keyword[def] identifier[offset_gaussian] ( identifier[data] ):
literal[string]
identifier[nbins] = literal[int] * identifier[int] ( identifier[np] . identifier[ceil] ( identifier[np] . identifier[sqrt] ( identifier[data] . identifier[size] )))
identifier[mind] , identifier[maxd] = identifier[data] . identifier[min] (), identifier[data] . identifier[max] ()
identifier[drange] =( identifier[mind] -( identifier[maxd] - identifier[mind] )/ literal[int] , identifier[maxd] +( identifier[maxd] - identifier[mind] )/ literal[int] )
identifier[histo] = identifier[np] . identifier[histogram] ( identifier[data] , identifier[nbins] , identifier[density] = keyword[True] , identifier[range] = identifier[drange] )
identifier[dx] = identifier[abs] ( identifier[histo] [ literal[int] ][ literal[int] ]- identifier[histo] [ literal[int] ][ literal[int] ])/ literal[int]
identifier[hx] = identifier[histo] [ literal[int] ][ literal[int] :]- identifier[dx]
identifier[hy] = identifier[histo] [ literal[int] ]
identifier[gauss] = identifier[lmfit] . identifier[models] . identifier[GaussianModel] ()
identifier[pars] = identifier[gauss] . identifier[guess] ( identifier[hy] , identifier[x] = identifier[hx] )
identifier[out] = identifier[gauss] . identifier[fit] ( identifier[hy] , identifier[pars] , identifier[x] = identifier[hx] )
keyword[return] identifier[out] . identifier[params] [ literal[string] ] | def offset_gaussian(data):
"""Fit a gaussian model to `data` and return its center"""
nbins = 2 * int(np.ceil(np.sqrt(data.size)))
(mind, maxd) = (data.min(), data.max())
drange = (mind - (maxd - mind) / 2, maxd + (maxd - mind) / 2)
histo = np.histogram(data, nbins, density=True, range=drange)
dx = abs(histo[1][1] - histo[1][2]) / 2
hx = histo[1][1:] - dx
hy = histo[0]
# fit gaussian
gauss = lmfit.models.GaussianModel()
pars = gauss.guess(hy, x=hx)
out = gauss.fit(hy, pars, x=hx)
return out.params['center'] |
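
A self-contained check of the estimator; note the return value is an lmfit Parameter, so .value yields the float:

    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.normal(loc=3.2, scale=0.5, size=10000)
    center = offset_gaussian(data)
    print(center.value)  # expected to land close to 3.2
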
def load_bulk(cls, bulk_data, parent=None, keep_ids=False):
"""Loads a list/dictionary structure to the tree."""
cls = get_result_class(cls)
# tree, iterative preorder
added = []
if parent:
parent_id = parent.pk
else:
parent_id = None
        # stack of nodes to analyze
stack = [(parent_id, node) for node in bulk_data[::-1]]
foreign_keys = cls.get_foreign_keys()
while stack:
parent_id, node_struct = stack.pop()
            # shallow copy of the data structure so it doesn't persist...
node_data = node_struct['data'].copy()
cls._process_foreign_keys(foreign_keys, node_data)
if keep_ids:
node_data['id'] = node_struct['id']
if parent_id:
parent = cls.objects.get(pk=parent_id)
node_obj = parent.add_child(**node_data)
else:
node_obj = cls.add_root(**node_data)
added.append(node_obj.pk)
if 'children' in node_struct:
# extending the stack with the current node as the parent of
# the new nodes
stack.extend([
(node_obj.pk, node)
for node in node_struct['children'][::-1]
])
return added | def function[load_bulk, parameter[cls, bulk_data, parent, keep_ids]]:
constant[Loads a list/dictionary structure to the tree.]
variable[cls] assign[=] call[name[get_result_class], parameter[name[cls]]]
variable[added] assign[=] list[[]]
if name[parent] begin[:]
variable[parent_id] assign[=] name[parent].pk
variable[stack] assign[=] <ast.ListComp object at 0x7da1b20b73a0>
variable[foreign_keys] assign[=] call[name[cls].get_foreign_keys, parameter[]]
while name[stack] begin[:]
<ast.Tuple object at 0x7da1b20b5180> assign[=] call[name[stack].pop, parameter[]]
variable[node_data] assign[=] call[call[name[node_struct]][constant[data]].copy, parameter[]]
call[name[cls]._process_foreign_keys, parameter[name[foreign_keys], name[node_data]]]
if name[keep_ids] begin[:]
call[name[node_data]][constant[id]] assign[=] call[name[node_struct]][constant[id]]
if name[parent_id] begin[:]
variable[parent] assign[=] call[name[cls].objects.get, parameter[]]
variable[node_obj] assign[=] call[name[parent].add_child, parameter[]]
call[name[added].append, parameter[name[node_obj].pk]]
if compare[constant[children] in name[node_struct]] begin[:]
call[name[stack].extend, parameter[<ast.ListComp object at 0x7da1b1d559c0>]]
return[name[added]] | keyword[def] identifier[load_bulk] ( identifier[cls] , identifier[bulk_data] , identifier[parent] = keyword[None] , identifier[keep_ids] = keyword[False] ):
literal[string]
identifier[cls] = identifier[get_result_class] ( identifier[cls] )
identifier[added] =[]
keyword[if] identifier[parent] :
identifier[parent_id] = identifier[parent] . identifier[pk]
keyword[else] :
identifier[parent_id] = keyword[None]
identifier[stack] =[( identifier[parent_id] , identifier[node] ) keyword[for] identifier[node] keyword[in] identifier[bulk_data] [::- literal[int] ]]
identifier[foreign_keys] = identifier[cls] . identifier[get_foreign_keys] ()
keyword[while] identifier[stack] :
identifier[parent_id] , identifier[node_struct] = identifier[stack] . identifier[pop] ()
identifier[node_data] = identifier[node_struct] [ literal[string] ]. identifier[copy] ()
identifier[cls] . identifier[_process_foreign_keys] ( identifier[foreign_keys] , identifier[node_data] )
keyword[if] identifier[keep_ids] :
identifier[node_data] [ literal[string] ]= identifier[node_struct] [ literal[string] ]
keyword[if] identifier[parent_id] :
identifier[parent] = identifier[cls] . identifier[objects] . identifier[get] ( identifier[pk] = identifier[parent_id] )
identifier[node_obj] = identifier[parent] . identifier[add_child] (** identifier[node_data] )
keyword[else] :
identifier[node_obj] = identifier[cls] . identifier[add_root] (** identifier[node_data] )
identifier[added] . identifier[append] ( identifier[node_obj] . identifier[pk] )
keyword[if] literal[string] keyword[in] identifier[node_struct] :
identifier[stack] . identifier[extend] ([
( identifier[node_obj] . identifier[pk] , identifier[node] )
keyword[for] identifier[node] keyword[in] identifier[node_struct] [ literal[string] ][::- literal[int] ]
])
keyword[return] identifier[added] | def load_bulk(cls, bulk_data, parent=None, keep_ids=False):
"""Loads a list/dictionary structure to the tree."""
cls = get_result_class(cls)
# tree, iterative preorder
added = []
if parent:
parent_id = parent.pk # depends on [control=['if'], data=[]]
else:
parent_id = None
    # stack of nodes to analyze
stack = [(parent_id, node) for node in bulk_data[::-1]]
foreign_keys = cls.get_foreign_keys()
while stack:
(parent_id, node_struct) = stack.pop()
        # shallow copy of the data structure so it doesn't persist...
node_data = node_struct['data'].copy()
cls._process_foreign_keys(foreign_keys, node_data)
if keep_ids:
node_data['id'] = node_struct['id'] # depends on [control=['if'], data=[]]
if parent_id:
parent = cls.objects.get(pk=parent_id)
node_obj = parent.add_child(**node_data) # depends on [control=['if'], data=[]]
else:
node_obj = cls.add_root(**node_data)
added.append(node_obj.pk)
if 'children' in node_struct:
# extending the stack with the current node as the parent of
# the new nodes
stack.extend([(node_obj.pk, node) for node in node_struct['children'][::-1]]) # depends on [control=['if'], data=['node_struct']] # depends on [control=['while'], data=[]]
return added |
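
A sketch with a hypothetical treebeard node model named Category; the dict shape ('data' plus optional 'children') follows the stack-walking logic above:

    bulk = [
        {'data': {'name': 'root'},
         'children': [
             {'data': {'name': 'child-1'}},
             {'data': {'name': 'child-2'}},
         ]},
    ]
    new_pks = Category.load_bulk(bulk)  # primary keys of every node added, in preorder
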
def zlist(columns, items, print_columns=None,
text="", title="", width=DEFAULT_WIDTH,
height=ZLIST_HEIGHT, timeout=None):
"""
Display a list of values
    :param columns: a list of column names
:type columns: list of strings
:param items: a list of values
:type items: list of strings
:param print_columns: index of a column (return just the values from this column)
:type print_columns: int (None if all the columns)
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: A row of values from the table
:rtype: list
"""
dialog = ZList(columns, items, print_columns,
text, title, width, height, timeout)
dialog.run()
return dialog.response | def function[zlist, parameter[columns, items, print_columns, text, title, width, height, timeout]]:
constant[
Display a list of values
:param columns: a list of columns name
:type columns: list of strings
:param items: a list of values
:type items: list of strings
:param print_columns: index of a column (return just the values from this column)
:type print_columns: int (None if all the columns)
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: A row of values from the table
:rtype: list
]
variable[dialog] assign[=] call[name[ZList], parameter[name[columns], name[items], name[print_columns], name[text], name[title], name[width], name[height], name[timeout]]]
call[name[dialog].run, parameter[]]
return[name[dialog].response] | keyword[def] identifier[zlist] ( identifier[columns] , identifier[items] , identifier[print_columns] = keyword[None] ,
identifier[text] = literal[string] , identifier[title] = literal[string] , identifier[width] = identifier[DEFAULT_WIDTH] ,
identifier[height] = identifier[ZLIST_HEIGHT] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[dialog] = identifier[ZList] ( identifier[columns] , identifier[items] , identifier[print_columns] ,
identifier[text] , identifier[title] , identifier[width] , identifier[height] , identifier[timeout] )
identifier[dialog] . identifier[run] ()
keyword[return] identifier[dialog] . identifier[response] | def zlist(columns, items, print_columns=None, text='', title='', width=DEFAULT_WIDTH, height=ZLIST_HEIGHT, timeout=None):
"""
Display a list of values
    :param columns: a list of column names
:type columns: list of strings
:param items: a list of values
:type items: list of strings
:param print_columns: index of a column (return just the values from this column)
:type print_columns: int (None if all the columns)
:param text: text inside the window
:type text: str
:param title: title of the window
:type title: str
:param width: window width
:type width: int
:param height: window height
:type height: int
:param timeout: close the window after n seconds
:type timeout: int
:return: A row of values from the table
:rtype: list
"""
dialog = ZList(columns, items, print_columns, text, title, width, height, timeout)
dialog.run()
return dialog.response |
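
A usage sketch; items are the cell values flattened row by row, matching the zenity-style list dialog this wraps:

    row = zlist(columns=['Name', 'Size'],
                items=['kernel', '7.1M', 'initrd', '22M'],
                title='Pick a file',
                timeout=30)
    print(row)  # the selected row's values (dialog.response)
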
def cloneQuery(self, limit=_noItem, sort=_noItem):
"""
Clone the original query which this distinct query wraps, and return a new
wrapper around that clone.
"""
newq = self.query.cloneQuery(limit=limit, sort=sort)
return self.__class__(newq) | def function[cloneQuery, parameter[self, limit, sort]]:
constant[
Clone the original query which this distinct query wraps, and return a new
wrapper around that clone.
]
variable[newq] assign[=] call[name[self].query.cloneQuery, parameter[]]
return[call[name[self].__class__, parameter[name[newq]]]] | keyword[def] identifier[cloneQuery] ( identifier[self] , identifier[limit] = identifier[_noItem] , identifier[sort] = identifier[_noItem] ):
literal[string]
identifier[newq] = identifier[self] . identifier[query] . identifier[cloneQuery] ( identifier[limit] = identifier[limit] , identifier[sort] = identifier[sort] )
keyword[return] identifier[self] . identifier[__class__] ( identifier[newq] ) | def cloneQuery(self, limit=_noItem, sort=_noItem):
"""
Clone the original query which this distinct query wraps, and return a new
wrapper around that clone.
"""
newq = self.query.cloneQuery(limit=limit, sort=sort)
return self.__class__(newq) |
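
Given the wrapper pattern above, cloning preserves distinctness while letting limit/sort vary:

    capped = distinct_query.cloneQuery(limit=10)  # new distinct wrapper over a fresh clone
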
def spawn_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options):
"""
This module will spawn a mutect job for each chromosome on the DNA bams.
ARGUMENTS
1. tumor_bam: Dict of input tumor WGS/WSQ bam + bai
tumor_bam
|- 'tumor_fix_pg_sorted.bam': <JSid>
+- 'tumor_fix_pg_sorted.bam.bai': <JSid>
2. normal_bam: Dict of input normal WGS/WSQ bam + bai
normal_bam
|- 'normal_fix_pg_sorted.bam': <JSid>
+- 'normal_fix_pg_sorted.bam.bai': <JSid>
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. mutect_options: Dict of parameters specific to mutect
mutect_options
|- 'dbsnp_vcf': <JSid for dnsnp vcf file>
|- 'dbsnp_idx': <JSid for dnsnp vcf index file>
|- 'cosmic_vcf': <JSid for cosmic vcf file>
|- 'cosmic_idx': <JSid for cosmic vcf index file>
+- 'genome_fasta': <JSid for genome fasta file>
RETURN VALUES
1. perchrom_mutect: Dict of results of mutect per chromosome
perchrom_mutect
|- 'chr1'
| +- 'mutect_chr1.vcf': <JSid>
| +- 'mutect_chr1.out': <JSid>
|- 'chr2'
| |- 'mutect_chr2.vcf': <JSid>
| +- 'mutect_chr2.out': <JSid>
etc...
This module corresponds to node 11 on the tree
"""
job.fileStore.logToMaster('Running spawn_mutect on %s' % univ_options['patient'])
# Make a dict object to hold the return values for each of the chromosome
# jobs. Then run mutect on each chromosome.
    chromosomes = [''.join(['chr', str(x)]) for x in list(range(1, 23)) + ['X', 'Y']]
perchrom_mutect = defaultdict()
for chrom in chromosomes:
perchrom_mutect[chrom] = job.addChildJobFn(run_mutect, tumor_bam, normal_bam, univ_options,
mutect_options, chrom, disk='60G',
memory='3.5G').rv()
return perchrom_mutect | def function[spawn_mutect, parameter[job, tumor_bam, normal_bam, univ_options, mutect_options]]:
constant[
This module will spawn a mutect job for each chromosome on the DNA bams.
ARGUMENTS
1. tumor_bam: Dict of input tumor WGS/WSQ bam + bai
tumor_bam
|- 'tumor_fix_pg_sorted.bam': <JSid>
+- 'tumor_fix_pg_sorted.bam.bai': <JSid>
2. normal_bam: Dict of input normal WGS/WSQ bam + bai
normal_bam
|- 'normal_fix_pg_sorted.bam': <JSid>
+- 'normal_fix_pg_sorted.bam.bai': <JSid>
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. mutect_options: Dict of parameters specific to mutect
mutect_options
|- 'dbsnp_vcf': <JSid for dnsnp vcf file>
|- 'dbsnp_idx': <JSid for dnsnp vcf index file>
|- 'cosmic_vcf': <JSid for cosmic vcf file>
|- 'cosmic_idx': <JSid for cosmic vcf index file>
+- 'genome_fasta': <JSid for genome fasta file>
RETURN VALUES
1. perchrom_mutect: Dict of results of mutect per chromosome
perchrom_mutect
|- 'chr1'
| +- 'mutect_chr1.vcf': <JSid>
| +- 'mutect_chr1.out': <JSid>
|- 'chr2'
| |- 'mutect_chr2.vcf': <JSid>
| +- 'mutect_chr2.out': <JSid>
etc...
This module corresponds to node 11 on the tree
]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Running spawn_mutect on %s] <ast.Mod object at 0x7da2590d6920> call[name[univ_options]][constant[patient]]]]]
variable[chromosomes] assign[=] <ast.ListComp object at 0x7da18dc07070>
variable[perchrom_mutect] assign[=] call[name[defaultdict], parameter[]]
for taget[name[chrom]] in starred[name[chromosomes]] begin[:]
call[name[perchrom_mutect]][name[chrom]] assign[=] call[call[name[job].addChildJobFn, parameter[name[run_mutect], name[tumor_bam], name[normal_bam], name[univ_options], name[mutect_options], name[chrom]]].rv, parameter[]]
return[name[perchrom_mutect]] | keyword[def] identifier[spawn_mutect] ( identifier[job] , identifier[tumor_bam] , identifier[normal_bam] , identifier[univ_options] , identifier[mutect_options] ):
literal[string]
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string] % identifier[univ_options] [ literal[string] ])
identifier[chromosomes] =[ literal[string] . identifier[join] ([ literal[string] , identifier[str] ( identifier[x] )]) keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )+[ literal[string] , literal[string] ]]
identifier[perchrom_mutect] = identifier[defaultdict] ()
keyword[for] identifier[chrom] keyword[in] identifier[chromosomes] :
identifier[perchrom_mutect] [ identifier[chrom] ]= identifier[job] . identifier[addChildJobFn] ( identifier[run_mutect] , identifier[tumor_bam] , identifier[normal_bam] , identifier[univ_options] ,
identifier[mutect_options] , identifier[chrom] , identifier[disk] = literal[string] ,
identifier[memory] = literal[string] ). identifier[rv] ()
keyword[return] identifier[perchrom_mutect] | def spawn_mutect(job, tumor_bam, normal_bam, univ_options, mutect_options):
"""
This module will spawn a mutect job for each chromosome on the DNA bams.
ARGUMENTS
1. tumor_bam: Dict of input tumor WGS/WSQ bam + bai
tumor_bam
|- 'tumor_fix_pg_sorted.bam': <JSid>
+- 'tumor_fix_pg_sorted.bam.bai': <JSid>
2. normal_bam: Dict of input normal WGS/WSQ bam + bai
normal_bam
|- 'normal_fix_pg_sorted.bam': <JSid>
+- 'normal_fix_pg_sorted.bam.bai': <JSid>
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. mutect_options: Dict of parameters specific to mutect
mutect_options
|- 'dbsnp_vcf': <JSid for dnsnp vcf file>
|- 'dbsnp_idx': <JSid for dnsnp vcf index file>
|- 'cosmic_vcf': <JSid for cosmic vcf file>
|- 'cosmic_idx': <JSid for cosmic vcf index file>
+- 'genome_fasta': <JSid for genome fasta file>
RETURN VALUES
1. perchrom_mutect: Dict of results of mutect per chromosome
perchrom_mutect
|- 'chr1'
| +- 'mutect_chr1.vcf': <JSid>
| +- 'mutect_chr1.out': <JSid>
|- 'chr2'
| |- 'mutect_chr2.vcf': <JSid>
| +- 'mutect_chr2.out': <JSid>
etc...
This module corresponds to node 11 on the tree
"""
job.fileStore.logToMaster('Running spawn_mutect on %s' % univ_options['patient'])
# Make a dict object to hold the return values for each of the chromosome
# jobs. Then run mutect on each chromosome.
chromosomes = [''.join(['chr', str(x)]) for x in range(1, 23) + ['X', 'Y']]
perchrom_mutect = defaultdict()
for chrom in chromosomes:
perchrom_mutect[chrom] = job.addChildJobFn(run_mutect, tumor_bam, normal_bam, univ_options, mutect_options, chrom, disk='60G', memory='3.5G').rv() # depends on [control=['for'], data=['chrom']]
return perchrom_mutect |
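A note on the fan-out above: `range(1, 23) + ['X', 'Y']` only works on Python 2, where range() returns a list. A minimal version-agnostic sketch of the same chromosome keys:

# Version-agnostic construction of the per-chromosome keys used above.
chromosomes = ['chr%s' % x for x in list(range(1, 23)) + ['X', 'Y']]
# -> ['chr1', ..., 'chr22', 'chrX', 'chrY']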
def create_access_token_response(self, uri, http_method='GET', body=None,
headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an unauthorized request:
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
valid, processed_request = self.validate_access_token_request(
request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(
request.client_key,
request.resource_owner_key,
request)
return resp_headers, token, 200
else:
return {}, None, 401
except errors.OAuth1Error as e:
return resp_headers, e.urlencoded, e.status_code | def function[create_access_token_response, parameter[self, uri, http_method, body, headers, credentials]]:
constant[Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an unauthorized request:
>>> b
''
>>> s
401
]
variable[resp_headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b1798c70>], [<ast.Constant object at 0x7da1b1798c40>]]
<ast.Try object at 0x7da1b1799a50> | keyword[def] identifier[create_access_token_response] ( identifier[self] , identifier[uri] , identifier[http_method] = literal[string] , identifier[body] = keyword[None] ,
identifier[headers] = keyword[None] , identifier[credentials] = keyword[None] ):
literal[string]
identifier[resp_headers] ={ literal[string] : literal[string] }
keyword[try] :
identifier[request] = identifier[self] . identifier[_create_request] ( identifier[uri] , identifier[http_method] , identifier[body] , identifier[headers] )
identifier[valid] , identifier[processed_request] = identifier[self] . identifier[validate_access_token_request] (
identifier[request] )
keyword[if] identifier[valid] :
identifier[token] = identifier[self] . identifier[create_access_token] ( identifier[request] , identifier[credentials] keyword[or] {})
identifier[self] . identifier[request_validator] . identifier[invalidate_request_token] (
identifier[request] . identifier[client_key] ,
identifier[request] . identifier[resource_owner_key] ,
identifier[request] )
keyword[return] identifier[resp_headers] , identifier[token] , literal[int]
keyword[else] :
keyword[return] {}, keyword[None] , literal[int]
keyword[except] identifier[errors] . identifier[OAuth1Error] keyword[as] identifier[e] :
keyword[return] identifier[resp_headers] , identifier[e] . identifier[urlencoded] , identifier[e] . identifier[status_code] | def create_access_token_response(self, uri, http_method='GET', body=None, headers=None, credentials=None):
"""Create an access token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param credentials: A list of extra credentials to include in the token.
:returns: A tuple of 3 elements.
1. A dict of headers to set on the response.
2. The response body as a string.
3. The response status code as an integer.
An example of a valid request::
>>> from your_validator import your_validator
>>> from oauthlib.oauth1 import AccessTokenEndpoint
>>> endpoint = AccessTokenEndpoint(your_validator)
>>> h, b, s = endpoint.create_access_token_response(
... 'https://your.provider/access_token?foo=bar',
... headers={
... 'Authorization': 'OAuth oauth_token=234lsdkf....'
... },
... credentials={
... 'my_specific': 'argument',
... })
>>> h
{'Content-Type': 'application/x-www-form-urlencoded'}
>>> b
'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_authorized_realms=movies+pics&my_specific=argument'
>>> s
200
A response to an invalid request would have a different body and status::
>>> b
'error=invalid_request&description=missing+resource+owner+key'
>>> s
400
The same goes for an unauthorized request:
>>> b
''
>>> s
401
"""
resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
request = self._create_request(uri, http_method, body, headers)
(valid, processed_request) = self.validate_access_token_request(request)
if valid:
token = self.create_access_token(request, credentials or {})
self.request_validator.invalidate_request_token(request.client_key, request.resource_owner_key, request)
return (resp_headers, token, 200) # depends on [control=['if'], data=[]]
else:
return ({}, None, 401) # depends on [control=['try'], data=[]]
except errors.OAuth1Error as e:
return (resp_headers, e.urlencoded, e.status_code) # depends on [control=['except'], data=['e']] |
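For context, a hedged sketch of wiring this endpoint into a Flask view. `MyValidator` and its module are hypothetical; the real requirement is any oauthlib.oauth1.RequestValidator subclass. The (headers, body, status) triple returned above maps directly onto Flask's (body, status, headers) return convention:

from flask import request
from oauthlib.oauth1 import AccessTokenEndpoint
from myapp.validators import MyValidator  # hypothetical validator module

endpoint = AccessTokenEndpoint(MyValidator())

def access_token_view():
    # oauthlib returns (headers, body, status); Flask accepts (body, status, headers).
    headers, body, status = endpoint.create_access_token_response(
        request.url, http_method=request.method,
        body=request.get_data(as_text=True), headers=dict(request.headers))
    return body or '', status, headers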
def _get_elements(mol, label):
"""
Get the elements of the atoms in the specified order.
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers.
"""
elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]
return elements | def function[_get_elements, parameter[mol, label]]:
constant[
Get the elements of the atoms in the specified order.
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers.
]
variable[elements] assign[=] <ast.ListComp object at 0x7da18eb57d00>
return[name[elements]] | keyword[def] identifier[_get_elements] ( identifier[mol] , identifier[label] ):
literal[string]
identifier[elements] =[ identifier[int] ( identifier[mol] . identifier[GetAtom] ( identifier[i] ). identifier[GetAtomicNum] ()) keyword[for] identifier[i] keyword[in] identifier[label] ]
keyword[return] identifier[elements] | def _get_elements(mol, label):
"""
Get the elements of the atoms in the specified order.
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers.
"""
elements = [int(mol.GetAtom(i).GetAtomicNum()) for i in label]
return elements |
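A hedged usage sketch, assuming the Open Babel Python bindings are installed. Note that OBMol atom indices are 1-based, so `label` should hold 1-based indices:

import pybel  # on Open Babel 3.x: from openbabel import pybel

mol = pybel.readstring('smi', 'CCO').OBMol  # ethanol
print(_get_elements(mol, [1, 2, 3]))  # -> [6, 6, 8] (C, C, O)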
def write_manifest (self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
# The manifest must be UTF-8 encodable. See #303.
if sys.version_info >= (3,):
files = []
for file in self.filelist.files:
try:
file.encode("utf-8")
except UnicodeEncodeError:
log.warn("'%s' not UTF-8 encodable -- skipping" % file)
else:
files.append(file)
self.filelist.files = files
files = self.filelist.files
if os.sep!='/':
files = [f.replace(os.sep,'/') for f in files]
self.execute(write_file, (self.manifest, files),
"writing manifest file '%s'" % self.manifest) | def function[write_manifest, parameter[self]]:
constant[Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
]
if compare[name[sys].version_info greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da20e962920>]]] begin[:]
variable[files] assign[=] list[[]]
for taget[name[file]] in starred[name[self].filelist.files] begin[:]
<ast.Try object at 0x7da20e960be0>
name[self].filelist.files assign[=] name[files]
variable[files] assign[=] name[self].filelist.files
if compare[name[os].sep not_equal[!=] constant[/]] begin[:]
variable[files] assign[=] <ast.ListComp object at 0x7da2054a5f30>
call[name[self].execute, parameter[name[write_file], tuple[[<ast.Attribute object at 0x7da20e962110>, <ast.Name object at 0x7da20e962fe0>]], binary_operation[constant[writing manifest file '%s'] <ast.Mod object at 0x7da2590d6920> name[self].manifest]]] | keyword[def] identifier[write_manifest] ( identifier[self] ):
literal[string]
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] ,):
identifier[files] =[]
keyword[for] identifier[file] keyword[in] identifier[self] . identifier[filelist] . identifier[files] :
keyword[try] :
identifier[file] . identifier[encode] ( literal[string] )
keyword[except] identifier[UnicodeEncodeError] :
identifier[log] . identifier[warn] ( literal[string] % identifier[file] )
keyword[else] :
identifier[files] . identifier[append] ( identifier[file] )
identifier[self] . identifier[filelist] . identifier[files] = identifier[files]
identifier[files] = identifier[self] . identifier[filelist] . identifier[files]
keyword[if] identifier[os] . identifier[sep] != literal[string] :
identifier[files] =[ identifier[f] . identifier[replace] ( identifier[os] . identifier[sep] , literal[string] ) keyword[for] identifier[f] keyword[in] identifier[files] ]
identifier[self] . identifier[execute] ( identifier[write_file] ,( identifier[self] . identifier[manifest] , identifier[files] ),
literal[string] % identifier[self] . identifier[manifest] ) | def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
# The manifest must be UTF-8 encodable. See #303.
if sys.version_info >= (3,):
files = []
for file in self.filelist.files:
try:
file.encode('utf-8') # depends on [control=['try'], data=[]]
except UnicodeEncodeError:
log.warn("'%s' not UTF-8 encodable -- skipping" % file) # depends on [control=['except'], data=[]]
else:
files.append(file) # depends on [control=['for'], data=['file']]
self.filelist.files = files # depends on [control=['if'], data=[]]
files = self.filelist.files
if os.sep != '/':
files = [f.replace(os.sep, '/') for f in files] # depends on [control=['if'], data=[]]
self.execute(write_file, (self.manifest, files), "writing manifest file '%s'" % self.manifest) |
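The UTF-8 filter above is worth isolating; a standalone sketch of the same check (the function name is hypothetical, the idiom is the one used in write_manifest):

def drop_non_utf8(names):
    """Keep only names that can be encoded as UTF-8 (see setuptools #303)."""
    kept = []
    for name in names:
        try:
            name.encode('utf-8')
        except UnicodeEncodeError:
            continue  # skip names the manifest cannot represent
        kept.append(name)
    return kept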
def zipFile(source):
"""Compress file under zip mode
when compress failed with return source path
:param source: source file path
:return: zip file path
"""
# source = source.decode('UTF-8')
target = source[0:source.rindex(".")] + '.zip'
try:
with zipfile.ZipFile(target, 'w') as zip_file:
zip_file.write(source, source[source.startswith('/'):], zipfile.ZIP_DEFLATED)
zip_file.close()
__cps_rate__(source, target)
except IOError as e:
logger.error('Compress file[%s] with zip mode failed. Cause: %s', source, str(e))
target = source
return target | def function[zipFile, parameter[source]]:
constant[Compress a file in zip mode.
When compression fails, the source path is returned.
:param source: source file path
:return: zip file path
]
variable[target] assign[=] binary_operation[call[name[source]][<ast.Slice object at 0x7da18bcc85e0>] + constant[.zip]]
<ast.Try object at 0x7da18bccb8b0>
return[name[target]] | keyword[def] identifier[zipFile] ( identifier[source] ):
literal[string]
identifier[target] = identifier[source] [ literal[int] : identifier[source] . identifier[rindex] ( literal[string] )]+ literal[string]
keyword[try] :
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[target] , literal[string] ) keyword[as] identifier[zip_file] :
identifier[zip_file] . identifier[write] ( identifier[source] , identifier[source] [ identifier[source] . identifier[startswith] ( literal[string] ):], identifier[zipfile] . identifier[ZIP_DEFLATED] )
identifier[zip_file] . identifier[close] ()
identifier[__cps_rate__] ( identifier[source] , identifier[target] )
keyword[except] identifier[IOError] keyword[as] identifier[e] :
identifier[logger] . identifier[error] ( literal[string] , identifier[source] , identifier[str] ( identifier[e] ))
identifier[target] = identifier[source]
keyword[return] identifier[target] | def zipFile(source):
"""Compress file under zip mode
when compress failed with return source path
:param source: source file path
:return: zip file path
"""
# source = source.decode('UTF-8')
target = source[0:source.rindex('.')] + '.zip'
try:
with zipfile.ZipFile(target, 'w') as zip_file:
zip_file.write(source, source[source.startswith('/'):], zipfile.ZIP_DEFLATED)
zip_file.close()
__cps_rate__(source, target) # depends on [control=['with'], data=['zip_file']] # depends on [control=['try'], data=[]]
except IOError as e:
logger.error('Compress file[%s] with zip mode failed. Cause: %s', source, str(e)) # depends on [control=['except'], data=['e']]
target = source # depends on [control=['except'], data=['e']]
return target |
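Usage sketch (the path is a placeholder). Note the helper assumes the source name contains a dot, since rindex('.') raises ValueError otherwise:

target = zipFile('/tmp/report.txt')
print(target)  # '/tmp/report.zip' on success, '/tmp/report.txt' if the IOError path fired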
def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])):
data = _normalize_cwl_inputs(data)
toval_data = _get_validate(data)
toval_data = cwlutils.unpack_tarballs(toval_data, toval_data)
if toval_data:
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data["dirs"]["work"], "validate", sample, caller))
if isinstance(toval_data["vrn_file"], (list, tuple)):
raise NotImplementedError("Multiple input files for validation: %s" % toval_data["vrn_file"])
else:
vrn_file = os.path.abspath(toval_data["vrn_file"])
rm_file = normalize_input_path(toval_data["config"]["algorithm"]["validate"], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data["config"]["algorithm"].get("validate_regions"),
toval_data),
toval_data)
rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix="validateregions-",
bedprep_dir=utils.safe_makedir(os.path.join(base_dir, "bedprep")))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get("genome_build"),
base_dir, data)
rm_interval_file = (naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data),
data.get("genome_build"), base_dir, data)
if rm_interval_file else None)
vmethod = tz.get_in(["config", "algorithm", "validate_method"], data, "rtg")
# RTG can fail on totally empty files. Call everything in truth set as false negatives
if not vcfutils.vcf_has_variants(vrn_file):
eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, "fn")
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
# empty validation file, every call is a false positive
elif not vcfutils.vcf_has_variants(rm_file):
eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, "fp")
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod in ["rtg", "rtg-squash-ploidy"]:
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod)
eval_files = _annotate_validations(eval_files, toval_data)
data["validate"] = _rtg_add_summary_file(eval_files, base_dir, toval_data)
elif vmethod == "hap.py":
data["validate"] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data)
elif vmethod == "bcbio.variation":
data["validate"] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir,
sample, caller, toval_data)
return [[data]] | def function[compare_to_rm, parameter[data]]:
constant[Compare final variant calls against reference materials of known calls.
]
if <ast.BoolOp object at 0x7da1b1986800> begin[:]
variable[data] assign[=] call[name[_normalize_cwl_inputs], parameter[name[data]]]
variable[toval_data] assign[=] call[name[_get_validate], parameter[name[data]]]
variable[toval_data] assign[=] call[name[cwlutils].unpack_tarballs, parameter[name[toval_data], name[toval_data]]]
if name[toval_data] begin[:]
variable[caller] assign[=] call[name[_get_caller], parameter[name[toval_data]]]
variable[sample] assign[=] call[name[dd].get_sample_name, parameter[name[toval_data]]]
variable[base_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[call[call[name[toval_data]][constant[dirs]]][constant[work]], constant[validate], name[sample], name[caller]]]]]
if call[name[isinstance], parameter[call[name[toval_data]][constant[vrn_file]], tuple[[<ast.Name object at 0x7da1b1985480>, <ast.Name object at 0x7da1b1987160>]]]] begin[:]
<ast.Raise object at 0x7da1b1984190>
variable[rm_file] assign[=] call[name[normalize_input_path], parameter[call[call[call[name[toval_data]][constant[config]]][constant[algorithm]]][constant[validate]], name[toval_data]]]
variable[rm_interval_file] assign[=] call[name[_gunzip], parameter[call[name[normalize_input_path], parameter[call[call[call[name[toval_data]][constant[config]]][constant[algorithm]].get, parameter[constant[validate_regions]]], name[toval_data]]], name[toval_data]]]
variable[rm_interval_file] assign[=] call[name[bedutils].clean_file, parameter[name[rm_interval_file], name[toval_data]]]
variable[rm_file] assign[=] call[name[naming].handle_synonyms, parameter[name[rm_file], call[name[dd].get_ref_file, parameter[name[toval_data]]], call[name[data].get, parameter[constant[genome_build]]], name[base_dir], name[data]]]
variable[rm_interval_file] assign[=] <ast.IfExp object at 0x7da1b1985c90>
variable[vmethod] assign[=] call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da1b1987dc0>, <ast.Constant object at 0x7da1b1987fa0>, <ast.Constant object at 0x7da1b1984070>]], name[data], constant[rtg]]]
if <ast.UnaryOp object at 0x7da1b1987a60> begin[:]
variable[eval_files] assign[=] call[name[_setup_call_false], parameter[name[rm_file], name[rm_interval_file], name[base_dir], name[toval_data], constant[fn]]]
call[name[data]][constant[validate]] assign[=] call[name[_rtg_add_summary_file], parameter[name[eval_files], name[base_dir], name[toval_data]]]
return[list[[<ast.List object at 0x7da1b18bd5a0>]]] | keyword[def] identifier[compare_to_rm] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] ,( identifier[list] , identifier[tuple] )) keyword[and] identifier[cwlutils] . identifier[is_cwl_run] ( identifier[utils] . identifier[to_single_data] ( identifier[data] [ literal[int] ])):
identifier[data] = identifier[_normalize_cwl_inputs] ( identifier[data] )
identifier[toval_data] = identifier[_get_validate] ( identifier[data] )
identifier[toval_data] = identifier[cwlutils] . identifier[unpack_tarballs] ( identifier[toval_data] , identifier[toval_data] )
keyword[if] identifier[toval_data] :
identifier[caller] = identifier[_get_caller] ( identifier[toval_data] )
identifier[sample] = identifier[dd] . identifier[get_sample_name] ( identifier[toval_data] )
identifier[base_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[toval_data] [ literal[string] ][ literal[string] ], literal[string] , identifier[sample] , identifier[caller] ))
keyword[if] identifier[isinstance] ( identifier[toval_data] [ literal[string] ],( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[NotImplementedError] ( literal[string] % identifier[toval_data] [ literal[string] ])
keyword[else] :
identifier[vrn_file] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[toval_data] [ literal[string] ])
identifier[rm_file] = identifier[normalize_input_path] ( identifier[toval_data] [ literal[string] ][ literal[string] ][ literal[string] ], identifier[toval_data] )
identifier[rm_interval_file] = identifier[_gunzip] ( identifier[normalize_input_path] ( identifier[toval_data] [ literal[string] ][ literal[string] ]. identifier[get] ( literal[string] ),
identifier[toval_data] ),
identifier[toval_data] )
identifier[rm_interval_file] = identifier[bedutils] . identifier[clean_file] ( identifier[rm_interval_file] , identifier[toval_data] , identifier[prefix] = literal[string] ,
identifier[bedprep_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] )))
identifier[rm_file] = identifier[naming] . identifier[handle_synonyms] ( identifier[rm_file] , identifier[dd] . identifier[get_ref_file] ( identifier[toval_data] ), identifier[data] . identifier[get] ( literal[string] ),
identifier[base_dir] , identifier[data] )
identifier[rm_interval_file] =( identifier[naming] . identifier[handle_synonyms] ( identifier[rm_interval_file] , identifier[dd] . identifier[get_ref_file] ( identifier[toval_data] ),
identifier[data] . identifier[get] ( literal[string] ), identifier[base_dir] , identifier[data] )
keyword[if] identifier[rm_interval_file] keyword[else] keyword[None] )
identifier[vmethod] = identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] , literal[string] )
keyword[if] keyword[not] identifier[vcfutils] . identifier[vcf_has_variants] ( identifier[vrn_file] ):
identifier[eval_files] = identifier[_setup_call_false] ( identifier[rm_file] , identifier[rm_interval_file] , identifier[base_dir] , identifier[toval_data] , literal[string] )
identifier[data] [ literal[string] ]= identifier[_rtg_add_summary_file] ( identifier[eval_files] , identifier[base_dir] , identifier[toval_data] )
keyword[elif] keyword[not] identifier[vcfutils] . identifier[vcf_has_variants] ( identifier[rm_file] ):
identifier[eval_files] = identifier[_setup_call_fps] ( identifier[vrn_file] , identifier[rm_interval_file] , identifier[base_dir] , identifier[toval_data] , literal[string] )
identifier[data] [ literal[string] ]= identifier[_rtg_add_summary_file] ( identifier[eval_files] , identifier[base_dir] , identifier[toval_data] )
keyword[elif] identifier[vmethod] keyword[in] [ literal[string] , literal[string] ]:
identifier[eval_files] = identifier[_run_rtg_eval] ( identifier[vrn_file] , identifier[rm_file] , identifier[rm_interval_file] , identifier[base_dir] , identifier[toval_data] , identifier[vmethod] )
identifier[eval_files] = identifier[_annotate_validations] ( identifier[eval_files] , identifier[toval_data] )
identifier[data] [ literal[string] ]= identifier[_rtg_add_summary_file] ( identifier[eval_files] , identifier[base_dir] , identifier[toval_data] )
keyword[elif] identifier[vmethod] == literal[string] :
identifier[data] [ literal[string] ]= identifier[_run_happy_eval] ( identifier[vrn_file] , identifier[rm_file] , identifier[rm_interval_file] , identifier[base_dir] , identifier[toval_data] )
keyword[elif] identifier[vmethod] == literal[string] :
identifier[data] [ literal[string] ]= identifier[_run_bcbio_variation] ( identifier[vrn_file] , identifier[rm_file] , identifier[rm_interval_file] , identifier[base_dir] ,
identifier[sample] , identifier[caller] , identifier[toval_data] )
keyword[return] [[ identifier[data] ]] | def compare_to_rm(data):
"""Compare final variant calls against reference materials of known calls.
"""
if isinstance(data, (list, tuple)) and cwlutils.is_cwl_run(utils.to_single_data(data[0])):
data = _normalize_cwl_inputs(data) # depends on [control=['if'], data=[]]
toval_data = _get_validate(data)
toval_data = cwlutils.unpack_tarballs(toval_data, toval_data)
if toval_data:
caller = _get_caller(toval_data)
sample = dd.get_sample_name(toval_data)
base_dir = utils.safe_makedir(os.path.join(toval_data['dirs']['work'], 'validate', sample, caller))
if isinstance(toval_data['vrn_file'], (list, tuple)):
raise NotImplementedError('Multiple input files for validation: %s' % toval_data['vrn_file']) # depends on [control=['if'], data=[]]
else:
vrn_file = os.path.abspath(toval_data['vrn_file'])
rm_file = normalize_input_path(toval_data['config']['algorithm']['validate'], toval_data)
rm_interval_file = _gunzip(normalize_input_path(toval_data['config']['algorithm'].get('validate_regions'), toval_data), toval_data)
rm_interval_file = bedutils.clean_file(rm_interval_file, toval_data, prefix='validateregions-', bedprep_dir=utils.safe_makedir(os.path.join(base_dir, 'bedprep')))
rm_file = naming.handle_synonyms(rm_file, dd.get_ref_file(toval_data), data.get('genome_build'), base_dir, data)
rm_interval_file = naming.handle_synonyms(rm_interval_file, dd.get_ref_file(toval_data), data.get('genome_build'), base_dir, data) if rm_interval_file else None
vmethod = tz.get_in(['config', 'algorithm', 'validate_method'], data, 'rtg')
# RTG can fail on totally empty files. Call everything in truth set as false negatives
if not vcfutils.vcf_has_variants(vrn_file):
eval_files = _setup_call_false(rm_file, rm_interval_file, base_dir, toval_data, 'fn')
data['validate'] = _rtg_add_summary_file(eval_files, base_dir, toval_data) # depends on [control=['if'], data=[]]
# empty validation file, every call is a false positive
elif not vcfutils.vcf_has_variants(rm_file):
eval_files = _setup_call_fps(vrn_file, rm_interval_file, base_dir, toval_data, 'fp')
data['validate'] = _rtg_add_summary_file(eval_files, base_dir, toval_data) # depends on [control=['if'], data=[]]
elif vmethod in ['rtg', 'rtg-squash-ploidy']:
eval_files = _run_rtg_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data, vmethod)
eval_files = _annotate_validations(eval_files, toval_data)
data['validate'] = _rtg_add_summary_file(eval_files, base_dir, toval_data) # depends on [control=['if'], data=['vmethod']]
elif vmethod == 'hap.py':
data['validate'] = _run_happy_eval(vrn_file, rm_file, rm_interval_file, base_dir, toval_data) # depends on [control=['if'], data=[]]
elif vmethod == 'bcbio.variation':
data['validate'] = _run_bcbio_variation(vrn_file, rm_file, rm_interval_file, base_dir, sample, caller, toval_data) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return [[data]] |
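A minimal illustration of the tz.get_in lookup used above to select the validation method; toolz.get_in returns the default when any key along the path is missing:

import toolz as tz
data = {'config': {'algorithm': {}}}
tz.get_in(['config', 'algorithm', 'validate_method'], data, 'rtg')  # -> 'rtg'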
def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
"""
context = _assign_values_to_unbound_vars(self._unbound_vars, bindings)
context.update(self._partial_context)
return self._construct(context) | def function[construct, parameter[self]]:
constant[Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
]
variable[context] assign[=] call[name[_assign_values_to_unbound_vars], parameter[name[self]._unbound_vars, name[bindings]]]
call[name[context].update, parameter[name[self]._partial_context]]
return[call[name[self]._construct, parameter[name[context]]]] | keyword[def] identifier[construct] ( identifier[self] ,** identifier[bindings] ):
literal[string]
identifier[context] = identifier[_assign_values_to_unbound_vars] ( identifier[self] . identifier[_unbound_vars] , identifier[bindings] )
identifier[context] . identifier[update] ( identifier[self] . identifier[_partial_context] )
keyword[return] identifier[self] . identifier[_construct] ( identifier[context] ) | def construct(self, **bindings):
"""Constructs the graph and returns either a tensor or a sequence.
Args:
**bindings: Arguments for every deferred parameter.
Returns:
The value that is placed into this.
"""
context = _assign_values_to_unbound_vars(self._unbound_vars, bindings)
context.update(self._partial_context)
return self._construct(context) |
def check(self):
""" Check the stats if enabled. """
if not self.enabled():
return
try:
self.fetch()
except (xmlrpclib.Fault, did.base.ConfigError) as error:
log.error(error)
self._error = True
# Raise the exception if debugging
if not self.options or self.options.debug:
raise
# Show the results stats (unless merging)
if self.options and not self.options.merge:
self.show() | def function[check, parameter[self]]:
constant[ Check the stats if enabled. ]
if <ast.UnaryOp object at 0x7da1b208b010> begin[:]
return[None]
<ast.Try object at 0x7da1b2088d60>
if <ast.BoolOp object at 0x7da1b1e98370> begin[:]
call[name[self].show, parameter[]] | keyword[def] identifier[check] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[enabled] ():
keyword[return]
keyword[try] :
identifier[self] . identifier[fetch] ()
keyword[except] ( identifier[xmlrpclib] . identifier[Fault] , identifier[did] . identifier[base] . identifier[ConfigError] ) keyword[as] identifier[error] :
identifier[log] . identifier[error] ( identifier[error] )
identifier[self] . identifier[_error] = keyword[True]
keyword[if] keyword[not] identifier[self] . identifier[options] keyword[or] identifier[self] . identifier[options] . identifier[debug] :
keyword[raise]
keyword[if] identifier[self] . identifier[options] keyword[and] keyword[not] identifier[self] . identifier[options] . identifier[merge] :
identifier[self] . identifier[show] () | def check(self):
""" Check the stats if enabled. """
if not self.enabled():
return # depends on [control=['if'], data=[]]
try:
self.fetch() # depends on [control=['try'], data=[]]
except (xmlrpclib.Fault, did.base.ConfigError) as error:
log.error(error)
self._error = True
# Raise the exception if debugging
if not self.options or self.options.debug:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['error']]
# Show the results stats (unless merging)
if self.options and (not self.options.merge):
self.show() # depends on [control=['if'], data=[]] |
def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_system=None,
TransferClass=FileTransfer):
"""Transfer file to remote device.
By default, this will use Secure Copy if self.inline_transfer is set, then will use
Netmiko InlineTransfer method to transfer inline using either SSH or telnet (plus TCL
onbox).
Return (status, msg)
status = boolean
msg = details on what happened
"""
if not source_file and not source_config:
raise ValueError("File source not specified for transfer.")
if not dest_file or not file_system:
raise ValueError("Destination file or file system not specified.")
if source_file:
kwargs = dict(ssh_conn=self.device, source_file=source_file, dest_file=dest_file,
direction='put', file_system=file_system)
elif source_config:
kwargs = dict(ssh_conn=self.device, source_config=source_config, dest_file=dest_file,
direction='put', file_system=file_system)
enable_scp = True
if self.inline_transfer:
enable_scp = False
with TransferClass(**kwargs) as transfer:
# Check if file already exists and has correct MD5
if transfer.check_file_exists() and transfer.compare_md5():
msg = "File already exists and has correct MD5: no SCP needed"
return (True, msg)
if not transfer.verify_space_available():
msg = "Insufficient space available on remote device"
return (False, msg)
if enable_scp:
transfer.enable_scp()
# Transfer file
transfer.transfer_file()
# Compares MD5 between local-remote files
if transfer.verify_file():
msg = "File successfully transferred to remote device"
return (True, msg)
else:
msg = "File transfer to remote device failed"
return (False, msg)
return (False, '') | def function[_xfer_file, parameter[self, source_file, source_config, dest_file, file_system, TransferClass]]:
constant[Transfer file to remote device.
By default, this will use Secure Copy; if self.inline_transfer is set, it will use
the Netmiko InlineTransfer method to transfer inline over either SSH or telnet
(plus TCL on-box).
Return (status, msg)
status = boolean
msg = details on what happened
]
if <ast.BoolOp object at 0x7da18bccace0> begin[:]
<ast.Raise object at 0x7da18bcc86a0>
if <ast.BoolOp object at 0x7da18bcc8100> begin[:]
<ast.Raise object at 0x7da18bcc8700>
if name[source_file] begin[:]
variable[kwargs] assign[=] call[name[dict], parameter[]]
variable[enable_scp] assign[=] constant[True]
if name[self].inline_transfer begin[:]
variable[enable_scp] assign[=] constant[False]
with call[name[TransferClass], parameter[]] begin[:]
if <ast.BoolOp object at 0x7da20c6a8f70> begin[:]
variable[msg] assign[=] constant[File already exists and has correct MD5: no SCP needed]
return[tuple[[<ast.Constant object at 0x7da20c6aa5f0>, <ast.Name object at 0x7da20c6a9c60>]]]
if <ast.UnaryOp object at 0x7da20c6aa380> begin[:]
variable[msg] assign[=] constant[Insufficient space available on remote device]
return[tuple[[<ast.Constant object at 0x7da20c6aa050>, <ast.Name object at 0x7da20c6a9e40>]]]
if name[enable_scp] begin[:]
call[name[transfer].enable_scp, parameter[]]
call[name[transfer].transfer_file, parameter[]]
if call[name[transfer].verify_file, parameter[]] begin[:]
variable[msg] assign[=] constant[File successfully transferred to remote device]
return[tuple[[<ast.Constant object at 0x7da207f9ab30>, <ast.Name object at 0x7da207f9a590>]]]
return[tuple[[<ast.Constant object at 0x7da207f995d0>, <ast.Constant object at 0x7da207f9a920>]]] | keyword[def] identifier[_xfer_file] ( identifier[self] , identifier[source_file] = keyword[None] , identifier[source_config] = keyword[None] , identifier[dest_file] = keyword[None] , identifier[file_system] = keyword[None] ,
identifier[TransferClass] = identifier[FileTransfer] ):
literal[string]
keyword[if] keyword[not] identifier[source_file] keyword[and] keyword[not] identifier[source_config] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[dest_file] keyword[or] keyword[not] identifier[file_system] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[source_file] :
identifier[kwargs] = identifier[dict] ( identifier[ssh_conn] = identifier[self] . identifier[device] , identifier[source_file] = identifier[source_file] , identifier[dest_file] = identifier[dest_file] ,
identifier[direction] = literal[string] , identifier[file_system] = identifier[file_system] )
keyword[elif] identifier[source_config] :
identifier[kwargs] = identifier[dict] ( identifier[ssh_conn] = identifier[self] . identifier[device] , identifier[source_config] = identifier[source_config] , identifier[dest_file] = identifier[dest_file] ,
identifier[direction] = literal[string] , identifier[file_system] = identifier[file_system] )
identifier[enable_scp] = keyword[True]
keyword[if] identifier[self] . identifier[inline_transfer] :
identifier[enable_scp] = keyword[False]
keyword[with] identifier[TransferClass] (** identifier[kwargs] ) keyword[as] identifier[transfer] :
keyword[if] identifier[transfer] . identifier[check_file_exists] () keyword[and] identifier[transfer] . identifier[compare_md5] ():
identifier[msg] = literal[string]
keyword[return] ( keyword[True] , identifier[msg] )
keyword[if] keyword[not] identifier[transfer] . identifier[verify_space_available] ():
identifier[msg] = literal[string]
keyword[return] ( keyword[False] , identifier[msg] )
keyword[if] identifier[enable_scp] :
identifier[transfer] . identifier[enable_scp] ()
identifier[transfer] . identifier[transfer_file] ()
keyword[if] identifier[transfer] . identifier[verify_file] ():
identifier[msg] = literal[string]
keyword[return] ( keyword[True] , identifier[msg] )
keyword[else] :
identifier[msg] = literal[string]
keyword[return] ( keyword[False] , identifier[msg] )
keyword[return] ( keyword[False] , literal[string] ) | def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_system=None, TransferClass=FileTransfer):
"""Transfer file to remote device.
By default, this will use Secure Copy; if self.inline_transfer is set, it will use
the Netmiko InlineTransfer method to transfer inline over either SSH or telnet
(plus TCL on-box).
Return (status, msg)
status = boolean
msg = details on what happened
"""
if not source_file and (not source_config):
raise ValueError('File source not specified for transfer.') # depends on [control=['if'], data=[]]
if not dest_file or not file_system:
raise ValueError('Destination file or file system not specified.') # depends on [control=['if'], data=[]]
if source_file:
kwargs = dict(ssh_conn=self.device, source_file=source_file, dest_file=dest_file, direction='put', file_system=file_system) # depends on [control=['if'], data=[]]
elif source_config:
kwargs = dict(ssh_conn=self.device, source_config=source_config, dest_file=dest_file, direction='put', file_system=file_system) # depends on [control=['if'], data=[]]
enable_scp = True
if self.inline_transfer:
enable_scp = False # depends on [control=['if'], data=[]]
with TransferClass(**kwargs) as transfer:
# Check if file already exists and has correct MD5
if transfer.check_file_exists() and transfer.compare_md5():
msg = 'File already exists and has correct MD5: no SCP needed'
return (True, msg) # depends on [control=['if'], data=[]]
if not transfer.verify_space_available():
msg = 'Insufficient space available on remote device'
return (False, msg) # depends on [control=['if'], data=[]]
if enable_scp:
transfer.enable_scp() # depends on [control=['if'], data=[]]
# Transfer file
transfer.transfer_file()
# Compares MD5 between local-remote files
if transfer.verify_file():
msg = 'File successfully transferred to remote device'
return (True, msg) # depends on [control=['if'], data=[]]
else:
msg = 'File transfer to remote device failed'
return (False, msg)
return (False, '') # depends on [control=['with'], data=['transfer']] |
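A hedged Netmiko usage sketch for the same pattern; the device parameters are placeholders, and the method names match those exercised above:

from netmiko import ConnectHandler, FileTransfer

ssh_conn = ConnectHandler(device_type='cisco_ios', host='192.0.2.1',
                          username='admin', password='secret')  # placeholder credentials
with FileTransfer(ssh_conn, source_file='startup.cfg', dest_file='startup.cfg',
                  file_system='flash:', direction='put') as transfer:
    if not (transfer.check_file_exists() and transfer.compare_md5()):
        transfer.enable_scp()
        transfer.transfer_file()
        assert transfer.verify_file(), 'MD5 mismatch after transfer'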
def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if (zi.size > 1) and (Mi.size > 1):
if(zi.size != Mi.size):
print("Error ambiguous request")
print("Need individual redshifts for all haloes provided ")
print("Or have all haloes at same redshift ")
return(-1)
elif (zi.size == 1) and (Mi.size > 1):
if verbose:
print("Assume zi is the same for all Mi halo masses provided")
# Replicate redshift for all halo masses
zi = np.ones_like(Mi)*zi[0]
elif (Mi.size == 1) and (zi.size > 1):
if verbose:
print("Assume Mi halo masses are the same for all zi provided")
# Replicate redshift for all halo masses
Mi = np.ones_like(zi)*Mi[0]
else:
if verbose:
print("A single Mi and zi provided")
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Nothing passed for z; leave it as False and report a single output length
lenzout = 1
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return(zi, Mi, z, zi.size, Mi.size, lenzout) | def function[_checkinput, parameter[zi, Mi, z, verbose]]:
constant[ Check and convert any input scalar or array to numpy array ]
variable[zi] assign[=] call[name[np].array, parameter[name[zi]]]
variable[Mi] assign[=] call[name[np].array, parameter[name[Mi]]]
if <ast.BoolOp object at 0x7da1b10a61d0> begin[:]
if compare[name[zi].size not_equal[!=] name[Mi].size] begin[:]
call[name[print], parameter[constant[Error ambiguous request]]]
call[name[print], parameter[constant[Need individual redshifts for all haloes provided ]]]
call[name[print], parameter[constant[Or have all haloes at same redshift ]]]
return[<ast.UnaryOp object at 0x7da1b10a7790>]
if compare[name[z] is constant[False]] begin[:]
variable[lenzout] assign[=] constant[1]
return[tuple[[<ast.Name object at 0x7da1b10a7f10>, <ast.Name object at 0x7da1b10a4640>, <ast.Name object at 0x7da1b10a7f40>, <ast.Attribute object at 0x7da1b10a6890>, <ast.Attribute object at 0x7da1b10a7fa0>, <ast.Name object at 0x7da1b10a6800>]]] | keyword[def] identifier[_checkinput] ( identifier[zi] , identifier[Mi] , identifier[z] = keyword[False] , identifier[verbose] = keyword[None] ):
literal[string]
identifier[zi] = identifier[np] . identifier[array] ( identifier[zi] , identifier[ndmin] = literal[int] , identifier[dtype] = identifier[float] )
identifier[Mi] = identifier[np] . identifier[array] ( identifier[Mi] , identifier[ndmin] = literal[int] , identifier[dtype] = identifier[float] )
keyword[if] ( identifier[zi] . identifier[size] > literal[int] ) keyword[and] ( identifier[Mi] . identifier[size] > literal[int] ):
keyword[if] ( identifier[zi] . identifier[size] != identifier[Mi] . identifier[size] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[return] (- literal[int] )
keyword[elif] ( identifier[zi] . identifier[size] == literal[int] ) keyword[and] ( identifier[Mi] . identifier[size] > literal[int] ):
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
identifier[zi] = identifier[np] . identifier[ones_like] ( identifier[Mi] )* identifier[zi] [ literal[int] ]
keyword[elif] ( identifier[Mi] . identifier[size] == literal[int] ) keyword[and] ( identifier[zi] . identifier[size] > literal[int] ):
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
identifier[Mi] = identifier[np] . identifier[ones_like] ( identifier[zi] )* identifier[Mi] [ literal[int] ]
keyword[else] :
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
keyword[if] identifier[z] keyword[is] keyword[False] :
identifier[lenzout] = literal[int]
keyword[else] :
identifier[z] = identifier[np] . identifier[array] ( identifier[z] , identifier[ndmin] = literal[int] , identifier[dtype] = identifier[float] )
identifier[lenzout] = identifier[z] . identifier[size]
keyword[return] ( identifier[zi] , identifier[Mi] , identifier[z] , identifier[zi] . identifier[size] , identifier[Mi] . identifier[size] , identifier[lenzout] ) | def _checkinput(zi, Mi, z=False, verbose=None):
""" Check and convert any input scalar or array to numpy array """
# How many halo redshifts provided?
zi = np.array(zi, ndmin=1, dtype=float)
# How many halo masses provided?
Mi = np.array(Mi, ndmin=1, dtype=float)
# Check the input sizes for zi and Mi make sense, if not then exit unless
# one axis is length one, then replicate values to the size of the other
if zi.size > 1 and Mi.size > 1:
if zi.size != Mi.size:
print('Error ambiguous request')
print('Need individual redshifts for all haloes provided ')
print('Or have all haloes at same redshift ')
return -1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif zi.size == 1 and Mi.size > 1:
if verbose:
print('Assume zi is the same for all Mi halo masses provided') # depends on [control=['if'], data=[]]
# Replicate redshift for all halo masses
zi = np.ones_like(Mi) * zi[0] # depends on [control=['if'], data=[]]
elif Mi.size == 1 and zi.size > 1:
if verbose:
print('Assume Mi halo masses are the same for all zi provided') # depends on [control=['if'], data=[]]
# Replicate redshift for all halo masses
Mi = np.ones_like(zi) * Mi[0] # depends on [control=['if'], data=[]]
elif verbose:
print('A single Mi and zi provided') # depends on [control=['if'], data=[]]
# Very simple test for size / type of incoming array
# just in case numpy / list given
if z is False:
# Nothing passed for z; leave it as False and report a single output length
lenzout = 1 # depends on [control=['if'], data=[]]
else:
# If something was passed, convert to 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
lenzout = z.size
return (zi, Mi, z, zi.size, Mi.size, lenzout) |
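A quick worked example of the broadcasting behaviour (the values are placeholders):

import numpy as np  # _checkinput depends on numpy as np
zi, Mi, z, n_zi, n_Mi, lenzout = _checkinput(0.5, [1e12, 1e13])
# zi is replicated to match Mi -> array([0.5, 0.5]); z stays False, so lenzout == 1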
def delete_pre_shared_key(self, endpoint_name, **kwargs): # noqa: E501
"""Remove a pre-shared key. # noqa: E501
Remove a pre-shared key. **Example usage:** ``` curl -H \"authorization: Bearer ${API_TOKEN}\" -X DELETE https://api.us-east-1.mbedcloud.com/v2/device-shared-keys/my-endpoint-0001 ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_pre_shared_key(endpoint_name, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str endpoint_name: The unique endpoint identifier that this pre-shared key applies to. [Reserved characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) must be percent-encoded. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_pre_shared_key_with_http_info(endpoint_name, **kwargs) # noqa: E501
else:
(data) = self.delete_pre_shared_key_with_http_info(endpoint_name, **kwargs) # noqa: E501
return data | def function[delete_pre_shared_key, parameter[self, endpoint_name]]:
constant[Remove a pre-shared key. # noqa: E501
Remove a pre-shared key. **Example usage:** ``` curl -H "authorization: Bearer ${API_TOKEN}" -X DELETE https://api.us-east-1.mbedcloud.com/v2/device-shared-keys/my-endpoint-0001 ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_pre_shared_key(endpoint_name, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str endpoint_name: The unique endpoint identifier that this pre-shared key applies to. [Reserved characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) must be percent-encoded. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[asynchronous]]] begin[:]
return[call[name[self].delete_pre_shared_key_with_http_info, parameter[name[endpoint_name]]]] | keyword[def] identifier[delete_pre_shared_key] ( identifier[self] , identifier[endpoint_name] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[self] . identifier[delete_pre_shared_key_with_http_info] ( identifier[endpoint_name] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[self] . identifier[delete_pre_shared_key_with_http_info] ( identifier[endpoint_name] ,** identifier[kwargs] )
keyword[return] identifier[data] | def delete_pre_shared_key(self, endpoint_name, **kwargs): # noqa: E501
'Remove a pre-shared key. # noqa: E501\n\n Remove a pre-shared key. **Example usage:** ``` curl -H "authorization: Bearer ${API_TOKEN}" -X DELETE https://api.us-east-1.mbedcloud.com/v2/device-shared-keys/my-endpoint-0001 ``` # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass asynchronous=True\n >>> thread = api.delete_pre_shared_key(endpoint_name, asynchronous=True)\n >>> result = thread.get()\n\n :param asynchronous bool\n :param str endpoint_name: The unique endpoint identifier that this pre-shared key applies to. [Reserved characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) must be percent-encoded. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n '
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_pre_shared_key_with_http_info(endpoint_name, **kwargs) # noqa: E501 # depends on [control=['if'], data=[]]
else:
data = self.delete_pre_shared_key_with_http_info(endpoint_name, **kwargs) # noqa: E501
return data |
def readline(self, size=-1):
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
data = ''
while '\n' not in data and (size < 0 or len(data) < size):
if size < 0:
chunk = self.read(1024)
else:
chunk = self.read(size - len(data))
if not chunk:
break
data += chunk
if '\n' in data:
data, sep, rest = data.partition('\n')
data += sep
if self.buf:
self.buf = rest + self.buf
else:
self.buf = rest
return data | def function[readline, parameter[self, size]]:
constant[
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
]
if name[self].closed begin[:]
<ast.Raise object at 0x7da18f00fee0>
variable[data] assign[=] constant[]
while <ast.BoolOp object at 0x7da18f00c0d0> begin[:]
if compare[name[size] less[<] constant[0]] begin[:]
variable[chunk] assign[=] call[name[self].read, parameter[constant[1024]]]
if <ast.UnaryOp object at 0x7da18f00d060> begin[:]
break
<ast.AugAssign object at 0x7da18f00e440>
if compare[constant[
] in name[data]] begin[:]
<ast.Tuple object at 0x7da18f00fd00> assign[=] call[name[data].partition, parameter[constant[
]]]
<ast.AugAssign object at 0x7da20c6aa410>
if name[self].buf begin[:]
name[self].buf assign[=] binary_operation[name[rest] + name[self].buf]
return[name[data]] | keyword[def] identifier[readline] ( identifier[self] , identifier[size] =- literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[closed] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[data] = literal[string]
keyword[while] literal[string] keyword[not] keyword[in] identifier[data] keyword[and] ( identifier[size] < literal[int] keyword[or] identifier[len] ( identifier[data] )< identifier[size] ):
keyword[if] identifier[size] < literal[int] :
identifier[chunk] = identifier[self] . identifier[read] ( literal[int] )
keyword[else] :
identifier[chunk] = identifier[self] . identifier[read] ( identifier[size] - identifier[len] ( identifier[data] ))
keyword[if] keyword[not] identifier[chunk] :
keyword[break]
identifier[data] += identifier[chunk]
keyword[if] literal[string] keyword[in] identifier[data] :
identifier[data] , identifier[sep] , identifier[rest] = identifier[data] . identifier[partition] ( literal[string] )
identifier[data] += identifier[sep]
keyword[if] identifier[self] . identifier[buf] :
identifier[self] . identifier[buf] = identifier[rest] + identifier[self] . identifier[buf]
keyword[else] :
identifier[self] . identifier[buf] = identifier[rest]
keyword[return] identifier[data] | def readline(self, size=-1):
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
if self.closed:
raise ValueError('I/O operation on closed file') # depends on [control=['if'], data=[]]
data = ''
while '\n' not in data and (size < 0 or len(data) < size):
if size < 0:
chunk = self.read(1024) # depends on [control=['if'], data=[]]
else:
chunk = self.read(size - len(data))
if not chunk:
break # depends on [control=['if'], data=[]]
data += chunk # depends on [control=['while'], data=[]]
if '\n' in data:
(data, sep, rest) = data.partition('\n')
data += sep
if self.buf:
self.buf = rest + self.buf # depends on [control=['if'], data=[]]
else:
self.buf = rest # depends on [control=['if'], data=['data']]
return data |
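The buffering hinges on str.partition, which splits on the first newline while keeping the separator; a one-line illustration:

data, sep, rest = 'abc\ndef'.partition('\n')
# data == 'abc', sep == '\n', rest == 'def'  (rest is pushed back onto self.buf)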
def get_items_by_genus_type(self, item_genus_type):
"""Gets an ``ItemList`` corresponding to the given assessment item genus ``Type`` which does not include assessment items of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known assessment
items or an error results. Otherwise, the returned list may
contain only those assessment items that are accessible through
this session.
arg: item_genus_type (osid.type.Type): an assessment item
genus type
return: (osid.assessment.ItemList) - the returned ``Item`` list
raise: NullArgument - ``item_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment',
collection='Item',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(item_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.ItemList(result, runtime=self._runtime, proxy=self._proxy) | def function[get_items_by_genus_type, parameter[self, item_genus_type]]:
constant[Gets an ``ItemList`` corresponding to the given assessment item genus ``Type`` which does not include assessment items of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known assessment
items or an error results. Otherwise, the returned list may
contain only those assessment items that are accessible through
this session.
arg: item_genus_type (osid.type.Type): an assessment item
genus type
return: (osid.assessment.ItemList) - the returned ``Item`` list
raise: NullArgument - ``item_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[assessment]]]
variable[result] assign[=] call[call[name[collection].find, parameter[call[name[dict], parameter[dictionary[[<ast.Constant object at 0x7da1b26ac3a0>], [<ast.Call object at 0x7da1b26ae410>]]]]]].sort, parameter[constant[_id], name[DESCENDING]]]
return[call[name[objects].ItemList, parameter[name[result]]]] | keyword[def] identifier[get_items_by_genus_type] ( identifier[self] , identifier[item_genus_type] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[result] = identifier[collection] . identifier[find] (
identifier[dict] ({ literal[string] : identifier[str] ( identifier[item_genus_type] )},
** identifier[self] . identifier[_view_filter] ())). identifier[sort] ( literal[string] , identifier[DESCENDING] )
keyword[return] identifier[objects] . identifier[ItemList] ( identifier[result] , identifier[runtime] = identifier[self] . identifier[_runtime] , identifier[proxy] = identifier[self] . identifier[_proxy] ) | def get_items_by_genus_type(self, item_genus_type):
"""Gets an ``ItemList`` corresponding to the given assessment item genus ``Type`` which does not include assessment items of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known assessment
items or an error results. Otherwise, the returned list may
contain only those assessment items that are accessible through
this session.
arg: item_genus_type (osid.type.Type): an assessment item
genus type
return: (osid.assessment.ItemList) - the returned ``Item`` list
raise: NullArgument - ``item_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment', collection='Item', runtime=self._runtime)
result = collection.find(dict({'genusTypeId': str(item_genus_type)}, **self._view_filter())).sort('_id', DESCENDING)
return objects.ItemList(result, runtime=self._runtime, proxy=self._proxy) |
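# A hedged sketch of the genus-type filter above, using a plain in-memory list
# in place of the MongoDB-backed JSONClientValidated collection; records and
# genus strings here are hypothetical:
items = [
    {'_id': 3, 'genusTypeId': 'item-genus-type%3Amultiple-choice'},
    {'_id': 2, 'genusTypeId': 'item-genus-type%3Aessay'},
    {'_id': 1, 'genusTypeId': 'item-genus-type%3Amultiple-choice'},
]
genus = 'item-genus-type%3Amultiple-choice'
matches = sorted((item for item in items if item['genusTypeId'] == str(genus)),
                 key=lambda item: item['_id'], reverse=True)  # mirrors .sort('_id', DESCENDING)
print([item['_id'] for item in matches])  # [3, 1]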
def _removeBPoint(self, index, **kwargs):
"""
index will be a valid index.
Subclasses may override this method.
"""
bPoint = self.bPoints[index]
nextSegment = bPoint._nextSegment
offCurves = nextSegment.offCurve
if offCurves:
offCurve = offCurves[0]
self.removePoint(offCurve)
segment = bPoint._segment
offCurves = segment.offCurve
if offCurves:
offCurve = offCurves[-1]
self.removePoint(offCurve)
self.removePoint(bPoint._point) | def function[_removeBPoint, parameter[self, index]]:
constant[
index will be a valid index.
Subclasses may override this method.
]
variable[bPoint] assign[=] call[name[self].bPoints][name[index]]
variable[nextSegment] assign[=] name[bPoint]._nextSegment
variable[offCurves] assign[=] name[nextSegment].offCurve
if name[offCurves] begin[:]
variable[offCurve] assign[=] call[name[offCurves]][constant[0]]
call[name[self].removePoint, parameter[name[offCurve]]]
variable[segment] assign[=] name[bPoint]._segment
variable[offCurves] assign[=] name[segment].offCurve
if name[offCurves] begin[:]
variable[offCurve] assign[=] call[name[offCurves]][<ast.UnaryOp object at 0x7da204961c30>]
call[name[self].removePoint, parameter[name[offCurve]]]
call[name[self].removePoint, parameter[name[bPoint]._point]] | keyword[def] identifier[_removeBPoint] ( identifier[self] , identifier[index] ,** identifier[kwargs] ):
literal[string]
identifier[bPoint] = identifier[self] . identifier[bPoints] [ identifier[index] ]
identifier[nextSegment] = identifier[bPoint] . identifier[_nextSegment]
identifier[offCurves] = identifier[nextSegment] . identifier[offCurve]
keyword[if] identifier[offCurves] :
identifier[offCurve] = identifier[offCurves] [ literal[int] ]
identifier[self] . identifier[removePoint] ( identifier[offCurve] )
identifier[segment] = identifier[bPoint] . identifier[_segment]
identifier[offCurves] = identifier[segment] . identifier[offCurve]
keyword[if] identifier[offCurves] :
identifier[offCurve] = identifier[offCurves] [- literal[int] ]
identifier[self] . identifier[removePoint] ( identifier[offCurve] )
identifier[self] . identifier[removePoint] ( identifier[bPoint] . identifier[_point] ) | def _removeBPoint(self, index, **kwargs):
"""
index will be a valid index.
Subclasses may override this method.
"""
bPoint = self.bPoints[index]
nextSegment = bPoint._nextSegment
offCurves = nextSegment.offCurve
if offCurves:
offCurve = offCurves[0]
self.removePoint(offCurve) # depends on [control=['if'], data=[]]
segment = bPoint._segment
offCurves = segment.offCurve
if offCurves:
offCurve = offCurves[-1]
self.removePoint(offCurve) # depends on [control=['if'], data=[]]
self.removePoint(bPoint._point) |
def rolling_count(self, window_start, window_end):
"""
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is executed on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
"""
agg_op = '__builtin__nonnull__count__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0)) | def function[rolling_count, parameter[self, window_start, window_end]]:
constant[
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is executed on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
]
variable[agg_op] assign[=] constant[__builtin__nonnull__count__]
return[call[name[SArray], parameter[]]] | keyword[def] identifier[rolling_count] ( identifier[self] , identifier[window_start] , identifier[window_end] ):
literal[string]
identifier[agg_op] = literal[string]
keyword[return] identifier[SArray] ( identifier[_proxy] = identifier[self] . identifier[__proxy__] . identifier[builtin_rolling_apply] ( identifier[agg_op] , identifier[window_start] , identifier[window_end] , literal[int] )) | def rolling_count(self, window_start, window_end):
"""
Count the number of non-NULL values of different subsets over this
SArray.
The subset that the count is executed on is defined as an inclusive
range relative to the position to each value in the SArray, using
`window_start` and `window_end`. For a better understanding of this,
see the examples below.
Parameters
----------
window_start : int
The start of the subset to count relative to the current value.
window_end : int
The end of the subset to count relative to the current value. Must
be greater than `window_start`.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,None,5])
>>> series = pandas.Series([1,2,3,None,5])
A rolling count with a window including the previous 2 entries including
the current:
>>> sa.rolling_count(-2,0)
dtype: int
Rows: 5
[1, 2, 3, 2, 2]
Pandas equivalent:
>>> pandas.rolling_count(series, 3)
0 1
1 2
2 3
3 2
4 2
dtype: float64
A rolling count with a size of 3, centered around the current:
>>> sa.rolling_count(-1,1)
dtype: int
Rows: 5
[2, 3, 2, 2, 1]
Pandas equivalent:
>>> pandas.rolling_count(series, 3, center=True)
0 2
1 3
2 2
3 2
4 1
dtype: float64
A rolling count with a window including the current and the 2 entries
following:
>>> sa.rolling_count(0,2)
dtype: int
Rows: 5
[3, 2, 2, 1, 1]
A rolling count with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_count(-2,-1)
dtype: int
Rows: 5
[0, 1, 2, 2, 1]
"""
agg_op = '__builtin__nonnull__count__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, 0)) |
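# Plain-Python reference implementation of the rolling-count semantics
# documented above, so the docstring examples can be checked directly
# (a sketch only; the real SArray version runs inside the engine via
# builtin_rolling_apply):
def rolling_count_reference(values, window_start, window_end):
    n = len(values)
    out = []
    for i in range(n):
        lo, hi = max(0, i + window_start), min(n - 1, i + window_end)
        out.append(sum(values[j] is not None for j in range(lo, hi + 1)))
    return out

assert rolling_count_reference([1, 2, 3, None, 5], -2, 0) == [1, 2, 3, 2, 2]
assert rolling_count_reference([1, 2, 3, None, 5], -1, 1) == [2, 3, 2, 2, 1]
assert rolling_count_reference([1, 2, 3, None, 5], 0, 2) == [3, 2, 2, 1, 1]
assert rolling_count_reference([1, 2, 3, None, 5], -2, -1) == [0, 1, 2, 2, 1]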
def type_named(obj, name):
"""
Similar to the type() builtin, but looks in class bases
for named instance.
Parameters
----------
obj: object to look for class of
name : str, name of class
Returns
----------
    the named class; raises ValueError if no class of that name is found
"""
    # if obj is an instance of the named class, return its class directly
name = str(name)
if obj.__class__.__name__ == name:
return obj.__class__
for base in type_bases(obj):
if base.__name__ == name:
return base
raise ValueError('Unable to extract class of name ' + name) | def function[type_named, parameter[obj, name]]:
constant[
Similar to the type() builtin, but looks in class bases
for named instance.
Parameters
----------
obj: object to look for class of
name : str, name of class
Returns
----------
named class, or None
]
variable[name] assign[=] call[name[str], parameter[name[name]]]
if compare[name[obj].__class__.__name__ equal[==] name[name]] begin[:]
return[name[obj].__class__]
for taget[name[base]] in starred[call[name[type_bases], parameter[name[obj]]]] begin[:]
if compare[name[base].__name__ equal[==] name[name]] begin[:]
return[name[base]]
<ast.Raise object at 0x7da204566b30> | keyword[def] identifier[type_named] ( identifier[obj] , identifier[name] ):
literal[string]
identifier[name] = identifier[str] ( identifier[name] )
keyword[if] identifier[obj] . identifier[__class__] . identifier[__name__] == identifier[name] :
keyword[return] identifier[obj] . identifier[__class__]
keyword[for] identifier[base] keyword[in] identifier[type_bases] ( identifier[obj] ):
keyword[if] identifier[base] . identifier[__name__] == identifier[name] :
keyword[return] identifier[base]
keyword[raise] identifier[ValueError] ( literal[string] + identifier[name] ) | def type_named(obj, name):
"""
Similar to the type() builtin, but looks in class bases
for named instance.
Parameters
----------
obj: object to look for class of
name : str, name of class
Returns
----------
    the named class; raises ValueError if no class of that name is found
"""
    # if obj is an instance of the named class, return its class directly
name = str(name)
if obj.__class__.__name__ == name:
return obj.__class__ # depends on [control=['if'], data=[]]
for base in type_bases(obj):
if base.__name__ == name:
return base # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['base']]
raise ValueError('Unable to extract class of name ' + name) |
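# Usage sketch for type_named. `type_bases` is not shown in this excerpt, so a
# minimal stand-in that walks the MRO is assumed here:
def type_bases(obj):
    return type(obj).__mro__

class Base(object):
    pass

class Child(Base):
    pass

obj = Child()
assert type_named(obj, 'Child') is Child  # direct class-name match
assert type_named(obj, 'Base') is Base    # found among the bases
try:
    type_named(obj, 'Missing')
except ValueError as exc:
    print(exc)  # Unable to extract class of name Missing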
def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
    return the list of required packages, including their transitive requirements.
"""
requirements = tuple(pretty_req(r) for r in requirements)
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(
dist, timid_relpath(dist.location), req
))
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req)
if errors:
raise InstallationError('\n'.join(errors))
return result | def function[trace_requirements, parameter[requirements]]:
constant[given an iterable of pip InstallRequirements,
return the set of required packages, given their transitive requirements.
]
variable[requirements] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da1b26afc10>]]
variable[working_set] assign[=] call[name[fresh_working_set], parameter[]]
from relative_module[collections] import module[deque]
variable[queue] assign[=] call[name[deque], parameter[name[requirements]]]
variable[queued] assign[=] <ast.SetComp object at 0x7da18f58cd90>
variable[errors] assign[=] list[[]]
variable[result] assign[=] list[[]]
while name[queue] begin[:]
variable[req] assign[=] call[name[queue].popleft, parameter[]]
call[name[logger].debug, parameter[constant[tracing: %s], name[req]]]
<ast.Try object at 0x7da207f012d0>
assert[compare[name[dist] is_not constant[None]]]
call[name[result].append, parameter[call[name[dist_to_req], parameter[name[dist]]]]]
variable[extras] assign[=] <ast.ListComp object at 0x7da207f023e0>
for taget[name[sub_req]] in starred[call[name[sorted], parameter[call[name[dist].requires, parameter[]]]]] begin[:]
variable[sub_req] assign[=] call[name[InstallRequirement], parameter[name[sub_req], name[req]]]
if call[name[req_cycle], parameter[name[sub_req]]] begin[:]
call[name[logger].warning, parameter[constant[Circular dependency! %s], name[sub_req]]]
continue
if name[errors] begin[:]
<ast.Raise object at 0x7da18fe90d00>
return[name[result]] | keyword[def] identifier[trace_requirements] ( identifier[requirements] ):
literal[string]
identifier[requirements] = identifier[tuple] ( identifier[pretty_req] ( identifier[r] ) keyword[for] identifier[r] keyword[in] identifier[requirements] )
identifier[working_set] = identifier[fresh_working_set] ()
keyword[from] identifier[collections] keyword[import] identifier[deque]
identifier[queue] = identifier[deque] ( identifier[requirements] )
identifier[queued] ={ identifier[_package_req_to_pkg_resources_req] ( identifier[req] . identifier[req] ) keyword[for] identifier[req] keyword[in] identifier[queue] }
identifier[errors] =[]
identifier[result] =[]
keyword[while] identifier[queue] :
identifier[req] = identifier[queue] . identifier[popleft] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[req] )
keyword[try] :
identifier[dist] = identifier[working_set] . identifier[find_normalized] ( identifier[_package_req_to_pkg_resources_req] ( identifier[req] . identifier[req] ))
keyword[except] identifier[pkg_resources] . identifier[VersionConflict] keyword[as] identifier[conflict] :
identifier[dist] = identifier[conflict] . identifier[args] [ literal[int] ]
identifier[errors] . identifier[append] ( literal[string] . identifier[format] (
identifier[dist] , identifier[timid_relpath] ( identifier[dist] . identifier[location] ), identifier[req]
))
keyword[assert] identifier[dist] keyword[is] keyword[not] keyword[None] , literal[string]
identifier[result] . identifier[append] ( identifier[dist_to_req] ( identifier[dist] ))
identifier[extras] =[ identifier[extra] keyword[for] identifier[extra] keyword[in] identifier[req] . identifier[extras] keyword[if] identifier[extra] keyword[in] identifier[dist] . identifier[extras] ]
keyword[for] identifier[sub_req] keyword[in] identifier[sorted] ( identifier[dist] . identifier[requires] ( identifier[extras] = identifier[extras] ), identifier[key] = keyword[lambda] identifier[req] : identifier[req] . identifier[key] ):
identifier[sub_req] = identifier[InstallRequirement] ( identifier[sub_req] , identifier[req] )
keyword[if] identifier[req_cycle] ( identifier[sub_req] ):
identifier[logger] . identifier[warning] ( literal[string] , identifier[sub_req] )
keyword[continue]
keyword[elif] identifier[sub_req] . identifier[req] keyword[in] identifier[queued] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[sub_req] )
keyword[continue]
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[sub_req] )
identifier[queue] . identifier[append] ( identifier[sub_req] )
identifier[queued] . identifier[add] ( identifier[sub_req] . identifier[req] )
keyword[if] identifier[errors] :
keyword[raise] identifier[InstallationError] ( literal[string] . identifier[join] ( identifier[errors] ))
keyword[return] identifier[result] | def trace_requirements(requirements):
"""given an iterable of pip InstallRequirements,
    return the list of required packages, including their transitive requirements.
"""
requirements = tuple((pretty_req(r) for r in requirements))
working_set = fresh_working_set()
# breadth-first traversal:
from collections import deque
queue = deque(requirements)
queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
errors = []
result = []
while queue:
req = queue.popleft()
logger.debug('tracing: %s', req)
try:
dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req)) # depends on [control=['try'], data=[]]
except pkg_resources.VersionConflict as conflict:
dist = conflict.args[0]
errors.append('Error: version conflict: {} ({}) <-> {}'.format(dist, timid_relpath(dist.location), req)) # depends on [control=['except'], data=['conflict']]
assert dist is not None, 'Should be unreachable in pip8+'
result.append(dist_to_req(dist))
# TODO: pip does no validation of extras. should we?
extras = [extra for extra in req.extras if extra in dist.extras]
for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
sub_req = InstallRequirement(sub_req, req)
if req_cycle(sub_req):
logger.warning('Circular dependency! %s', sub_req)
continue # depends on [control=['if'], data=[]]
elif sub_req.req in queued:
logger.debug('already queued: %s', sub_req)
continue # depends on [control=['if'], data=[]]
else:
logger.debug('adding sub-requirement %s', sub_req)
queue.append(sub_req)
queued.add(sub_req.req) # depends on [control=['for'], data=['sub_req']] # depends on [control=['while'], data=[]]
if errors:
raise InstallationError('\n'.join(errors)) # depends on [control=['if'], data=[]]
return result |
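# The traversal above is a breadth-first walk over the dependency graph, with
# the `queued` set preventing re-enqueueing; the same shape on a toy dependency
# dict (hypothetical package names, no pip involved):
from collections import deque

def trace_toy_requirements(roots, deps):
    queue, queued, order = deque(roots), set(roots), []
    while queue:
        pkg = queue.popleft()
        order.append(pkg)
        for sub in sorted(deps.get(pkg, ())):
            if sub not in queued:  # mirrors the "already queued" check above
                queued.add(sub)
                queue.append(sub)
    return order

deps = {'app': ['requests'], 'requests': ['idna', 'urllib3']}
print(trace_toy_requirements(['app'], deps))  # ['app', 'requests', 'idna', 'urllib3']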
def format(self, version=0x10, wipe=None):
"""Format a FeliCa Lite Tag for NDEF.
"""
return super(FelicaLite, self).format(version, wipe) | def function[format, parameter[self, version, wipe]]:
constant[Format a FeliCa Lite Tag for NDEF.
]
return[call[call[name[super], parameter[name[FelicaLite], name[self]]].format, parameter[name[version], name[wipe]]]] | keyword[def] identifier[format] ( identifier[self] , identifier[version] = literal[int] , identifier[wipe] = keyword[None] ):
literal[string]
keyword[return] identifier[super] ( identifier[FelicaLite] , identifier[self] ). identifier[format] ( identifier[version] , identifier[wipe] ) | def format(self, version=16, wipe=None):
"""Format a FeliCa Lite Tag for NDEF.
"""
return super(FelicaLite, self).format(version, wipe) |
def write_byte(self, byte):
"""Write one byte."""
self.payload[self.pos] = byte
self.pos = self.pos + 1 | def function[write_byte, parameter[self, byte]]:
constant[Write one byte.]
call[name[self].payload][name[self].pos] assign[=] name[byte]
name[self].pos assign[=] binary_operation[name[self].pos + constant[1]] | keyword[def] identifier[write_byte] ( identifier[self] , identifier[byte] ):
literal[string]
identifier[self] . identifier[payload] [ identifier[self] . identifier[pos] ]= identifier[byte]
identifier[self] . identifier[pos] = identifier[self] . identifier[pos] + literal[int] | def write_byte(self, byte):
"""Write one byte."""
self.payload[self.pos] = byte
self.pos = self.pos + 1 |
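# Minimal sketch of the container write_byte assumes: a fixed-size bytearray
# payload plus a write cursor (the class and its names here are hypothetical):
class PayloadWriter:
    def __init__(self, size):
        self.payload = bytearray(size)
        self.pos = 0
    write_byte = write_byte  # reuse the function above as a method

w = PayloadWriter(2)
w.write_byte(0xAB)
w.write_byte(0xCD)
print(w.payload.hex())  # abcd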
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'workspaces') and self.workspaces is not None:
_dict['workspaces'] = [x._to_dict() for x in self.workspaces]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict()
return _dict | def function[_to_dict, parameter[self]]:
constant[Return a json dictionary representing this model.]
variable[_dict] assign[=] dictionary[[], []]
if <ast.BoolOp object at 0x7da1b2347280> begin[:]
call[name[_dict]][constant[workspaces]] assign[=] <ast.ListComp object at 0x7da1b2345db0>
if <ast.BoolOp object at 0x7da1b2347400> begin[:]
call[name[_dict]][constant[pagination]] assign[=] call[name[self].pagination._to_dict, parameter[]]
return[name[_dict]] | keyword[def] identifier[_to_dict] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[workspaces] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]=[ identifier[x] . identifier[_to_dict] () keyword[for] identifier[x] keyword[in] identifier[self] . identifier[workspaces] ]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[pagination] keyword[is] keyword[not] keyword[None] :
identifier[_dict] [ literal[string] ]= identifier[self] . identifier[pagination] . identifier[_to_dict] ()
keyword[return] identifier[_dict] | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'workspaces') and self.workspaces is not None:
_dict['workspaces'] = [x._to_dict() for x in self.workspaces] # depends on [control=['if'], data=[]]
if hasattr(self, 'pagination') and self.pagination is not None:
_dict['pagination'] = self.pagination._to_dict() # depends on [control=['if'], data=[]]
return _dict |
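# Stripped-down illustration of the recursive _to_dict pattern above, with
# minimal stand-in classes (names hypothetical):
class _Pagination:
    def _to_dict(self):
        return {'total': 1}

class _Workspace:
    def _to_dict(self):
        return {'name': 'demo'}

class _WorkspaceCollection:
    def __init__(self):
        self.workspaces = [_Workspace()]
        self.pagination = _Pagination()

    def _to_dict(self):
        _dict = {}
        if getattr(self, 'workspaces', None) is not None:
            _dict['workspaces'] = [x._to_dict() for x in self.workspaces]
        if getattr(self, 'pagination', None) is not None:
            _dict['pagination'] = self.pagination._to_dict()
        return _dict

print(_WorkspaceCollection()._to_dict())
# {'workspaces': [{'name': 'demo'}], 'pagination': {'total': 1}}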
def fit(self, **kwargs):
"""
        This will try to determine fit parameters using the scipy.optimize.leastsq
        algorithm. This function relies on previous calls to set_data() and
set_functions().
Notes
-----
results of the fit algorithm are stored in self.results.
See scipy.optimize.leastsq for more information.
Optional keyword arguments are sent to self.set() prior to
fitting.
"""
if len(self._set_xdata)==0 or len(self._set_ydata)==0:
return self._error("No data. Please use set_data() prior to fitting.")
if self._f_raw is None:
return self._error("No functions. Please use set_functions() prior to fitting.")
# Do the processing once, to increase efficiency
self._massage_data()
# Send the keyword arguments to the settings
self.set(**kwargs)
# do the actual optimization
self.results = _opt.leastsq(self._studentized_residuals_concatenated, self._pguess, full_output=1)
# plot if necessary
if self['autoplot']: self.plot()
return self | def function[fit, parameter[self]]:
constant[
This will try to determine fit parameters using scipy.optimize.leastsq
algorithm. This function relies on a previous call of set_data() and
set_functions().
Notes
-----
results of the fit algorithm are stored in self.results.
See scipy.optimize.leastsq for more information.
Optional keyword arguments are sent to self.set() prior to
fitting.
]
if <ast.BoolOp object at 0x7da18ede79d0> begin[:]
return[call[name[self]._error, parameter[constant[No data. Please use set_data() prior to fitting.]]]]
if compare[name[self]._f_raw is constant[None]] begin[:]
return[call[name[self]._error, parameter[constant[No functions. Please use set_functions() prior to fitting.]]]]
call[name[self]._massage_data, parameter[]]
call[name[self].set, parameter[]]
name[self].results assign[=] call[name[_opt].leastsq, parameter[name[self]._studentized_residuals_concatenated, name[self]._pguess]]
if call[name[self]][constant[autoplot]] begin[:]
call[name[self].plot, parameter[]]
return[name[self]] | keyword[def] identifier[fit] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ( identifier[self] . identifier[_set_xdata] )== literal[int] keyword[or] identifier[len] ( identifier[self] . identifier[_set_ydata] )== literal[int] :
keyword[return] identifier[self] . identifier[_error] ( literal[string] )
keyword[if] identifier[self] . identifier[_f_raw] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[_error] ( literal[string] )
identifier[self] . identifier[_massage_data] ()
identifier[self] . identifier[set] (** identifier[kwargs] )
identifier[self] . identifier[results] = identifier[_opt] . identifier[leastsq] ( identifier[self] . identifier[_studentized_residuals_concatenated] , identifier[self] . identifier[_pguess] , identifier[full_output] = literal[int] )
keyword[if] identifier[self] [ literal[string] ]: identifier[self] . identifier[plot] ()
keyword[return] identifier[self] | def fit(self, **kwargs):
"""
        This will try to determine fit parameters using the scipy.optimize.leastsq
        algorithm. This function relies on previous calls to set_data() and
set_functions().
Notes
-----
results of the fit algorithm are stored in self.results.
See scipy.optimize.leastsq for more information.
Optional keyword arguments are sent to self.set() prior to
fitting.
"""
if len(self._set_xdata) == 0 or len(self._set_ydata) == 0:
return self._error('No data. Please use set_data() prior to fitting.') # depends on [control=['if'], data=[]]
if self._f_raw is None:
return self._error('No functions. Please use set_functions() prior to fitting.') # depends on [control=['if'], data=[]]
# Do the processing once, to increase efficiency
self._massage_data()
# Send the keyword arguments to the settings
self.set(**kwargs)
# do the actual optimization
self.results = _opt.leastsq(self._studentized_residuals_concatenated, self._pguess, full_output=1)
# plot if necessary
if self['autoplot']:
self.plot() # depends on [control=['if'], data=[]]
return self |
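# What the fit above ultimately drives is scipy.optimize.leastsq with
# full_output=1 on a residuals callable. A toy stand-alone line fit (the
# fitter class itself adds data massaging and studentized residuals):
import numpy as np
import scipy.optimize as _opt

xdata = np.array([0.0, 1.0, 2.0, 3.0])
ydata = 2.0 * xdata + 1.0

def residuals(p):
    return ydata - (p[0] * xdata + p[1])

results = _opt.leastsq(residuals, [1.0, 0.0], full_output=1)
print(results[0])  # approximately [2., 1.]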
def all(cls, sort=None, limit=None):
"""Returns all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
"""
return cls.where(sort=sort, limit=limit) | def function[all, parameter[cls, sort, limit]]:
constant[Returns all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
]
return[call[name[cls].where, parameter[]]] | keyword[def] identifier[all] ( identifier[cls] , identifier[sort] = keyword[None] , identifier[limit] = keyword[None] ):
literal[string]
keyword[return] identifier[cls] . identifier[where] ( identifier[sort] = identifier[sort] , identifier[limit] = identifier[limit] ) | def all(cls, sort=None, limit=None):
"""Returns all objects of this type. Alias for where() (without filter arguments).
See `where` for documentation on the `sort` and `limit` parameters.
"""
return cls.where(sort=sort, limit=limit) |
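# `all` simply delegates to `where` with no filter arguments; a minimal sketch
# of that classmethod pairing over an in-memory store (hypothetical model):
class Record:
    _store = [{'id': 2}, {'id': 1}]

    @classmethod
    def where(cls, sort=None, limit=None):
        rows = list(cls._store)
        if sort:
            rows.sort(key=lambda row: row[sort])
        return rows[:limit] if limit else rows

    @classmethod
    def all(cls, sort=None, limit=None):
        return cls.where(sort=sort, limit=limit)

print(Record.all(sort='id', limit=1))  # [{'id': 1}]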
def separation(X,y,samples=False):
""" return the sum of the between-class squared distance"""
# pdb.set_trace()
num_classes = len(np.unique(y))
total_dist = (X.max()-X.min())**2
if samples:
        # return the between-class distance for each sample
separation = np.zeros(y.shape)
for label in np.unique(y):
for outsider in np.unique(y[y!=label]):
separation[y==label] += (X[y==label] - np.mean(X[y==outsider])) ** 2
        # normalization between 0 and 1 (currently disabled below)
print('separation:',separation)
print('num_classes:',num_classes)
print('total_dist:',total_dist)
        separation = separation  # /separation.max() (normalization disabled)
print('separation after normalization:',separation)
else:
# return aggregate score
separation = 0
for i,label in enumerate(np.unique(y)):
for outsider in np.unique(y[y!=label]):
separation += np.sum((X[y==label] - np.mean(X[y==outsider])) ** 2)/len(y[y==label])
separation = separation/len(np.unique(y))
return separation | def function[separation, parameter[X, y, samples]]:
constant[ return the sum of the between-class squared distance]
variable[num_classes] assign[=] call[name[len], parameter[call[name[np].unique, parameter[name[y]]]]]
variable[total_dist] assign[=] binary_operation[binary_operation[call[name[X].max, parameter[]] - call[name[X].min, parameter[]]] ** constant[2]]
if name[samples] begin[:]
variable[separation] assign[=] call[name[np].zeros, parameter[name[y].shape]]
for taget[name[label]] in starred[call[name[np].unique, parameter[name[y]]]] begin[:]
for taget[name[outsider]] in starred[call[name[np].unique, parameter[call[name[y]][compare[name[y] not_equal[!=] name[label]]]]]] begin[:]
<ast.AugAssign object at 0x7da18eb57b20>
call[name[print], parameter[constant[separation:], name[separation]]]
call[name[print], parameter[constant[num_classes:], name[num_classes]]]
call[name[print], parameter[constant[total_dist:], name[total_dist]]]
variable[separation] assign[=] name[separation]
call[name[print], parameter[constant[separation after normalization:], name[separation]]]
return[name[separation]] | keyword[def] identifier[separation] ( identifier[X] , identifier[y] , identifier[samples] = keyword[False] ):
literal[string]
identifier[num_classes] = identifier[len] ( identifier[np] . identifier[unique] ( identifier[y] ))
identifier[total_dist] =( identifier[X] . identifier[max] ()- identifier[X] . identifier[min] ())** literal[int]
keyword[if] identifier[samples] :
identifier[separation] = identifier[np] . identifier[zeros] ( identifier[y] . identifier[shape] )
keyword[for] identifier[label] keyword[in] identifier[np] . identifier[unique] ( identifier[y] ):
keyword[for] identifier[outsider] keyword[in] identifier[np] . identifier[unique] ( identifier[y] [ identifier[y] != identifier[label] ]):
identifier[separation] [ identifier[y] == identifier[label] ]+=( identifier[X] [ identifier[y] == identifier[label] ]- identifier[np] . identifier[mean] ( identifier[X] [ identifier[y] == identifier[outsider] ]))** literal[int]
identifier[print] ( literal[string] , identifier[separation] )
identifier[print] ( literal[string] , identifier[num_classes] )
identifier[print] ( literal[string] , identifier[total_dist] )
identifier[separation] = identifier[separation]
identifier[print] ( literal[string] , identifier[separation] )
keyword[else] :
identifier[separation] = literal[int]
keyword[for] identifier[i] , identifier[label] keyword[in] identifier[enumerate] ( identifier[np] . identifier[unique] ( identifier[y] )):
keyword[for] identifier[outsider] keyword[in] identifier[np] . identifier[unique] ( identifier[y] [ identifier[y] != identifier[label] ]):
identifier[separation] += identifier[np] . identifier[sum] (( identifier[X] [ identifier[y] == identifier[label] ]- identifier[np] . identifier[mean] ( identifier[X] [ identifier[y] == identifier[outsider] ]))** literal[int] )/ identifier[len] ( identifier[y] [ identifier[y] == identifier[label] ])
identifier[separation] = identifier[separation] / identifier[len] ( identifier[np] . identifier[unique] ( identifier[y] ))
keyword[return] identifier[separation] | def separation(X, y, samples=False):
""" return the sum of the between-class squared distance"""
# pdb.set_trace()
num_classes = len(np.unique(y))
total_dist = (X.max() - X.min()) ** 2
if samples:
        # return the between-class distance for each sample
separation = np.zeros(y.shape)
for label in np.unique(y):
for outsider in np.unique(y[y != label]):
separation[y == label] += (X[y == label] - np.mean(X[y == outsider])) ** 2 # depends on [control=['for'], data=['outsider']] # depends on [control=['for'], data=['label']]
        # normalization between 0 and 1 (currently disabled below)
print('separation:', separation)
print('num_classes:', num_classes)
print('total_dist:', total_dist)
        separation = separation  # /separation.max() (normalization disabled)
print('separation after normalization:', separation) # depends on [control=['if'], data=[]]
else:
# return aggregate score
separation = 0
for (i, label) in enumerate(np.unique(y)):
for outsider in np.unique(y[y != label]):
separation += np.sum((X[y == label] - np.mean(X[y == outsider])) ** 2) / len(y[y == label]) # depends on [control=['for'], data=['outsider']] # depends on [control=['for'], data=[]]
separation = separation / len(np.unique(y))
return separation |
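# Worked example for the aggregate branch of separation() above (numpy must be
# imported as np, which the function itself already assumes):
import numpy as np

X = np.array([1.0, 2.0, 10.0, 11.0])
y = np.array([0, 0, 1, 1])
# class 0 vs mean(class 1) = 10.5: ((1 - 10.5)**2 + (2 - 10.5)**2) / 2 = 81.25
# class 1 vs mean(class 0) = 1.5:  ((10 - 1.5)**2 + (11 - 1.5)**2) / 2 = 81.25
# averaged over the 2 classes: 81.25
print(separation(X, y))  # 81.25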
def processing_block_devices(self):
"""Get list of processing block devices."""
# Get the list and number of Tango PB devices
tango_db = Database()
pb_device_class = "ProcessingBlockDevice"
pb_device_server_instance = "processing_block_ds/1"
pb_devices = tango_db.get_device_name(pb_device_server_instance,
pb_device_class)
LOG.info('Number of PB devices in the pool = %d', len(pb_devices))
pb_device_map = []
for pb_device_name in pb_devices:
device = DeviceProxy(pb_device_name)
if device.pb_id:
LOG.info('%s %s', pb_device_name, device.pb_id)
pb_device_map.append((pb_device_name, device.pb_id))
return str(pb_device_map) | def function[processing_block_devices, parameter[self]]:
constant[Get list of processing block devices.]
variable[tango_db] assign[=] call[name[Database], parameter[]]
variable[pb_device_class] assign[=] constant[ProcessingBlockDevice]
variable[pb_device_server_instance] assign[=] constant[processing_block_ds/1]
variable[pb_devices] assign[=] call[name[tango_db].get_device_name, parameter[name[pb_device_server_instance], name[pb_device_class]]]
call[name[LOG].info, parameter[constant[Number of PB devices in the pool = %d], call[name[len], parameter[name[pb_devices]]]]]
variable[pb_device_map] assign[=] list[[]]
for taget[name[pb_device_name]] in starred[name[pb_devices]] begin[:]
variable[device] assign[=] call[name[DeviceProxy], parameter[name[pb_device_name]]]
if name[device].pb_id begin[:]
call[name[LOG].info, parameter[constant[%s %s], name[pb_device_name], name[device].pb_id]]
call[name[pb_device_map].append, parameter[tuple[[<ast.Name object at 0x7da18f7232b0>, <ast.Attribute object at 0x7da18f721540>]]]]
return[call[name[str], parameter[name[pb_device_map]]]] | keyword[def] identifier[processing_block_devices] ( identifier[self] ):
literal[string]
identifier[tango_db] = identifier[Database] ()
identifier[pb_device_class] = literal[string]
identifier[pb_device_server_instance] = literal[string]
identifier[pb_devices] = identifier[tango_db] . identifier[get_device_name] ( identifier[pb_device_server_instance] ,
identifier[pb_device_class] )
identifier[LOG] . identifier[info] ( literal[string] , identifier[len] ( identifier[pb_devices] ))
identifier[pb_device_map] =[]
keyword[for] identifier[pb_device_name] keyword[in] identifier[pb_devices] :
identifier[device] = identifier[DeviceProxy] ( identifier[pb_device_name] )
keyword[if] identifier[device] . identifier[pb_id] :
identifier[LOG] . identifier[info] ( literal[string] , identifier[pb_device_name] , identifier[device] . identifier[pb_id] )
identifier[pb_device_map] . identifier[append] (( identifier[pb_device_name] , identifier[device] . identifier[pb_id] ))
keyword[return] identifier[str] ( identifier[pb_device_map] ) | def processing_block_devices(self):
"""Get list of processing block devices."""
# Get the list and number of Tango PB devices
tango_db = Database()
pb_device_class = 'ProcessingBlockDevice'
pb_device_server_instance = 'processing_block_ds/1'
pb_devices = tango_db.get_device_name(pb_device_server_instance, pb_device_class)
LOG.info('Number of PB devices in the pool = %d', len(pb_devices))
pb_device_map = []
for pb_device_name in pb_devices:
device = DeviceProxy(pb_device_name)
if device.pb_id:
LOG.info('%s %s', pb_device_name, device.pb_id)
pb_device_map.append((pb_device_name, device.pb_id)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pb_device_name']]
return str(pb_device_map) |
def __replace_within_document(self, document, occurrences, replacement_pattern):
"""
Replaces given pattern occurrences in given document using given settings.
:param document: Document.
:type document: QTextDocument
:param replacement_pattern: Replacement pattern.
:type replacement_pattern: unicode
:return: Replaced occurrences count.
:rtype: int
"""
cursor = QTextCursor(document)
cursor.beginEditBlock()
offset = count = 0
for occurence in sorted(occurrences, key=lambda x: x.position):
cursor.setPosition(offset + occurence.position, QTextCursor.MoveAnchor)
cursor.setPosition(offset + occurence.position + occurence.length, QTextCursor.KeepAnchor)
cursor.insertText(replacement_pattern)
offset += len(replacement_pattern) - occurence.length
count += 1
cursor.endEditBlock()
return count | def function[__replace_within_document, parameter[self, document, occurrences, replacement_pattern]]:
constant[
Replaces given pattern occurrences in given document using given settings.
:param document: Document.
:type document: QTextDocument
:param replacement_pattern: Replacement pattern.
:type replacement_pattern: unicode
:return: Replaced occurrences count.
:rtype: int
]
variable[cursor] assign[=] call[name[QTextCursor], parameter[name[document]]]
call[name[cursor].beginEditBlock, parameter[]]
variable[offset] assign[=] constant[0]
for taget[name[occurence]] in starred[call[name[sorted], parameter[name[occurrences]]]] begin[:]
call[name[cursor].setPosition, parameter[binary_operation[name[offset] + name[occurence].position], name[QTextCursor].MoveAnchor]]
call[name[cursor].setPosition, parameter[binary_operation[binary_operation[name[offset] + name[occurence].position] + name[occurence].length], name[QTextCursor].KeepAnchor]]
call[name[cursor].insertText, parameter[name[replacement_pattern]]]
<ast.AugAssign object at 0x7da1b0861a50>
<ast.AugAssign object at 0x7da1b0863970>
call[name[cursor].endEditBlock, parameter[]]
return[name[count]] | keyword[def] identifier[__replace_within_document] ( identifier[self] , identifier[document] , identifier[occurrences] , identifier[replacement_pattern] ):
literal[string]
identifier[cursor] = identifier[QTextCursor] ( identifier[document] )
identifier[cursor] . identifier[beginEditBlock] ()
identifier[offset] = identifier[count] = literal[int]
keyword[for] identifier[occurence] keyword[in] identifier[sorted] ( identifier[occurrences] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[position] ):
identifier[cursor] . identifier[setPosition] ( identifier[offset] + identifier[occurence] . identifier[position] , identifier[QTextCursor] . identifier[MoveAnchor] )
identifier[cursor] . identifier[setPosition] ( identifier[offset] + identifier[occurence] . identifier[position] + identifier[occurence] . identifier[length] , identifier[QTextCursor] . identifier[KeepAnchor] )
identifier[cursor] . identifier[insertText] ( identifier[replacement_pattern] )
identifier[offset] += identifier[len] ( identifier[replacement_pattern] )- identifier[occurence] . identifier[length]
identifier[count] += literal[int]
identifier[cursor] . identifier[endEditBlock] ()
keyword[return] identifier[count] | def __replace_within_document(self, document, occurrences, replacement_pattern):
"""
Replaces given pattern occurrences in given document using given settings.
:param document: Document.
:type document: QTextDocument
:param replacement_pattern: Replacement pattern.
:type replacement_pattern: unicode
:return: Replaced occurrences count.
:rtype: int
"""
cursor = QTextCursor(document)
cursor.beginEditBlock()
offset = count = 0
for occurence in sorted(occurrences, key=lambda x: x.position):
cursor.setPosition(offset + occurence.position, QTextCursor.MoveAnchor)
cursor.setPosition(offset + occurence.position + occurence.length, QTextCursor.KeepAnchor)
cursor.insertText(replacement_pattern)
offset += len(replacement_pattern) - occurence.length
count += 1 # depends on [control=['for'], data=['occurence']]
cursor.endEditBlock()
return count |
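# The key detail above is the running `offset`: each replacement shifts every
# later occurrence by (len(replacement) - matched length). The same bookkeeping
# on a plain string, with (position, length) pairs as if found by a search
# (values below are hypothetical):
text = 'foo bar foo'
occurrences = [(0, 3), (8, 3)]  # two matches of 'foo'
replacement = 'quux'
offset = 0
for pos, length in sorted(occurrences):
    text = text[:pos + offset] + replacement + text[pos + offset + length:]
    offset += len(replacement) - length
print(text)  # quux bar quux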
def get_path(src): # pragma: no cover
"""
Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path
"""
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red'))
print('You need a local clone or release of (a fork of) '
'https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve()
res = False | def function[get_path, parameter[src]]:
constant[
Prompts the user to input a local path.
:param src: github repository name
:return: Absolute local path
]
variable[res] assign[=] constant[None]
while <ast.UnaryOp object at 0x7da18f811360> begin[:]
if compare[name[res] is constant[False]] begin[:]
call[name[print], parameter[call[name[colored], parameter[constant[You must provide a path to an existing directory!], constant[red]]]]]
call[name[print], parameter[call[constant[You need a local clone or release of (a fork of) https://github.com/{0}].format, parameter[name[src]]]]]
variable[res] assign[=] call[name[input], parameter[call[name[colored], parameter[call[constant[Local path to {0}: ].format, parameter[name[src]]], constant[green]]]]]
if <ast.BoolOp object at 0x7da207f01d20> begin[:]
return[call[call[name[Path], parameter[name[res]]].resolve, parameter[]]]
variable[res] assign[=] constant[False] | keyword[def] identifier[get_path] ( identifier[src] ):
literal[string]
identifier[res] = keyword[None]
keyword[while] keyword[not] identifier[res] :
keyword[if] identifier[res] keyword[is] keyword[False] :
identifier[print] ( identifier[colored] ( literal[string] , literal[string] ))
identifier[print] ( literal[string]
literal[string] . identifier[format] ( identifier[src] ))
identifier[res] = identifier[input] ( identifier[colored] ( literal[string] . identifier[format] ( identifier[src] ), literal[string] , identifier[attrs] =[ literal[string] ]))
keyword[if] identifier[res] keyword[and] identifier[Path] ( identifier[res] ). identifier[exists] ():
keyword[return] identifier[Path] ( identifier[res] ). identifier[resolve] ()
identifier[res] = keyword[False] | def get_path(src): # pragma: no cover
'\n Prompts the user to input a local path.\n\n :param src: github repository name\n :return: Absolute local path\n '
res = None
while not res:
if res is False:
print(colored('You must provide a path to an existing directory!', 'red')) # depends on [control=['if'], data=[]]
print('You need a local clone or release of (a fork of) https://github.com/{0}'.format(src))
res = input(colored('Local path to {0}: '.format(src), 'green', attrs=['blink']))
if res and Path(res).exists():
return Path(res).resolve() # depends on [control=['if'], data=[]]
res = False # depends on [control=['while'], data=[]] |
def copy_data_in_redis(self, redis_prefix, redis_instance):
"""
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
            redis_prefix (str): Prefix to distinguish the data in redis for the different lookup types
            redis_instance (redis.Redis): an instance of Redis
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running
           instance of Redis, as well as the Python Redis connector (pip install redis).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup type
- clublogxml
- countryfile
"""
if redis_instance is not None:
self._redis = redis_instance
if self._redis is None:
raise AttributeError("redis_instance is missing")
if redis_prefix is None:
raise KeyError("redis_prefix is missing")
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
self._push_dict_to_redis(self._entities, redis_prefix, "_entity_")
self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, "_call_ex_index_")
self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, "_call_ex_")
self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, "_prefix_index_")
self._push_dict_to_redis(self._prefixes, redis_prefix, "_prefix_")
self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, "_inv_op_index_")
self._push_dict_to_redis(self._invalid_operations, redis_prefix, "_inv_op_")
self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, "_zone_ex_index_")
self._push_dict_to_redis(self._zone_exceptions, redis_prefix, "_zone_ex_")
return True | def function[copy_data_in_redis, parameter[self, redis_prefix, redis_instance]]:
constant[
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
redis_prefix (str): Prefix to distinguish the data in redis for the different looktypes
redis_instance (str): an Instance of Redis
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running
instance of Redis, as well the python Redis connector (pip install redis-py).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup type
- clublogxml
- countryfile
]
if compare[name[redis_instance] is_not constant[None]] begin[:]
name[self]._redis assign[=] name[redis_instance]
if compare[name[self]._redis is constant[None]] begin[:]
<ast.Raise object at 0x7da1b109a650>
if compare[name[redis_prefix] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b109a6b0>
if <ast.BoolOp object at 0x7da1b1098c10> begin[:]
call[name[self]._push_dict_to_redis, parameter[name[self]._entities, name[redis_prefix], constant[_entity_]]]
call[name[self]._push_dict_index_to_redis, parameter[name[self]._callsign_exceptions_index, name[redis_prefix], constant[_call_ex_index_]]]
call[name[self]._push_dict_to_redis, parameter[name[self]._callsign_exceptions, name[redis_prefix], constant[_call_ex_]]]
call[name[self]._push_dict_index_to_redis, parameter[name[self]._prefixes_index, name[redis_prefix], constant[_prefix_index_]]]
call[name[self]._push_dict_to_redis, parameter[name[self]._prefixes, name[redis_prefix], constant[_prefix_]]]
call[name[self]._push_dict_index_to_redis, parameter[name[self]._invalid_operations_index, name[redis_prefix], constant[_inv_op_index_]]]
call[name[self]._push_dict_to_redis, parameter[name[self]._invalid_operations, name[redis_prefix], constant[_inv_op_]]]
call[name[self]._push_dict_index_to_redis, parameter[name[self]._zone_exceptions_index, name[redis_prefix], constant[_zone_ex_index_]]]
call[name[self]._push_dict_to_redis, parameter[name[self]._zone_exceptions, name[redis_prefix], constant[_zone_ex_]]]
return[constant[True]] | keyword[def] identifier[copy_data_in_redis] ( identifier[self] , identifier[redis_prefix] , identifier[redis_instance] ):
literal[string]
keyword[if] identifier[redis_instance] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_redis] = identifier[redis_instance]
keyword[if] identifier[self] . identifier[_redis] keyword[is] keyword[None] :
keyword[raise] identifier[AttributeError] ( literal[string] )
keyword[if] identifier[redis_prefix] keyword[is] keyword[None] :
keyword[raise] identifier[KeyError] ( literal[string] )
keyword[if] identifier[self] . identifier[_lookuptype] == literal[string] keyword[or] identifier[self] . identifier[_lookuptype] == literal[string] :
identifier[self] . identifier[_push_dict_to_redis] ( identifier[self] . identifier[_entities] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_index_to_redis] ( identifier[self] . identifier[_callsign_exceptions_index] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_to_redis] ( identifier[self] . identifier[_callsign_exceptions] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_index_to_redis] ( identifier[self] . identifier[_prefixes_index] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_to_redis] ( identifier[self] . identifier[_prefixes] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_index_to_redis] ( identifier[self] . identifier[_invalid_operations_index] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_to_redis] ( identifier[self] . identifier[_invalid_operations] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_index_to_redis] ( identifier[self] . identifier[_zone_exceptions_index] , identifier[redis_prefix] , literal[string] )
identifier[self] . identifier[_push_dict_to_redis] ( identifier[self] . identifier[_zone_exceptions] , identifier[redis_prefix] , literal[string] )
keyword[return] keyword[True] | def copy_data_in_redis(self, redis_prefix, redis_instance):
"""
Copy the complete lookup data into redis. Old data will be overwritten.
Args:
            redis_prefix (str): Prefix to distinguish the data in redis for the different lookup types
            redis_instance (redis.Redis): an instance of Redis
Returns:
bool: returns True when the data has been copied successfully into Redis
Example:
Copy the entire lookup data from the Country-files.com PLIST File into Redis. This example requires a running
           instance of Redis, as well as the Python Redis connector (pip install redis).
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile")
>>> print my_lookuplib.copy_data_in_redis(redis_prefix="CF", redis_instance=r)
True
Now let's create an instance of LookupLib, using Redis to query the data
>>> from pyhamtools import LookupLib
>>> import redis
>>> r = redis.Redis()
>>> my_lookuplib = LookupLib(lookuptype="countryfile", redis_instance=r, redis_prefix="CF")
>>> my_lookuplib.lookup_callsign("3D2RI")
{
u'adif': 460,
u'continent': u'OC',
u'country': u'Rotuma Island',
u'cqz': 32,
u'ituz': 56,
u'latitude': -12.48,
u'longitude': 177.08
}
Note:
This method is available for the following lookup type
- clublogxml
- countryfile
"""
if redis_instance is not None:
self._redis = redis_instance # depends on [control=['if'], data=['redis_instance']]
if self._redis is None:
raise AttributeError('redis_instance is missing') # depends on [control=['if'], data=[]]
if redis_prefix is None:
raise KeyError('redis_prefix is missing') # depends on [control=['if'], data=[]]
if self._lookuptype == 'clublogxml' or self._lookuptype == 'countryfile':
self._push_dict_to_redis(self._entities, redis_prefix, '_entity_')
self._push_dict_index_to_redis(self._callsign_exceptions_index, redis_prefix, '_call_ex_index_')
self._push_dict_to_redis(self._callsign_exceptions, redis_prefix, '_call_ex_')
self._push_dict_index_to_redis(self._prefixes_index, redis_prefix, '_prefix_index_')
self._push_dict_to_redis(self._prefixes, redis_prefix, '_prefix_')
self._push_dict_index_to_redis(self._invalid_operations_index, redis_prefix, '_inv_op_index_')
self._push_dict_to_redis(self._invalid_operations, redis_prefix, '_inv_op_')
self._push_dict_index_to_redis(self._zone_exceptions_index, redis_prefix, '_zone_ex_index_')
self._push_dict_to_redis(self._zone_exceptions, redis_prefix, '_zone_ex_') # depends on [control=['if'], data=[]]
return True |
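# The _push_* helpers are not shown in this excerpt; a plausible sketch of
# pushing one dict into Redis under a prefixed key layout (assumed layout;
# requires a running Redis server and the redis package):
import json
import redis

def push_dict_to_redis_sketch(r, data, prefix, infix):
    for key, value in data.items():
        r.set(prefix + infix + str(key), json.dumps(value))

r = redis.Redis()
push_dict_to_redis_sketch(r, {460: {'country': 'Rotuma Island'}}, 'CF', '_entity_')
print(json.loads(r.get('CF_entity_460')))  # {'country': 'Rotuma Island'}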
def cornerpoints(results, thin=1, span=None, cmap='plasma', color=None,
kde=True, nkde=1000, plot_kwargs=None, labels=None,
label_kwargs=None, truths=None, truth_color='red',
truth_kwargs=None, max_n_ticks=5, use_math_text=False,
fig=None):
"""
Generate a (sub-)corner plot of (weighted) samples.
Parameters
----------
results : :class:`~dynesty.results.Results` instance
A :class:`~dynesty.results.Results` instance from a nested
sampling run. **Compatible with results derived from**
`nestle <http://kylebarbary.com/nestle/>`_.
thin : int, optional
Thin the samples so that only each `thin`-th sample is plotted.
Default is `1` (no thinning).
span : iterable with shape (ndim,), optional
A list where each element is either a length-2 tuple containing
lower and upper bounds or a float from `(0., 1.]` giving the
fraction of (weighted) samples to include. If a fraction is provided,
the bounds are chosen to be equal-tailed. An example would be::
span = [(0., 10.), 0.95, (5., 6.)]
Default is `1.` for all parameters (no bound).
cmap : str, optional
A `~matplotlib`-style colormap used when plotting the points,
where each point is colored according to its weight. Default is
`'plasma'`.
color : str, optional
A `~matplotlib`-style color used when plotting the points.
This overrides the `cmap` option by giving all points
the same color. Default is `None` (not used).
kde : bool, optional
Whether to use kernel density estimation to estimate and plot
the PDF of the importance weights as a function of log-volume
(as opposed to the importance weights themselves). Default is
`True`.
nkde : int, optional
The number of grid points used when plotting the kernel density
estimate. Default is `1000`.
plot_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the points.
labels : iterable with shape (ndim,), optional
A list of names for each parameter. If not provided, the default name
used when plotting will follow :math:`x_i` style.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
truths : iterable with shape (ndim,), optional
A list of reference values that will be overplotted on the traces and
marginalized 1-D posteriors as solid horizontal/vertical lines.
Individual values can be exempt using `None`. Default is `None`.
truth_color : str or iterable with shape (ndim,), optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting `truths`.
Default is `'red'`.
truth_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the vertical
and horizontal lines with `truths`.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the points onto the provided figure object.
Otherwise, by default an internal figure is generated.
Returns
-------
cornerpoints : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output (sub-)corner plot of (weighted) samples.
"""
# Initialize values.
if truth_kwargs is None:
truth_kwargs = dict()
if label_kwargs is None:
label_kwargs = dict()
if plot_kwargs is None:
plot_kwargs = dict()
# Set defaults.
plot_kwargs['s'] = plot_kwargs.get('s', 1)
truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)
# Extract weighted samples.
samples = results['samples']
logvol = results['logvol']
try:
weights = np.exp(results['logwt'] - results['logz'][-1])
except:
weights = results['weights']
if kde:
# Derive kernel density estimate.
wt_kde = gaussian_kde(resample_equal(-logvol, weights)) # KDE
logvol_grid = np.linspace(logvol[0], logvol[-1], nkde) # resample
wt_grid = wt_kde.pdf(-logvol_grid) # evaluate KDE PDF
weights = np.interp(-logvol, -logvol_grid, wt_grid) # interpolate
# Deal with 1D results. A number of extra catches are also here
# in case users are trying to plot other results besides the `Results`
# instance generated by `dynesty`.
samples = np.atleast_1d(samples)
if len(samples.shape) == 1:
samples = np.atleast_2d(samples)
else:
assert len(samples.shape) == 2, "Samples must be 1- or 2-D."
samples = samples.T
assert samples.shape[0] <= samples.shape[1], "There are more " \
"dimensions than samples!"
ndim, nsamps = samples.shape
# Check weights.
if weights.ndim != 1:
raise ValueError("Weights must be 1-D.")
if nsamps != weights.shape[0]:
raise ValueError("The number of weights and samples disagree!")
# Determine plotting bounds.
if span is not None:
if len(span) != ndim:
raise ValueError("Dimension mismatch between samples and span.")
for i, _ in enumerate(span):
try:
xmin, xmax = span[i]
            except Exception:
q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
span[i] = _quantile(samples[i], q, weights=weights)
# Set labels
if labels is None:
labels = [r"$x_{"+str(i+1)+"}$" for i in range(ndim)]
# Set colormap.
if color is None:
color = weights
# Setup axis layout (from `corner.py`).
factor = 2.0 # size of side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # size of width/height margin
plotdim = factor * (ndim - 1.) + factor * (ndim - 2.) * whspace
dim = lbdim + plotdim + trdim # total size
# Initialize figure.
if fig is None:
fig, axes = pl.subplots(ndim - 1, ndim - 1, figsize=(dim, dim))
else:
try:
fig, axes = fig
axes = np.array(axes).reshape((ndim - 1, ndim - 1))
        except Exception:
raise ValueError("Mismatch between axes and dimension.")
# Format figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
wspace=whspace, hspace=whspace)
# Plot the 2-D projected samples.
for i, x in enumerate(samples[1:]):
for j, y in enumerate(samples[:-1]):
try:
ax = axes[i, j]
            except Exception:
ax = axes
# Setup axes.
if span is not None:
ax.set_xlim(span[j])
                ax.set_ylim(span[i + 1])  # the y-axis shows dimension i+1
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue
if max_n_ticks == 0:
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator())
else:
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks,
prune="lower"))
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
ax.xaxis.set_major_formatter(sf)
ax.yaxis.set_major_formatter(sf)
if i < ndim - 2:
ax.set_xticklabels([])
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
if j > 0:
ax.set_yticklabels([])
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
ax.set_ylabel(labels[i+1], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# Plot distribution.
in_bounds = np.ones_like(y).astype('bool')
            if span is not None and span[i + 1] is not None:  # x is dim i+1
                in_bounds *= ((x >= span[i + 1][0]) & (x <= span[i + 1][1]))
if span is not None and span[j] is not None:
in_bounds *= ((y >= span[j][0]) & (y <= span[j][1]))
ax.scatter(y[in_bounds][::thin], x[in_bounds][::thin],
c=color, cmap=cmap, **plot_kwargs)
# Add truth values
if truths is not None:
if truths[j] is not None:
try:
[ax.axvline(t, color=truth_color, **truth_kwargs)
for t in truths[j]]
                    except Exception:
ax.axvline(truths[j], color=truth_color,
**truth_kwargs)
if truths[i+1] is not None:
try:
[ax.axhline(t, color=truth_color, **truth_kwargs)
for t in truths[i+1]]
                    except Exception:
ax.axhline(truths[i+1], color=truth_color,
**truth_kwargs)
return (fig, axes) | def function[cornerpoints, parameter[results, thin, span, cmap, color, kde, nkde, plot_kwargs, labels, label_kwargs, truths, truth_color, truth_kwargs, max_n_ticks, use_math_text, fig]]:
constant[
Generate a (sub-)corner plot of (weighted) samples.
Parameters
----------
results : :class:`~dynesty.results.Results` instance
A :class:`~dynesty.results.Results` instance from a nested
sampling run. **Compatible with results derived from**
`nestle <http://kylebarbary.com/nestle/>`_.
thin : int, optional
Thin the samples so that only each `thin`-th sample is plotted.
Default is `1` (no thinning).
span : iterable with shape (ndim,), optional
A list where each element is either a length-2 tuple containing
lower and upper bounds or a float from `(0., 1.]` giving the
fraction of (weighted) samples to include. If a fraction is provided,
the bounds are chosen to be equal-tailed. An example would be::
span = [(0., 10.), 0.95, (5., 6.)]
Default is `1.` for all parameters (no bound).
cmap : str, optional
A `~matplotlib`-style colormap used when plotting the points,
where each point is colored according to its weight. Default is
`'plasma'`.
color : str, optional
A `~matplotlib`-style color used when plotting the points.
This overrides the `cmap` option by giving all points
the same color. Default is `None` (not used).
kde : bool, optional
Whether to use kernel density estimation to estimate and plot
the PDF of the importance weights as a function of log-volume
(as opposed to the importance weights themselves). Default is
`True`.
nkde : int, optional
The number of grid points used when plotting the kernel density
estimate. Default is `1000`.
plot_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the points.
labels : iterable with shape (ndim,), optional
A list of names for each parameter. If not provided, the default name
used when plotting will follow :math:`x_i` style.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
truths : iterable with shape (ndim,), optional
A list of reference values that will be overplotted on the traces and
marginalized 1-D posteriors as solid horizontal/vertical lines.
Individual values can be exempt using `None`. Default is `None`.
truth_color : str or iterable with shape (ndim,), optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting `truths`.
Default is `'red'`.
truth_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the vertical
and horizontal lines with `truths`.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the points onto the provided figure object.
Otherwise, by default an internal figure is generated.
Returns
-------
cornerpoints : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output (sub-)corner plot of (weighted) samples.
]
if compare[name[truth_kwargs] is constant[None]] begin[:]
variable[truth_kwargs] assign[=] call[name[dict], parameter[]]
if compare[name[label_kwargs] is constant[None]] begin[:]
variable[label_kwargs] assign[=] call[name[dict], parameter[]]
if compare[name[plot_kwargs] is constant[None]] begin[:]
variable[plot_kwargs] assign[=] call[name[dict], parameter[]]
call[name[plot_kwargs]][constant[s]] assign[=] call[name[plot_kwargs].get, parameter[constant[s], constant[1]]]
call[name[truth_kwargs]][constant[linestyle]] assign[=] call[name[truth_kwargs].get, parameter[constant[linestyle], constant[solid]]]
call[name[truth_kwargs]][constant[linewidth]] assign[=] call[name[truth_kwargs].get, parameter[constant[linewidth], constant[2]]]
call[name[truth_kwargs]][constant[alpha]] assign[=] call[name[truth_kwargs].get, parameter[constant[alpha], constant[0.7]]]
variable[samples] assign[=] call[name[results]][constant[samples]]
variable[logvol] assign[=] call[name[results]][constant[logvol]]
<ast.Try object at 0x7da1b1ec9cc0>
if name[kde] begin[:]
variable[wt_kde] assign[=] call[name[gaussian_kde], parameter[call[name[resample_equal], parameter[<ast.UnaryOp object at 0x7da1b1ec9720>, name[weights]]]]]
variable[logvol_grid] assign[=] call[name[np].linspace, parameter[call[name[logvol]][constant[0]], call[name[logvol]][<ast.UnaryOp object at 0x7da1b1ec94b0>], name[nkde]]]
variable[wt_grid] assign[=] call[name[wt_kde].pdf, parameter[<ast.UnaryOp object at 0x7da1b1ec9330>]]
variable[weights] assign[=] call[name[np].interp, parameter[<ast.UnaryOp object at 0x7da1b1ec91e0>, <ast.UnaryOp object at 0x7da1b1ec9180>, name[wt_grid]]]
variable[samples] assign[=] call[name[np].atleast_1d, parameter[name[samples]]]
if compare[call[name[len], parameter[name[samples].shape]] equal[==] constant[1]] begin[:]
variable[samples] assign[=] call[name[np].atleast_2d, parameter[name[samples]]]
assert[compare[call[name[samples].shape][constant[0]] less_or_equal[<=] call[name[samples].shape][constant[1]]]]
<ast.Tuple object at 0x7da1b1ec88e0> assign[=] name[samples].shape
if compare[name[weights].ndim not_equal[!=] constant[1]] begin[:]
<ast.Raise object at 0x7da1b1ec8700>
if compare[name[nsamps] not_equal[!=] call[name[weights].shape][constant[0]]] begin[:]
<ast.Raise object at 0x7da1b1ec84f0>
if compare[name[span] is_not constant[None]] begin[:]
if compare[call[name[len], parameter[name[span]]] not_equal[!=] name[ndim]] begin[:]
<ast.Raise object at 0x7da1b1ec8250>
for taget[tuple[[<ast.Name object at 0x7da1b1ec8130>, <ast.Name object at 0x7da1b1ec8100>]]] in starred[call[name[enumerate], parameter[name[span]]]] begin[:]
<ast.Try object at 0x7da1b1ec8040>
if compare[name[labels] is constant[None]] begin[:]
variable[labels] assign[=] <ast.ListComp object at 0x7da1b1d98910>
if compare[name[color] is constant[None]] begin[:]
variable[color] assign[=] name[weights]
variable[factor] assign[=] constant[2.0]
variable[lbdim] assign[=] binary_operation[constant[0.5] * name[factor]]
variable[trdim] assign[=] binary_operation[constant[0.2] * name[factor]]
variable[whspace] assign[=] constant[0.05]
variable[plotdim] assign[=] binary_operation[binary_operation[name[factor] * binary_operation[name[ndim] - constant[1.0]]] + binary_operation[binary_operation[name[factor] * binary_operation[name[ndim] - constant[2.0]]] * name[whspace]]]
variable[dim] assign[=] binary_operation[binary_operation[name[lbdim] + name[plotdim]] + name[trdim]]
if compare[name[fig] is constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1d9af80> assign[=] call[name[pl].subplots, parameter[binary_operation[name[ndim] - constant[1]], binary_operation[name[ndim] - constant[1]]]]
variable[lb] assign[=] binary_operation[name[lbdim] / name[dim]]
variable[tr] assign[=] binary_operation[binary_operation[name[lbdim] + name[plotdim]] / name[dim]]
call[name[fig].subplots_adjust, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1d999c0>, <ast.Name object at 0x7da1b1d9a350>]]] in starred[call[name[enumerate], parameter[call[name[samples]][<ast.Slice object at 0x7da1b1d992d0>]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1d9a8f0>, <ast.Name object at 0x7da1b1d9b400>]]] in starred[call[name[enumerate], parameter[call[name[samples]][<ast.Slice object at 0x7da1b1d98700>]]]] begin[:]
<ast.Try object at 0x7da1b1d990f0>
if compare[name[span] is_not constant[None]] begin[:]
call[name[ax].set_xlim, parameter[call[name[span]][name[j]]]]
call[name[ax].set_ylim, parameter[call[name[span]][name[i]]]]
if compare[name[j] greater[>] name[i]] begin[:]
call[name[ax].set_frame_on, parameter[constant[False]]]
call[name[ax].set_xticks, parameter[list[[]]]]
call[name[ax].set_yticks, parameter[list[[]]]]
continue
if compare[name[max_n_ticks] equal[==] constant[0]] begin[:]
call[name[ax].xaxis.set_major_locator, parameter[call[name[NullLocator], parameter[]]]]
call[name[ax].yaxis.set_major_locator, parameter[call[name[NullLocator], parameter[]]]]
variable[sf] assign[=] call[name[ScalarFormatter], parameter[]]
call[name[ax].xaxis.set_major_formatter, parameter[name[sf]]]
call[name[ax].yaxis.set_major_formatter, parameter[name[sf]]]
if compare[name[i] less[<] binary_operation[name[ndim] - constant[2]]] begin[:]
call[name[ax].set_xticklabels, parameter[list[[]]]]
if compare[name[j] greater[>] constant[0]] begin[:]
call[name[ax].set_yticklabels, parameter[list[[]]]]
variable[in_bounds] assign[=] call[call[name[np].ones_like, parameter[name[y]]].astype, parameter[constant[bool]]]
if <ast.BoolOp object at 0x7da1b1d4bbb0> begin[:]
<ast.AugAssign object at 0x7da1b1d47340>
if <ast.BoolOp object at 0x7da1b1d44850> begin[:]
<ast.AugAssign object at 0x7da1b1d44c70>
call[name[ax].scatter, parameter[call[call[name[y]][name[in_bounds]]][<ast.Slice object at 0x7da1b1d478b0>], call[call[name[x]][name[in_bounds]]][<ast.Slice object at 0x7da1b1d47c70>]]]
if compare[name[truths] is_not constant[None]] begin[:]
if compare[call[name[truths]][name[j]] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b1d479a0>
if compare[call[name[truths]][binary_operation[name[i] + constant[1]]] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b1d47d00>
return[tuple[[<ast.Name object at 0x7da1b1d450c0>, <ast.Name object at 0x7da1b1d46680>]]] | keyword[def] identifier[cornerpoints] ( identifier[results] , identifier[thin] = literal[int] , identifier[span] = keyword[None] , identifier[cmap] = literal[string] , identifier[color] = keyword[None] ,
identifier[kde] = keyword[True] , identifier[nkde] = literal[int] , identifier[plot_kwargs] = keyword[None] , identifier[labels] = keyword[None] ,
identifier[label_kwargs] = keyword[None] , identifier[truths] = keyword[None] , identifier[truth_color] = literal[string] ,
identifier[truth_kwargs] = keyword[None] , identifier[max_n_ticks] = literal[int] , identifier[use_math_text] = keyword[False] ,
identifier[fig] = keyword[None] ):
literal[string]
keyword[if] identifier[truth_kwargs] keyword[is] keyword[None] :
identifier[truth_kwargs] = identifier[dict] ()
keyword[if] identifier[label_kwargs] keyword[is] keyword[None] :
identifier[label_kwargs] = identifier[dict] ()
keyword[if] identifier[plot_kwargs] keyword[is] keyword[None] :
identifier[plot_kwargs] = identifier[dict] ()
identifier[plot_kwargs] [ literal[string] ]= identifier[plot_kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[truth_kwargs] [ literal[string] ]= identifier[truth_kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[truth_kwargs] [ literal[string] ]= identifier[truth_kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[truth_kwargs] [ literal[string] ]= identifier[truth_kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[samples] = identifier[results] [ literal[string] ]
identifier[logvol] = identifier[results] [ literal[string] ]
keyword[try] :
identifier[weights] = identifier[np] . identifier[exp] ( identifier[results] [ literal[string] ]- identifier[results] [ literal[string] ][- literal[int] ])
keyword[except] :
identifier[weights] = identifier[results] [ literal[string] ]
keyword[if] identifier[kde] :
identifier[wt_kde] = identifier[gaussian_kde] ( identifier[resample_equal] (- identifier[logvol] , identifier[weights] ))
identifier[logvol_grid] = identifier[np] . identifier[linspace] ( identifier[logvol] [ literal[int] ], identifier[logvol] [- literal[int] ], identifier[nkde] )
identifier[wt_grid] = identifier[wt_kde] . identifier[pdf] (- identifier[logvol_grid] )
identifier[weights] = identifier[np] . identifier[interp] (- identifier[logvol] ,- identifier[logvol_grid] , identifier[wt_grid] )
identifier[samples] = identifier[np] . identifier[atleast_1d] ( identifier[samples] )
keyword[if] identifier[len] ( identifier[samples] . identifier[shape] )== literal[int] :
identifier[samples] = identifier[np] . identifier[atleast_2d] ( identifier[samples] )
keyword[else] :
keyword[assert] identifier[len] ( identifier[samples] . identifier[shape] )== literal[int] , literal[string]
identifier[samples] = identifier[samples] . identifier[T]
keyword[assert] identifier[samples] . identifier[shape] [ literal[int] ]<= identifier[samples] . identifier[shape] [ literal[int] ], literal[string] literal[string]
identifier[ndim] , identifier[nsamps] = identifier[samples] . identifier[shape]
keyword[if] identifier[weights] . identifier[ndim] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[nsamps] != identifier[weights] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[span] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[span] )!= identifier[ndim] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[i] , identifier[_] keyword[in] identifier[enumerate] ( identifier[span] ):
keyword[try] :
identifier[xmin] , identifier[xmax] = identifier[span] [ identifier[i] ]
keyword[except] :
identifier[q] =[ literal[int] - literal[int] * identifier[span] [ identifier[i] ], literal[int] + literal[int] * identifier[span] [ identifier[i] ]]
identifier[span] [ identifier[i] ]= identifier[_quantile] ( identifier[samples] [ identifier[i] ], identifier[q] , identifier[weights] = identifier[weights] )
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[labels] =[ literal[string] + identifier[str] ( identifier[i] + literal[int] )+ literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[ndim] )]
keyword[if] identifier[color] keyword[is] keyword[None] :
identifier[color] = identifier[weights]
identifier[factor] = literal[int]
identifier[lbdim] = literal[int] * identifier[factor]
identifier[trdim] = literal[int] * identifier[factor]
identifier[whspace] = literal[int]
identifier[plotdim] = identifier[factor] *( identifier[ndim] - literal[int] )+ identifier[factor] *( identifier[ndim] - literal[int] )* identifier[whspace]
identifier[dim] = identifier[lbdim] + identifier[plotdim] + identifier[trdim]
keyword[if] identifier[fig] keyword[is] keyword[None] :
identifier[fig] , identifier[axes] = identifier[pl] . identifier[subplots] ( identifier[ndim] - literal[int] , identifier[ndim] - literal[int] , identifier[figsize] =( identifier[dim] , identifier[dim] ))
keyword[else] :
keyword[try] :
identifier[fig] , identifier[axes] = identifier[fig]
identifier[axes] = identifier[np] . identifier[array] ( identifier[axes] ). identifier[reshape] (( identifier[ndim] - literal[int] , identifier[ndim] - literal[int] ))
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[lb] = identifier[lbdim] / identifier[dim]
identifier[tr] =( identifier[lbdim] + identifier[plotdim] )/ identifier[dim]
identifier[fig] . identifier[subplots_adjust] ( identifier[left] = identifier[lb] , identifier[bottom] = identifier[lb] , identifier[right] = identifier[tr] , identifier[top] = identifier[tr] ,
identifier[wspace] = identifier[whspace] , identifier[hspace] = identifier[whspace] )
keyword[for] identifier[i] , identifier[x] keyword[in] identifier[enumerate] ( identifier[samples] [ literal[int] :]):
keyword[for] identifier[j] , identifier[y] keyword[in] identifier[enumerate] ( identifier[samples] [:- literal[int] ]):
keyword[try] :
identifier[ax] = identifier[axes] [ identifier[i] , identifier[j] ]
keyword[except] :
identifier[ax] = identifier[axes]
keyword[if] identifier[span] keyword[is] keyword[not] keyword[None] :
identifier[ax] . identifier[set_xlim] ( identifier[span] [ identifier[j] ])
identifier[ax] . identifier[set_ylim] ( identifier[span] [ identifier[i] ])
keyword[if] identifier[j] > identifier[i] :
identifier[ax] . identifier[set_frame_on] ( keyword[False] )
identifier[ax] . identifier[set_xticks] ([])
identifier[ax] . identifier[set_yticks] ([])
keyword[continue]
keyword[if] identifier[max_n_ticks] == literal[int] :
identifier[ax] . identifier[xaxis] . identifier[set_major_locator] ( identifier[NullLocator] ())
identifier[ax] . identifier[yaxis] . identifier[set_major_locator] ( identifier[NullLocator] ())
keyword[else] :
identifier[ax] . identifier[xaxis] . identifier[set_major_locator] ( identifier[MaxNLocator] ( identifier[max_n_ticks] ,
identifier[prune] = literal[string] ))
identifier[ax] . identifier[yaxis] . identifier[set_major_locator] ( identifier[MaxNLocator] ( identifier[max_n_ticks] ,
identifier[prune] = literal[string] ))
identifier[sf] = identifier[ScalarFormatter] ( identifier[useMathText] = identifier[use_math_text] )
identifier[ax] . identifier[xaxis] . identifier[set_major_formatter] ( identifier[sf] )
identifier[ax] . identifier[yaxis] . identifier[set_major_formatter] ( identifier[sf] )
keyword[if] identifier[i] < identifier[ndim] - literal[int] :
identifier[ax] . identifier[set_xticklabels] ([])
keyword[else] :
[ identifier[l] . identifier[set_rotation] ( literal[int] ) keyword[for] identifier[l] keyword[in] identifier[ax] . identifier[get_xticklabels] ()]
identifier[ax] . identifier[set_xlabel] ( identifier[labels] [ identifier[j] ],** identifier[label_kwargs] )
identifier[ax] . identifier[xaxis] . identifier[set_label_coords] ( literal[int] ,- literal[int] )
keyword[if] identifier[j] > literal[int] :
identifier[ax] . identifier[set_yticklabels] ([])
keyword[else] :
[ identifier[l] . identifier[set_rotation] ( literal[int] ) keyword[for] identifier[l] keyword[in] identifier[ax] . identifier[get_yticklabels] ()]
identifier[ax] . identifier[set_ylabel] ( identifier[labels] [ identifier[i] + literal[int] ],** identifier[label_kwargs] )
identifier[ax] . identifier[yaxis] . identifier[set_label_coords] (- literal[int] , literal[int] )
identifier[in_bounds] = identifier[np] . identifier[ones_like] ( identifier[y] ). identifier[astype] ( literal[string] )
keyword[if] identifier[span] keyword[is] keyword[not] keyword[None] keyword[and] identifier[span] [ identifier[i] ] keyword[is] keyword[not] keyword[None] :
identifier[in_bounds] *=(( identifier[x] >= identifier[span] [ identifier[i] ][ literal[int] ])&( identifier[x] <= identifier[span] [ identifier[i] ][ literal[int] ]))
keyword[if] identifier[span] keyword[is] keyword[not] keyword[None] keyword[and] identifier[span] [ identifier[j] ] keyword[is] keyword[not] keyword[None] :
identifier[in_bounds] *=(( identifier[y] >= identifier[span] [ identifier[j] ][ literal[int] ])&( identifier[y] <= identifier[span] [ identifier[j] ][ literal[int] ]))
identifier[ax] . identifier[scatter] ( identifier[y] [ identifier[in_bounds] ][:: identifier[thin] ], identifier[x] [ identifier[in_bounds] ][:: identifier[thin] ],
identifier[c] = identifier[color] , identifier[cmap] = identifier[cmap] ,** identifier[plot_kwargs] )
keyword[if] identifier[truths] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[truths] [ identifier[j] ] keyword[is] keyword[not] keyword[None] :
keyword[try] :
[ identifier[ax] . identifier[axvline] ( identifier[t] , identifier[color] = identifier[truth_color] ,** identifier[truth_kwargs] )
keyword[for] identifier[t] keyword[in] identifier[truths] [ identifier[j] ]]
keyword[except] :
identifier[ax] . identifier[axvline] ( identifier[truths] [ identifier[j] ], identifier[color] = identifier[truth_color] ,
** identifier[truth_kwargs] )
keyword[if] identifier[truths] [ identifier[i] + literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[try] :
[ identifier[ax] . identifier[axhline] ( identifier[t] , identifier[color] = identifier[truth_color] ,** identifier[truth_kwargs] )
keyword[for] identifier[t] keyword[in] identifier[truths] [ identifier[i] + literal[int] ]]
keyword[except] :
identifier[ax] . identifier[axhline] ( identifier[truths] [ identifier[i] + literal[int] ], identifier[color] = identifier[truth_color] ,
** identifier[truth_kwargs] )
keyword[return] ( identifier[fig] , identifier[axes] ) | def cornerpoints(results, thin=1, span=None, cmap='plasma', color=None, kde=True, nkde=1000, plot_kwargs=None, labels=None, label_kwargs=None, truths=None, truth_color='red', truth_kwargs=None, max_n_ticks=5, use_math_text=False, fig=None):
"""
Generate a (sub-)corner plot of (weighted) samples.
Parameters
----------
results : :class:`~dynesty.results.Results` instance
A :class:`~dynesty.results.Results` instance from a nested
sampling run. **Compatible with results derived from**
`nestle <http://kylebarbary.com/nestle/>`_.
thin : int, optional
Thin the samples so that only each `thin`-th sample is plotted.
Default is `1` (no thinning).
span : iterable with shape (ndim,), optional
A list where each element is either a length-2 tuple containing
lower and upper bounds or a float from `(0., 1.]` giving the
fraction of (weighted) samples to include. If a fraction is provided,
the bounds are chosen to be equal-tailed. An example would be::
span = [(0., 10.), 0.95, (5., 6.)]
Default is `1.` for all parameters (no bound).
cmap : str, optional
A `~matplotlib`-style colormap used when plotting the points,
where each point is colored according to its weight. Default is
`'plasma'`.
color : str, optional
A `~matplotlib`-style color used when plotting the points.
This overrides the `cmap` option by giving all points
the same color. Default is `None` (not used).
kde : bool, optional
Whether to use kernel density estimation to estimate and plot
the PDF of the importance weights as a function of log-volume
(as opposed to the importance weights themselves). Default is
`True`.
nkde : int, optional
The number of grid points used when plotting the kernel density
estimate. Default is `1000`.
plot_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the points.
labels : iterable with shape (ndim,), optional
A list of names for each parameter. If not provided, the default name
used when plotting will follow :math:`x_i` style.
label_kwargs : dict, optional
Extra keyword arguments that will be sent to the
`~matplotlib.axes.Axes.set_xlabel` and
`~matplotlib.axes.Axes.set_ylabel` methods.
truths : iterable with shape (ndim,), optional
A list of reference values that will be overplotted on the traces and
marginalized 1-D posteriors as solid horizontal/vertical lines.
Individual values can be exempt using `None`. Default is `None`.
truth_color : str or iterable with shape (ndim,), optional
A `~matplotlib`-style color (either a single color or a different
value for each subplot) used when plotting `truths`.
Default is `'red'`.
truth_kwargs : dict, optional
Extra keyword arguments that will be used for plotting the vertical
and horizontal lines with `truths`.
max_n_ticks : int, optional
Maximum number of ticks allowed. Default is `5`.
use_math_text : bool, optional
Whether the axis tick labels for very large/small exponents should be
displayed as powers of 10 rather than using `e`. Default is `False`.
fig : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`), optional
If provided, overplot the points onto the provided figure object.
Otherwise, by default an internal figure is generated.
Returns
-------
cornerpoints : (`~matplotlib.figure.Figure`, `~matplotlib.axes.Axes`)
Output (sub-)corner plot of (weighted) samples.
"""
# Initialize values.
if truth_kwargs is None:
truth_kwargs = dict() # depends on [control=['if'], data=['truth_kwargs']]
if label_kwargs is None:
label_kwargs = dict() # depends on [control=['if'], data=['label_kwargs']]
if plot_kwargs is None:
plot_kwargs = dict() # depends on [control=['if'], data=['plot_kwargs']]
# Set defaults.
plot_kwargs['s'] = plot_kwargs.get('s', 1)
truth_kwargs['linestyle'] = truth_kwargs.get('linestyle', 'solid')
truth_kwargs['linewidth'] = truth_kwargs.get('linewidth', 2)
truth_kwargs['alpha'] = truth_kwargs.get('alpha', 0.7)
# Extract weighted samples.
samples = results['samples']
logvol = results['logvol']
try:
weights = np.exp(results['logwt'] - results['logz'][-1]) # depends on [control=['try'], data=[]]
except:
weights = results['weights'] # depends on [control=['except'], data=[]]
if kde:
# Derive kernel density estimate.
wt_kde = gaussian_kde(resample_equal(-logvol, weights)) # KDE
logvol_grid = np.linspace(logvol[0], logvol[-1], nkde) # resample
wt_grid = wt_kde.pdf(-logvol_grid) # evaluate KDE PDF
weights = np.interp(-logvol, -logvol_grid, wt_grid) # interpolate # depends on [control=['if'], data=[]]
# Deal with 1D results. A number of extra catches are also here
# in case users are trying to plot other results besides the `Results`
# instance generated by `dynesty`.
samples = np.atleast_1d(samples)
if len(samples.shape) == 1:
samples = np.atleast_2d(samples) # depends on [control=['if'], data=[]]
else:
assert len(samples.shape) == 2, 'Samples must be 1- or 2-D.'
samples = samples.T
assert samples.shape[0] <= samples.shape[1], 'There are more dimensions than samples!'
(ndim, nsamps) = samples.shape
# Check weights.
if weights.ndim != 1:
raise ValueError('Weights must be 1-D.') # depends on [control=['if'], data=[]]
if nsamps != weights.shape[0]:
raise ValueError('The number of weights and samples disagree!') # depends on [control=['if'], data=[]]
# Determine plotting bounds.
if span is not None:
if len(span) != ndim:
raise ValueError('Dimension mismatch between samples and span.') # depends on [control=['if'], data=[]]
for (i, _) in enumerate(span):
try:
(xmin, xmax) = span[i] # depends on [control=['try'], data=[]]
except:
q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]
span[i] = _quantile(samples[i], q, weights=weights) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['span']]
# Set labels
if labels is None:
labels = ['$x_{' + str(i + 1) + '}$' for i in range(ndim)] # depends on [control=['if'], data=['labels']]
# Set colormap.
if color is None:
color = weights # depends on [control=['if'], data=['color']]
# Setup axis layout (from `corner.py`).
factor = 2.0 # size of side of one panel
lbdim = 0.5 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 # size of width/height margin
plotdim = factor * (ndim - 1.0) + factor * (ndim - 2.0) * whspace
dim = lbdim + plotdim + trdim # total size
# Initialize figure.
if fig is None:
(fig, axes) = pl.subplots(ndim - 1, ndim - 1, figsize=(dim, dim)) # depends on [control=['if'], data=['fig']]
else:
try:
(fig, axes) = fig
axes = np.array(axes).reshape((ndim - 1, ndim - 1)) # depends on [control=['try'], data=[]]
except:
raise ValueError('Mismatch between axes and dimension.') # depends on [control=['except'], data=[]]
# Format figure.
lb = lbdim / dim
tr = (lbdim + plotdim) / dim
fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace)
# Plot the 2-D projected samples.
for (i, x) in enumerate(samples[1:]):
for (j, y) in enumerate(samples[:-1]):
try:
ax = axes[i, j] # depends on [control=['try'], data=[]]
except:
ax = axes # depends on [control=['except'], data=[]]
# Setup axes.
if span is not None:
ax.set_xlim(span[j])
ax.set_ylim(span[i]) # depends on [control=['if'], data=['span']]
if j > i:
ax.set_frame_on(False)
ax.set_xticks([])
ax.set_yticks([])
continue # depends on [control=['if'], data=[]]
if max_n_ticks == 0:
ax.xaxis.set_major_locator(NullLocator())
ax.yaxis.set_major_locator(NullLocator()) # depends on [control=['if'], data=[]]
else:
ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune='lower'))
ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune='lower'))
# Label axes.
sf = ScalarFormatter(useMathText=use_math_text)
ax.xaxis.set_major_formatter(sf)
ax.yaxis.set_major_formatter(sf)
if i < ndim - 2:
ax.set_xticklabels([]) # depends on [control=['if'], data=[]]
else:
[l.set_rotation(45) for l in ax.get_xticklabels()]
ax.set_xlabel(labels[j], **label_kwargs)
ax.xaxis.set_label_coords(0.5, -0.3)
if j > 0:
ax.set_yticklabels([]) # depends on [control=['if'], data=[]]
else:
[l.set_rotation(45) for l in ax.get_yticklabels()]
ax.set_ylabel(labels[i + 1], **label_kwargs)
ax.yaxis.set_label_coords(-0.3, 0.5)
# Plot distribution.
in_bounds = np.ones_like(y).astype('bool')
if span is not None and span[i] is not None:
in_bounds *= (x >= span[i][0]) & (x <= span[i][1]) # depends on [control=['if'], data=[]]
if span is not None and span[j] is not None:
in_bounds *= (y >= span[j][0]) & (y <= span[j][1]) # depends on [control=['if'], data=[]]
ax.scatter(y[in_bounds][::thin], x[in_bounds][::thin], c=color, cmap=cmap, **plot_kwargs)
# Add truth values
if truths is not None:
if truths[j] is not None:
try:
[ax.axvline(t, color=truth_color, **truth_kwargs) for t in truths[j]] # depends on [control=['try'], data=[]]
except:
ax.axvline(truths[j], color=truth_color, **truth_kwargs) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if truths[i + 1] is not None:
try:
[ax.axhline(t, color=truth_color, **truth_kwargs) for t in truths[i + 1]] # depends on [control=['try'], data=[]]
except:
ax.axhline(truths[i + 1], color=truth_color, **truth_kwargs) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['truths']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return (fig, axes) |
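A hedged usage sketch for `cornerpoints` above: everything except the plotting call is an assumption (a toy 3-parameter Gaussian problem run through `dynesty`), shown only to illustrate where the `results` object comes from.

import numpy as np
import dynesty

def loglike(x):
    return -0.5 * np.sum(x**2)      # toy Gaussian log-likelihood

def ptform(u):
    return 10.0 * u - 5.0           # map the unit cube to [-5, 5)

sampler = dynesty.NestedSampler(loglike, ptform, ndim=3)
sampler.run_nested()
fig, axes = cornerpoints(sampler.results, thin=5,
                         labels=['a', 'b', 'c'], max_n_ticks=4)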
def get_longest_line_length(text):
"""Get the length longest line in a paragraph"""
lines = text.split("\n")
length = 0
for i in range(len(lines)):
if len(lines[i]) > length:
length = len(lines[i])
return length | def function[get_longest_line_length, parameter[text]]:
constant[Get the length longest line in a paragraph]
variable[lines] assign[=] call[name[text].split, parameter[constant[
]]]
variable[length] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[lines]]]]]] begin[:]
if compare[call[name[len], parameter[call[name[lines]][name[i]]]] greater[>] name[length]] begin[:]
variable[length] assign[=] call[name[len], parameter[call[name[lines]][name[i]]]]
return[name[length]] | keyword[def] identifier[get_longest_line_length] ( identifier[text] ):
literal[string]
identifier[lines] = identifier[text] . identifier[split] ( literal[string] )
identifier[length] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[lines] )):
keyword[if] identifier[len] ( identifier[lines] [ identifier[i] ])> identifier[length] :
identifier[length] = identifier[len] ( identifier[lines] [ identifier[i] ])
keyword[return] identifier[length] | def get_longest_line_length(text):
"""Get the length longest line in a paragraph"""
lines = text.split('\n')
length = 0
for i in range(len(lines)):
if len(lines[i]) > length:
length = len(lines[i]) # depends on [control=['if'], data=['length']] # depends on [control=['for'], data=['i']]
return length |
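A quick sanity check of the helper above; an equivalent one-liner would be `max(len(line) for line in text.split('\n'))`.

text = "short\na paragraph with\nlines"
assert get_longest_line_length(text) == 16   # "a paragraph with" is longest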
def set_last_hop_responded(self, last_hop):
"""Sets the flag if last hop responded."""
for packet in last_hop.packets:
if packet.rtt:
self.last_hop_responded = True
break | def function[set_last_hop_responded, parameter[self, last_hop]]:
constant[Sets the flag if last hop responded.]
for taget[name[packet]] in starred[name[last_hop].packets] begin[:]
if name[packet].rtt begin[:]
name[self].last_hop_responded assign[=] constant[True]
break | keyword[def] identifier[set_last_hop_responded] ( identifier[self] , identifier[last_hop] ):
literal[string]
keyword[for] identifier[packet] keyword[in] identifier[last_hop] . identifier[packets] :
keyword[if] identifier[packet] . identifier[rtt] :
identifier[self] . identifier[last_hop_responded] = keyword[True]
keyword[break] | def set_last_hop_responded(self, last_hop):
"""Sets the flag if last hop responded."""
for packet in last_hop.packets:
if packet.rtt:
self.last_hop_responded = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['packet']] |
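The method above only needs `last_hop` to expose a `packets` iterable whose items carry an `rtt` attribute; a minimal stand-in with hypothetical types (not the real traceroute classes):

from collections import namedtuple

Packet = namedtuple('Packet', 'rtt')     # rtt is falsy when no reply arrived

hop = type('Hop', (), {})()
hop.packets = [Packet(rtt=None), Packet(rtt=12.3)]

trace = type('Trace', (), {'last_hop_responded': False})()
set_last_hop_responded(trace, hop)       # explicit self: it is a plain def here
assert trace.last_hop_responded          # one packet with an rtt is enough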
def id_fix(value):
""" fix @prefix values for ttl """
if value.startswith('KSC_M'):
pass
else:
value = value.replace(':','_')
if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'):
value = 'obo:' + value
elif value.startswith('birnlex') or value.startswith('nlx'):
value = 'NIFSTD:' + value
elif value.startswith('MESH'):
value = ':'.join(value.split('_'))
else:
value = ':' + value
return OntId(value).URIRef | def function[id_fix, parameter[value]]:
constant[ fix @prefix values for ttl ]
if call[name[value].startswith, parameter[constant[KSC_M]]] begin[:]
pass
return[call[name[OntId], parameter[name[value]]].URIRef] | keyword[def] identifier[id_fix] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ):
keyword[pass]
keyword[else] :
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ) keyword[or] identifier[value] . identifier[startswith] ( literal[string] ) keyword[or] identifier[value] . identifier[startswith] ( literal[string] ) keyword[or] identifier[value] . identifier[startswith] ( literal[string] ) keyword[or] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[value] = literal[string] + identifier[value]
keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ) keyword[or] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[value] = literal[string] + identifier[value]
keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ):
identifier[value] = literal[string] . identifier[join] ( identifier[value] . identifier[split] ( literal[string] ))
keyword[else] :
identifier[value] = literal[string] + identifier[value]
keyword[return] identifier[OntId] ( identifier[value] ). identifier[URIRef] | def id_fix(value):
""" fix @prefix values for ttl """
if value.startswith('KSC_M'):
pass # depends on [control=['if'], data=[]]
else:
value = value.replace(':', '_')
if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'):
value = 'obo:' + value # depends on [control=['if'], data=[]]
elif value.startswith('birnlex') or value.startswith('nlx'):
value = 'NIFSTD:' + value # depends on [control=['if'], data=[]]
elif value.startswith('MESH'):
value = ':'.join(value.split('_')) # depends on [control=['if'], data=[]]
else:
value = ':' + value
return OntId(value).URIRef |
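Assuming `OntId` (from the surrounding ontology tooling) resolves CURIEs to IRIs, the prefix rewriting in `id_fix` behaves roughly as follows before resolution; the inputs are hypothetical:

cases = {
    'UBERON:0000955': 'obo:UBERON_0000955',   # OBO-style ids gain 'obo:'
    'birnlex_796': 'NIFSTD:birnlex_796',      # NIF ids gain 'NIFSTD:'
    'MESH_D009369': 'MESH:D009369',           # each underscore becomes a colon
    'KSC_M001': 'KSC_M001',                   # passed through untouched
}
# e.g. id_fix('UBERON:0000955') would return OntId('obo:UBERON_0000955').URIRef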
def fetch_new_id(self):
"""Return a new id for the given reftrack to be set on the refobject
The id can identify reftracks that share the same parent, type and element.
:returns: A new id
:rtype: int
:raises: None
"""
parent = self.get_parent()
if parent:
others = parent._children
else:
others = [r for r in self.get_root()._reftracks if r.get_parent() is None]
others = [r for r in others
if r != self
and r.get_typ() == self.get_typ()
and r.get_element() == self.get_element()]
highest = -1
for r in others:
identifier = r.get_id()
if identifier > highest:
highest = identifier
return highest + 1 | def function[fetch_new_id, parameter[self]]:
constant[Return a new id for the given reftrack to be set on the refobject
The id can identify reftracks that share the same parent, type and element.
:returns: A new id
:rtype: int
:raises: None
]
variable[parent] assign[=] call[name[self].get_parent, parameter[]]
if name[parent] begin[:]
variable[others] assign[=] name[parent]._children
variable[others] assign[=] <ast.ListComp object at 0x7da20e954820>
variable[highest] assign[=] <ast.UnaryOp object at 0x7da20e9552a0>
for taget[name[r]] in starred[name[others]] begin[:]
variable[identifier] assign[=] call[name[r].get_id, parameter[]]
if compare[name[identifier] greater[>] name[highest]] begin[:]
variable[highest] assign[=] name[identifier]
return[binary_operation[name[highest] + constant[1]]] | keyword[def] identifier[fetch_new_id] ( identifier[self] ,):
literal[string]
identifier[parent] = identifier[self] . identifier[get_parent] ()
keyword[if] identifier[parent] :
identifier[others] = identifier[parent] . identifier[_children]
keyword[else] :
identifier[others] =[ identifier[r] keyword[for] identifier[r] keyword[in] identifier[self] . identifier[get_root] (). identifier[_reftracks] keyword[if] identifier[r] . identifier[get_parent] () keyword[is] keyword[None] ]
identifier[others] =[ identifier[r] keyword[for] identifier[r] keyword[in] identifier[others]
keyword[if] identifier[r] != identifier[self]
keyword[and] identifier[r] . identifier[get_typ] ()== identifier[self] . identifier[get_typ] ()
keyword[and] identifier[r] . identifier[get_element] ()== identifier[self] . identifier[get_element] ()]
identifier[highest] =- literal[int]
keyword[for] identifier[r] keyword[in] identifier[others] :
identifier[identifier] = identifier[r] . identifier[get_id] ()
keyword[if] identifier[identifier] > identifier[highest] :
identifier[highest] = identifier[identifier]
keyword[return] identifier[highest] + literal[int] | def fetch_new_id(self):
"""Return a new id for the given reftrack to be set on the refobject
The id can identify reftracks that share the same parent, type and element.
:returns: A new id
:rtype: int
:raises: None
"""
parent = self.get_parent()
if parent:
others = parent._children # depends on [control=['if'], data=[]]
else:
others = [r for r in self.get_root()._reftracks if r.get_parent() is None]
others = [r for r in others if r != self and r.get_typ() == self.get_typ() and (r.get_element() == self.get_element())]
highest = -1
for r in others:
identifier = r.get_id()
if identifier > highest:
highest = identifier # depends on [control=['if'], data=['identifier', 'highest']] # depends on [control=['for'], data=['r']]
return highest + 1 |
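The allocation rule above is one past the current maximum id among comparable siblings, so gaps are never reused; a compact mirror of the loop with hypothetical ids:

existing_ids = [0, 1, 3]                     # ids held by comparable siblings
new_id = max(existing_ids, default=-1) + 1   # -1 + 1 == 0 when there are none
assert new_id == 4                           # the gap at 2 is not reused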
def ipv6_generate_random(total=100):
"""
The generator to produce random, unique IPv6 addresses that are not
defined (can be looked up using ipwhois).
Args:
total (:obj:`int`): The total number of IPv6 addresses to generate.
Yields:
str: The next IPv6 address.
"""
count = 0
yielded = set()
while count < total:
address = str(IPv6Address(random.randint(0, 2**128-1)))
if not ipv6_is_defined(address)[0] and address not in yielded:
count += 1
yielded.add(address)
yield address | def function[ipv6_generate_random, parameter[total]]:
constant[
The generator to produce random, unique IPv6 addresses that are not
defined (can be looked up using ipwhois).
Args:
total (:obj:`int`): The total number of IPv6 addresses to generate.
Yields:
str: The next IPv6 address.
]
variable[count] assign[=] constant[0]
variable[yielded] assign[=] call[name[set], parameter[]]
while compare[name[count] less[<] name[total]] begin[:]
variable[address] assign[=] call[name[str], parameter[call[name[IPv6Address], parameter[call[name[random].randint, parameter[constant[0], binary_operation[binary_operation[constant[2] ** constant[128]] - constant[1]]]]]]]]
if <ast.BoolOp object at 0x7da18eb54d90> begin[:]
<ast.AugAssign object at 0x7da18eb57040>
call[name[yielded].add, parameter[name[address]]]
<ast.Yield object at 0x7da18eb568f0> | keyword[def] identifier[ipv6_generate_random] ( identifier[total] = literal[int] ):
literal[string]
identifier[count] = literal[int]
identifier[yielded] = identifier[set] ()
keyword[while] identifier[count] < identifier[total] :
identifier[address] = identifier[str] ( identifier[IPv6Address] ( identifier[random] . identifier[randint] ( literal[int] , literal[int] ** literal[int] - literal[int] )))
keyword[if] keyword[not] identifier[ipv6_is_defined] ( identifier[address] )[ literal[int] ] keyword[and] identifier[address] keyword[not] keyword[in] identifier[yielded] :
identifier[count] += literal[int]
identifier[yielded] . identifier[add] ( identifier[address] )
keyword[yield] identifier[address] | def ipv6_generate_random(total=100):
"""
The generator to produce random, unique IPv6 addresses that are not
defined (can be looked up using ipwhois).
Args:
total (:obj:`int`): The total number of IPv6 addresses to generate.
Yields:
str: The next IPv6 address.
"""
count = 0
yielded = set()
while count < total:
address = str(IPv6Address(random.randint(0, 2 ** 128 - 1)))
if not ipv6_is_defined(address)[0] and address not in yielded:
count += 1
yielded.add(address)
yield address # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['count']] |
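A small usage sketch; each candidate address is filtered through `ipv6_is_defined()`, so the generator may draw more than `total` random values internally before yielding:

for address in ipv6_generate_random(total=3):
    print(address)    # three random, undefined, unique IPv6 addresses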
def check_build_status(self, build_id):
"""Checks the status of an app-setups build.
:param build_id: ID of the build to check.
:returns: ``True`` if succeeded, ``False`` if pending.
"""
data = self.api_request('GET', '/app-setups/%s' % build_id)
status = data.get('status')
if status == 'pending':
return False
elif status == 'succeeded':
return True
else:
raise BuildError(str(data)) | def function[check_build_status, parameter[self, build_id]]:
constant[Checks the status of an app-setups build.
:param build_id: ID of the build to check.
:returns: ``True`` if succeeded, ``False`` if pending.
]
variable[data] assign[=] call[name[self].api_request, parameter[constant[GET], binary_operation[constant[/app-setups/%s] <ast.Mod object at 0x7da2590d6920> name[build_id]]]]
variable[status] assign[=] call[name[data].get, parameter[constant[status]]]
if compare[name[status] equal[==] constant[pending]] begin[:]
return[constant[False]] | keyword[def] identifier[check_build_status] ( identifier[self] , identifier[build_id] ):
literal[string]
identifier[data] = identifier[self] . identifier[api_request] ( literal[string] , literal[string] % identifier[build_id] )
identifier[status] = identifier[data] . identifier[get] ( literal[string] )
keyword[if] identifier[status] == literal[string] :
keyword[return] keyword[False]
keyword[elif] identifier[status] == literal[string] :
keyword[return] keyword[True]
keyword[else] :
keyword[raise] identifier[BuildError] ( identifier[str] ( identifier[data] )) | def check_build_status(self, build_id):
"""Checks the status of an app-setups build.
:param build_id: ID of the build to check.
:returns: ``True`` if succeeded, ``False`` if pending.
"""
data = self.api_request('GET', '/app-setups/%s' % build_id)
status = data.get('status')
if status == 'pending':
return False # depends on [control=['if'], data=[]]
elif status == 'succeeded':
return True # depends on [control=['if'], data=[]]
else:
raise BuildError(str(data)) |
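Because the method returns `False` while the build is still pending, callers are expected to poll; a hypothetical polling loop (the `client` instance and the build id are assumptions, not part of this API):

import time

build_id = 'example-build-id'                 # hypothetical app-setups id
while not client.check_build_status(build_id):
    time.sleep(5)                             # BuildError is raised on failure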
def _win32_can_symlink(verbose=0, force=0, testing=0):
"""
CommandLine:
python -m ubelt._win32_links _win32_can_symlink
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> _win32_can_symlink(verbose=1, force=1, testing=1)
"""
global __win32_can_symlink__
if verbose:
print('__win32_can_symlink__ = {!r}'.format(__win32_can_symlink__))
if __win32_can_symlink__ is not None and not force:
return __win32_can_symlink__
from ubelt import util_platform
tempdir = util_platform.ensure_app_cache_dir('ubelt', '_win32_can_symlink')
util_io.delete(tempdir)
util_path.ensuredir(tempdir)
dpath = join(tempdir, 'dpath')
fpath = join(tempdir, 'fpath.txt')
dlink = join(tempdir, 'dlink')
flink = join(tempdir, 'flink.txt')
util_path.ensuredir(dpath)
util_io.touch(fpath)
# Add broken variants of the links for testing purposes
    # It's ugly, but so is all this Windows code.
if testing:
broken_dpath = join(tempdir, 'broken_dpath')
broken_fpath = join(tempdir, 'broken_fpath.txt')
# Create files that we will delete after we link to them
util_path.ensuredir(broken_dpath)
util_io.touch(broken_fpath)
try:
_win32_symlink(dpath, dlink)
if testing:
_win32_symlink(broken_dpath, join(tempdir, 'broken_dlink'))
can_symlink_directories = os.path.islink(dlink)
except OSError:
can_symlink_directories = False
if verbose:
print('can_symlink_directories = {!r}'.format(can_symlink_directories))
try:
_win32_symlink(fpath, flink)
if testing:
_win32_symlink(broken_fpath, join(tempdir, 'broken_flink'))
can_symlink_files = os.path.islink(flink)
# os.path.islink(flink)
except OSError:
can_symlink_files = False
if verbose:
print('can_symlink_files = {!r}'.format(can_symlink_files))
assert int(can_symlink_directories) + int(can_symlink_files) != 1, (
'can do one but not both. Unexpected {} {}'.format(
can_symlink_directories, can_symlink_files))
try:
# test that we can create junctions, even if symlinks are disabled
djunc = _win32_junction(dpath, join(tempdir, 'djunc'))
fjunc = _win32_junction(fpath, join(tempdir, 'fjunc.txt'))
if testing:
_win32_junction(broken_dpath, join(tempdir, 'broken_djunc'))
_win32_junction(broken_fpath, join(tempdir, 'broken_fjunc.txt'))
assert _win32_is_junction(djunc)
assert _win32_is_hardlinked(fpath, fjunc)
except Exception:
warnings.warn('We cannot create junctions either!')
raise
if testing:
# break the links
util_io.delete(broken_dpath)
util_io.delete(broken_fpath)
if verbose:
from ubelt import util_links
util_links._dirstats(tempdir)
try:
# Cleanup the test directory
util_io.delete(tempdir)
except Exception:
print('ERROR IN DELETE')
from ubelt import util_links
util_links._dirstats(tempdir)
raise
can_symlink = can_symlink_directories and can_symlink_files
__win32_can_symlink__ = can_symlink
if not can_symlink:
warnings.warn('Cannot make real symlink. Falling back to junction')
if verbose:
print('can_symlink = {!r}'.format(can_symlink))
print('__win32_can_symlink__ = {!r}'.format(__win32_can_symlink__))
return can_symlink | def function[_win32_can_symlink, parameter[verbose, force, testing]]:
constant[
CommandLine:
python -m ubelt._win32_links _win32_can_symlink
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> _win32_can_symlink(verbose=1, force=1, testing=1)
]
<ast.Global object at 0x7da1b020e320>
if name[verbose] begin[:]
call[name[print], parameter[call[constant[__win32_can_symlink__ = {!r}].format, parameter[name[__win32_can_symlink__]]]]]
if <ast.BoolOp object at 0x7da1b020e590> begin[:]
return[name[__win32_can_symlink__]]
from relative_module[ubelt] import module[util_platform]
variable[tempdir] assign[=] call[name[util_platform].ensure_app_cache_dir, parameter[constant[ubelt], constant[_win32_can_symlink]]]
call[name[util_io].delete, parameter[name[tempdir]]]
call[name[util_path].ensuredir, parameter[name[tempdir]]]
variable[dpath] assign[=] call[name[join], parameter[name[tempdir], constant[dpath]]]
variable[fpath] assign[=] call[name[join], parameter[name[tempdir], constant[fpath.txt]]]
variable[dlink] assign[=] call[name[join], parameter[name[tempdir], constant[dlink]]]
variable[flink] assign[=] call[name[join], parameter[name[tempdir], constant[flink.txt]]]
call[name[util_path].ensuredir, parameter[name[dpath]]]
call[name[util_io].touch, parameter[name[fpath]]]
if name[testing] begin[:]
variable[broken_dpath] assign[=] call[name[join], parameter[name[tempdir], constant[broken_dpath]]]
variable[broken_fpath] assign[=] call[name[join], parameter[name[tempdir], constant[broken_fpath.txt]]]
call[name[util_path].ensuredir, parameter[name[broken_dpath]]]
call[name[util_io].touch, parameter[name[broken_fpath]]]
<ast.Try object at 0x7da1b020e230>
if name[verbose] begin[:]
call[name[print], parameter[call[constant[can_symlink_directories = {!r}].format, parameter[name[can_symlink_directories]]]]]
<ast.Try object at 0x7da1b020e020>
if name[verbose] begin[:]
call[name[print], parameter[call[constant[can_symlink_files = {!r}].format, parameter[name[can_symlink_files]]]]]
assert[compare[binary_operation[call[name[int], parameter[name[can_symlink_directories]]] + call[name[int], parameter[name[can_symlink_files]]]] not_equal[!=] constant[1]]]
<ast.Try object at 0x7da1b020fee0>
if name[testing] begin[:]
call[name[util_io].delete, parameter[name[broken_dpath]]]
call[name[util_io].delete, parameter[name[broken_fpath]]]
if name[verbose] begin[:]
from relative_module[ubelt] import module[util_links]
call[name[util_links]._dirstats, parameter[name[tempdir]]]
<ast.Try object at 0x7da207f02ce0>
variable[can_symlink] assign[=] <ast.BoolOp object at 0x7da207f017b0>
variable[__win32_can_symlink__] assign[=] name[can_symlink]
if <ast.UnaryOp object at 0x7da207f01900> begin[:]
call[name[warnings].warn, parameter[constant[Cannot make real symlink. Falling back to junction]]]
if name[verbose] begin[:]
call[name[print], parameter[call[constant[can_symlink = {!r}].format, parameter[name[can_symlink]]]]]
call[name[print], parameter[call[constant[__win32_can_symlink__ = {!r}].format, parameter[name[__win32_can_symlink__]]]]]
return[name[can_symlink]] | keyword[def] identifier[_win32_can_symlink] ( identifier[verbose] = literal[int] , identifier[force] = literal[int] , identifier[testing] = literal[int] ):
literal[string]
keyword[global] identifier[__win32_can_symlink__]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[__win32_can_symlink__] ))
keyword[if] identifier[__win32_can_symlink__] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[force] :
keyword[return] identifier[__win32_can_symlink__]
keyword[from] identifier[ubelt] keyword[import] identifier[util_platform]
identifier[tempdir] = identifier[util_platform] . identifier[ensure_app_cache_dir] ( literal[string] , literal[string] )
identifier[util_io] . identifier[delete] ( identifier[tempdir] )
identifier[util_path] . identifier[ensuredir] ( identifier[tempdir] )
identifier[dpath] = identifier[join] ( identifier[tempdir] , literal[string] )
identifier[fpath] = identifier[join] ( identifier[tempdir] , literal[string] )
identifier[dlink] = identifier[join] ( identifier[tempdir] , literal[string] )
identifier[flink] = identifier[join] ( identifier[tempdir] , literal[string] )
identifier[util_path] . identifier[ensuredir] ( identifier[dpath] )
identifier[util_io] . identifier[touch] ( identifier[fpath] )
keyword[if] identifier[testing] :
identifier[broken_dpath] = identifier[join] ( identifier[tempdir] , literal[string] )
identifier[broken_fpath] = identifier[join] ( identifier[tempdir] , literal[string] )
identifier[util_path] . identifier[ensuredir] ( identifier[broken_dpath] )
identifier[util_io] . identifier[touch] ( identifier[broken_fpath] )
keyword[try] :
identifier[_win32_symlink] ( identifier[dpath] , identifier[dlink] )
keyword[if] identifier[testing] :
identifier[_win32_symlink] ( identifier[broken_dpath] , identifier[join] ( identifier[tempdir] , literal[string] ))
identifier[can_symlink_directories] = identifier[os] . identifier[path] . identifier[islink] ( identifier[dlink] )
keyword[except] identifier[OSError] :
identifier[can_symlink_directories] = keyword[False]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[can_symlink_directories] ))
keyword[try] :
identifier[_win32_symlink] ( identifier[fpath] , identifier[flink] )
keyword[if] identifier[testing] :
identifier[_win32_symlink] ( identifier[broken_fpath] , identifier[join] ( identifier[tempdir] , literal[string] ))
identifier[can_symlink_files] = identifier[os] . identifier[path] . identifier[islink] ( identifier[flink] )
keyword[except] identifier[OSError] :
identifier[can_symlink_files] = keyword[False]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[can_symlink_files] ))
keyword[assert] identifier[int] ( identifier[can_symlink_directories] )+ identifier[int] ( identifier[can_symlink_files] )!= literal[int] ,(
literal[string] . identifier[format] (
identifier[can_symlink_directories] , identifier[can_symlink_files] ))
keyword[try] :
identifier[djunc] = identifier[_win32_junction] ( identifier[dpath] , identifier[join] ( identifier[tempdir] , literal[string] ))
identifier[fjunc] = identifier[_win32_junction] ( identifier[fpath] , identifier[join] ( identifier[tempdir] , literal[string] ))
keyword[if] identifier[testing] :
identifier[_win32_junction] ( identifier[broken_dpath] , identifier[join] ( identifier[tempdir] , literal[string] ))
identifier[_win32_junction] ( identifier[broken_fpath] , identifier[join] ( identifier[tempdir] , literal[string] ))
keyword[assert] identifier[_win32_is_junction] ( identifier[djunc] )
keyword[assert] identifier[_win32_is_hardlinked] ( identifier[fpath] , identifier[fjunc] )
keyword[except] identifier[Exception] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[raise]
keyword[if] identifier[testing] :
identifier[util_io] . identifier[delete] ( identifier[broken_dpath] )
identifier[util_io] . identifier[delete] ( identifier[broken_fpath] )
keyword[if] identifier[verbose] :
keyword[from] identifier[ubelt] keyword[import] identifier[util_links]
identifier[util_links] . identifier[_dirstats] ( identifier[tempdir] )
keyword[try] :
identifier[util_io] . identifier[delete] ( identifier[tempdir] )
keyword[except] identifier[Exception] :
identifier[print] ( literal[string] )
keyword[from] identifier[ubelt] keyword[import] identifier[util_links]
identifier[util_links] . identifier[_dirstats] ( identifier[tempdir] )
keyword[raise]
identifier[can_symlink] = identifier[can_symlink_directories] keyword[and] identifier[can_symlink_files]
identifier[__win32_can_symlink__] = identifier[can_symlink]
keyword[if] keyword[not] identifier[can_symlink] :
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[can_symlink] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[__win32_can_symlink__] ))
keyword[return] identifier[can_symlink] | def _win32_can_symlink(verbose=0, force=0, testing=0):
"""
CommandLine:
python -m ubelt._win32_links _win32_can_symlink
Example:
>>> # xdoc: +REQUIRES(WIN32)
>>> import ubelt as ub
>>> _win32_can_symlink(verbose=1, force=1, testing=1)
"""
global __win32_can_symlink__
if verbose:
print('__win32_can_symlink__ = {!r}'.format(__win32_can_symlink__)) # depends on [control=['if'], data=[]]
if __win32_can_symlink__ is not None and (not force):
return __win32_can_symlink__ # depends on [control=['if'], data=[]]
from ubelt import util_platform
tempdir = util_platform.ensure_app_cache_dir('ubelt', '_win32_can_symlink')
util_io.delete(tempdir)
util_path.ensuredir(tempdir)
dpath = join(tempdir, 'dpath')
fpath = join(tempdir, 'fpath.txt')
dlink = join(tempdir, 'dlink')
flink = join(tempdir, 'flink.txt')
util_path.ensuredir(dpath)
util_io.touch(fpath)
# Add broken variants of the links for testing purposes
# It's ugly, but so is all this Windows code.
if testing:
broken_dpath = join(tempdir, 'broken_dpath')
broken_fpath = join(tempdir, 'broken_fpath.txt')
# Create files that we will delete after we link to them
util_path.ensuredir(broken_dpath)
util_io.touch(broken_fpath) # depends on [control=['if'], data=[]]
try:
_win32_symlink(dpath, dlink)
if testing:
_win32_symlink(broken_dpath, join(tempdir, 'broken_dlink')) # depends on [control=['if'], data=[]]
can_symlink_directories = os.path.islink(dlink) # depends on [control=['try'], data=[]]
except OSError:
can_symlink_directories = False # depends on [control=['except'], data=[]]
if verbose:
print('can_symlink_directories = {!r}'.format(can_symlink_directories)) # depends on [control=['if'], data=[]]
try:
_win32_symlink(fpath, flink)
if testing:
_win32_symlink(broken_fpath, join(tempdir, 'broken_flink')) # depends on [control=['if'], data=[]]
can_symlink_files = os.path.islink(flink) # depends on [control=['try'], data=[]]
# os.path.islink(flink)
except OSError:
can_symlink_files = False # depends on [control=['except'], data=[]]
if verbose:
print('can_symlink_files = {!r}'.format(can_symlink_files)) # depends on [control=['if'], data=[]]
assert int(can_symlink_directories) + int(can_symlink_files) != 1, 'can do one but not both. Unexpected {} {}'.format(can_symlink_directories, can_symlink_files)
try:
# test that we can create junctions, even if symlinks are disabled
djunc = _win32_junction(dpath, join(tempdir, 'djunc'))
fjunc = _win32_junction(fpath, join(tempdir, 'fjunc.txt'))
if testing:
_win32_junction(broken_dpath, join(tempdir, 'broken_djunc'))
_win32_junction(broken_fpath, join(tempdir, 'broken_fjunc.txt')) # depends on [control=['if'], data=[]]
assert _win32_is_junction(djunc)
assert _win32_is_hardlinked(fpath, fjunc) # depends on [control=['try'], data=[]]
except Exception:
warnings.warn('We cannot create junctions either!')
raise # depends on [control=['except'], data=[]]
if testing:
# break the links
util_io.delete(broken_dpath)
util_io.delete(broken_fpath)
if verbose:
from ubelt import util_links
util_links._dirstats(tempdir) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
try:
# Cleanup the test directory
util_io.delete(tempdir) # depends on [control=['try'], data=[]]
except Exception:
print('ERROR IN DELETE')
from ubelt import util_links
util_links._dirstats(tempdir)
raise # depends on [control=['except'], data=[]]
can_symlink = can_symlink_directories and can_symlink_files
__win32_can_symlink__ = can_symlink
if not can_symlink:
warnings.warn('Cannot make real symlink. Falling back to junction') # depends on [control=['if'], data=[]]
if verbose:
print('can_symlink = {!r}'.format(can_symlink))
print('__win32_can_symlink__ = {!r}'.format(__win32_can_symlink__)) # depends on [control=['if'], data=[]]
return can_symlink |
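A minimal caller for the capability probe above might dispatch on its result and fall back to junctions when real symlinks are unavailable. This is a sketch only: the fallback policy is an assumption for illustration, not ubelt's verified dispatch logic.

# Hedged sketch: _win32_can_symlink, _win32_symlink and _win32_junction
# are the helpers shown above; the fallback policy here is assumed.
def _make_link(path, link_path, verbose=0):
    if _win32_can_symlink(verbose=verbose):
        _win32_symlink(path, link_path)
    else:
        # Junctions work even when symlink privileges are disabled.
        _win32_junction(path, link_path)
    return link_path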
def get_data_point(self, n):
"""
Returns the n'th data point (starting at 0) from all columns.
Parameters
----------
n
Index of data point to return.
"""
# loop over the columns and collect the data point
point = []
for k in self.ckeys: point.append(self[k][n])
return point | def function[get_data_point, parameter[self, n]]:
constant[
Returns the n'th data point (starting at 0) from all columns.
Parameters
----------
n
Index of data point to return.
]
variable[point] assign[=] list[[]]
for taget[name[k]] in starred[name[self].ckeys] begin[:]
call[name[point].append, parameter[call[call[name[self]][name[k]]][name[n]]]]
return[name[point]] | keyword[def] identifier[get_data_point] ( identifier[self] , identifier[n] ):
literal[string]
identifier[point] =[]
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[ckeys] : identifier[point] . identifier[append] ( identifier[self] [ identifier[k] ][ identifier[n] ])
keyword[return] identifier[point] | def get_data_point(self, n):
"""
Returns the n'th data point (starting at 0) from all columns.
Parameters
----------
n
Index of data point to return.
"""
# loop over the columns and collect the data point
point = []
for k in self.ckeys:
point.append(self[k][n]) # depends on [control=['for'], data=['k']]
return point |
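get_data_point only needs dict-style column access and a ckeys list, so it can be exercised on a toy stand-in for the real databox-like class; beyond those two assumptions, everything below is invented.

# Hedged sketch: minimal stand-in for the column-container class.
class ToyBox(dict):
    ckeys = ['x', 'y']                 # column keys the method iterates
    get_data_point = get_data_point    # reuse the function defined above

box = ToyBox(x=[1, 2, 3], y=[10, 20, 30])
assert box.get_data_point(1) == [2, 20]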
def add_bgedge(self, bgedge, merge=True):
""" Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param bgedge: instance of :class:`bg.edge.BGEdge`, information from which is to be added to the current :class:`BreakpointGraph`
:type bgedge: :class:`bg.edge.BGEdge`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
"""
self.__add_bgedge(bgedge=bgedge, merge=merge) | def function[add_bgedge, parameter[self, bgedge, merge]]:
constant[ Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param bgedge: instance of :class:`bg.edge.BGEdge`, information from which is to be added to the current :class:`BreakpointGraph`
:type bgedge: :class:`bg.edge.BGEdge`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
]
call[name[self].__add_bgedge, parameter[]] | keyword[def] identifier[add_bgedge] ( identifier[self] , identifier[bgedge] , identifier[merge] = keyword[True] ):
literal[string]
identifier[self] . identifier[__add_bgedge] ( identifier[bgedge] = identifier[bgedge] , identifier[merge] = identifier[merge] ) | def add_bgedge(self, bgedge, merge=True):
""" Adds supplied :class:`bg.edge.BGEdge` object to current instance of :class:`BreakpointGraph`.
Proxies a call to :meth:`BreakpointGraph._BreakpointGraph__add_bgedge` method.
:param bgedge: instance of :class:`bg.edge.BGEdge`, information from which is to be added to the current :class:`BreakpointGraph`
:type bgedge: :class:`bg.edge.BGEdge`
:param merge: a flag to merge supplied information from multi-color perspective into a first existing edge between two supplied vertices
:type merge: ``Boolean``
:return: ``None``, performs inplace changes
"""
self.__add_bgedge(bgedge=bgedge, merge=merge) |
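The docstring implies the following call pattern; the BGEdge constructor signature used here is an assumption about the bg package rather than a checked API.

# Hedged sketch; vertex names and the multicolor object are invented.
graph = BreakpointGraph()
edge = BGEdge(vertex1='v1h', vertex2='v2t', multicolor=multicolor)
graph.add_bgedge(edge, merge=True)   # fold colors into an existing edge
graph.add_bgedge(edge, merge=False)  # or keep it as a parallel edge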
def _fix_permissions(self):
"""
Because Docker runs as root, we need to fix permissions and ownership to allow the user
to interact with the container from their filesystem and perform operations like file deletion
"""
state = yield from self._get_container_state()
if state == "stopped" or state == "exited":
# We need to restart it to fix permissions
yield from self.manager.query("POST", "containers/{}/start".format(self._cid))
for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
name=self._name, image=self._image, path=volume))
process = yield from asyncio.subprocess.create_subprocess_exec(
"docker",
"exec",
self._cid,
"/gns3/bin/busybox",
"sh",
"-c",
"("
"/gns3/bin/busybox find \"{path}\" -depth -print0"
" | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\""
")"
" && /gns3/bin/busybox chmod -R u+rX \"{path}\""
" && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\""
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
yield from process.wait() | def function[_fix_permissions, parameter[self]]:
constant[
Because Docker runs as root, we need to fix permissions and ownership to allow the user
to interact with the container from their filesystem and perform operations like file deletion
]
variable[state] assign[=] <ast.YieldFrom object at 0x7da20c6c7340>
if <ast.BoolOp object at 0x7da20c992e30> begin[:]
<ast.YieldFrom object at 0x7da20c993040>
for taget[name[volume]] in starred[name[self]._volumes] begin[:]
call[name[log].debug, parameter[call[constant[Docker container '{name}' [{image}] fix ownership on {path}].format, parameter[]]]]
variable[process] assign[=] <ast.YieldFrom object at 0x7da18c4cfca0>
<ast.YieldFrom object at 0x7da18dc983d0> | keyword[def] identifier[_fix_permissions] ( identifier[self] ):
literal[string]
identifier[state] = keyword[yield] keyword[from] identifier[self] . identifier[_get_container_state] ()
keyword[if] identifier[state] == literal[string] keyword[or] identifier[state] == literal[string] :
keyword[yield] keyword[from] identifier[self] . identifier[manager] . identifier[query] ( literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[_cid] ))
keyword[for] identifier[volume] keyword[in] identifier[self] . identifier[_volumes] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] (
identifier[name] = identifier[self] . identifier[_name] , identifier[image] = identifier[self] . identifier[_image] , identifier[path] = identifier[volume] ))
identifier[process] = keyword[yield] keyword[from] identifier[asyncio] . identifier[subprocess] . identifier[create_subprocess_exec] (
literal[string] ,
literal[string] ,
identifier[self] . identifier[_cid] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
. identifier[format] ( identifier[uid] = identifier[os] . identifier[getuid] (), identifier[gid] = identifier[os] . identifier[getgid] (), identifier[path] = identifier[volume] ),
)
keyword[yield] keyword[from] identifier[process] . identifier[wait] () | def _fix_permissions(self):
"""
Because Docker runs as root, we need to fix permissions and ownership to allow the user
to interact with the container from their filesystem and perform operations like file deletion
"""
state = (yield from self._get_container_state())
if state == 'stopped' or state == 'exited':
# We need to restart it to fix permissions
yield from self.manager.query('POST', 'containers/{}/start'.format(self._cid)) # depends on [control=['if'], data=[]]
for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(name=self._name, image=self._image, path=volume))
process = (yield from asyncio.subprocess.create_subprocess_exec('docker', 'exec', self._cid, '/gns3/bin/busybox', 'sh', '-c', '(/gns3/bin/busybox find "{path}" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c \'%a:%u:%g:%n\' > "{path}/.gns3_perms") && /gns3/bin/busybox chmod -R u+rX "{path}" && /gns3/bin/busybox chown {uid}:{gid} -R "{path}"'.format(uid=os.getuid(), gid=os.getgid(), path=volume)))
yield from process.wait() # depends on [control=['for'], data=['volume']] |
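Per volume, the coroutine assembles one busybox pipeline: it first records every entry's original mode/uid/gid into .gns3_perms (so permissions can presumably be restored later), then makes the tree user-readable and chowns it to the host user. Expanded with invented values, the generated shell line looks roughly like this:

# Hedged illustration only: the uid/gid pair and the volume path are made up.
cmd = (
    '(/gns3/bin/busybox find "/data" -depth -print0'
    " | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n'"
    ' > "/data/.gns3_perms")'
    ' && /gns3/bin/busybox chmod -R u+rX "/data"'
    ' && /gns3/bin/busybox chown 1000:1000 -R "/data"'
)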
def refine_time_offset(image_list, frame_timestamps, rotation_sequence, rotation_timestamps, camera_matrix, readout_time):
"""Refine a time offset between camera and IMU using rolling shutter aware optimization.
To refine the time offset using this function, you must meet the following constraints
1) The data must already be roughly aligned. Only a few image frames of error
are allowed.
2) The images *must* have been captured by a *rolling shutter* camera.
This function finds a refined offset using optimization.
Points are first tracked from the start to the end of the provided images.
Then an optimization function looks at the reprojection error of the tracked points
given the IMU-data and the refined offset.
The found offset *d* is such that you want to perform the following time update
new_frame_timestamps = frame_timestamps + d
Parameters
------------
image_list : list of ndarray
A list of images to perform tracking on. High quality tracks are required,
so make sure the sequence you choose is easy to track in.
frame_timestamps : ndarray
Timestamps of image_list
rotation_sequence : (4, N) ndarray
Absolute rotations as a sequence of unit quaternions (first element is scalar).
rotation_timestamps : ndarray
Timestamps of rotation_sequence
camera_matrix : (3,3) ndarray
The internal camera calibration matrix of the camera.
readout_time : float
The readout time of the camera.
Returns
------------
offset : float
A refined offset that aligns the image data with the rotation data.
"""
# 1) Track points
max_corners = 200
quality_level = 0.07
min_distance = 5
max_tracks = 20
initial_points = cv2.goodFeaturesToTrack(image_list[0], max_corners, quality_level, min_distance)
(points, status) = tracking.track_retrack(image_list, initial_points)
# Prune to at most max_tracks number of tracks, choose randomly
track_id_list = np.random.permutation(points.shape[0])[:max_tracks]
rows, cols = image_list[0].shape[:2]
row_delta_time = readout_time / rows
num_tracks, num_frames, _ = points.shape
K = np.matrix(camera_matrix)
def func_to_optimize(td, *args):
res = 0.0
N = 0
for frame_idx in range(num_frames-1):
for track_id in track_id_list:
p1 = points[track_id, frame_idx, :].reshape((-1,1))
p2 = points[track_id, frame_idx + 1, :].reshape((-1,1))
t1 = frame_timestamps[frame_idx] + (p1[1] - 1) * row_delta_time + td
t2 = frame_timestamps[frame_idx + 1] + (p2[1] - 1) * row_delta_time + td
t1 = float(t1)
t2 = float(t2)
q1 = IMU.rotation_at_time(t1, rotation_timestamps, rotation_sequence)
q2 = IMU.rotation_at_time(t2, rotation_timestamps, rotation_sequence)
R1 = rotations.quat_to_rotation_matrix(q1)
R2 = rotations.quat_to_rotation_matrix(q2)
p1_rec = K.dot(R1.T).dot(R2).dot(K.I).dot(np.vstack((p2, 1)))
if p1_rec[2] == 0:
continue
else:
p1_rec /= p1_rec[2]
res += np.sum((p1 - np.array(p1_rec[0:2]))**2)
N += 1
return res / N
# Bounded Brent optimizer
t0 = time.time()
tolerance = 1e-4 # one tenth millisecond
(refined_offset, fval, ierr, numfunc) = scipy.optimize.fminbound(func_to_optimize, -0.12, 0.12, xtol=tolerance, full_output=True)
t1 = time.time()
if ierr == 0:
logger.info("Time offset found by brent optimizer: %.4f. Elapsed: %.2f seconds (%d function calls)", refined_offset, t1-t0, numfunc)
else:
logger.error("Brent optimizer did not converge. Aborting!")
raise Exception("Brent optimizer did not converge, when trying to refine offset.")
return refined_offset | def function[refine_time_offset, parameter[image_list, frame_timestamps, rotation_sequence, rotation_timestamps, camera_matrix, readout_time]]:
constant[Refine a time offset between camera and IMU using rolling shutter aware optimization.
To refine the time offset using this function, you must meet the following constraints
1) The data must already be roughly aligned. Only a few image frames of error
are allowed.
2) The images *must* have been captured by a *rolling shutter* camera.
This function finds a refined offset using optimization.
Points are first tracked from the start to the end of the provided images.
Then an optimization function looks at the reprojection error of the tracked points
given the IMU-data and the refined offset.
The found offset *d* is such that you want to perform the following time update
new_frame_timestamps = frame_timestamps + d
Parameters
------------
image_list : list of ndarray
A list of images to perform tracking on. High quality tracks are required,
so make sure the sequence you choose is easy to track in.
frame_timestamps : ndarray
Timestamps of image_list
rotation_sequence : (4, N) ndarray
Absolute rotations as a sequence of unit quaternions (first element is scalar).
rotation_timestamps : ndarray
Timestamps of rotation_sequence
camera_matrix : (3,3) ndarray
The internal camera calibration matrix of the camera.
readout_time : float
The readout time of the camera.
Returns
------------
offset : float
A refined offset that aligns the image data with the rotation data.
]
variable[max_corners] assign[=] constant[200]
variable[quality_level] assign[=] constant[0.07]
variable[min_distance] assign[=] constant[5]
variable[max_tracks] assign[=] constant[20]
variable[initial_points] assign[=] call[name[cv2].goodFeaturesToTrack, parameter[call[name[image_list]][constant[0]], name[max_corners], name[quality_level], name[min_distance]]]
<ast.Tuple object at 0x7da18c4cc850> assign[=] call[name[tracking].track_retrack, parameter[name[image_list], name[initial_points]]]
variable[track_id_list] assign[=] call[call[name[np].random.permutation, parameter[call[name[points].shape][constant[0]]]]][<ast.Slice object at 0x7da18c4cd060>]
<ast.Tuple object at 0x7da18c4cd330> assign[=] call[call[name[image_list]][constant[0]].shape][<ast.Slice object at 0x7da18c4cf010>]
variable[row_delta_time] assign[=] binary_operation[name[readout_time] / name[rows]]
<ast.Tuple object at 0x7da18c4ce200> assign[=] name[points].shape
variable[K] assign[=] call[name[np].matrix, parameter[name[camera_matrix]]]
def function[func_to_optimize, parameter[td]]:
variable[res] assign[=] constant[0.0]
variable[N] assign[=] constant[0]
for taget[name[frame_idx]] in starred[call[name[range], parameter[binary_operation[name[num_frames] - constant[1]]]]] begin[:]
for taget[name[track_id]] in starred[name[track_id_list]] begin[:]
variable[p1] assign[=] call[call[name[points]][tuple[[<ast.Name object at 0x7da18c4cde40>, <ast.Name object at 0x7da18c4cec50>, <ast.Slice object at 0x7da18c4cd000>]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da18c4ce920>, <ast.Constant object at 0x7da18c4cc640>]]]]
variable[p2] assign[=] call[call[name[points]][tuple[[<ast.Name object at 0x7da1b057ba00>, <ast.BinOp object at 0x7da1b057b130>, <ast.Slice object at 0x7da1b057b670>]]].reshape, parameter[tuple[[<ast.UnaryOp object at 0x7da1b057aaa0>, <ast.Constant object at 0x7da1b05799c0>]]]]
variable[t1] assign[=] binary_operation[binary_operation[call[name[frame_timestamps]][name[frame_idx]] + binary_operation[binary_operation[call[name[p1]][constant[1]] - constant[1]] * name[row_delta_time]]] + name[td]]
variable[t2] assign[=] binary_operation[binary_operation[call[name[frame_timestamps]][binary_operation[name[frame_idx] + constant[1]]] + binary_operation[binary_operation[call[name[p2]][constant[1]] - constant[1]] * name[row_delta_time]]] + name[td]]
variable[t1] assign[=] call[name[float], parameter[name[t1]]]
variable[t2] assign[=] call[name[float], parameter[name[t2]]]
variable[q1] assign[=] call[name[IMU].rotation_at_time, parameter[name[t1], name[rotation_timestamps], name[rotation_sequence]]]
variable[q2] assign[=] call[name[IMU].rotation_at_time, parameter[name[t2], name[rotation_timestamps], name[rotation_sequence]]]
variable[R1] assign[=] call[name[rotations].quat_to_rotation_matrix, parameter[name[q1]]]
variable[R2] assign[=] call[name[rotations].quat_to_rotation_matrix, parameter[name[q2]]]
variable[p1_rec] assign[=] call[call[call[call[name[K].dot, parameter[name[R1].T]].dot, parameter[name[R2]]].dot, parameter[name[K].I]].dot, parameter[call[name[np].vstack, parameter[tuple[[<ast.Name object at 0x7da20c7966e0>, <ast.Constant object at 0x7da20c796b00>]]]]]]
if compare[call[name[p1_rec]][constant[2]] equal[==] constant[0]] begin[:]
continue
<ast.AugAssign object at 0x7da20c795360>
<ast.AugAssign object at 0x7da2044c3b50>
return[binary_operation[name[res] / name[N]]]
variable[t0] assign[=] call[name[time].time, parameter[]]
variable[tolerance] assign[=] constant[0.0001]
<ast.Tuple object at 0x7da2044c36d0> assign[=] call[name[scipy].optimize.fminbound, parameter[name[func_to_optimize], <ast.UnaryOp object at 0x7da2044c2c20>, constant[0.12]]]
variable[t1] assign[=] call[name[time].time, parameter[]]
if compare[name[ierr] equal[==] constant[0]] begin[:]
call[name[logger].info, parameter[constant[Time offset found by brent optimizer: %.4f. Elapsed: %.2f seconds (%d function calls)], name[refined_offset], binary_operation[name[t1] - name[t0]], name[numfunc]]]
return[name[refined_offset]] | keyword[def] identifier[refine_time_offset] ( identifier[image_list] , identifier[frame_timestamps] , identifier[rotation_sequence] , identifier[rotation_timestamps] , identifier[camera_matrix] , identifier[readout_time] ):
literal[string]
identifier[max_corners] = literal[int]
identifier[quality_level] = literal[int]
identifier[min_distance] = literal[int]
identifier[max_tracks] = literal[int]
identifier[initial_points] = identifier[cv2] . identifier[goodFeaturesToTrack] ( identifier[image_list] [ literal[int] ], identifier[max_corners] , identifier[quality_level] , identifier[min_distance] )
( identifier[points] , identifier[status] )= identifier[tracking] . identifier[track_retrack] ( identifier[image_list] , identifier[initial_points] )
identifier[track_id_list] = identifier[np] . identifier[random] . identifier[permutation] ( identifier[points] . identifier[shape] [ literal[int] ])[: identifier[max_tracks] ]
identifier[rows] , identifier[cols] = identifier[image_list] [ literal[int] ]. identifier[shape] [: literal[int] ]
identifier[row_delta_time] = identifier[readout_time] / identifier[rows]
identifier[num_tracks] , identifier[num_frames] , identifier[_] = identifier[points] . identifier[shape]
identifier[K] = identifier[np] . identifier[matrix] ( identifier[camera_matrix] )
keyword[def] identifier[func_to_optimize] ( identifier[td] ,* identifier[args] ):
identifier[res] = literal[int]
identifier[N] = literal[int]
keyword[for] identifier[frame_idx] keyword[in] identifier[range] ( identifier[num_frames] - literal[int] ):
keyword[for] identifier[track_id] keyword[in] identifier[track_id_list] :
identifier[p1] = identifier[points] [ identifier[track_id] , identifier[frame_idx] ,:]. identifier[reshape] ((- literal[int] , literal[int] ))
identifier[p2] = identifier[points] [ identifier[track_id] , identifier[frame_idx] + literal[int] ,:]. identifier[reshape] ((- literal[int] , literal[int] ))
identifier[t1] = identifier[frame_timestamps] [ identifier[frame_idx] ]+( identifier[p1] [ literal[int] ]- literal[int] )* identifier[row_delta_time] + identifier[td]
identifier[t2] = identifier[frame_timestamps] [ identifier[frame_idx] + literal[int] ]+( identifier[p2] [ literal[int] ]- literal[int] )* identifier[row_delta_time] + identifier[td]
identifier[t1] = identifier[float] ( identifier[t1] )
identifier[t2] = identifier[float] ( identifier[t2] )
identifier[q1] = identifier[IMU] . identifier[rotation_at_time] ( identifier[t1] , identifier[rotation_timestamps] , identifier[rotation_sequence] )
identifier[q2] = identifier[IMU] . identifier[rotation_at_time] ( identifier[t2] , identifier[rotation_timestamps] , identifier[rotation_sequence] )
identifier[R1] = identifier[rotations] . identifier[quat_to_rotation_matrix] ( identifier[q1] )
identifier[R2] = identifier[rotations] . identifier[quat_to_rotation_matrix] ( identifier[q2] )
identifier[p1_rec] = identifier[K] . identifier[dot] ( identifier[R1] . identifier[T] ). identifier[dot] ( identifier[R2] ). identifier[dot] ( identifier[K] . identifier[I] ). identifier[dot] ( identifier[np] . identifier[vstack] (( identifier[p2] , literal[int] )))
keyword[if] identifier[p1_rec] [ literal[int] ]== literal[int] :
keyword[continue]
keyword[else] :
identifier[p1_rec] /= identifier[p1_rec] [ literal[int] ]
identifier[res] += identifier[np] . identifier[sum] (( identifier[p1] - identifier[np] . identifier[array] ( identifier[p1_rec] [ literal[int] : literal[int] ]))** literal[int] )
identifier[N] += literal[int]
keyword[return] identifier[res] / identifier[N]
identifier[t0] = identifier[time] . identifier[time] ()
identifier[tolerance] = literal[int]
( identifier[refined_offset] , identifier[fval] , identifier[ierr] , identifier[numfunc] )= identifier[scipy] . identifier[optimize] . identifier[fminbound] ( identifier[func_to_optimize] ,- literal[int] , literal[int] , identifier[xtol] = identifier[tolerance] , identifier[full_output] = keyword[True] )
identifier[t1] = identifier[time] . identifier[time] ()
keyword[if] identifier[ierr] == literal[int] :
identifier[logger] . identifier[info] ( literal[string] , identifier[refined_offset] , identifier[t1] - identifier[t0] , identifier[numfunc] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[refined_offset] | def refine_time_offset(image_list, frame_timestamps, rotation_sequence, rotation_timestamps, camera_matrix, readout_time):
"""Refine a time offset between camera and IMU using rolling shutter aware optimization.
To refine the time offset using this function, you must meet the following constraints
1) The data must already be roughly aligned. Only a few image frames of error
are allowed.
2) The images *must* have been captured by a *rolling shutter* camera.
This function finds a refined offset using optimization.
Points are first tracked from the start to the end of the provided images.
Then an optimization function looks at the reprojection error of the tracked points
given the IMU-data and the refined offset.
The found offset *d* is such that you want to perform the following time update
new_frame_timestamps = frame_timestamps + d
Parameters
------------
image_list : list of ndarray
A list of images to perform tracking on. High quality tracks are required,
so make sure the sequence you choose is easy to track in.
frame_timestamps : ndarray
Timestamps of image_list
rotation_sequence : (4, N) ndarray
Absolute rotations as a sequence of unit quaternions (first element is scalar).
rotation_timestamps : ndarray
Timestamps of rotation_sequence
camera_matrix : (3,3) ndarray
The internal camera calibration matrix of the camera.
readout_time : float
The readout time of the camera.
Returns
------------
offset : float
A refined offset that aligns the image data with the rotation data.
"""
# 1) Track points
max_corners = 200
quality_level = 0.07
min_distance = 5
max_tracks = 20
initial_points = cv2.goodFeaturesToTrack(image_list[0], max_corners, quality_level, min_distance)
(points, status) = tracking.track_retrack(image_list, initial_points) # Prune to at most max_tracks number of tracks, choose randomly
track_id_list = np.random.permutation(points.shape[0])[:max_tracks]
(rows, cols) = image_list[0].shape[:2]
row_delta_time = readout_time / rows
(num_tracks, num_frames, _) = points.shape
K = np.matrix(camera_matrix)
def func_to_optimize(td, *args):
res = 0.0
N = 0
for frame_idx in range(num_frames - 1):
for track_id in track_id_list:
p1 = points[track_id, frame_idx, :].reshape((-1, 1))
p2 = points[track_id, frame_idx + 1, :].reshape((-1, 1))
t1 = frame_timestamps[frame_idx] + (p1[1] - 1) * row_delta_time + td
t2 = frame_timestamps[frame_idx + 1] + (p2[1] - 1) * row_delta_time + td
t1 = float(t1)
t2 = float(t2)
q1 = IMU.rotation_at_time(t1, rotation_timestamps, rotation_sequence)
q2 = IMU.rotation_at_time(t2, rotation_timestamps, rotation_sequence)
R1 = rotations.quat_to_rotation_matrix(q1)
R2 = rotations.quat_to_rotation_matrix(q2)
p1_rec = K.dot(R1.T).dot(R2).dot(K.I).dot(np.vstack((p2, 1)))
if p1_rec[2] == 0:
continue # depends on [control=['if'], data=[]]
else:
p1_rec /= p1_rec[2]
res += np.sum((p1 - np.array(p1_rec[0:2])) ** 2)
N += 1 # depends on [control=['for'], data=['track_id']] # depends on [control=['for'], data=['frame_idx']]
return res / N
# Bounded Brent optimizer
t0 = time.time()
tolerance = 0.0001 # one tenth millisecond
(refined_offset, fval, ierr, numfunc) = scipy.optimize.fminbound(func_to_optimize, -0.12, 0.12, xtol=tolerance, full_output=True)
t1 = time.time()
if ierr == 0:
logger.info('Time offset found by brent optimizer: %.4f. Elapsed: %.2f seconds (%d function calls)', refined_offset, t1 - t0, numfunc) # depends on [control=['if'], data=[]]
else:
logger.error('Brent optimizer did not converge. Aborting!')
raise Exception('Brent optimizer did not converge, when trying to refine offset.')
return refined_offset |
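A hedged usage sketch: the array shapes follow the docstring, while the frame loader, the quaternion arrays and the calibration values are placeholders, not part of the code above.

import numpy as np
# quats: (4, N) unit quaternions; quat_ts: their timestamps (placeholders)
frames = load_gray_frames('sequence/')     # hypothetical grayscale loader
frame_ts = np.arange(len(frames)) / 30.0   # 30 fps timestamps
offset = refine_time_offset(frames, frame_ts, quats, quat_ts,
                            camera_matrix=K, readout_time=0.03)
frame_ts_aligned = frame_ts + offset       # the update the docstring prescribes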
def create(cls, infile, config=None, params=None, mask=None):
"""Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overridden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
"""
infile = os.path.abspath(infile)
roi_file, roi_data = utils.load_data(infile)
if config is None:
config = roi_data['config']
validate = False
else:
validate = True
gta = cls(config, validate=validate)
gta.setup(init_sources=False)
gta.load_roi(infile, params=params, mask=mask)
return gta | def function[create, parameter[cls, infile, config, params, mask]]:
constant[Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overridden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
]
variable[infile] assign[=] call[name[os].path.abspath, parameter[name[infile]]]
<ast.Tuple object at 0x7da18f00cfa0> assign[=] call[name[utils].load_data, parameter[name[infile]]]
if compare[name[config] is constant[None]] begin[:]
variable[config] assign[=] call[name[roi_data]][constant[config]]
variable[validate] assign[=] constant[False]
variable[gta] assign[=] call[name[cls], parameter[name[config]]]
call[name[gta].setup, parameter[]]
call[name[gta].load_roi, parameter[name[infile]]]
return[name[gta]] | keyword[def] identifier[create] ( identifier[cls] , identifier[infile] , identifier[config] = keyword[None] , identifier[params] = keyword[None] , identifier[mask] = keyword[None] ):
literal[string]
identifier[infile] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[infile] )
identifier[roi_file] , identifier[roi_data] = identifier[utils] . identifier[load_data] ( identifier[infile] )
keyword[if] identifier[config] keyword[is] keyword[None] :
identifier[config] = identifier[roi_data] [ literal[string] ]
identifier[validate] = keyword[False]
keyword[else] :
identifier[validate] = keyword[True]
identifier[gta] = identifier[cls] ( identifier[config] , identifier[validate] = identifier[validate] )
identifier[gta] . identifier[setup] ( identifier[init_sources] = keyword[False] )
identifier[gta] . identifier[load_roi] ( identifier[infile] , identifier[params] = identifier[params] , identifier[mask] = identifier[mask] )
keyword[return] identifier[gta] | def create(cls, infile, config=None, params=None, mask=None):
"""Create a new instance of GTAnalysis from an analysis output file
generated with `~fermipy.GTAnalysis.write_roi`. By default
the new instance will inherit the configuration of the saved
analysis instance. The configuration may be overriden by
passing a configuration file path with the ``config``
argument.
Parameters
----------
infile : str
Path to the ROI results file.
config : str
Path to a configuration file. This will override the
configuration in the ROI results file.
params : str
Path to a yaml file with updated parameter values
mask : str
Path to a fits file with an updated mask
"""
infile = os.path.abspath(infile)
(roi_file, roi_data) = utils.load_data(infile)
if config is None:
config = roi_data['config']
validate = False # depends on [control=['if'], data=['config']]
else:
validate = True
gta = cls(config, validate=validate)
gta.setup(init_sources=False)
gta.load_roi(infile, params=params, mask=mask)
return gta |
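Since create is effectively an alternate constructor, typical use is a one-liner; the file names below are placeholders.

# Hedged sketch; paths are placeholders.
gta = GTAnalysis.create('fit0.npy')                      # reuse saved config
gta = GTAnalysis.create('fit0.npy', config='alt.yaml')   # override config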
def source(self, value=None):
"""Corresponds to IDD Field `source`
Args:
value (str): value for IDD Field `source`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = str(value)
except ValueError:
raise ValueError('value {} needs to be of type str '
'for field `source`'.format(value))
if ',' in value:
raise ValueError('value should not contain a comma '
'for field `source`')
self._source = value | def function[source, parameter[self, value]]:
constant[Corresponds to IDD Field `source`
Args:
value (str): value for IDD Field `source`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da1b0f9f4f0>
if compare[constant[,] in name[value]] begin[:]
<ast.Raise object at 0x7da1b0f9f9a0>
name[self]._source assign[=] name[value] | keyword[def] identifier[source] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[str] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] literal[string] keyword[in] identifier[value] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] )
identifier[self] . identifier[_source] = identifier[value] | def source(self, value=None):
"""Corresponds to IDD Field `source`
Args:
value (str): value for IDD Field `source`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = str(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} needs to be of type str for field `source`'.format(value)) # depends on [control=['except'], data=[]]
if ',' in value:
raise ValueError('value should not contain a comma for field `source`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['value']]
self._source = value |
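The setter enforces exactly two rules, str-coercibility and no commas, which a short driver makes visible; record stands in for a hypothetical instance of the owning IDD record class.

# Hedged sketch; `record` is a hypothetical instance of the owning class.
record.source('TMY3')     # stored on self._source
record.source(42)         # coerced to '42' via str()
try:
    record.source('a,b')  # commas collide with the IDD field separator
except ValueError as err:
    print(err)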
def read_py(self, fin_txt, get_goids_only, exclude_ungrouped, prt=sys.stdout):
"""Read GO IDs or sections data from a Python file."""
goids_fin = self._read_py(fin_txt, get_goids_only, exclude_ungrouped)
sections = self._read_finish(goids_fin, prt)
# Print summary of GO IDs read
if prt is not None:
self._prt_read_msg(prt, fin_txt, exclude_ungrouped)
return sections | def function[read_py, parameter[self, fin_txt, get_goids_only, exclude_ungrouped, prt]]:
constant[Read GO IDs or sections data from a Python file.]
variable[goids_fin] assign[=] call[name[self]._read_py, parameter[name[fin_txt], name[get_goids_only], name[exclude_ungrouped]]]
variable[sections] assign[=] call[name[self]._read_finish, parameter[name[goids_fin], name[prt]]]
if compare[name[prt] is_not constant[None]] begin[:]
call[name[self]._prt_read_msg, parameter[name[prt], name[fin_txt], name[exclude_ungrouped]]]
return[name[sections]] | keyword[def] identifier[read_py] ( identifier[self] , identifier[fin_txt] , identifier[get_goids_only] , identifier[exclude_ungrouped] , identifier[prt] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[goids_fin] = identifier[self] . identifier[_read_py] ( identifier[fin_txt] , identifier[get_goids_only] , identifier[exclude_ungrouped] )
identifier[sections] = identifier[self] . identifier[_read_finish] ( identifier[goids_fin] , identifier[prt] )
keyword[if] identifier[prt] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_prt_read_msg] ( identifier[prt] , identifier[fin_txt] , identifier[exclude_ungrouped] )
keyword[return] identifier[sections] | def read_py(self, fin_txt, get_goids_only, exclude_ungrouped, prt=sys.stdout):
"""Read GO IDs or sections data from a Python file."""
goids_fin = self._read_py(fin_txt, get_goids_only, exclude_ungrouped)
sections = self._read_finish(goids_fin, prt)
# Print summary of GO IDs read
if prt is not None:
self._prt_read_msg(prt, fin_txt, exclude_ungrouped) # depends on [control=['if'], data=['prt']]
return sections |
def convertDate(date):
"""Convert DATE string into a decimal year."""
d, t = date.split('T')
return decimal_date(d, timeobs=t) | def function[convertDate, parameter[date]]:
constant[Convert DATE string into a decimal year.]
<ast.Tuple object at 0x7da1b0e3d360> assign[=] call[name[date].split, parameter[constant[T]]]
return[call[name[decimal_date], parameter[name[d]]]] | keyword[def] identifier[convertDate] ( identifier[date] ):
literal[string]
identifier[d] , identifier[t] = identifier[date] . identifier[split] ( literal[string] )
keyword[return] identifier[decimal_date] ( identifier[d] , identifier[timeobs] = identifier[t] ) | def convertDate(date):
"""Convert DATE string into a decimal year."""
(d, t) = date.split('T')
return decimal_date(d, timeobs=t) |
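Worked example, assuming decimal_date(d, timeobs=t) fractions the year by calendar date and time of day:

# Hedged example; the exact fraction depends on decimal_date's convention.
year = convertDate('2004-07-01T12:00:00')   # roughly 2004.5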
def trigger_deleted(self, filepath):
"""Triggers deleted event if the flie doesn't exist."""
if not os.path.exists(filepath):
self._trigger('deleted', filepath) | def function[trigger_deleted, parameter[self, filepath]]:
constant[Triggers the deleted event if the file doesn't exist.]
if <ast.UnaryOp object at 0x7da1b2852650> begin[:]
call[name[self]._trigger, parameter[constant[deleted], name[filepath]]] | keyword[def] identifier[trigger_deleted] ( identifier[self] , identifier[filepath] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[filepath] ):
identifier[self] . identifier[_trigger] ( literal[string] , identifier[filepath] ) | def trigger_deleted(self, filepath):
"""Triggers deleted event if the flie doesn't exist."""
if not os.path.exists(filepath):
self._trigger('deleted', filepath) # depends on [control=['if'], data=[]] |
def kelvin_to_fahrenheit(kelvintemp):
"""
Converts a numeric temperature from Kelvin degrees to Fahrenheit degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Fahrenheit temperature
:raises: *TypeError* when bad argument types are provided; *ValueError* when the temperature is negative
"""
if kelvintemp < 0:
raise ValueError(__name__ + \
": negative temperature values not allowed")
fahrenheittemp = (kelvintemp - KELVIN_OFFSET) * \
FAHRENHEIT_DEGREE_SCALE + FAHRENHEIT_OFFSET
return float("{0:.2f}".format(fahrenheittemp)) | def function[kelvin_to_fahrenheit, parameter[kelvintemp]]:
constant[
Converts a numeric temperature from Kelvin degrees to Fahrenheit degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Fahrenheit temperature
:raises: *TypeError* when bad argument types are provided; *ValueError* when the temperature is negative
]
if compare[name[kelvintemp] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da18eb57f70>
variable[fahrenheittemp] assign[=] binary_operation[binary_operation[binary_operation[name[kelvintemp] - name[KELVIN_OFFSET]] * name[FAHRENHEIT_DEGREE_SCALE]] + name[FAHRENHEIT_OFFSET]]
return[call[name[float], parameter[call[constant[{0:.2f}].format, parameter[name[fahrenheittemp]]]]]] | keyword[def] identifier[kelvin_to_fahrenheit] ( identifier[kelvintemp] ):
literal[string]
keyword[if] identifier[kelvintemp] < literal[int] :
keyword[raise] identifier[ValueError] ( identifier[__name__] + literal[string] )
identifier[fahrenheittemp] =( identifier[kelvintemp] - identifier[KELVIN_OFFSET] )* identifier[FAHRENHEIT_DEGREE_SCALE] + identifier[FAHRENHEIT_OFFSET]
keyword[return] identifier[float] ( literal[string] . identifier[format] ( identifier[fahrenheittemp] )) | def kelvin_to_fahrenheit(kelvintemp):
"""
Converts a numeric temperature from Kelvin degrees to Fahrenheit degrees
:param kelvintemp: the Kelvin temperature
:type kelvintemp: int/long/float
:returns: the float Fahrenheit temperature
:raises: *TypeError* when bad argument types are provided; *ValueError* when the temperature is negative
"""
if kelvintemp < 0:
raise ValueError(__name__ + ': negative temperature values not allowed') # depends on [control=['if'], data=[]]
fahrenheittemp = (kelvintemp - KELVIN_OFFSET) * FAHRENHEIT_DEGREE_SCALE + FAHRENHEIT_OFFSET
return float('{0:.2f}'.format(fahrenheittemp)) |
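Assuming the module constants take their usual values (KELVIN_OFFSET = 273.15, FAHRENHEIT_DEGREE_SCALE = 1.8, FAHRENHEIT_OFFSET = 32), the arithmetic works out as follows:

# (300 - 273.15) * 1.8 + 32 = 80.33 after the two-decimal rounding
assert kelvin_to_fahrenheit(300) == 80.33
assert kelvin_to_fahrenheit(273.15) == 32.0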
def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
"""Add labels to Matplotlib heatmap axes, in-place."""
if params.labels:
# If a label mapping is missing, use the key text as fall back
rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
collabels = [params.labels.get(lab, lab) for lab in collabels]
xlabs = heatmap_axes.set_xticklabels(collabels)
ylabs = heatmap_axes.set_yticklabels(rowlabels)
for label in xlabs: # Rotate column labels
label.set_rotation(90)
for labset in (xlabs, ylabs): # Smaller font
for label in labset:
label.set_fontsize(8) | def function[add_mpl_labels, parameter[heatmap_axes, rowlabels, collabels, params]]:
constant[Add labels to Matplotlib heatmap axes, in-place.]
if name[params].labels begin[:]
variable[rowlabels] assign[=] <ast.ListComp object at 0x7da18bcc8460>
variable[collabels] assign[=] <ast.ListComp object at 0x7da18bccbc10>
variable[xlabs] assign[=] call[name[heatmap_axes].set_xticklabels, parameter[name[collabels]]]
variable[ylabs] assign[=] call[name[heatmap_axes].set_yticklabels, parameter[name[rowlabels]]]
for taget[name[label]] in starred[name[xlabs]] begin[:]
call[name[label].set_rotation, parameter[constant[90]]]
for taget[name[labset]] in starred[tuple[[<ast.Name object at 0x7da1b0d19060>, <ast.Name object at 0x7da1b0d18190>]]] begin[:]
for taget[name[label]] in starred[name[labset]] begin[:]
call[name[label].set_fontsize, parameter[constant[8]]] | keyword[def] identifier[add_mpl_labels] ( identifier[heatmap_axes] , identifier[rowlabels] , identifier[collabels] , identifier[params] ):
literal[string]
keyword[if] identifier[params] . identifier[labels] :
identifier[rowlabels] =[ identifier[params] . identifier[labels] . identifier[get] ( identifier[lab] , identifier[lab] ) keyword[for] identifier[lab] keyword[in] identifier[rowlabels] ]
identifier[collabels] =[ identifier[params] . identifier[labels] . identifier[get] ( identifier[lab] , identifier[lab] ) keyword[for] identifier[lab] keyword[in] identifier[collabels] ]
identifier[xlabs] = identifier[heatmap_axes] . identifier[set_xticklabels] ( identifier[collabels] )
identifier[ylabs] = identifier[heatmap_axes] . identifier[set_yticklabels] ( identifier[rowlabels] )
keyword[for] identifier[label] keyword[in] identifier[xlabs] :
identifier[label] . identifier[set_rotation] ( literal[int] )
keyword[for] identifier[labset] keyword[in] ( identifier[xlabs] , identifier[ylabs] ):
keyword[for] identifier[label] keyword[in] identifier[labset] :
identifier[label] . identifier[set_fontsize] ( literal[int] ) | def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
"""Add labels to Matplotlib heatmap axes, in-place."""
if params.labels:
# If a label mapping is missing, use the key text as fall back
rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
collabels = [params.labels.get(lab, lab) for lab in collabels] # depends on [control=['if'], data=[]]
xlabs = heatmap_axes.set_xticklabels(collabels)
ylabs = heatmap_axes.set_yticklabels(rowlabels)
for label in xlabs: # Rotate column labels
label.set_rotation(90) # depends on [control=['for'], data=['label']]
for labset in (xlabs, ylabs): # Smaller font
for label in labset:
label.set_fontsize(8) # depends on [control=['for'], data=['label']] # depends on [control=['for'], data=['labset']] |
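A minimal driver, assuming params is any object exposing an optional labels mapping as the function body implies:

# Hedged sketch; SimpleNamespace stands in for the real params object.
from types import SimpleNamespace
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.imshow([[0.1, 0.9], [0.5, 0.3]])
ax.set_xticks([0, 1]); ax.set_yticks([0, 1])
params = SimpleNamespace(labels={'g1': 'Genome 1'})
add_mpl_labels(ax, ['g1', 'g2'], ['g1', 'g2'], params)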
def kill_processes(self):
"""Gets called on shutdown by the timer when too much time has gone by,
calling the terminate method instead of nicely asking for the consumers
to stop.
"""
LOGGER.critical('Max shutdown exceeded, forcibly exiting')
processes = self.active_processes(False)
while processes:
for proc in self.active_processes(False):
if int(proc.pid) != int(os.getpid()):
LOGGER.warning('Killing %s (%s)', proc.name, proc.pid)
try:
os.kill(int(proc.pid), signal.SIGKILL)
except OSError:
pass
else:
LOGGER.warning('Cowardly refusing to kill self (%s, %s)',
proc.pid, os.getpid())
time.sleep(0.5)
processes = self.active_processes(False)
LOGGER.info('Killed all children')
return self.set_state(self.STATE_STOPPED) | def function[kill_processes, parameter[self]]:
constant[Gets called on shutdown by the timer when too much time has gone by,
calling the terminate method instead of nicely asking for the consumers
to stop.
]
call[name[LOGGER].critical, parameter[constant[Max shutdown exceeded, forcibly exiting]]]
variable[processes] assign[=] call[name[self].active_processes, parameter[constant[False]]]
while name[processes] begin[:]
for taget[name[proc]] in starred[call[name[self].active_processes, parameter[constant[False]]]] begin[:]
if compare[call[name[int], parameter[name[proc].pid]] not_equal[!=] call[name[int], parameter[call[name[os].getpid, parameter[]]]]] begin[:]
call[name[LOGGER].warning, parameter[constant[Killing %s (%s)], name[proc].name, name[proc].pid]]
<ast.Try object at 0x7da20e955120>
call[name[time].sleep, parameter[constant[0.5]]]
variable[processes] assign[=] call[name[self].active_processes, parameter[constant[False]]]
call[name[LOGGER].info, parameter[constant[Killed all children]]]
return[call[name[self].set_state, parameter[name[self].STATE_STOPPED]]] | keyword[def] identifier[kill_processes] ( identifier[self] ):
literal[string]
identifier[LOGGER] . identifier[critical] ( literal[string] )
identifier[processes] = identifier[self] . identifier[active_processes] ( keyword[False] )
keyword[while] identifier[processes] :
keyword[for] identifier[proc] keyword[in] identifier[self] . identifier[active_processes] ( keyword[False] ):
keyword[if] identifier[int] ( identifier[proc] . identifier[pid] )!= identifier[int] ( identifier[os] . identifier[getpid] ()):
identifier[LOGGER] . identifier[warning] ( literal[string] , identifier[proc] . identifier[name] , identifier[proc] . identifier[pid] )
keyword[try] :
identifier[os] . identifier[kill] ( identifier[int] ( identifier[proc] . identifier[pid] ), identifier[signal] . identifier[SIGKILL] )
keyword[except] identifier[OSError] :
keyword[pass]
keyword[else] :
identifier[LOGGER] . identifier[warning] ( literal[string] ,
identifier[proc] . identifier[pid] , identifier[os] . identifier[getpid] ())
identifier[time] . identifier[sleep] ( literal[int] )
identifier[processes] = identifier[self] . identifier[active_processes] ( keyword[False] )
identifier[LOGGER] . identifier[info] ( literal[string] )
keyword[return] identifier[self] . identifier[set_state] ( identifier[self] . identifier[STATE_STOPPED] ) | def kill_processes(self):
"""Gets called on shutdown by the timer when too much time has gone by,
calling the terminate method instead of nicely asking for the consumers
to stop.
"""
LOGGER.critical('Max shutdown exceeded, forcibly exiting')
processes = self.active_processes(False)
while processes:
for proc in self.active_processes(False):
if int(proc.pid) != int(os.getpid()):
LOGGER.warning('Killing %s (%s)', proc.name, proc.pid)
try:
os.kill(int(proc.pid), signal.SIGKILL) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
LOGGER.warning('Cowardly refusing to kill self (%s, %s)', proc.pid, os.getpid()) # depends on [control=['for'], data=['proc']]
time.sleep(0.5)
processes = self.active_processes(False) # depends on [control=['while'], data=[]]
LOGGER.info('Killed all children')
return self.set_state(self.STATE_STOPPED) |