code | code_sememe | token_type | code_dependency
---|---|---|---|
def load_children(self):
"""
        Load the subelements from the xml_element into their corresponding classes.
:returns: List of child objects.
:rtype: list
:raises CardinalityException: If there is more than one Version child.
"""
# Containers
children = list()
statuses = list()
version = None
titles = list()
descriptions = list()
platforms = list()
idents = list()
# Element load
for element in self.xml_element:
uri, tag = Element.get_namespace_and_tag(element.tag)
if tag == 'version':
if version is None:
version = Version(element)
else:
error_msg = 'version element found more than once'
raise CardinalityException(error_msg)
elif tag == 'status':
statuses.append(Status(element))
elif tag == 'title':
titles.append(Title(element))
elif tag == 'description':
descriptions.append(Description(element))
elif tag == 'platform':
platforms.append(Platform(element))
elif tag == 'ident':
idents.append(Ident(element))
# List construction
children.extend(statuses)
if version is not None:
children.append(version)
children.extend(titles)
children.extend(descriptions)
children.extend(platforms)
children.extend(idents)
return children | def function[load_children, parameter[self]]:
constant[
Load the subelements from the xml_element into their corresponding classes.
:returns: List of child objects.
:rtype: list
:raises CardinalityException: If there is more than one Version child.
]
variable[children] assign[=] call[name[list], parameter[]]
variable[statuses] assign[=] call[name[list], parameter[]]
variable[version] assign[=] constant[None]
variable[titles] assign[=] call[name[list], parameter[]]
variable[descriptions] assign[=] call[name[list], parameter[]]
variable[platforms] assign[=] call[name[list], parameter[]]
variable[idents] assign[=] call[name[list], parameter[]]
for taget[name[element]] in starred[name[self].xml_element] begin[:]
<ast.Tuple object at 0x7da1b0a055a0> assign[=] call[name[Element].get_namespace_and_tag, parameter[name[element].tag]]
if compare[name[tag] equal[==] constant[version]] begin[:]
if compare[name[version] is constant[None]] begin[:]
variable[version] assign[=] call[name[Version], parameter[name[element]]]
call[name[children].extend, parameter[name[statuses]]]
if compare[name[version] is_not constant[None]] begin[:]
call[name[children].append, parameter[name[version]]]
call[name[children].extend, parameter[name[titles]]]
call[name[children].extend, parameter[name[descriptions]]]
call[name[children].extend, parameter[name[platforms]]]
call[name[children].extend, parameter[name[idents]]]
return[name[children]] | keyword[def] identifier[load_children] ( identifier[self] ):
literal[string]
identifier[children] = identifier[list] ()
identifier[statuses] = identifier[list] ()
identifier[version] = keyword[None]
identifier[titles] = identifier[list] ()
identifier[descriptions] = identifier[list] ()
identifier[platforms] = identifier[list] ()
identifier[idents] = identifier[list] ()
keyword[for] identifier[element] keyword[in] identifier[self] . identifier[xml_element] :
identifier[uri] , identifier[tag] = identifier[Element] . identifier[get_namespace_and_tag] ( identifier[element] . identifier[tag] )
keyword[if] identifier[tag] == literal[string] :
keyword[if] identifier[version] keyword[is] keyword[None] :
identifier[version] = identifier[Version] ( identifier[element] )
keyword[else] :
identifier[error_msg] = literal[string]
keyword[raise] identifier[CardinalityException] ( identifier[error_msg] )
keyword[elif] identifier[tag] == literal[string] :
identifier[statuses] . identifier[append] ( identifier[Status] ( identifier[element] ))
keyword[elif] identifier[tag] == literal[string] :
identifier[titles] . identifier[append] ( identifier[Title] ( identifier[element] ))
keyword[elif] identifier[tag] == literal[string] :
identifier[descriptions] . identifier[append] ( identifier[Description] ( identifier[element] ))
keyword[elif] identifier[tag] == literal[string] :
identifier[platforms] . identifier[append] ( identifier[Platform] ( identifier[element] ))
keyword[elif] identifier[tag] == literal[string] :
identifier[idents] . identifier[append] ( identifier[Ident] ( identifier[element] ))
identifier[children] . identifier[extend] ( identifier[statuses] )
keyword[if] identifier[version] keyword[is] keyword[not] keyword[None] :
identifier[children] . identifier[append] ( identifier[version] )
identifier[children] . identifier[extend] ( identifier[titles] )
identifier[children] . identifier[extend] ( identifier[descriptions] )
identifier[children] . identifier[extend] ( identifier[platforms] )
identifier[children] . identifier[extend] ( identifier[idents] )
keyword[return] identifier[children] | def load_children(self):
"""
        Load the subelements from the xml_element into their corresponding classes.
:returns: List of child objects.
:rtype: list
:raises CardinalityException: If there is more than one Version child.
"""
# Containers
children = list()
statuses = list()
version = None
titles = list()
descriptions = list()
platforms = list()
idents = list()
# Element load
for element in self.xml_element:
(uri, tag) = Element.get_namespace_and_tag(element.tag)
if tag == 'version':
if version is None:
version = Version(element) # depends on [control=['if'], data=['version']]
else:
error_msg = 'version element found more than once'
raise CardinalityException(error_msg) # depends on [control=['if'], data=[]]
elif tag == 'status':
statuses.append(Status(element)) # depends on [control=['if'], data=[]]
elif tag == 'title':
titles.append(Title(element)) # depends on [control=['if'], data=[]]
elif tag == 'description':
descriptions.append(Description(element)) # depends on [control=['if'], data=[]]
elif tag == 'platform':
platforms.append(Platform(element)) # depends on [control=['if'], data=[]]
elif tag == 'ident':
idents.append(Ident(element)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['element']]
# List construction
children.extend(statuses)
if version is not None:
children.append(version) # depends on [control=['if'], data=['version']]
children.extend(titles)
children.extend(descriptions)
children.extend(platforms)
children.extend(idents)
return children |
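A minimal sketch of the cardinality guard at the heart of load_children, using xml.etree as the XML layer; Version and CardinalityException are hypothetical stand-ins for the classes the original module imports.

import xml.etree.ElementTree as ET

class CardinalityException(Exception):
    pass

class Version:
    def __init__(self, element):
        self.text = element.text

xml_element = ET.fromstring('<rule><version>1</version><version>2</version></rule>')
version = None
try:
    for element in xml_element:
        if element.tag == 'version':
            if version is None:
                version = Version(element)
            else:
                raise CardinalityException('version element found more than once')
except CardinalityException as exc:
    print(exc)  # version element found more than once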
def from_json(cls, json_data):
"""Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
json_data: string or bytes, JSON to deserialize.
Returns:
An instance of a Credentials subclass.
"""
data = json.loads(_helpers._from_bytes(json_data))
if (data.get('token_expiry') and
not isinstance(data['token_expiry'], datetime.datetime)):
try:
data['token_expiry'] = datetime.datetime.strptime(
data['token_expiry'], EXPIRY_FORMAT)
except ValueError:
data['token_expiry'] = None
retval = cls(
data['access_token'],
data['client_id'],
data['client_secret'],
data['refresh_token'],
data['token_expiry'],
data['token_uri'],
data['user_agent'],
revoke_uri=data.get('revoke_uri', None),
id_token=data.get('id_token', None),
id_token_jwt=data.get('id_token_jwt', None),
token_response=data.get('token_response', None),
scopes=data.get('scopes', None),
token_info_uri=data.get('token_info_uri', None))
retval.invalid = data['invalid']
return retval | def function[from_json, parameter[cls, json_data]]:
constant[Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
json_data: string or bytes, JSON to deserialize.
Returns:
An instance of a Credentials subclass.
]
variable[data] assign[=] call[name[json].loads, parameter[call[name[_helpers]._from_bytes, parameter[name[json_data]]]]]
if <ast.BoolOp object at 0x7da1b01fca30> begin[:]
<ast.Try object at 0x7da1b01ff7f0>
variable[retval] assign[=] call[name[cls], parameter[call[name[data]][constant[access_token]], call[name[data]][constant[client_id]], call[name[data]][constant[client_secret]], call[name[data]][constant[refresh_token]], call[name[data]][constant[token_expiry]], call[name[data]][constant[token_uri]], call[name[data]][constant[user_agent]]]]
name[retval].invalid assign[=] call[name[data]][constant[invalid]]
return[name[retval]] | keyword[def] identifier[from_json] ( identifier[cls] , identifier[json_data] ):
literal[string]
identifier[data] = identifier[json] . identifier[loads] ( identifier[_helpers] . identifier[_from_bytes] ( identifier[json_data] ))
keyword[if] ( identifier[data] . identifier[get] ( literal[string] ) keyword[and]
keyword[not] identifier[isinstance] ( identifier[data] [ literal[string] ], identifier[datetime] . identifier[datetime] )):
keyword[try] :
identifier[data] [ literal[string] ]= identifier[datetime] . identifier[datetime] . identifier[strptime] (
identifier[data] [ literal[string] ], identifier[EXPIRY_FORMAT] )
keyword[except] identifier[ValueError] :
identifier[data] [ literal[string] ]= keyword[None]
identifier[retval] = identifier[cls] (
identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ],
identifier[data] [ literal[string] ],
identifier[revoke_uri] = identifier[data] . identifier[get] ( literal[string] , keyword[None] ),
identifier[id_token] = identifier[data] . identifier[get] ( literal[string] , keyword[None] ),
identifier[id_token_jwt] = identifier[data] . identifier[get] ( literal[string] , keyword[None] ),
identifier[token_response] = identifier[data] . identifier[get] ( literal[string] , keyword[None] ),
identifier[scopes] = identifier[data] . identifier[get] ( literal[string] , keyword[None] ),
identifier[token_info_uri] = identifier[data] . identifier[get] ( literal[string] , keyword[None] ))
identifier[retval] . identifier[invalid] = identifier[data] [ literal[string] ]
keyword[return] identifier[retval] | def from_json(cls, json_data):
"""Instantiate a Credentials object from a JSON description of it.
The JSON should have been produced by calling .to_json() on the object.
Args:
json_data: string or bytes, JSON to deserialize.
Returns:
An instance of a Credentials subclass.
"""
data = json.loads(_helpers._from_bytes(json_data))
if data.get('token_expiry') and (not isinstance(data['token_expiry'], datetime.datetime)):
try:
data['token_expiry'] = datetime.datetime.strptime(data['token_expiry'], EXPIRY_FORMAT) # depends on [control=['try'], data=[]]
except ValueError:
data['token_expiry'] = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
retval = cls(data['access_token'], data['client_id'], data['client_secret'], data['refresh_token'], data['token_expiry'], data['token_uri'], data['user_agent'], revoke_uri=data.get('revoke_uri', None), id_token=data.get('id_token', None), id_token_jwt=data.get('id_token_jwt', None), token_response=data.get('token_response', None), scopes=data.get('scopes', None), token_info_uri=data.get('token_info_uri', None))
retval.invalid = data['invalid']
return retval |
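The token_expiry branch is the subtle part of from_json: a serialized timestamp is parsed back into a datetime, and an unparseable value degrades to None rather than raising. A sketch of that logic in isolation, assuming the '%Y-%m-%dT%H:%M:%SZ' layout oauth2client uses for EXPIRY_FORMAT:

import datetime

EXPIRY_FORMAT = '%Y-%m-%dT%H:%M:%SZ'  # assumed to match the module constant

def parse_expiry(value):
    try:
        return datetime.datetime.strptime(value, EXPIRY_FORMAT)
    except ValueError:
        return None  # mirrors the fallback in from_json

print(parse_expiry('2024-01-01T00:00:00Z'))  # 2024-01-01 00:00:00
print(parse_expiry('not-a-date'))            # None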
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
json = {}
json["hash"] = self.Hash.To0xString()
json["size"] = self.Size()
json["version"] = self.Version
json["previousblockhash"] = self.PrevHash.To0xString()
json["merkleroot"] = self.MerkleRoot.To0xString()
json["time"] = self.Timestamp
json["index"] = self.Index
nonce = bytearray(self.ConsensusData.to_bytes(8, 'little'))
nonce.reverse()
json["nonce"] = nonce.hex()
json['nextconsensus'] = Crypto.ToAddress(self.NextConsensus)
# json["consensus data"] = self.ConsensusData
json["script"] = '' if not self.Script else self.Script.ToJson()
return json | def function[ToJson, parameter[self]]:
constant[
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
]
variable[json] assign[=] dictionary[[], []]
call[name[json]][constant[hash]] assign[=] call[name[self].Hash.To0xString, parameter[]]
call[name[json]][constant[size]] assign[=] call[name[self].Size, parameter[]]
call[name[json]][constant[version]] assign[=] name[self].Version
call[name[json]][constant[previousblockhash]] assign[=] call[name[self].PrevHash.To0xString, parameter[]]
call[name[json]][constant[merkleroot]] assign[=] call[name[self].MerkleRoot.To0xString, parameter[]]
call[name[json]][constant[time]] assign[=] name[self].Timestamp
call[name[json]][constant[index]] assign[=] name[self].Index
variable[nonce] assign[=] call[name[bytearray], parameter[call[name[self].ConsensusData.to_bytes, parameter[constant[8], constant[little]]]]]
call[name[nonce].reverse, parameter[]]
call[name[json]][constant[nonce]] assign[=] call[name[nonce].hex, parameter[]]
call[name[json]][constant[nextconsensus]] assign[=] call[name[Crypto].ToAddress, parameter[name[self].NextConsensus]]
call[name[json]][constant[script]] assign[=] <ast.IfExp object at 0x7da1b1dd3dc0>
return[name[json]] | keyword[def] identifier[ToJson] ( identifier[self] ):
literal[string]
identifier[json] ={}
identifier[json] [ literal[string] ]= identifier[self] . identifier[Hash] . identifier[To0xString] ()
identifier[json] [ literal[string] ]= identifier[self] . identifier[Size] ()
identifier[json] [ literal[string] ]= identifier[self] . identifier[Version]
identifier[json] [ literal[string] ]= identifier[self] . identifier[PrevHash] . identifier[To0xString] ()
identifier[json] [ literal[string] ]= identifier[self] . identifier[MerkleRoot] . identifier[To0xString] ()
identifier[json] [ literal[string] ]= identifier[self] . identifier[Timestamp]
identifier[json] [ literal[string] ]= identifier[self] . identifier[Index]
identifier[nonce] = identifier[bytearray] ( identifier[self] . identifier[ConsensusData] . identifier[to_bytes] ( literal[int] , literal[string] ))
identifier[nonce] . identifier[reverse] ()
identifier[json] [ literal[string] ]= identifier[nonce] . identifier[hex] ()
identifier[json] [ literal[string] ]= identifier[Crypto] . identifier[ToAddress] ( identifier[self] . identifier[NextConsensus] )
identifier[json] [ literal[string] ]= literal[string] keyword[if] keyword[not] identifier[self] . identifier[Script] keyword[else] identifier[self] . identifier[Script] . identifier[ToJson] ()
keyword[return] identifier[json] | def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
json = {}
json['hash'] = self.Hash.To0xString()
json['size'] = self.Size()
json['version'] = self.Version
json['previousblockhash'] = self.PrevHash.To0xString()
json['merkleroot'] = self.MerkleRoot.To0xString()
json['time'] = self.Timestamp
json['index'] = self.Index
nonce = bytearray(self.ConsensusData.to_bytes(8, 'little'))
nonce.reverse()
json['nonce'] = nonce.hex()
json['nextconsensus'] = Crypto.ToAddress(self.NextConsensus)
# json["consensus data"] = self.ConsensusData
json['script'] = '' if not self.Script else self.Script.ToJson()
return json |
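The nonce handling is the one non-obvious step in ToJson: rendering the consensus data as 8 little-endian bytes and reversing them is equivalent to a big-endian hex string. A standalone check:

consensus_data = 2083236893  # 0x7c2bac1d

nonce = bytearray(consensus_data.to_bytes(8, 'little'))
nonce.reverse()
print(nonce.hex())                              # 000000007c2bac1d
print(consensus_data.to_bytes(8, 'big').hex())  # same result, in one step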
def set_spcPct(self, value):
"""
Set spacing to *value* lines, e.g. 1.75 lines. A ./a:spcPts child is
removed if present.
"""
self._remove_spcPts()
spcPct = self.get_or_add_spcPct()
spcPct.val = value | def function[set_spcPct, parameter[self, value]]:
constant[
Set spacing to *value* lines, e.g. 1.75 lines. A ./a:spcPts child is
removed if present.
]
call[name[self]._remove_spcPts, parameter[]]
variable[spcPct] assign[=] call[name[self].get_or_add_spcPct, parameter[]]
name[spcPct].val assign[=] name[value] | keyword[def] identifier[set_spcPct] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[_remove_spcPts] ()
identifier[spcPct] = identifier[self] . identifier[get_or_add_spcPct] ()
identifier[spcPct] . identifier[val] = identifier[value] | def set_spcPct(self, value):
"""
Set spacing to *value* lines, e.g. 1.75 lines. A ./a:spcPts child is
removed if present.
"""
self._remove_spcPts()
spcPct = self.get_or_add_spcPct()
spcPct.val = value |
def tag_limit_sibling_ordinal(tag, stop_tag_name):
"""
    Count previous tags of the same name until a tag whose name
    matches stop_tag_name is reached, then stop counting.
"""
tag_count = 1
for prev_tag in tag.previous_elements:
if prev_tag.name == tag.name:
tag_count += 1
if prev_tag.name == stop_tag_name:
break
return tag_count | def function[tag_limit_sibling_ordinal, parameter[tag, stop_tag_name]]:
constant[
Count previous tags of the same name until a tag whose name
matches stop_tag_name is reached, then stop counting.
]
variable[tag_count] assign[=] constant[1]
for taget[name[prev_tag]] in starred[name[tag].previous_elements] begin[:]
if compare[name[prev_tag].name equal[==] name[tag].name] begin[:]
<ast.AugAssign object at 0x7da1b1081750>
if compare[name[prev_tag].name equal[==] name[stop_tag_name]] begin[:]
break
return[name[tag_count]] | keyword[def] identifier[tag_limit_sibling_ordinal] ( identifier[tag] , identifier[stop_tag_name] ):
literal[string]
identifier[tag_count] = literal[int]
keyword[for] identifier[prev_tag] keyword[in] identifier[tag] . identifier[previous_elements] :
keyword[if] identifier[prev_tag] . identifier[name] == identifier[tag] . identifier[name] :
identifier[tag_count] += literal[int]
keyword[if] identifier[prev_tag] . identifier[name] == identifier[stop_tag_name] :
keyword[break]
keyword[return] identifier[tag_count] | def tag_limit_sibling_ordinal(tag, stop_tag_name):
"""
    Count previous tags of the same name until a tag whose name
    matches stop_tag_name is reached, then stop counting.
"""
tag_count = 1
for prev_tag in tag.previous_elements:
if prev_tag.name == tag.name:
tag_count += 1 # depends on [control=['if'], data=[]]
if prev_tag.name == stop_tag_name:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prev_tag']]
return tag_count |
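A small demonstration with BeautifulSoup, whose tags provide the previous_elements iterator this function walks; note the count starts at 1, so the tag itself is included in the ordinal.

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div><h2></h2><p></p><p></p><p></p></div>', 'html.parser')
last_p = soup.find_all('p')[-1]

tag_count = 1
for prev_tag in last_p.previous_elements:
    if prev_tag.name == last_p.name:
        tag_count += 1
    if prev_tag.name == 'h2':
        break
print(tag_count)  # 3: this <p> plus the two before it, stopping at <h2>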
def get_exif_info(self):
"""return exif-tag dict
"""
_dict = {}
for tag in _EXIF_TAGS:
ret = self.img.attribute("EXIF:%s" % tag)
if ret and ret != 'unknown':
_dict[tag] = ret
return _dict | def function[get_exif_info, parameter[self]]:
constant[return exif-tag dict
]
variable[_dict] assign[=] dictionary[[], []]
for taget[name[tag]] in starred[name[_EXIF_TAGS]] begin[:]
variable[ret] assign[=] call[name[self].img.attribute, parameter[binary_operation[constant[EXIF:%s] <ast.Mod object at 0x7da2590d6920> name[tag]]]]
if <ast.BoolOp object at 0x7da20c6e7670> begin[:]
call[name[_dict]][name[tag]] assign[=] name[ret]
return[name[_dict]] | keyword[def] identifier[get_exif_info] ( identifier[self] ):
literal[string]
identifier[_dict] ={}
keyword[for] identifier[tag] keyword[in] identifier[_EXIF_TAGS] :
identifier[ret] = identifier[self] . identifier[img] . identifier[attribute] ( literal[string] % identifier[tag] )
keyword[if] identifier[ret] keyword[and] identifier[ret] != literal[string] :
identifier[_dict] [ identifier[tag] ]= identifier[ret]
keyword[return] identifier[_dict] | def get_exif_info(self):
"""return exif-tag dict
"""
_dict = {}
for tag in _EXIF_TAGS:
ret = self.img.attribute('EXIF:%s' % tag)
if ret and ret != 'unknown':
_dict[tag] = ret # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tag']]
return _dict |
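The attribute('EXIF:...') call suggests a GraphicsMagick-style binding such as pgmagick, and _EXIF_TAGS belongs to the surrounding module, so a dict-backed stub is used here to show just the filtering behaviour:

_EXIF_TAGS = ('Make', 'Model', 'DateTime')  # hypothetical subset

class FakeImage:
    data = {'EXIF:Make': 'ACME', 'EXIF:Model': 'unknown', 'EXIF:DateTime': ''}
    def attribute(self, key):
        return self.data.get(key, '')

img = FakeImage()
info = {}
for tag in _EXIF_TAGS:
    ret = img.attribute('EXIF:%s' % tag)
    if ret and ret != 'unknown':  # drop empty and 'unknown' values
        info[tag] = ret
print(info)  # {'Make': 'ACME'}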
def unregister_signals_oaiset(self):
"""Unregister signals oaiset."""
from .models import OAISet
from .receivers import after_insert_oai_set, \
after_update_oai_set, after_delete_oai_set
if contains(OAISet, 'after_insert', after_insert_oai_set):
remove(OAISet, 'after_insert', after_insert_oai_set)
remove(OAISet, 'after_update', after_update_oai_set)
remove(OAISet, 'after_delete', after_delete_oai_set) | def function[unregister_signals_oaiset, parameter[self]]:
constant[Unregister signals oaiset.]
from relative_module[models] import module[OAISet]
from relative_module[receivers] import module[after_insert_oai_set], module[after_update_oai_set], module[after_delete_oai_set]
if call[name[contains], parameter[name[OAISet], constant[after_insert], name[after_insert_oai_set]]] begin[:]
call[name[remove], parameter[name[OAISet], constant[after_insert], name[after_insert_oai_set]]]
call[name[remove], parameter[name[OAISet], constant[after_update], name[after_update_oai_set]]]
call[name[remove], parameter[name[OAISet], constant[after_delete], name[after_delete_oai_set]]] | keyword[def] identifier[unregister_signals_oaiset] ( identifier[self] ):
literal[string]
keyword[from] . identifier[models] keyword[import] identifier[OAISet]
keyword[from] . identifier[receivers] keyword[import] identifier[after_insert_oai_set] , identifier[after_update_oai_set] , identifier[after_delete_oai_set]
keyword[if] identifier[contains] ( identifier[OAISet] , literal[string] , identifier[after_insert_oai_set] ):
identifier[remove] ( identifier[OAISet] , literal[string] , identifier[after_insert_oai_set] )
identifier[remove] ( identifier[OAISet] , literal[string] , identifier[after_update_oai_set] )
identifier[remove] ( identifier[OAISet] , literal[string] , identifier[after_delete_oai_set] ) | def unregister_signals_oaiset(self):
"""Unregister signals oaiset."""
from .models import OAISet
from .receivers import after_insert_oai_set, after_update_oai_set, after_delete_oai_set
if contains(OAISet, 'after_insert', after_insert_oai_set):
remove(OAISet, 'after_insert', after_insert_oai_set)
remove(OAISet, 'after_update', after_update_oai_set)
remove(OAISet, 'after_delete', after_delete_oai_set) # depends on [control=['if'], data=[]] |
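contains and remove here look like sqlalchemy.event.contains and sqlalchemy.event.remove, used to tear down mapper-level listeners idempotently. A minimal sketch of the same guard pattern:

from sqlalchemy import Column, Integer, event
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Thing(Base):
    __tablename__ = 'thing'
    id = Column(Integer, primary_key=True)

def after_insert_thing(mapper, connection, target):
    pass

event.listen(Thing, 'after_insert', after_insert_thing)
# Checking contains() first makes repeated teardown calls safe.
if event.contains(Thing, 'after_insert', after_insert_thing):
    event.remove(Thing, 'after_insert', after_insert_thing)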
def render_paginate(request, template, objects, per_page, extra_context={}):
"""
Paginated list of objects.
"""
paginator = Paginator(objects, per_page)
page = request.GET.get('page', 1)
get_params = '&'.join(['%s=%s' % (k, request.GET[k])
for k in request.GET if k != 'page'])
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404
try:
page_obj = paginator.page(page_number)
except InvalidPage:
raise Http404
context = {
'object_list': page_obj.object_list,
'paginator': paginator,
'page_obj': page_obj,
'is_paginated': page_obj.has_other_pages(),
'get_params': get_params
}
context.update(extra_context)
return render(request, template, context) | def function[render_paginate, parameter[request, template, objects, per_page, extra_context]]:
constant[
Paginated list of objects.
]
variable[paginator] assign[=] call[name[Paginator], parameter[name[objects], name[per_page]]]
variable[page] assign[=] call[name[request].GET.get, parameter[constant[page], constant[1]]]
variable[get_params] assign[=] call[constant[&].join, parameter[<ast.ListComp object at 0x7da2054a5120>]]
<ast.Try object at 0x7da2054a76d0>
<ast.Try object at 0x7da2054a6080>
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da2054a4670>, <ast.Constant object at 0x7da2054a4c40>, <ast.Constant object at 0x7da2054a69b0>, <ast.Constant object at 0x7da2054a63e0>, <ast.Constant object at 0x7da2054a76a0>], [<ast.Attribute object at 0x7da2054a5db0>, <ast.Name object at 0x7da2054a7490>, <ast.Name object at 0x7da2054a4b80>, <ast.Call object at 0x7da2054a5a20>, <ast.Name object at 0x7da2054a44f0>]]
call[name[context].update, parameter[name[extra_context]]]
return[call[name[render], parameter[name[request], name[template], name[context]]]] | keyword[def] identifier[render_paginate] ( identifier[request] , identifier[template] , identifier[objects] , identifier[per_page] , identifier[extra_context] ={}):
literal[string]
identifier[paginator] = identifier[Paginator] ( identifier[objects] , identifier[per_page] )
identifier[page] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] , literal[int] )
identifier[get_params] = literal[string] . identifier[join] ([ literal[string] %( identifier[k] , identifier[request] . identifier[GET] [ identifier[k] ])
keyword[for] identifier[k] keyword[in] identifier[request] . identifier[GET] keyword[if] identifier[k] != literal[string] ])
keyword[try] :
identifier[page_number] = identifier[int] ( identifier[page] )
keyword[except] identifier[ValueError] :
keyword[if] identifier[page] == literal[string] :
identifier[page_number] = identifier[paginator] . identifier[num_pages]
keyword[else] :
keyword[raise] identifier[Http404]
keyword[try] :
identifier[page_obj] = identifier[paginator] . identifier[page] ( identifier[page_number] )
keyword[except] identifier[InvalidPage] :
keyword[raise] identifier[Http404]
identifier[context] ={
literal[string] : identifier[page_obj] . identifier[object_list] ,
literal[string] : identifier[paginator] ,
literal[string] : identifier[page_obj] ,
literal[string] : identifier[page_obj] . identifier[has_other_pages] (),
literal[string] : identifier[get_params]
}
identifier[context] . identifier[update] ( identifier[extra_context] )
keyword[return] identifier[render] ( identifier[request] , identifier[template] , identifier[context] ) | def render_paginate(request, template, objects, per_page, extra_context={}):
"""
Paginated list of objects.
"""
paginator = Paginator(objects, per_page)
page = request.GET.get('page', 1)
get_params = '&'.join(['%s=%s' % (k, request.GET[k]) for k in request.GET if k != 'page'])
try:
page_number = int(page) # depends on [control=['try'], data=[]]
except ValueError:
if page == 'last':
page_number = paginator.num_pages # depends on [control=['if'], data=[]]
else:
raise Http404 # depends on [control=['except'], data=[]]
try:
page_obj = paginator.page(page_number) # depends on [control=['try'], data=[]]
except InvalidPage:
raise Http404 # depends on [control=['except'], data=[]]
context = {'object_list': page_obj.object_list, 'paginator': paginator, 'page_obj': page_obj, 'is_paginated': page_obj.has_other_pages(), 'get_params': get_params}
context.update(extra_context)
return render(request, template, context) |
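Two behaviours are worth calling out: ?page=last resolves to the final page, and get_params rebuilds the query string without the page key so pagination links preserve filters. The paging half can be sketched with Django's Paginator on its own:

from django.core.paginator import Paginator

paginator = Paginator(list(range(25)), per_page=10)

page = 'last'  # as if the request carried ?page=last
try:
    page_number = int(page)
except ValueError:
    page_number = paginator.num_pages  # 'last' maps to the final page

page_obj = paginator.page(page_number)
print(list(page_obj.object_list))   # [20, 21, 22, 23, 24]
print(page_obj.has_other_pages())   # True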
def tree2text(tree_obj, indent=4):
# type: (TreeInfo, int) -> str
"""
Return text representation of a decision tree.
"""
parts = []
def _format_node(node, depth=0):
# type: (NodeInfo, int) -> None
def p(*args):
# type: (*str) -> None
parts.append(" " * depth * indent)
parts.extend(args)
if node.is_leaf:
value_repr = _format_leaf_value(tree_obj, node)
parts.append(" ---> {}".format(value_repr))
else:
assert node.left is not None
assert node.right is not None
feat_name = node.feature_name
if depth > 0:
parts.append("\n")
left_samples = node.left.sample_ratio
p("{feat_name} <= {threshold:0.3f} ({left_samples:0.1%})".format(
left_samples=left_samples,
feat_name=feat_name,
threshold=node.threshold,
))
_format_node(node.left, depth=depth + 1)
parts.append("\n")
right_samples = node.right.sample_ratio
p("{feat_name} > {threshold:0.3f} ({right_samples:0.1%})".format(
right_samples=right_samples,
feat_name=feat_name,
threshold=node.threshold,
))
_format_node(node.right, depth=depth + 1)
_format_node(tree_obj.tree)
return "".join(parts) | def function[tree2text, parameter[tree_obj, indent]]:
constant[
Return text representation of a decision tree.
]
variable[parts] assign[=] list[[]]
def function[_format_node, parameter[node, depth]]:
def function[p, parameter[]]:
call[name[parts].append, parameter[binary_operation[binary_operation[constant[ ] * name[depth]] * name[indent]]]]
call[name[parts].extend, parameter[name[args]]]
if name[node].is_leaf begin[:]
variable[value_repr] assign[=] call[name[_format_leaf_value], parameter[name[tree_obj], name[node]]]
call[name[parts].append, parameter[call[constant[ ---> {}].format, parameter[name[value_repr]]]]]
call[name[_format_node], parameter[name[tree_obj].tree]]
return[call[constant[].join, parameter[name[parts]]]] | keyword[def] identifier[tree2text] ( identifier[tree_obj] , identifier[indent] = literal[int] ):
literal[string]
identifier[parts] =[]
keyword[def] identifier[_format_node] ( identifier[node] , identifier[depth] = literal[int] ):
keyword[def] identifier[p] (* identifier[args] ):
identifier[parts] . identifier[append] ( literal[string] * identifier[depth] * identifier[indent] )
identifier[parts] . identifier[extend] ( identifier[args] )
keyword[if] identifier[node] . identifier[is_leaf] :
identifier[value_repr] = identifier[_format_leaf_value] ( identifier[tree_obj] , identifier[node] )
identifier[parts] . identifier[append] ( literal[string] . identifier[format] ( identifier[value_repr] ))
keyword[else] :
keyword[assert] identifier[node] . identifier[left] keyword[is] keyword[not] keyword[None]
keyword[assert] identifier[node] . identifier[right] keyword[is] keyword[not] keyword[None]
identifier[feat_name] = identifier[node] . identifier[feature_name]
keyword[if] identifier[depth] > literal[int] :
identifier[parts] . identifier[append] ( literal[string] )
identifier[left_samples] = identifier[node] . identifier[left] . identifier[sample_ratio]
identifier[p] ( literal[string] . identifier[format] (
identifier[left_samples] = identifier[left_samples] ,
identifier[feat_name] = identifier[feat_name] ,
identifier[threshold] = identifier[node] . identifier[threshold] ,
))
identifier[_format_node] ( identifier[node] . identifier[left] , identifier[depth] = identifier[depth] + literal[int] )
identifier[parts] . identifier[append] ( literal[string] )
identifier[right_samples] = identifier[node] . identifier[right] . identifier[sample_ratio]
identifier[p] ( literal[string] . identifier[format] (
identifier[right_samples] = identifier[right_samples] ,
identifier[feat_name] = identifier[feat_name] ,
identifier[threshold] = identifier[node] . identifier[threshold] ,
))
identifier[_format_node] ( identifier[node] . identifier[right] , identifier[depth] = identifier[depth] + literal[int] )
identifier[_format_node] ( identifier[tree_obj] . identifier[tree] )
keyword[return] literal[string] . identifier[join] ( identifier[parts] ) | def tree2text(tree_obj, indent=4):
# type: (TreeInfo, int) -> str
'\n Return text representation of a decision tree.\n '
parts = []
def _format_node(node, depth=0):
# type: (NodeInfo, int) -> None
def p(*args):
# type: (*str) -> None
parts.append(' ' * depth * indent)
parts.extend(args)
if node.is_leaf:
value_repr = _format_leaf_value(tree_obj, node)
parts.append(' ---> {}'.format(value_repr)) # depends on [control=['if'], data=[]]
else:
assert node.left is not None
assert node.right is not None
feat_name = node.feature_name
if depth > 0:
parts.append('\n') # depends on [control=['if'], data=[]]
left_samples = node.left.sample_ratio
p('{feat_name} <= {threshold:0.3f} ({left_samples:0.1%})'.format(left_samples=left_samples, feat_name=feat_name, threshold=node.threshold))
_format_node(node.left, depth=depth + 1)
parts.append('\n')
right_samples = node.right.sample_ratio
p('{feat_name} > {threshold:0.3f} ({right_samples:0.1%})'.format(right_samples=right_samples, feat_name=feat_name, threshold=node.threshold))
_format_node(node.right, depth=depth + 1)
_format_node(tree_obj.tree)
return ''.join(parts) |
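The shape of the recursion — accumulate strings in parts, pad by depth * indent, and prefix non-root splits with a newline — can be shown over a toy tuple tree; TreeInfo/NodeInfo and _format_leaf_value belong to the original module, so plain tuples stand in for them.

# Splits are (feature, threshold, left, right); anything else is a leaf.
toy = ('x', 0.5, ('y', 1.0, 'A', 'B'), 'C')

def to_text(node, depth=0, indent=4):
    if not isinstance(node, tuple):
        return ' ---> {}'.format(node)
    feat, threshold, left, right = node
    pad = ' ' * depth * indent
    parts = ['\n'] if depth > 0 else []
    parts.append('{}{} <= {:0.3f}'.format(pad, feat, threshold))
    parts.append(to_text(left, depth + 1, indent))
    parts.append('\n{}{} > {:0.3f}'.format(pad, feat, threshold))
    parts.append(to_text(right, depth + 1, indent))
    return ''.join(parts)

print(to_text(toy))
# x <= 0.500
#     y <= 1.000 ---> A
#     y > 1.000 ---> B
# x > 0.500 ---> C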
def set_xlimits(self, row, column, min=None, max=None):
"""Set x-axis limits of a subplot.
:param row,column: specify the subplot.
:param min: minimal axis value
:param max: maximum axis value
"""
subplot = self.get_subplot_at(row, column)
subplot.set_xlimits(min, max) | def function[set_xlimits, parameter[self, row, column, min, max]]:
constant[Set x-axis limits of a subplot.
:param row,column: specify the subplot.
:param min: minimal axis value
:param max: maximum axis value
]
variable[subplot] assign[=] call[name[self].get_subplot_at, parameter[name[row], name[column]]]
call[name[subplot].set_xlimits, parameter[name[min], name[max]]] | keyword[def] identifier[set_xlimits] ( identifier[self] , identifier[row] , identifier[column] , identifier[min] = keyword[None] , identifier[max] = keyword[None] ):
literal[string]
identifier[subplot] = identifier[self] . identifier[get_subplot_at] ( identifier[row] , identifier[column] )
identifier[subplot] . identifier[set_xlimits] ( identifier[min] , identifier[max] ) | def set_xlimits(self, row, column, min=None, max=None):
"""Set x-axis limits of a subplot.
:param row,column: specify the subplot.
:param min: minimal axis value
:param max: maximum axis value
"""
subplot = self.get_subplot_at(row, column)
subplot.set_xlimits(min, max) |
def __get_merge_versions(self, merge_id):
"""Get merge versions"""
versions = []
group_versions = self.client.merge_versions(merge_id)
for raw_versions in group_versions:
for version in json.loads(raw_versions):
version_id = version['id']
version_full_raw = self.client.merge_version(merge_id, version_id)
version_full = json.loads(version_full_raw)
version_full.pop('diffs', None)
versions.append(version_full)
return versions | def function[__get_merge_versions, parameter[self, merge_id]]:
constant[Get merge versions]
variable[versions] assign[=] list[[]]
variable[group_versions] assign[=] call[name[self].client.merge_versions, parameter[name[merge_id]]]
for taget[name[raw_versions]] in starred[name[group_versions]] begin[:]
for taget[name[version]] in starred[call[name[json].loads, parameter[name[raw_versions]]]] begin[:]
variable[version_id] assign[=] call[name[version]][constant[id]]
variable[version_full_raw] assign[=] call[name[self].client.merge_version, parameter[name[merge_id], name[version_id]]]
variable[version_full] assign[=] call[name[json].loads, parameter[name[version_full_raw]]]
call[name[version_full].pop, parameter[constant[diffs], constant[None]]]
call[name[versions].append, parameter[name[version_full]]]
return[name[versions]] | keyword[def] identifier[__get_merge_versions] ( identifier[self] , identifier[merge_id] ):
literal[string]
identifier[versions] =[]
identifier[group_versions] = identifier[self] . identifier[client] . identifier[merge_versions] ( identifier[merge_id] )
keyword[for] identifier[raw_versions] keyword[in] identifier[group_versions] :
keyword[for] identifier[version] keyword[in] identifier[json] . identifier[loads] ( identifier[raw_versions] ):
identifier[version_id] = identifier[version] [ literal[string] ]
identifier[version_full_raw] = identifier[self] . identifier[client] . identifier[merge_version] ( identifier[merge_id] , identifier[version_id] )
identifier[version_full] = identifier[json] . identifier[loads] ( identifier[version_full_raw] )
identifier[version_full] . identifier[pop] ( literal[string] , keyword[None] )
identifier[versions] . identifier[append] ( identifier[version_full] )
keyword[return] identifier[versions] | def __get_merge_versions(self, merge_id):
"""Get merge versions"""
versions = []
group_versions = self.client.merge_versions(merge_id)
for raw_versions in group_versions:
for version in json.loads(raw_versions):
version_id = version['id']
version_full_raw = self.client.merge_version(merge_id, version_id)
version_full = json.loads(version_full_raw)
version_full.pop('diffs', None)
versions.append(version_full) # depends on [control=['for'], data=['version']] # depends on [control=['for'], data=['raw_versions']]
return versions |
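The shape of the loop — page iteration, one detail fetch per version, and popping the heavy diffs payload — works against any client with these two methods; a stub keeps the sketch self-contained:

import json

class StubClient:
    def merge_versions(self, merge_id):
        yield json.dumps([{'id': 1}, {'id': 2}])  # one page of versions
    def merge_version(self, merge_id, version_id):
        return json.dumps({'id': version_id, 'state': 'ok', 'diffs': ['bulky']})

client = StubClient()
versions = []
for raw_versions in client.merge_versions(42):
    for version in json.loads(raw_versions):
        version_full = json.loads(client.merge_version(42, version['id']))
        version_full.pop('diffs', None)  # keep the record light
        versions.append(version_full)
print(versions)  # [{'id': 1, 'state': 'ok'}, {'id': 2, 'state': 'ok'}]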
def parse_variable(lexer: Lexer) -> VariableNode:
"""Variable: $Name"""
start = lexer.token
expect_token(lexer, TokenKind.DOLLAR)
return VariableNode(name=parse_name(lexer), loc=loc(lexer, start)) | def function[parse_variable, parameter[lexer]]:
constant[Variable: $Name]
variable[start] assign[=] name[lexer].token
call[name[expect_token], parameter[name[lexer], name[TokenKind].DOLLAR]]
return[call[name[VariableNode], parameter[]]] | keyword[def] identifier[parse_variable] ( identifier[lexer] : identifier[Lexer] )-> identifier[VariableNode] :
literal[string]
identifier[start] = identifier[lexer] . identifier[token]
identifier[expect_token] ( identifier[lexer] , identifier[TokenKind] . identifier[DOLLAR] )
keyword[return] identifier[VariableNode] ( identifier[name] = identifier[parse_name] ( identifier[lexer] ), identifier[loc] = identifier[loc] ( identifier[lexer] , identifier[start] )) | def parse_variable(lexer: Lexer) -> VariableNode:
"""Variable: $Name"""
start = lexer.token
expect_token(lexer, TokenKind.DOLLAR)
return VariableNode(name=parse_name(lexer), loc=loc(lexer, start)) |
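In graphql-core this helper is reached through the public parse entry point; a variable in a query document ends up as the VariableNode the function builds (graphql-core 3 API assumed):

from graphql import parse

doc = parse('query Q($id: ID!) { node(id: $id) { __typename } }')
var_def = doc.definitions[0].variable_definitions[0]
print(var_def.variable.name.value)  # id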
def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
return key_value_contents(use_dict=use_dict, as_class=as_class,
key_values=(
('function', 'DeleteForeignDeviceTableEntry'),
('address', str(self.bvlciAddress)),
)) | def function[bvlpdu_contents, parameter[self, use_dict, as_class]]:
constant[Return the contents of an object as a dict.]
return[call[name[key_value_contents], parameter[]]] | keyword[def] identifier[bvlpdu_contents] ( identifier[self] , identifier[use_dict] = keyword[None] , identifier[as_class] = identifier[dict] ):
literal[string]
keyword[return] identifier[key_value_contents] ( identifier[use_dict] = identifier[use_dict] , identifier[as_class] = identifier[as_class] ,
identifier[key_values] =(
( literal[string] , literal[string] ),
( literal[string] , identifier[str] ( identifier[self] . identifier[bvlciAddress] )),
)) | def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
return key_value_contents(use_dict=use_dict, as_class=as_class, key_values=(('function', 'DeleteForeignDeviceTableEntry'), ('address', str(self.bvlciAddress)))) |
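key_value_contents is a bacpypes helper; its assumed behaviour — fold ordered key/value pairs into a (possibly caller-supplied) mapping — can be re-created in a few lines purely for illustration:

def key_value_contents(use_dict=None, as_class=dict, key_values=()):
    if use_dict is None:
        use_dict = as_class()  # assumed behaviour, not the bacpypes source
    for key, value in key_values:
        use_dict[key] = value
    return use_dict

print(key_value_contents(key_values=(
    ('function', 'DeleteForeignDeviceTableEntry'),
    ('address', '192.168.0.1'),
)))  # {'function': 'DeleteForeignDeviceTableEntry', 'address': '192.168.0.1'}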
def traverse(self, fn=None, specs=None, full_breadth=True):
"""
Traverses any nested DimensionedPlot returning a list
of all plots that match the specs. The specs should
be supplied as a list of either Plot types or callables,
which should return a boolean given the plot class.
"""
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches: break
if matches:
accumulator.append(fn(self) if fn else self)
# Assumes composite objects are iterables
if hasattr(self, 'subplots') and self.subplots:
for el in self.subplots.values():
if el is None:
continue
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth: break
return accumulator | def function[traverse, parameter[self, fn, specs, full_breadth]]:
constant[
Traverses any nested DimensionedPlot returning a list
of all plots that match the specs. The specs should
be supplied as a list of either Plot types or callables,
which should return a boolean given the plot class.
]
variable[accumulator] assign[=] list[[]]
variable[matches] assign[=] compare[name[specs] is constant[None]]
if <ast.UnaryOp object at 0x7da18fe93f10> begin[:]
for taget[name[spec]] in starred[name[specs]] begin[:]
variable[matches] assign[=] call[name[self].matches, parameter[name[spec]]]
if name[matches] begin[:]
break
if name[matches] begin[:]
call[name[accumulator].append, parameter[<ast.IfExp object at 0x7da18fe92b60>]]
if <ast.BoolOp object at 0x7da18fe93610> begin[:]
for taget[name[el]] in starred[call[name[self].subplots.values, parameter[]]] begin[:]
if compare[name[el] is constant[None]] begin[:]
continue
<ast.AugAssign object at 0x7da18fe93a30>
if <ast.UnaryOp object at 0x7da18fe92170> begin[:]
break
return[name[accumulator]] | keyword[def] identifier[traverse] ( identifier[self] , identifier[fn] = keyword[None] , identifier[specs] = keyword[None] , identifier[full_breadth] = keyword[True] ):
literal[string]
identifier[accumulator] =[]
identifier[matches] = identifier[specs] keyword[is] keyword[None]
keyword[if] keyword[not] identifier[matches] :
keyword[for] identifier[spec] keyword[in] identifier[specs] :
identifier[matches] = identifier[self] . identifier[matches] ( identifier[spec] )
keyword[if] identifier[matches] : keyword[break]
keyword[if] identifier[matches] :
identifier[accumulator] . identifier[append] ( identifier[fn] ( identifier[self] ) keyword[if] identifier[fn] keyword[else] identifier[self] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[subplots] :
keyword[for] identifier[el] keyword[in] identifier[self] . identifier[subplots] . identifier[values] ():
keyword[if] identifier[el] keyword[is] keyword[None] :
keyword[continue]
identifier[accumulator] += identifier[el] . identifier[traverse] ( identifier[fn] , identifier[specs] , identifier[full_breadth] )
keyword[if] keyword[not] identifier[full_breadth] : keyword[break]
keyword[return] identifier[accumulator] | def traverse(self, fn=None, specs=None, full_breadth=True):
"""
Traverses any nested DimensionedPlot returning a list
of all plots that match the specs. The specs should
be supplied as a list of either Plot types or callables,
which should return a boolean given the plot class.
"""
accumulator = []
matches = specs is None
if not matches:
for spec in specs:
matches = self.matches(spec)
if matches:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['spec']] # depends on [control=['if'], data=[]]
if matches:
accumulator.append(fn(self) if fn else self) # depends on [control=['if'], data=[]]
# Assumes composite objects are iterables
if hasattr(self, 'subplots') and self.subplots:
for el in self.subplots.values():
if el is None:
continue # depends on [control=['if'], data=[]]
accumulator += el.traverse(fn, specs, full_breadth)
if not full_breadth:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['el']] # depends on [control=['if'], data=[]]
return accumulator |
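A sketch of the traversal over a toy hierarchy, with matches() reduced to callable specs; it shows how specs filter nodes and how full_breadth=False would stop after the first subplot branch.

class StubPlot:
    def __init__(self, name, subplots=None):
        self.name = name
        self.subplots = subplots or {}
    def matches(self, spec):
        return spec(self)  # real plots also accept type specs
    def traverse(self, fn=None, specs=None, full_breadth=True):
        accumulator = []
        if specs is None or any(self.matches(s) for s in specs):
            accumulator.append(fn(self) if fn else self)
        for el in self.subplots.values():
            accumulator += el.traverse(fn, specs, full_breadth)
            if not full_breadth:
                break
        return accumulator

root = StubPlot('root', {'a': StubPlot('a'), 'b': StubPlot('b')})
print(root.traverse(fn=lambda p: p.name))                                   # ['root', 'a', 'b']
print(root.traverse(fn=lambda p: p.name, specs=[lambda p: p.name == 'b']))  # ['b']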
def mouseReleaseEvent( self, event ):
"""
        Overloads the mouse release event to ignore the event when the
        scene is in view mode, and release the selection block signal.
:param event <QMouseReleaseEvent>
"""
event.setAccepted(False)
if self._hotspotPressed:
event.accept()
self._hotspotPressed = False
return
# ignore events when the scene is in view mode
scene = self.scene()
        if (self.isLocked() or self._ignoreMouseEvents or
                (scene and (scene.inViewMode() or scene.isConnecting()))):
event.ignore()
self._ignoreMouseEvents = False
return
super(XNode, self).mouseReleaseEvent(event)
# emit the geometry changed signal
self.emitGeometryChanged()
# unblock the selection signals
if ( scene ):
scene.blockSelectionSignals(False)
delta = datetime.datetime.now() - self._pressTime
if not scene.signalsBlocked() and delta.seconds < 1:
scene.nodeClicked.emit(self) | def function[mouseReleaseEvent, parameter[self, event]]:
constant[
Overloads the mouse release event to ignore the event when the scene is in view mode, and release the selection block signal.
:param event <QMouseReleaseEvent>
]
call[name[event].setAccepted, parameter[constant[False]]]
if name[self]._hotspotPressed begin[:]
call[name[event].accept, parameter[]]
name[self]._hotspotPressed assign[=] constant[False]
return[None]
variable[scene] assign[=] call[name[self].scene, parameter[]]
if <ast.BoolOp object at 0x7da18c4cfca0> begin[:]
call[name[event].ignore, parameter[]]
name[self]._ignoreMouseEvents assign[=] constant[False]
return[None]
call[call[name[super], parameter[name[XNode], name[self]]].mouseReleaseEvent, parameter[name[event]]]
call[name[self].emitGeometryChanged, parameter[]]
if name[scene] begin[:]
call[name[scene].blockSelectionSignals, parameter[constant[False]]]
variable[delta] assign[=] binary_operation[call[name[datetime].datetime.now, parameter[]] - name[self]._pressTime]
if <ast.BoolOp object at 0x7da18c4cc6a0> begin[:]
call[name[scene].nodeClicked.emit, parameter[name[self]]] | keyword[def] identifier[mouseReleaseEvent] ( identifier[self] , identifier[event] ):
literal[string]
identifier[event] . identifier[setAccepted] ( keyword[False] )
keyword[if] identifier[self] . identifier[_hotspotPressed] :
identifier[event] . identifier[accept] ()
identifier[self] . identifier[_hotspotPressed] = keyword[False]
keyword[return]
identifier[scene] = identifier[self] . identifier[scene] ()
keyword[if] ( identifier[self] . identifier[isLocked] () keyword[or] identifier[self] . identifier[_ignoreMouseEvents] keyword[or] ( identifier[scene] keyword[and] ( identifier[scene] . identifier[inViewMode] () keyword[or] identifier[scene] . identifier[isConnecting] ()))):
identifier[event] . identifier[ignore] ()
identifier[self] . identifier[_ignoreMouseEvents] = keyword[False]
keyword[return]
identifier[super] ( identifier[XNode] , identifier[self] ). identifier[mouseReleaseEvent] ( identifier[event] )
identifier[self] . identifier[emitGeometryChanged] ()
keyword[if] ( identifier[scene] ):
identifier[scene] . identifier[blockSelectionSignals] ( keyword[False] )
identifier[delta] = identifier[datetime] . identifier[datetime] . identifier[now] ()- identifier[self] . identifier[_pressTime]
keyword[if] keyword[not] identifier[scene] . identifier[signalsBlocked] () keyword[and] identifier[delta] . identifier[seconds] < literal[int] :
identifier[scene] . identifier[nodeClicked] . identifier[emit] ( identifier[self] ) | def mouseReleaseEvent(self, event):
"""
Overloads the mouse release event to ignore the event when the scene is in view mode, and release the selection block signal.
:param event <QMouseReleaseEvent>
"""
event.setAccepted(False)
if self._hotspotPressed:
event.accept()
self._hotspotPressed = False
return # depends on [control=['if'], data=[]]
# ignore events when the scene is in view mode
scene = self.scene()
if self.isLocked() or self._ignoreMouseEvents or (scene and (scene.inViewMode() or scene.isConnecting())):
event.ignore()
self._ignoreMouseEvents = False
return # depends on [control=['if'], data=[]]
super(XNode, self).mouseReleaseEvent(event)
# emit the geometry changed signal
self.emitGeometryChanged()
# unblock the selection signals
if scene:
scene.blockSelectionSignals(False)
delta = datetime.datetime.now() - self._pressTime
if not scene.signalsBlocked() and delta.seconds < 1:
scene.nodeClicked.emit(self) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def Pool(pool='AnyPool', **kwargs):
'''
Chooses between the different pools.
If ``pool == 'AnyPool'``, chooses based on availability.
'''
if pool == 'MPIPool':
return MPIPool(**kwargs)
elif pool == 'MultiPool':
return MultiPool(**kwargs)
elif pool == 'SerialPool':
return SerialPool(**kwargs)
elif pool == 'AnyPool':
if MPIPool.enabled():
return MPIPool(**kwargs)
elif MultiPool.enabled():
return MultiPool(**kwargs)
else:
return SerialPool(**kwargs)
else:
raise ValueError('Invalid pool ``%s``.' % pool) | def function[Pool, parameter[pool]]:
constant[
Chooses between the different pools.
If ``pool == 'AnyPool'``, chooses based on availability.
]
if compare[name[pool] equal[==] constant[MPIPool]] begin[:]
return[call[name[MPIPool], parameter[]]] | keyword[def] identifier[Pool] ( identifier[pool] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[pool] == literal[string] :
keyword[return] identifier[MPIPool] (** identifier[kwargs] )
keyword[elif] identifier[pool] == literal[string] :
keyword[return] identifier[MultiPool] (** identifier[kwargs] )
keyword[elif] identifier[pool] == literal[string] :
keyword[return] identifier[SerialPool] (** identifier[kwargs] )
keyword[elif] identifier[pool] == literal[string] :
keyword[if] identifier[MPIPool] . identifier[enabled] ():
keyword[return] identifier[MPIPool] (** identifier[kwargs] )
keyword[elif] identifier[MultiPool] . identifier[enabled] ():
keyword[return] identifier[MultiPool] (** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[SerialPool] (** identifier[kwargs] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[pool] ) | def Pool(pool='AnyPool', **kwargs):
"""
Chooses between the different pools.
If ``pool == 'AnyPool'``, chooses based on availability.
"""
if pool == 'MPIPool':
return MPIPool(**kwargs) # depends on [control=['if'], data=[]]
elif pool == 'MultiPool':
return MultiPool(**kwargs) # depends on [control=['if'], data=[]]
elif pool == 'SerialPool':
return SerialPool(**kwargs) # depends on [control=['if'], data=[]]
elif pool == 'AnyPool':
if MPIPool.enabled():
return MPIPool(**kwargs) # depends on [control=['if'], data=[]]
elif MultiPool.enabled():
return MultiPool(**kwargs) # depends on [control=['if'], data=[]]
else:
return SerialPool(**kwargs) # depends on [control=['if'], data=[]]
else:
raise ValueError('Invalid pool ``%s``.' % pool) |
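The selection logic is just a capability probe in order of preference; a sketch with stub pool classes (the real MPIPool/MultiPool/SerialPool come from the surrounding package):

class SerialPool:
    @staticmethod
    def enabled():
        return True
    def map(self, func, iterable):
        return list(map(func, iterable))

class MultiPool(SerialPool):
    @staticmethod
    def enabled():
        return False  # pretend multiprocessing is unavailable

def choose_pool(candidates=(MultiPool, SerialPool)):
    for cls in candidates:  # first enabled pool wins
        if cls.enabled():
            return cls()
    raise ValueError('no pool available')

pool = choose_pool()
print(type(pool).__name__, pool.map(lambda x: x * x, [1, 2, 3]))  # SerialPool [1, 4, 9]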
def read_folder(directory):
"""read text files in directory and returns them as array
Args:
directory: where the text files are
Returns:
Array of text
"""
res = []
for filename in os.listdir(directory):
with io.open(os.path.join(directory, filename), encoding="utf-8") as f:
content = f.read()
res.append(content)
return res | def function[read_folder, parameter[directory]]:
constant[read text files in directory and returns them as array
Args:
directory: where the text files are
Returns:
Array of text
]
variable[res] assign[=] list[[]]
for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[directory]]]] begin[:]
with call[name[io].open, parameter[call[name[os].path.join, parameter[name[directory], name[filename]]]]] begin[:]
variable[content] assign[=] call[name[f].read, parameter[]]
call[name[res].append, parameter[name[content]]]
return[name[res]] | keyword[def] identifier[read_folder] ( identifier[directory] ):
literal[string]
identifier[res] =[]
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] ):
keyword[with] identifier[io] . identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] ), identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[content] = identifier[f] . identifier[read] ()
identifier[res] . identifier[append] ( identifier[content] )
keyword[return] identifier[res] | def read_folder(directory):
"""read text files in directory and returns them as array
Args:
directory: where the text files are
Returns:
Array of text
"""
res = []
for filename in os.listdir(directory):
with io.open(os.path.join(directory, filename), encoding='utf-8') as f:
content = f.read()
res.append(content) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['filename']]
return res |
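A quick usage sketch of the read_folder function above, with a throwaway directory; note os.listdir gives no ordering guarantee, hence the sort.

import io
import os
import tempfile

with tempfile.TemporaryDirectory() as directory:
    for name, text in [('a.txt', 'hello'), ('b.txt', 'world')]:
        with io.open(os.path.join(directory, name), 'w', encoding='utf-8') as f:
            f.write(text)
    print(sorted(read_folder(directory)))  # ['hello', 'world']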
def discretize_path(self, path):
"""
Given a list of entities, return a list of connected points.
Parameters
-----------
path: (n,) int, indexes of self.entities
Returns
-----------
discrete: (m, dimension)
"""
discrete = traversal.discretize_path(self.entities,
self.vertices,
path,
scale=self.scale)
return discrete | def function[discretize_path, parameter[self, path]]:
constant[
Given a list of entities, return a list of connected points.
Parameters
-----------
path: (n,) int, indexes of self.entities
Returns
-----------
discrete: (m, dimension)
]
variable[discrete] assign[=] call[name[traversal].discretize_path, parameter[name[self].entities, name[self].vertices, name[path]]]
return[name[discrete]] | keyword[def] identifier[discretize_path] ( identifier[self] , identifier[path] ):
literal[string]
identifier[discrete] = identifier[traversal] . identifier[discretize_path] ( identifier[self] . identifier[entities] ,
identifier[self] . identifier[vertices] ,
identifier[path] ,
identifier[scale] = identifier[self] . identifier[scale] )
keyword[return] identifier[discrete] | def discretize_path(self, path):
"""
Given a list of entities, return a list of connected points.
Parameters
-----------
path: (n,) int, indexes of self.entities
Returns
-----------
discrete: (m, dimension)
"""
discrete = traversal.discretize_path(self.entities, self.vertices, path, scale=self.scale)
return discrete |
def visitObjectExpr(self, ctx: jsgParser.ObjectExprContext):
""" objectExpr: OBRACE membersDef? CBRACE
OBRACE (LEXER_ID_REF | ANY)? MAPSTO valueType ebnfSuffix? CBRACE
"""
if not self._name:
self._name = self._context.anon_id()
if ctx.membersDef():
self.visitChildren(ctx)
elif ctx.MAPSTO():
if ctx.LEXER_ID_REF():
self._map_name_type = as_token(ctx)
# Any and absent mean the same thing
self._map_valuetype = JSGValueType(self._context, ctx.valueType())
if ctx.ebnfSuffix():
self._map_ebnf = JSGEbnf(self._context, ctx.ebnfSuffix()) | def function[visitObjectExpr, parameter[self, ctx]]:
constant[ objectExpr: OBRACE membersDef? CBRACE
OBRACE (LEXER_ID_REF | ANY)? MAPSTO valueType ebnfSuffix? CBRACE
]
if <ast.UnaryOp object at 0x7da2045652d0> begin[:]
name[self]._name assign[=] call[name[self]._context.anon_id, parameter[]]
if call[name[ctx].membersDef, parameter[]] begin[:]
call[name[self].visitChildren, parameter[name[ctx]]] | keyword[def] identifier[visitObjectExpr] ( identifier[self] , identifier[ctx] : identifier[jsgParser] . identifier[ObjectExprContext] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_name] :
identifier[self] . identifier[_name] = identifier[self] . identifier[_context] . identifier[anon_id] ()
keyword[if] identifier[ctx] . identifier[membersDef] ():
identifier[self] . identifier[visitChildren] ( identifier[ctx] )
keyword[elif] identifier[ctx] . identifier[MAPSTO] ():
keyword[if] identifier[ctx] . identifier[LEXER_ID_REF] ():
identifier[self] . identifier[_map_name_type] = identifier[as_token] ( identifier[ctx] )
identifier[self] . identifier[_map_valuetype] = identifier[JSGValueType] ( identifier[self] . identifier[_context] , identifier[ctx] . identifier[valueType] ())
keyword[if] identifier[ctx] . identifier[ebnfSuffix] ():
identifier[self] . identifier[_map_ebnf] = identifier[JSGEbnf] ( identifier[self] . identifier[_context] , identifier[ctx] . identifier[ebnfSuffix] ()) | def visitObjectExpr(self, ctx: jsgParser.ObjectExprContext):
""" objectExpr: OBRACE membersDef? CBRACE
OBRACE (LEXER_ID_REF | ANY)? MAPSTO valueType ebnfSuffix? CBRACE
"""
if not self._name:
self._name = self._context.anon_id() # depends on [control=['if'], data=[]]
if ctx.membersDef():
self.visitChildren(ctx) # depends on [control=['if'], data=[]]
elif ctx.MAPSTO():
if ctx.LEXER_ID_REF():
self._map_name_type = as_token(ctx) # depends on [control=['if'], data=[]]
# Any and absent mean the same thing
self._map_valuetype = JSGValueType(self._context, ctx.valueType())
if ctx.ebnfSuffix():
self._map_ebnf = JSGEbnf(self._context, ctx.ebnfSuffix()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def query_by_entity_uid(idd, kind=''):
'''
    Query post2tag records for a certain post.
'''
if kind == '':
return TabPost2Tag.select(
TabPost2Tag,
TabTag.slug.alias('tag_slug'),
TabTag.name.alias('tag_name')
).join(
TabTag, on=(TabPost2Tag.tag_id == TabTag.uid)
).where(
(TabPost2Tag.post_id == idd) &
(TabTag.kind != 'z')
).order_by(
TabPost2Tag.order
)
return TabPost2Tag.select(
TabPost2Tag,
TabTag.slug.alias('tag_slug'),
TabTag.name.alias('tag_name')
).join(TabTag, on=(TabPost2Tag.tag_id == TabTag.uid)).where(
(TabTag.kind == kind) &
(TabPost2Tag.post_id == idd)
).order_by(
TabPost2Tag.order
) | def function[query_by_entity_uid, parameter[idd, kind]]:
constant[
Query post2tag records for a certain post.
]
if compare[name[kind] equal[==] constant[]] begin[:]
return[call[call[call[call[name[TabPost2Tag].select, parameter[name[TabPost2Tag], call[name[TabTag].slug.alias, parameter[constant[tag_slug]]], call[name[TabTag].name.alias, parameter[constant[tag_name]]]]].join, parameter[name[TabTag]]].where, parameter[binary_operation[compare[name[TabPost2Tag].post_id equal[==] name[idd]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[TabTag].kind not_equal[!=] constant[z]]]]].order_by, parameter[name[TabPost2Tag].order]]]
return[call[call[call[call[name[TabPost2Tag].select, parameter[name[TabPost2Tag], call[name[TabTag].slug.alias, parameter[constant[tag_slug]]], call[name[TabTag].name.alias, parameter[constant[tag_name]]]]].join, parameter[name[TabTag]]].where, parameter[binary_operation[compare[name[TabTag].kind equal[==] name[kind]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[TabPost2Tag].post_id equal[==] name[idd]]]]].order_by, parameter[name[TabPost2Tag].order]]] | keyword[def] identifier[query_by_entity_uid] ( identifier[idd] , identifier[kind] = literal[string] ):
literal[string]
keyword[if] identifier[kind] == literal[string] :
keyword[return] identifier[TabPost2Tag] . identifier[select] (
identifier[TabPost2Tag] ,
identifier[TabTag] . identifier[slug] . identifier[alias] ( literal[string] ),
identifier[TabTag] . identifier[name] . identifier[alias] ( literal[string] )
). identifier[join] (
identifier[TabTag] , identifier[on] =( identifier[TabPost2Tag] . identifier[tag_id] == identifier[TabTag] . identifier[uid] )
). identifier[where] (
( identifier[TabPost2Tag] . identifier[post_id] == identifier[idd] )&
( identifier[TabTag] . identifier[kind] != literal[string] )
). identifier[order_by] (
identifier[TabPost2Tag] . identifier[order]
)
keyword[return] identifier[TabPost2Tag] . identifier[select] (
identifier[TabPost2Tag] ,
identifier[TabTag] . identifier[slug] . identifier[alias] ( literal[string] ),
identifier[TabTag] . identifier[name] . identifier[alias] ( literal[string] )
). identifier[join] ( identifier[TabTag] , identifier[on] =( identifier[TabPost2Tag] . identifier[tag_id] == identifier[TabTag] . identifier[uid] )). identifier[where] (
( identifier[TabTag] . identifier[kind] == identifier[kind] )&
( identifier[TabPost2Tag] . identifier[post_id] == identifier[idd] )
). identifier[order_by] (
identifier[TabPost2Tag] . identifier[order]
) | def query_by_entity_uid(idd, kind=''):
"""
    Query post2tag records for a given post.
"""
if kind == '':
return TabPost2Tag.select(TabPost2Tag, TabTag.slug.alias('tag_slug'), TabTag.name.alias('tag_name')).join(TabTag, on=TabPost2Tag.tag_id == TabTag.uid).where((TabPost2Tag.post_id == idd) & (TabTag.kind != 'z')).order_by(TabPost2Tag.order) # depends on [control=['if'], data=[]]
return TabPost2Tag.select(TabPost2Tag, TabTag.slug.alias('tag_slug'), TabTag.name.alias('tag_name')).join(TabTag, on=TabPost2Tag.tag_id == TabTag.uid).where((TabTag.kind == kind) & (TabPost2Tag.post_id == idd)).order_by(TabPost2Tag.order) |
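A minimal, self-contained sketch of the same peewee join/alias pattern, using hypothetical Tag and Post2Tag models backed by an in-memory SQLite database (the real TabTag/TabPost2Tag model definitions are not part of this record):

from peewee import SqliteDatabase, Model, CharField, IntegerField

db = SqliteDatabase(':memory:')

class Tag(Model):
    uid = CharField(primary_key=True)
    slug = CharField()
    name = CharField()
    kind = CharField()

    class Meta:
        database = db

class Post2Tag(Model):
    post_id = CharField()
    tag_id = CharField()
    order = IntegerField(default=0)

    class Meta:
        database = db

db.create_tables([Tag, Post2Tag])
Tag.create(uid='t1', slug='python', name='Python', kind='f')
Post2Tag.create(post_id='p1', tag_id='t1', order=1)

# Same shape as query_by_entity_uid: join on the tag uid, expose tag columns
# under aliases, and order by the link table's order field.
query = (Post2Tag
         .select(Post2Tag, Tag.slug.alias('tag_slug'), Tag.name.alias('tag_name'))
         .join(Tag, on=(Post2Tag.tag_id == Tag.uid))
         .where((Post2Tag.post_id == 'p1') & (Tag.kind != 'z'))
         .order_by(Post2Tag.order))
for row in query.dicts():
    print(row['tag_slug'], row['tag_name'])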
def pipeline_counter(self):
"""
        Get the pipeline counter of the current job instance.
        Because a job instance can be instantiated in different ways, and those ways expose different data,
        we have to check where to get the pipeline counter from.
:return: pipeline counter.
"""
if 'pipeline_counter' in self.data and self.data.pipeline_counter:
return self.data.get('pipeline_counter')
elif self.stage.pipeline is not None:
return self.stage.pipeline.data.counter
else:
return self.stage.data.pipeline_counter | def function[pipeline_counter, parameter[self]]:
constant[
        Get the pipeline counter of the current job instance.
        Because a job instance can be instantiated in different ways, and those ways expose different data,
        we have to check where to get the pipeline counter from.
:return: pipeline counter.
]
if <ast.BoolOp object at 0x7da18f812a40> begin[:]
return[call[name[self].data.get, parameter[constant[pipeline_counter]]]] | keyword[def] identifier[pipeline_counter] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[self] . identifier[data] keyword[and] identifier[self] . identifier[data] . identifier[pipeline_counter] :
keyword[return] identifier[self] . identifier[data] . identifier[get] ( literal[string] )
keyword[elif] identifier[self] . identifier[stage] . identifier[pipeline] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[stage] . identifier[pipeline] . identifier[data] . identifier[counter]
keyword[else] :
keyword[return] identifier[self] . identifier[stage] . identifier[data] . identifier[pipeline_counter] | def pipeline_counter(self):
"""
    Get the pipeline counter of the current job instance.
    Because a job instance can be instantiated in different ways, and those ways expose different data,
    we have to check where to get the pipeline counter from.
:return: pipeline counter.
"""
if 'pipeline_counter' in self.data and self.data.pipeline_counter:
return self.data.get('pipeline_counter') # depends on [control=['if'], data=[]]
elif self.stage.pipeline is not None:
return self.stage.pipeline.data.counter # depends on [control=['if'], data=[]]
else:
return self.stage.data.pipeline_counter |
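The property's three-way fallback, restated as a standalone sketch with plain dicts and a SimpleNamespace stand-in (hypothetical; the real yagocd objects are attribute-style wrappers around the API payload):

from types import SimpleNamespace

def resolve_pipeline_counter(data, stage):
    # 1) counter stored directly on the job's own data
    if 'pipeline_counter' in data and data['pipeline_counter']:
        return data['pipeline_counter']
    # 2) counter taken from the parent pipeline, when the stage knows it
    if stage.pipeline is not None:
        return stage.pipeline.data['counter']
    # 3) counter recorded on the stage's data as a last resort
    return stage.data['pipeline_counter']

stage = SimpleNamespace(pipeline=None, data={'pipeline_counter': 42})
print(resolve_pipeline_counter({}, stage))  # 42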
def get_activity(self, id_num):
"""Return the activity with the given id.
Note that this contains more detailed information than returned
by `get_activities`.
"""
url = self._build_url('my', 'activities', id_num)
return self._json(url) | def function[get_activity, parameter[self, id_num]]:
constant[Return the activity with the given id.
Note that this contains more detailed information than returned
by `get_activities`.
]
variable[url] assign[=] call[name[self]._build_url, parameter[constant[my], constant[activities], name[id_num]]]
return[call[name[self]._json, parameter[name[url]]]] | keyword[def] identifier[get_activity] ( identifier[self] , identifier[id_num] ):
literal[string]
identifier[url] = identifier[self] . identifier[_build_url] ( literal[string] , literal[string] , identifier[id_num] )
keyword[return] identifier[self] . identifier[_json] ( identifier[url] ) | def get_activity(self, id_num):
"""Return the activity with the given id.
Note that this contains more detailed information than returned
by `get_activities`.
"""
url = self._build_url('my', 'activities', id_num)
return self._json(url) |
def forward(self, Q_, p_, G_, h_, A_, b_):
"""Solve a batch of QPs.
This function solves a batch of QPs, each optimizing over
`nz` variables and having `nineq` inequality constraints
and `neq` equality constraints.
The optimization problem for each instance in the batch
(dropping indexing from the notation) is of the form
\hat z = argmin_z 1/2 z^T Q z + p^T z
subject to Gz <= h
Az = b
where Q \in S^{nz,nz},
S^{nz,nz} is the set of all positive semi-definite matrices,
p \in R^{nz}
G \in R^{nineq,nz}
h \in R^{nineq}
A \in R^{neq,nz}
b \in R^{neq}
These parameters should all be passed to this function as
Variable- or Parameter-wrapped Tensors.
(See torch.autograd.Variable and torch.nn.parameter.Parameter)
If you want to solve a batch of QPs where `nz`, `nineq` and `neq`
are the same, but some of the contents differ across the
minibatch, you can pass in tensors in the standard way
where the first dimension indicates the batch example.
This can be done with some or all of the coefficients.
You do not need to add an extra dimension to coefficients
that will not change across all of the minibatch examples.
This function is able to infer such cases.
If you don't want to use any equality or inequality constraints,
you can set the appropriate values to:
e = Variable(torch.Tensor())
Parameters:
Q: A (nBatch, nz, nz) or (nz, nz) Tensor.
p: A (nBatch, nz) or (nz) Tensor.
G: A (nBatch, nineq, nz) or (nineq, nz) Tensor.
h: A (nBatch, nineq) or (nineq) Tensor.
A: A (nBatch, neq, nz) or (neq, nz) Tensor.
b: A (nBatch, neq) or (neq) Tensor.
Returns: \hat z: a (nBatch, nz) Tensor.
"""
nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)
Q, _ = expandParam(Q_, nBatch, 3)
p, _ = expandParam(p_, nBatch, 2)
G, _ = expandParam(G_, nBatch, 3)
h, _ = expandParam(h_, nBatch, 2)
A, _ = expandParam(A_, nBatch, 3)
b, _ = expandParam(b_, nBatch, 2)
if self.check_Q_spd:
for i in range(nBatch):
e, _ = torch.eig(Q[i])
if not torch.all(e[:,0] > 0):
raise RuntimeError('Q is not SPD.')
_, nineq, nz = G.size()
neq = A.size(1) if A.nelement() > 0 else 0
assert(neq > 0 or nineq > 0)
self.neq, self.nineq, self.nz = neq, nineq, nz
if self.solver == QPSolvers.PDIPM_BATCHED:
self.Q_LU, self.S_LU, self.R = pdipm_b.pre_factor_kkt(Q, G, A)
zhats, self.nus, self.lams, self.slacks = pdipm_b.forward(
Q, p, G, h, A, b, self.Q_LU, self.S_LU, self.R,
self.eps, self.verbose, self.notImprovedLim, self.maxIter)
elif self.solver == QPSolvers.CVXPY:
vals = torch.Tensor(nBatch).type_as(Q)
zhats = torch.Tensor(nBatch, self.nz).type_as(Q)
lams = torch.Tensor(nBatch, self.nineq).type_as(Q)
nus = torch.Tensor(nBatch, self.neq).type_as(Q) \
if self.neq > 0 else torch.Tensor()
slacks = torch.Tensor(nBatch, self.nineq).type_as(Q)
for i in range(nBatch):
Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)
vals[i], zhati, nui, lami, si = solvers.cvxpy.forward_single_np(
*[x.cpu().numpy() if x is not None else None
for x in (Q[i], p[i], G[i], h[i], Ai, bi)])
# if zhati[0] is None:
# import IPython, sys; IPython.embed(); sys.exit(-1)
zhats[i] = torch.Tensor(zhati)
lams[i] = torch.Tensor(lami)
slacks[i] = torch.Tensor(si)
if neq > 0:
nus[i] = torch.Tensor(nui)
self.vals = vals
self.lams = lams
self.nus = nus
self.slacks = slacks
else:
assert False
self.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_)
return zhats | def function[forward, parameter[self, Q_, p_, G_, h_, A_, b_]]:
constant[Solve a batch of QPs.
This function solves a batch of QPs, each optimizing over
`nz` variables and having `nineq` inequality constraints
and `neq` equality constraints.
The optimization problem for each instance in the batch
(dropping indexing from the notation) is of the form
\hat z = argmin_z 1/2 z^T Q z + p^T z
subject to Gz <= h
Az = b
where Q \in S^{nz,nz},
S^{nz,nz} is the set of all positive semi-definite matrices,
p \in R^{nz}
G \in R^{nineq,nz}
h \in R^{nineq}
A \in R^{neq,nz}
b \in R^{neq}
These parameters should all be passed to this function as
Variable- or Parameter-wrapped Tensors.
(See torch.autograd.Variable and torch.nn.parameter.Parameter)
If you want to solve a batch of QPs where `nz`, `nineq` and `neq`
are the same, but some of the contents differ across the
minibatch, you can pass in tensors in the standard way
where the first dimension indicates the batch example.
This can be done with some or all of the coefficients.
You do not need to add an extra dimension to coefficients
that will not change across all of the minibatch examples.
This function is able to infer such cases.
If you don't want to use any equality or inequality constraints,
you can set the appropriate values to:
e = Variable(torch.Tensor())
Parameters:
Q: A (nBatch, nz, nz) or (nz, nz) Tensor.
p: A (nBatch, nz) or (nz) Tensor.
G: A (nBatch, nineq, nz) or (nineq, nz) Tensor.
h: A (nBatch, nineq) or (nineq) Tensor.
A: A (nBatch, neq, nz) or (neq, nz) Tensor.
b: A (nBatch, neq) or (neq) Tensor.
Returns: \hat z: a (nBatch, nz) Tensor.
]
variable[nBatch] assign[=] call[name[extract_nBatch], parameter[name[Q_], name[p_], name[G_], name[h_], name[A_], name[b_]]]
<ast.Tuple object at 0x7da2041d8760> assign[=] call[name[expandParam], parameter[name[Q_], name[nBatch], constant[3]]]
<ast.Tuple object at 0x7da2041daec0> assign[=] call[name[expandParam], parameter[name[p_], name[nBatch], constant[2]]]
<ast.Tuple object at 0x7da2041db220> assign[=] call[name[expandParam], parameter[name[G_], name[nBatch], constant[3]]]
<ast.Tuple object at 0x7da2041da890> assign[=] call[name[expandParam], parameter[name[h_], name[nBatch], constant[2]]]
<ast.Tuple object at 0x7da2041d86a0> assign[=] call[name[expandParam], parameter[name[A_], name[nBatch], constant[3]]]
<ast.Tuple object at 0x7da2041d9510> assign[=] call[name[expandParam], parameter[name[b_], name[nBatch], constant[2]]]
if name[self].check_Q_spd begin[:]
for taget[name[i]] in starred[call[name[range], parameter[name[nBatch]]]] begin[:]
<ast.Tuple object at 0x7da2041dbbb0> assign[=] call[name[torch].eig, parameter[call[name[Q]][name[i]]]]
if <ast.UnaryOp object at 0x7da2041d8190> begin[:]
<ast.Raise object at 0x7da2041db880>
<ast.Tuple object at 0x7da2041d86d0> assign[=] call[name[G].size, parameter[]]
variable[neq] assign[=] <ast.IfExp object at 0x7da2041d8be0>
assert[<ast.BoolOp object at 0x7da2041d9540>]
<ast.Tuple object at 0x7da2041da3b0> assign[=] tuple[[<ast.Name object at 0x7da2041d8a90>, <ast.Name object at 0x7da2041d9f60>, <ast.Name object at 0x7da2041d97e0>]]
if compare[name[self].solver equal[==] name[QPSolvers].PDIPM_BATCHED] begin[:]
<ast.Tuple object at 0x7da2041d82e0> assign[=] call[name[pdipm_b].pre_factor_kkt, parameter[name[Q], name[G], name[A]]]
<ast.Tuple object at 0x7da2041dbe20> assign[=] call[name[pdipm_b].forward, parameter[name[Q], name[p], name[G], name[h], name[A], name[b], name[self].Q_LU, name[self].S_LU, name[self].R, name[self].eps, name[self].verbose, name[self].notImprovedLim, name[self].maxIter]]
call[name[self].save_for_backward, parameter[name[zhats], name[Q_], name[p_], name[G_], name[h_], name[A_], name[b_]]]
return[name[zhats]] | keyword[def] identifier[forward] ( identifier[self] , identifier[Q_] , identifier[p_] , identifier[G_] , identifier[h_] , identifier[A_] , identifier[b_] ):
literal[string]
identifier[nBatch] = identifier[extract_nBatch] ( identifier[Q_] , identifier[p_] , identifier[G_] , identifier[h_] , identifier[A_] , identifier[b_] )
identifier[Q] , identifier[_] = identifier[expandParam] ( identifier[Q_] , identifier[nBatch] , literal[int] )
identifier[p] , identifier[_] = identifier[expandParam] ( identifier[p_] , identifier[nBatch] , literal[int] )
identifier[G] , identifier[_] = identifier[expandParam] ( identifier[G_] , identifier[nBatch] , literal[int] )
identifier[h] , identifier[_] = identifier[expandParam] ( identifier[h_] , identifier[nBatch] , literal[int] )
identifier[A] , identifier[_] = identifier[expandParam] ( identifier[A_] , identifier[nBatch] , literal[int] )
identifier[b] , identifier[_] = identifier[expandParam] ( identifier[b_] , identifier[nBatch] , literal[int] )
keyword[if] identifier[self] . identifier[check_Q_spd] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nBatch] ):
identifier[e] , identifier[_] = identifier[torch] . identifier[eig] ( identifier[Q] [ identifier[i] ])
keyword[if] keyword[not] identifier[torch] . identifier[all] ( identifier[e] [:, literal[int] ]> literal[int] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[_] , identifier[nineq] , identifier[nz] = identifier[G] . identifier[size] ()
identifier[neq] = identifier[A] . identifier[size] ( literal[int] ) keyword[if] identifier[A] . identifier[nelement] ()> literal[int] keyword[else] literal[int]
keyword[assert] ( identifier[neq] > literal[int] keyword[or] identifier[nineq] > literal[int] )
identifier[self] . identifier[neq] , identifier[self] . identifier[nineq] , identifier[self] . identifier[nz] = identifier[neq] , identifier[nineq] , identifier[nz]
keyword[if] identifier[self] . identifier[solver] == identifier[QPSolvers] . identifier[PDIPM_BATCHED] :
identifier[self] . identifier[Q_LU] , identifier[self] . identifier[S_LU] , identifier[self] . identifier[R] = identifier[pdipm_b] . identifier[pre_factor_kkt] ( identifier[Q] , identifier[G] , identifier[A] )
identifier[zhats] , identifier[self] . identifier[nus] , identifier[self] . identifier[lams] , identifier[self] . identifier[slacks] = identifier[pdipm_b] . identifier[forward] (
identifier[Q] , identifier[p] , identifier[G] , identifier[h] , identifier[A] , identifier[b] , identifier[self] . identifier[Q_LU] , identifier[self] . identifier[S_LU] , identifier[self] . identifier[R] ,
identifier[self] . identifier[eps] , identifier[self] . identifier[verbose] , identifier[self] . identifier[notImprovedLim] , identifier[self] . identifier[maxIter] )
keyword[elif] identifier[self] . identifier[solver] == identifier[QPSolvers] . identifier[CVXPY] :
identifier[vals] = identifier[torch] . identifier[Tensor] ( identifier[nBatch] ). identifier[type_as] ( identifier[Q] )
identifier[zhats] = identifier[torch] . identifier[Tensor] ( identifier[nBatch] , identifier[self] . identifier[nz] ). identifier[type_as] ( identifier[Q] )
identifier[lams] = identifier[torch] . identifier[Tensor] ( identifier[nBatch] , identifier[self] . identifier[nineq] ). identifier[type_as] ( identifier[Q] )
identifier[nus] = identifier[torch] . identifier[Tensor] ( identifier[nBatch] , identifier[self] . identifier[neq] ). identifier[type_as] ( identifier[Q] ) keyword[if] identifier[self] . identifier[neq] > literal[int] keyword[else] identifier[torch] . identifier[Tensor] ()
identifier[slacks] = identifier[torch] . identifier[Tensor] ( identifier[nBatch] , identifier[self] . identifier[nineq] ). identifier[type_as] ( identifier[Q] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nBatch] ):
identifier[Ai] , identifier[bi] =( identifier[A] [ identifier[i] ], identifier[b] [ identifier[i] ]) keyword[if] identifier[neq] > literal[int] keyword[else] ( keyword[None] , keyword[None] )
identifier[vals] [ identifier[i] ], identifier[zhati] , identifier[nui] , identifier[lami] , identifier[si] = identifier[solvers] . identifier[cvxpy] . identifier[forward_single_np] (
*[ identifier[x] . identifier[cpu] (). identifier[numpy] () keyword[if] identifier[x] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
keyword[for] identifier[x] keyword[in] ( identifier[Q] [ identifier[i] ], identifier[p] [ identifier[i] ], identifier[G] [ identifier[i] ], identifier[h] [ identifier[i] ], identifier[Ai] , identifier[bi] )])
identifier[zhats] [ identifier[i] ]= identifier[torch] . identifier[Tensor] ( identifier[zhati] )
identifier[lams] [ identifier[i] ]= identifier[torch] . identifier[Tensor] ( identifier[lami] )
identifier[slacks] [ identifier[i] ]= identifier[torch] . identifier[Tensor] ( identifier[si] )
keyword[if] identifier[neq] > literal[int] :
identifier[nus] [ identifier[i] ]= identifier[torch] . identifier[Tensor] ( identifier[nui] )
identifier[self] . identifier[vals] = identifier[vals]
identifier[self] . identifier[lams] = identifier[lams]
identifier[self] . identifier[nus] = identifier[nus]
identifier[self] . identifier[slacks] = identifier[slacks]
keyword[else] :
keyword[assert] keyword[False]
identifier[self] . identifier[save_for_backward] ( identifier[zhats] , identifier[Q_] , identifier[p_] , identifier[G_] , identifier[h_] , identifier[A_] , identifier[b_] )
keyword[return] identifier[zhats] | def forward(self, Q_, p_, G_, h_, A_, b_):
"""Solve a batch of QPs.
This function solves a batch of QPs, each optimizing over
`nz` variables and having `nineq` inequality constraints
and `neq` equality constraints.
The optimization problem for each instance in the batch
(dropping indexing from the notation) is of the form
\\hat z = argmin_z 1/2 z^T Q z + p^T z
subject to Gz <= h
Az = b
where Q \\in S^{nz,nz},
S^{nz,nz} is the set of all positive semi-definite matrices,
p \\in R^{nz}
G \\in R^{nineq,nz}
h \\in R^{nineq}
A \\in R^{neq,nz}
b \\in R^{neq}
These parameters should all be passed to this function as
Variable- or Parameter-wrapped Tensors.
(See torch.autograd.Variable and torch.nn.parameter.Parameter)
If you want to solve a batch of QPs where `nz`, `nineq` and `neq`
are the same, but some of the contents differ across the
minibatch, you can pass in tensors in the standard way
where the first dimension indicates the batch example.
This can be done with some or all of the coefficients.
You do not need to add an extra dimension to coefficients
that will not change across all of the minibatch examples.
This function is able to infer such cases.
If you don't want to use any equality or inequality constraints,
you can set the appropriate values to:
e = Variable(torch.Tensor())
Parameters:
Q: A (nBatch, nz, nz) or (nz, nz) Tensor.
p: A (nBatch, nz) or (nz) Tensor.
G: A (nBatch, nineq, nz) or (nineq, nz) Tensor.
h: A (nBatch, nineq) or (nineq) Tensor.
A: A (nBatch, neq, nz) or (neq, nz) Tensor.
b: A (nBatch, neq) or (neq) Tensor.
Returns: \\hat z: a (nBatch, nz) Tensor.
"""
nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)
(Q, _) = expandParam(Q_, nBatch, 3)
(p, _) = expandParam(p_, nBatch, 2)
(G, _) = expandParam(G_, nBatch, 3)
(h, _) = expandParam(h_, nBatch, 2)
(A, _) = expandParam(A_, nBatch, 3)
(b, _) = expandParam(b_, nBatch, 2)
if self.check_Q_spd:
for i in range(nBatch):
(e, _) = torch.eig(Q[i])
if not torch.all(e[:, 0] > 0):
raise RuntimeError('Q is not SPD.') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
(_, nineq, nz) = G.size()
neq = A.size(1) if A.nelement() > 0 else 0
assert neq > 0 or nineq > 0
(self.neq, self.nineq, self.nz) = (neq, nineq, nz)
if self.solver == QPSolvers.PDIPM_BATCHED:
(self.Q_LU, self.S_LU, self.R) = pdipm_b.pre_factor_kkt(Q, G, A)
(zhats, self.nus, self.lams, self.slacks) = pdipm_b.forward(Q, p, G, h, A, b, self.Q_LU, self.S_LU, self.R, self.eps, self.verbose, self.notImprovedLim, self.maxIter) # depends on [control=['if'], data=[]]
elif self.solver == QPSolvers.CVXPY:
vals = torch.Tensor(nBatch).type_as(Q)
zhats = torch.Tensor(nBatch, self.nz).type_as(Q)
lams = torch.Tensor(nBatch, self.nineq).type_as(Q)
nus = torch.Tensor(nBatch, self.neq).type_as(Q) if self.neq > 0 else torch.Tensor()
slacks = torch.Tensor(nBatch, self.nineq).type_as(Q)
for i in range(nBatch):
(Ai, bi) = (A[i], b[i]) if neq > 0 else (None, None)
(vals[i], zhati, nui, lami, si) = solvers.cvxpy.forward_single_np(*[x.cpu().numpy() if x is not None else None for x in (Q[i], p[i], G[i], h[i], Ai, bi)])
# if zhati[0] is None:
# import IPython, sys; IPython.embed(); sys.exit(-1)
zhats[i] = torch.Tensor(zhati)
lams[i] = torch.Tensor(lami)
slacks[i] = torch.Tensor(si)
if neq > 0:
nus[i] = torch.Tensor(nui) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
self.vals = vals
self.lams = lams
self.nus = nus
self.slacks = slacks # depends on [control=['if'], data=[]]
else:
assert False
self.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_)
return zhats |
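A hedged usage sketch of this forward pass through qpth's public QPFunction wrapper (assumes the qpth package and a compatible PyTorch are installed; the empty A/b tensors follow the docstring's advice for omitting equality constraints):

import torch
from qpth.qp import QPFunction

nBatch, nz, nineq = 4, 3, 6
L = torch.randn(nz, nz)
Q = L @ L.t() + 1e-3 * torch.eye(nz)    # SPD, as the check_Q_spd path requires
p = torch.randn(nBatch, nz)             # batched coefficient; Q, G, h are shared
G = torch.randn(nineq, nz)
z0 = torch.randn(nz)
h = G @ z0 + torch.rand(nineq)          # guarantees a nonempty feasible set
A, b = torch.Tensor(), torch.Tensor()   # no equality constraints

zhat = QPFunction(verbose=False)(Q, p, G, h, A, b)
print(zhat.shape)                       # torch.Size([4, 3])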
def __load(self, path):
"""Method to load the serialized dataset from disk."""
try:
path = os.path.abspath(path)
with open(path, 'rb') as df:
# loaded_dataset = pickle.load(df)
self.__data, self.__classes, self.__labels, \
self.__dtype, self.__description, \
self.__num_features, self.__feature_names = pickle.load(df)
# ensure the loaded dataset is valid
self.__validate(self.__data, self.__classes, self.__labels)
except IOError as ioe:
            raise IOError('Unable to read the dataset from file: {}'.format(ioe))
except:
raise | def function[__load, parameter[self, path]]:
constant[Method to load the serialized dataset from disk.]
<ast.Try object at 0x7da18f09d0f0> | keyword[def] identifier[__load] ( identifier[self] , identifier[path] ):
literal[string]
keyword[try] :
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[path] )
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[df] :
identifier[self] . identifier[__data] , identifier[self] . identifier[__classes] , identifier[self] . identifier[__labels] , identifier[self] . identifier[__dtype] , identifier[self] . identifier[__description] , identifier[self] . identifier[__num_features] , identifier[self] . identifier[__feature_names] = identifier[pickle] . identifier[load] ( identifier[df] )
identifier[self] . identifier[__validate] ( identifier[self] . identifier[__data] , identifier[self] . identifier[__classes] , identifier[self] . identifier[__labels] )
keyword[except] identifier[IOError] keyword[as] identifier[ioe] :
keyword[raise] identifier[IOError] ( literal[string] , identifier[format] ( identifier[ioe] ))
keyword[except] :
keyword[raise] | def __load(self, path):
"""Method to load the serialized dataset from disk."""
try:
path = os.path.abspath(path)
with open(path, 'rb') as df:
# loaded_dataset = pickle.load(df)
(self.__data, self.__classes, self.__labels, self.__dtype, self.__description, self.__num_features, self.__feature_names) = pickle.load(df) # depends on [control=['with'], data=['df']]
# ensure the loaded dataset is valid
self.__validate(self.__data, self.__classes, self.__labels) # depends on [control=['try'], data=[]]
except IOError as ioe:
        raise IOError('Unable to read the dataset from file: {}'.format(ioe)) # depends on [control=['except'], data=['ioe']]
except:
raise # depends on [control=['except'], data=[]] |
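A minimal sketch of the save/load contract this method relies on: the seven fields are pickled as a single tuple and must be unpacked in exactly the same order on load (the field values below are hypothetical placeholders):

import os
import pickle
import tempfile

fields = ([[0.1, 0.2]], ['classA'], ['label0'], 'float64',
          'demo dataset', 2, ['feat0', 'feat1'])
path = os.path.join(tempfile.gettempdir(), 'dataset.pkl')
with open(path, 'wb') as df:
    pickle.dump(fields, df)
with open(path, 'rb') as df:
    data, classes, labels, dtype, description, num_features, feature_names = pickle.load(df)
print(num_features, feature_names)  # 2 ['feat0', 'feat1']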
def filter(self, *args, **kwargs):
"""
Adds WHERE arguments to the queryset, returning a new queryset
#TODO: show examples
:rtype: AbstractQuerySet
"""
#add arguments to the where clause filters
if len([x for x in kwargs.values() if x is None]):
raise CQLEngineException("None values on filter are not allowed")
clone = copy.deepcopy(self)
for operator in args:
if not isinstance(operator, WhereClause):
raise QueryException('{} is not a valid query operator'.format(operator))
clone._where.append(operator)
for arg, val in kwargs.items():
col_name, col_op = self._parse_filter_arg(arg)
quote_field = True
#resolve column and operator
try:
column = self.model._get_column(col_name)
except KeyError:
if col_name == 'pk__token':
if not isinstance(val, Token):
raise QueryException("Virtual column 'pk__token' may only be compared to Token() values")
column = columns._PartitionKeysToken(self.model)
quote_field = False
else:
raise QueryException("Can't resolve column name: '{}'".format(col_name))
if isinstance(val, Token):
if col_name != 'pk__token':
raise QueryException("Token() values may only be compared to the 'pk__token' virtual column")
partition_columns = column.partition_columns
if len(partition_columns) != len(val.value):
raise QueryException(
'Token() received {} arguments but model has {} partition keys'.format(
len(val.value), len(partition_columns)))
val.set_columns(partition_columns)
#get query operator, or use equals if not supplied
operator_class = BaseWhereOperator.get_operator(col_op or 'EQ')
operator = operator_class()
if isinstance(operator, InOperator):
if not isinstance(val, (list, tuple)):
raise QueryException('IN queries must use a list/tuple value')
query_val = [column.to_database(v) for v in val]
elif isinstance(val, BaseQueryFunction):
query_val = val
else:
query_val = column.to_database(val)
clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field))
return clone | def function[filter, parameter[self]]:
constant[
Adds WHERE arguments to the queryset, returning a new queryset
#TODO: show examples
:rtype: AbstractQuerySet
]
if call[name[len], parameter[<ast.ListComp object at 0x7da207f01030>]] begin[:]
<ast.Raise object at 0x7da207f00730>
variable[clone] assign[=] call[name[copy].deepcopy, parameter[name[self]]]
for taget[name[operator]] in starred[name[args]] begin[:]
if <ast.UnaryOp object at 0x7da18f720a30> begin[:]
<ast.Raise object at 0x7da18f720820>
call[name[clone]._where.append, parameter[name[operator]]]
for taget[tuple[[<ast.Name object at 0x7da18f722020>, <ast.Name object at 0x7da18f722b00>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18f7222f0> assign[=] call[name[self]._parse_filter_arg, parameter[name[arg]]]
variable[quote_field] assign[=] constant[True]
<ast.Try object at 0x7da20c9908e0>
if call[name[isinstance], parameter[name[val], name[Token]]] begin[:]
if compare[name[col_name] not_equal[!=] constant[pk__token]] begin[:]
<ast.Raise object at 0x7da20c991180>
variable[partition_columns] assign[=] name[column].partition_columns
if compare[call[name[len], parameter[name[partition_columns]]] not_equal[!=] call[name[len], parameter[name[val].value]]] begin[:]
<ast.Raise object at 0x7da20c6c4580>
call[name[val].set_columns, parameter[name[partition_columns]]]
variable[operator_class] assign[=] call[name[BaseWhereOperator].get_operator, parameter[<ast.BoolOp object at 0x7da20c6c49d0>]]
variable[operator] assign[=] call[name[operator_class], parameter[]]
if call[name[isinstance], parameter[name[operator], name[InOperator]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6c4af0> begin[:]
<ast.Raise object at 0x7da20c6c6380>
variable[query_val] assign[=] <ast.ListComp object at 0x7da20c6c4100>
call[name[clone]._where.append, parameter[call[name[WhereClause], parameter[name[column].db_field_name, name[operator], name[query_val]]]]]
return[name[clone]] | keyword[def] identifier[filter] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[len] ([ identifier[x] keyword[for] identifier[x] keyword[in] identifier[kwargs] . identifier[values] () keyword[if] identifier[x] keyword[is] keyword[None] ]):
keyword[raise] identifier[CQLEngineException] ( literal[string] )
identifier[clone] = identifier[copy] . identifier[deepcopy] ( identifier[self] )
keyword[for] identifier[operator] keyword[in] identifier[args] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[operator] , identifier[WhereClause] ):
keyword[raise] identifier[QueryException] ( literal[string] . identifier[format] ( identifier[operator] ))
identifier[clone] . identifier[_where] . identifier[append] ( identifier[operator] )
keyword[for] identifier[arg] , identifier[val] keyword[in] identifier[kwargs] . identifier[items] ():
identifier[col_name] , identifier[col_op] = identifier[self] . identifier[_parse_filter_arg] ( identifier[arg] )
identifier[quote_field] = keyword[True]
keyword[try] :
identifier[column] = identifier[self] . identifier[model] . identifier[_get_column] ( identifier[col_name] )
keyword[except] identifier[KeyError] :
keyword[if] identifier[col_name] == literal[string] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[val] , identifier[Token] ):
keyword[raise] identifier[QueryException] ( literal[string] )
identifier[column] = identifier[columns] . identifier[_PartitionKeysToken] ( identifier[self] . identifier[model] )
identifier[quote_field] = keyword[False]
keyword[else] :
keyword[raise] identifier[QueryException] ( literal[string] . identifier[format] ( identifier[col_name] ))
keyword[if] identifier[isinstance] ( identifier[val] , identifier[Token] ):
keyword[if] identifier[col_name] != literal[string] :
keyword[raise] identifier[QueryException] ( literal[string] )
identifier[partition_columns] = identifier[column] . identifier[partition_columns]
keyword[if] identifier[len] ( identifier[partition_columns] )!= identifier[len] ( identifier[val] . identifier[value] ):
keyword[raise] identifier[QueryException] (
literal[string] . identifier[format] (
identifier[len] ( identifier[val] . identifier[value] ), identifier[len] ( identifier[partition_columns] )))
identifier[val] . identifier[set_columns] ( identifier[partition_columns] )
identifier[operator_class] = identifier[BaseWhereOperator] . identifier[get_operator] ( identifier[col_op] keyword[or] literal[string] )
identifier[operator] = identifier[operator_class] ()
keyword[if] identifier[isinstance] ( identifier[operator] , identifier[InOperator] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[val] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[QueryException] ( literal[string] )
identifier[query_val] =[ identifier[column] . identifier[to_database] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[val] ]
keyword[elif] identifier[isinstance] ( identifier[val] , identifier[BaseQueryFunction] ):
identifier[query_val] = identifier[val]
keyword[else] :
identifier[query_val] = identifier[column] . identifier[to_database] ( identifier[val] )
identifier[clone] . identifier[_where] . identifier[append] ( identifier[WhereClause] ( identifier[column] . identifier[db_field_name] , identifier[operator] , identifier[query_val] , identifier[quote_field] = identifier[quote_field] ))
keyword[return] identifier[clone] | def filter(self, *args, **kwargs):
"""
Adds WHERE arguments to the queryset, returning a new queryset
#TODO: show examples
:rtype: AbstractQuerySet
"""
#add arguments to the where clause filters
if len([x for x in kwargs.values() if x is None]):
raise CQLEngineException('None values on filter are not allowed') # depends on [control=['if'], data=[]]
clone = copy.deepcopy(self)
for operator in args:
if not isinstance(operator, WhereClause):
raise QueryException('{} is not a valid query operator'.format(operator)) # depends on [control=['if'], data=[]]
clone._where.append(operator) # depends on [control=['for'], data=['operator']]
for (arg, val) in kwargs.items():
(col_name, col_op) = self._parse_filter_arg(arg)
quote_field = True
#resolve column and operator
try:
column = self.model._get_column(col_name) # depends on [control=['try'], data=[]]
except KeyError:
if col_name == 'pk__token':
if not isinstance(val, Token):
raise QueryException("Virtual column 'pk__token' may only be compared to Token() values") # depends on [control=['if'], data=[]]
column = columns._PartitionKeysToken(self.model)
quote_field = False # depends on [control=['if'], data=[]]
else:
raise QueryException("Can't resolve column name: '{}'".format(col_name)) # depends on [control=['except'], data=[]]
if isinstance(val, Token):
if col_name != 'pk__token':
raise QueryException("Token() values may only be compared to the 'pk__token' virtual column") # depends on [control=['if'], data=[]]
partition_columns = column.partition_columns
if len(partition_columns) != len(val.value):
raise QueryException('Token() received {} arguments but model has {} partition keys'.format(len(val.value), len(partition_columns))) # depends on [control=['if'], data=[]]
val.set_columns(partition_columns) # depends on [control=['if'], data=[]]
#get query operator, or use equals if not supplied
operator_class = BaseWhereOperator.get_operator(col_op or 'EQ')
operator = operator_class()
if isinstance(operator, InOperator):
if not isinstance(val, (list, tuple)):
raise QueryException('IN queries must use a list/tuple value') # depends on [control=['if'], data=[]]
query_val = [column.to_database(v) for v in val] # depends on [control=['if'], data=[]]
elif isinstance(val, BaseQueryFunction):
query_val = val # depends on [control=['if'], data=[]]
else:
query_val = column.to_database(val)
clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field)) # depends on [control=['for'], data=[]]
return clone |
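The docstring's TODO asks for examples; a hedged sketch of how the branches above are typically exercised, against a hypothetical cqlengine model (filter() only builds the queryset, so no live cluster is contacted here; the cassandra-driver cqlengine API is assumed):

from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class ExampleModel(Model):
    __keyspace__ = 'demo'
    pk = columns.Text(primary_key=True)
    ck = columns.Integer(primary_key=True)

q = ExampleModel.objects.filter(pk='abc')   # bare column name parses to the EQ operator
q = q.filter(ck__gte=5)                     # a '__gte' suffix selects the GTE operator
q = q.filter(ck__in=(1, 2, 3))              # the IN operator requires a list/tuple value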
def get_active_terms_not_agreed_to(user):
"""Checks to see if a specified user has agreed to all the latest terms and conditions"""
if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and not user.is_superuser:
# Django's has_perm() returns True if is_superuser, we don't want that
return []
not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())
if not_agreed_terms is None:
try:
LOGGER.debug("Not Agreed Terms")
not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(
userterms__in=UserTermsAndConditions.objects.filter(user=user)
).order_by('slug')
cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS)
except (TypeError, UserTermsAndConditions.DoesNotExist):
return []
return not_agreed_terms | def function[get_active_terms_not_agreed_to, parameter[user]]:
    constant[Return the active terms and conditions that the specified user has not yet agreed to.]
if compare[name[TERMS_EXCLUDE_USERS_WITH_PERM] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b12f18d0> begin[:]
return[list[[]]]
variable[not_agreed_terms] assign[=] call[name[cache].get, parameter[binary_operation[constant[tandc.not_agreed_terms_] + call[name[user].get_username, parameter[]]]]]
if compare[name[not_agreed_terms] is constant[None]] begin[:]
<ast.Try object at 0x7da1b12f2410>
return[name[not_agreed_terms]] | keyword[def] identifier[get_active_terms_not_agreed_to] ( identifier[user] ):
literal[string]
keyword[if] identifier[TERMS_EXCLUDE_USERS_WITH_PERM] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[user] . identifier[has_perm] ( identifier[TERMS_EXCLUDE_USERS_WITH_PERM] ) keyword[and] keyword[not] identifier[user] . identifier[is_superuser] :
keyword[return] []
identifier[not_agreed_terms] = identifier[cache] . identifier[get] ( literal[string] + identifier[user] . identifier[get_username] ())
keyword[if] identifier[not_agreed_terms] keyword[is] keyword[None] :
keyword[try] :
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[not_agreed_terms] = identifier[TermsAndConditions] . identifier[get_active_terms_list] (). identifier[exclude] (
identifier[userterms__in] = identifier[UserTermsAndConditions] . identifier[objects] . identifier[filter] ( identifier[user] = identifier[user] )
). identifier[order_by] ( literal[string] )
identifier[cache] . identifier[set] ( literal[string] + identifier[user] . identifier[get_username] (), identifier[not_agreed_terms] , identifier[TERMS_CACHE_SECONDS] )
keyword[except] ( identifier[TypeError] , identifier[UserTermsAndConditions] . identifier[DoesNotExist] ):
keyword[return] []
keyword[return] identifier[not_agreed_terms] | def get_active_terms_not_agreed_to(user):
"""Checks to see if a specified user has agreed to all the latest terms and conditions"""
if TERMS_EXCLUDE_USERS_WITH_PERM is not None:
if user.has_perm(TERMS_EXCLUDE_USERS_WITH_PERM) and (not user.is_superuser):
# Django's has_perm() returns True if is_superuser, we don't want that
return [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['TERMS_EXCLUDE_USERS_WITH_PERM']]
not_agreed_terms = cache.get('tandc.not_agreed_terms_' + user.get_username())
if not_agreed_terms is None:
try:
LOGGER.debug('Not Agreed Terms')
not_agreed_terms = TermsAndConditions.get_active_terms_list().exclude(userterms__in=UserTermsAndConditions.objects.filter(user=user)).order_by('slug')
cache.set('tandc.not_agreed_terms_' + user.get_username(), not_agreed_terms, TERMS_CACHE_SECONDS) # depends on [control=['try'], data=[]]
except (TypeError, UserTermsAndConditions.DoesNotExist):
return [] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['not_agreed_terms']]
return not_agreed_terms |
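The function is an instance of the cache-aside pattern; its generic shape, as a hedged sketch (hypothetical key and timeout names, assumes a configured Django cache backend):

from django.core.cache import cache

def cached_or_compute(key, compute, timeout=60):
    value = cache.get(key)      # None signals a miss (or an uncached None)
    if value is None:
        value = compute()
        cache.set(key, value, timeout)
    return value

# e.g. cached_or_compute('tandc.not_agreed_terms_' + user.get_username(),
#                        lambda: build_not_agreed_queryset(user), TERMS_CACHE_SECONDS)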
def css_class_cycler():
'''
Return a dictionary keyed by ``EventType`` abbreviations, whose values are an
iterable or cycle of CSS class names.
'''
FMT = 'evt-{0}-{1}'.format
return defaultdict(default_css_class_cycler, (
(e.abbr, itertools.cycle((FMT(e.abbr, 'even'), FMT(e.abbr, 'odd'))))
for e in EventType.objects.all()
)) | def function[css_class_cycler, parameter[]]:
constant[
Return a dictionary keyed by ``EventType`` abbreviations, whose values are an
iterable or cycle of CSS class names.
]
variable[FMT] assign[=] constant[evt-{0}-{1}].format
return[call[name[defaultdict], parameter[name[default_css_class_cycler], <ast.GeneratorExp object at 0x7da18fe92680>]]] | keyword[def] identifier[css_class_cycler] ():
literal[string]
identifier[FMT] = literal[string] . identifier[format]
keyword[return] identifier[defaultdict] ( identifier[default_css_class_cycler] ,(
( identifier[e] . identifier[abbr] , identifier[itertools] . identifier[cycle] (( identifier[FMT] ( identifier[e] . identifier[abbr] , literal[string] ), identifier[FMT] ( identifier[e] . identifier[abbr] , literal[string] ))))
keyword[for] identifier[e] keyword[in] identifier[EventType] . identifier[objects] . identifier[all] ()
)) | def css_class_cycler():
"""
Return a dictionary keyed by ``EventType`` abbreviations, whose values are an
iterable or cycle of CSS class names.
"""
FMT = 'evt-{0}-{1}'.format
return defaultdict(default_css_class_cycler, ((e.abbr, itertools.cycle((FMT(e.abbr, 'even'), FMT(e.abbr, 'odd')))) for e in EventType.objects.all())) |
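The same pattern, standalone and without the Django model: a defaultdict whose values are infinite even/odd class-name cycles, plus a default cycle for unknown types (the lambda stands in for default_css_class_cycler, which is not shown in this record):

import itertools
from collections import defaultdict

FMT = 'evt-{0}-{1}'.format
abbrs = ['mtg', 'hol']  # hypothetical EventType abbreviations
cycler = defaultdict(
    lambda: itertools.cycle(('evt-even', 'evt-odd')),
    ((a, itertools.cycle((FMT(a, 'even'), FMT(a, 'odd')))) for a in abbrs))
print(next(cycler['mtg']), next(cycler['mtg']), next(cycler['xyz']))
# evt-mtg-even evt-mtg-odd evt-even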
def get_variants_in_region(self, chrom, start, end):
"""Iterate over variants in a region."""
region = self.get_vcf()(
"{}:{}-{}".format(chrom, start, end)
)
for v in region:
for coded_allele, g in self._make_genotypes(v.ALT, v.genotypes):
variant = Variant(
v.ID, v.CHROM, v.POS, [v.REF, coded_allele]
)
yield Genotypes(variant, g, v.REF, coded_allele,
multiallelic=len(v.ALT) > 1) | def function[get_variants_in_region, parameter[self, chrom, start, end]]:
constant[Iterate over variants in a region.]
variable[region] assign[=] call[call[name[self].get_vcf, parameter[]], parameter[call[constant[{}:{}-{}].format, parameter[name[chrom], name[start], name[end]]]]]
for taget[name[v]] in starred[name[region]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b242a6e0>, <ast.Name object at 0x7da1b2429900>]]] in starred[call[name[self]._make_genotypes, parameter[name[v].ALT, name[v].genotypes]]] begin[:]
variable[variant] assign[=] call[name[Variant], parameter[name[v].ID, name[v].CHROM, name[v].POS, list[[<ast.Attribute object at 0x7da1b242bfa0>, <ast.Name object at 0x7da1b2429b40>]]]]
<ast.Yield object at 0x7da1b242a140> | keyword[def] identifier[get_variants_in_region] ( identifier[self] , identifier[chrom] , identifier[start] , identifier[end] ):
literal[string]
identifier[region] = identifier[self] . identifier[get_vcf] ()(
literal[string] . identifier[format] ( identifier[chrom] , identifier[start] , identifier[end] )
)
keyword[for] identifier[v] keyword[in] identifier[region] :
keyword[for] identifier[coded_allele] , identifier[g] keyword[in] identifier[self] . identifier[_make_genotypes] ( identifier[v] . identifier[ALT] , identifier[v] . identifier[genotypes] ):
identifier[variant] = identifier[Variant] (
identifier[v] . identifier[ID] , identifier[v] . identifier[CHROM] , identifier[v] . identifier[POS] ,[ identifier[v] . identifier[REF] , identifier[coded_allele] ]
)
keyword[yield] identifier[Genotypes] ( identifier[variant] , identifier[g] , identifier[v] . identifier[REF] , identifier[coded_allele] ,
identifier[multiallelic] = identifier[len] ( identifier[v] . identifier[ALT] )> literal[int] ) | def get_variants_in_region(self, chrom, start, end):
"""Iterate over variants in a region."""
region = self.get_vcf()('{}:{}-{}'.format(chrom, start, end))
for v in region:
for (coded_allele, g) in self._make_genotypes(v.ALT, v.genotypes):
variant = Variant(v.ID, v.CHROM, v.POS, [v.REF, coded_allele])
yield Genotypes(variant, g, v.REF, coded_allele, multiallelic=len(v.ALT) > 1) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['v']] |
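A hedged usage sketch of the underlying cyvcf2 region query (assumes cyvcf2 is installed and an indexed VCF exists at the hypothetical path; calling a VCF object with a 'chrom:start-end' string is cyvcf2's region-query API):

from cyvcf2 import VCF

vcf = VCF('genotypes.vcf.gz')           # hypothetical, tabix-indexed file
for v in vcf('1:10000-20000'):
    print(v.ID, v.CHROM, v.POS, v.REF, v.ALT, len(v.genotypes))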
def unitResponse(self,band):
"""This is used internally for :ref:`pysynphot-formula-effstim`
calculations."""
#sum = asumr(band,nwave)
total = band.throughput.sum()
return 2.5*math.log10(total) | def function[unitResponse, parameter[self, band]]:
constant[This is used internally for :ref:`pysynphot-formula-effstim`
calculations.]
variable[total] assign[=] call[name[band].throughput.sum, parameter[]]
return[binary_operation[constant[2.5] * call[name[math].log10, parameter[name[total]]]]] | keyword[def] identifier[unitResponse] ( identifier[self] , identifier[band] ):
literal[string]
identifier[total] = identifier[band] . identifier[throughput] . identifier[sum] ()
keyword[return] literal[int] * identifier[math] . identifier[log10] ( identifier[total] ) | def unitResponse(self, band):
"""This is used internally for :ref:`pysynphot-formula-effstim`
calculations."""
#sum = asumr(band,nwave)
total = band.throughput.sum()
return 2.5 * math.log10(total) |
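A worked check of the formula: for a summed throughput of 100, the unit response is 2.5 * log10(100) = 5.0:

import math

total = 100.0
assert abs(2.5 * math.log10(total) - 5.0) < 1e-12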
def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if callable(func):
return self._callable_func(func, axis, *args, **kwargs)
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs)
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs)
        else:
            # Unsupported func type: fall through and implicitly return None.
            pass
constant[Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
]
if call[name[callable], parameter[name[func]]] begin[:]
return[call[name[self]._callable_func, parameter[name[func], name[axis], <ast.Starred object at 0x7da1b23450f0>]]] | keyword[def] identifier[apply] ( identifier[self] , identifier[func] , identifier[axis] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[callable] ( identifier[func] ):
keyword[return] identifier[self] . identifier[_callable_func] ( identifier[func] , identifier[axis] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[isinstance] ( identifier[func] , identifier[dict] ):
keyword[return] identifier[self] . identifier[_dict_func] ( identifier[func] , identifier[axis] ,* identifier[args] ,** identifier[kwargs] )
keyword[elif] identifier[is_list_like] ( identifier[func] ):
keyword[return] identifier[self] . identifier[_list_like_func] ( identifier[func] , identifier[axis] ,* identifier[args] ,** identifier[kwargs] )
keyword[else] :
keyword[pass] | def apply(self, func, axis, *args, **kwargs):
"""Apply func across given axis.
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler.
"""
if callable(func):
return self._callable_func(func, axis, *args, **kwargs) # depends on [control=['if'], data=[]]
elif isinstance(func, dict):
return self._dict_func(func, axis, *args, **kwargs) # depends on [control=['if'], data=[]]
elif is_list_like(func):
return self._list_like_func(func, axis, *args, **kwargs) # depends on [control=['if'], data=[]]
else:
pass |
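The same dispatch restated standalone (is_list_like is approximated here with an isinstance check; note that the original's else branch silently returns None):

def dispatch(func):
    if callable(func):
        return 'callable'
    if isinstance(func, dict):
        return 'dict'
    if isinstance(func, (list, tuple)):
        return 'list-like'
    return None  # mirrors the silent pass fall-through above

print(dispatch(len), dispatch({'a': len}), dispatch([len]), dispatch(3))
# callable dict list-like None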
def execute(self, key: str, executable: str, *, capture_stdout: bool=False, capture_stderr: bool=False,
**acquire_kwargs) -> Tuple[Optional[int], Optional[bytes], Optional[bytes]]:
"""
Executes the given executable whilst holding the given lock.
        :param key: the key identifying the lock to acquire.
        :param executable: the executable to run while the lock is held.
        :param capture_stdout: whether to capture the executable's stdout.
        :param capture_stderr: whether to capture the executable's stderr.
        :param acquire_kwargs: extra keyword arguments passed through to acquire().
        :return: a (return code, stdout, stderr) tuple, or (None, None, None) if the lock could not be acquired.
"""
lock = self.acquire(key, **acquire_kwargs)
if lock is None:
return None, None, None
return self.execute_with_lock(executable, lock, capture_stdout=capture_stdout, capture_stderr=capture_stderr) | def function[execute, parameter[self, key, executable]]:
constant[
Executes the given executable whilst holding the given lock.
        :param key: the key identifying the lock to acquire.
        :param executable: the executable to run while the lock is held.
        :param capture_stdout: whether to capture the executable's stdout.
        :param capture_stderr: whether to capture the executable's stderr.
        :param acquire_kwargs: extra keyword arguments passed through to acquire().
        :return: a (return code, stdout, stderr) tuple, or (None, None, None) if the lock could not be acquired.
]
variable[lock] assign[=] call[name[self].acquire, parameter[name[key]]]
if compare[name[lock] is constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da20c76f340>, <ast.Constant object at 0x7da20c76f820>, <ast.Constant object at 0x7da20c76f520>]]]
return[call[name[self].execute_with_lock, parameter[name[executable], name[lock]]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[key] : identifier[str] , identifier[executable] : identifier[str] ,*, identifier[capture_stdout] : identifier[bool] = keyword[False] , identifier[capture_stderr] : identifier[bool] = keyword[False] ,
** identifier[acquire_kwargs] )-> identifier[Tuple] [ identifier[Optional] [ identifier[int] ], identifier[Optional] [ identifier[bytes] ], identifier[Optional] [ identifier[bytes] ]]:
literal[string]
identifier[lock] = identifier[self] . identifier[acquire] ( identifier[key] ,** identifier[acquire_kwargs] )
keyword[if] identifier[lock] keyword[is] keyword[None] :
keyword[return] keyword[None] , keyword[None] , keyword[None]
keyword[return] identifier[self] . identifier[execute_with_lock] ( identifier[executable] , identifier[lock] , identifier[capture_stdout] = identifier[capture_stdout] , identifier[capture_stderr] = identifier[capture_stderr] ) | def execute(self, key: str, executable: str, *, capture_stdout: bool=False, capture_stderr: bool=False, **acquire_kwargs) -> Tuple[Optional[int], Optional[bytes], Optional[bytes]]:
"""
Executes the given executable whilst holding the given lock.
    :param key: the key identifying the lock to acquire.
    :param executable: the executable to run while the lock is held.
    :param capture_stdout: whether to capture the executable's stdout.
    :param capture_stderr: whether to capture the executable's stderr.
    :param acquire_kwargs: extra keyword arguments passed through to acquire().
    :return: a (return code, stdout, stderr) tuple, or (None, None, None) if the lock could not be acquired.
"""
lock = self.acquire(key, **acquire_kwargs)
if lock is None:
return (None, None, None) # depends on [control=['if'], data=[]]
return self.execute_with_lock(executable, lock, capture_stdout=capture_stdout, capture_stderr=capture_stderr) |
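execute_with_lock is not part of this record; a plausible, hedged stand-in for its core using subprocess with optional capture (an assumption for illustration, not the library's actual implementation):

import subprocess
from typing import Optional, Tuple

def run_executable(executable: str, capture_stdout: bool = False,
                   capture_stderr: bool = False) -> Tuple[int, Optional[bytes], Optional[bytes]]:
    # Run the executable in a shell, wiring up pipes only when capture is requested.
    proc = subprocess.run(
        executable, shell=True,
        stdout=subprocess.PIPE if capture_stdout else None,
        stderr=subprocess.PIPE if capture_stderr else None)
    return proc.returncode, proc.stdout, proc.stderr

print(run_executable('echo hello', capture_stdout=True))  # (0, b'hello\n', None)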
def url(self, url):
""" Set API URL endpoint
Args:
url: the url of the API endpoint
"""
if url and url.endswith('/'):
url = url[:-1]
self._url = url | def function[url, parameter[self, url]]:
constant[ Set API URL endpoint
Args:
url: the url of the API endpoint
]
if <ast.BoolOp object at 0x7da1b0d1e0e0> begin[:]
variable[url] assign[=] call[name[url]][<ast.Slice object at 0x7da1b0d1d7b0>]
name[self]._url assign[=] name[url] | keyword[def] identifier[url] ( identifier[self] , identifier[url] ):
literal[string]
keyword[if] identifier[url] keyword[and] identifier[url] . identifier[endswith] ( literal[string] ):
identifier[url] = identifier[url] [:- literal[int] ]
identifier[self] . identifier[_url] = identifier[url] | def url(self, url):
""" Set API URL endpoint
Args:
url: the url of the API endpoint
"""
if url and url.endswith('/'):
url = url[:-1] # depends on [control=['if'], data=[]]
self._url = url |
def get_wind_efficiency_curve(curve_name='all'):
r"""
Reads wind efficiency curve(s) specified in `curve_name`.
Parameters
----------
curve_name : str or list
Specifies the curve. Use 'all' to get all curves in a MultiIndex
DataFrame or one of the curve names to retrieve a single curve.
Default: 'all'.
Returns
-------
efficiency_curve : pd.DataFrame
Wind efficiency curve. Contains 'wind_speed' and 'efficiency' columns
with wind speed in m/s and wind efficiency (dimensionless).
If `curve_name` is 'all' or a list of strings a MultiIndex DataFrame is
returned with curve names in the first level of the columns.
Notes
-----
The wind efficiency curves were generated in the "Dena Netzstudie" [1]_ and
in the work of Kaspar Knorr [2]_. The mean wind efficiency curve is an
    average curve from 12 wind farms distributed over Germany ([1]_) or
    respectively an average over more than 2000 wind farms in Germany ([2]_).
    Curves with the suffix 'extreme' are wind efficiency curves of single wind
    farms that deviate strongly from the respective mean wind efficiency curve.
    For more information see [1]_ and [2]_.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
Examples
--------
.. parsed-literal::
# Example to plot all curves
        fig, ax = plt.subplots()
df = get_wind_efficiency_curve(curve_name='all')
for t in df.columns.get_level_values(0).unique():
p = df[t].set_index('wind_speed')['efficiency']
p.name = t
ax = p.plot(ax=ax, legend=True)
plt.show()
"""
possible_curve_names = ['dena_mean', 'knorr_mean', 'dena_extreme1',
'dena_extreme2', 'knorr_extreme1',
'knorr_extreme2', 'knorr_extreme3']
if curve_name == 'all':
curve_names = possible_curve_names
elif isinstance(curve_name, str):
curve_names = [curve_name]
else:
curve_names = curve_name
efficiency_curve = pd.DataFrame(columns=pd.MultiIndex(levels=[[], []],
codes=[[], []]))
for curve_name in curve_names:
if curve_name.split('_')[0] not in ['dena', 'knorr']:
raise ValueError("`curve_name` must be one of the following: " +
"{} but is {}".format(possible_curve_names,
curve_name))
path = os.path.join(os.path.dirname(__file__), 'data',
'wind_efficiency_curves_{}.csv'.format(
curve_name.split('_')[0]))
# Read wind efficiency curves from file
wind_efficiency_curves = pd.read_csv(path)
# Raise error if wind efficiency curve specified in 'curve_name' does
# not exist
if curve_name not in list(wind_efficiency_curves):
msg = ("Efficiency curve <{0}> does not exist. Must be one of the"
"following: {1}.")
raise ValueError(msg.format(curve_name, *possible_curve_names))
# Get wind efficiency curve and rename column containing efficiency
wec = wind_efficiency_curves[['wind_speed', curve_name]]
efficiency_curve[curve_name, 'wind_speed'] = wec['wind_speed']
efficiency_curve[curve_name, 'efficiency'] = wec[curve_name]
if len(curve_names) == 1:
return efficiency_curve[curve_names[0]]
else:
return efficiency_curve | def function[get_wind_efficiency_curve, parameter[curve_name]]:
constant[
Reads wind efficiency curve(s) specified in `curve_name`.
Parameters
----------
curve_name : str or list
Specifies the curve. Use 'all' to get all curves in a MultiIndex
DataFrame or one of the curve names to retrieve a single curve.
Default: 'all'.
Returns
-------
efficiency_curve : pd.DataFrame
Wind efficiency curve. Contains 'wind_speed' and 'efficiency' columns
with wind speed in m/s and wind efficiency (dimensionless).
If `curve_name` is 'all' or a list of strings a MultiIndex DataFrame is
returned with curve names in the first level of the columns.
Notes
-----
The wind efficiency curves were generated in the "Dena Netzstudie" [1]_ and
in the work of Kaspar Knorr [2]_. The mean wind efficiency curve is an
    average curve from 12 wind farms distributed over Germany ([1]_) or
    respectively an average over more than 2000 wind farms in Germany ([2]_).
    Curves with the suffix 'extreme' are wind efficiency curves of single wind
    farms that deviate strongly from the respective mean wind efficiency curve.
    For more information see [1]_ and [2]_.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
Examples
--------
.. parsed-literal::
# Example to plot all curves
        fig, ax = plt.subplots()
df = get_wind_efficiency_curve(curve_name='all')
for t in df.columns.get_level_values(0).unique():
p = df[t].set_index('wind_speed')['efficiency']
p.name = t
ax = p.plot(ax=ax, legend=True)
plt.show()
]
variable[possible_curve_names] assign[=] list[[<ast.Constant object at 0x7da1b084f6d0>, <ast.Constant object at 0x7da1b084d420>, <ast.Constant object at 0x7da1b084d900>, <ast.Constant object at 0x7da1b084df30>, <ast.Constant object at 0x7da1b084cfd0>, <ast.Constant object at 0x7da1b084dae0>, <ast.Constant object at 0x7da1b084d870>]]
if compare[name[curve_name] equal[==] constant[all]] begin[:]
variable[curve_names] assign[=] name[possible_curve_names]
variable[efficiency_curve] assign[=] call[name[pd].DataFrame, parameter[]]
for taget[name[curve_name]] in starred[name[curve_names]] begin[:]
if compare[call[call[name[curve_name].split, parameter[constant[_]]]][constant[0]] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b084ddb0>, <ast.Constant object at 0x7da1b084d480>]]] begin[:]
<ast.Raise object at 0x7da1b084cf40>
variable[path] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[__file__]]], constant[data], call[constant[wind_efficiency_curves_{}.csv].format, parameter[call[call[name[curve_name].split, parameter[constant[_]]]][constant[0]]]]]]
variable[wind_efficiency_curves] assign[=] call[name[pd].read_csv, parameter[name[path]]]
if compare[name[curve_name] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[name[wind_efficiency_curves]]]] begin[:]
variable[msg] assign[=] constant[Efficiency curve <{0}> does not exist. Must be one of thefollowing: {1}.]
<ast.Raise object at 0x7da1b084faf0>
variable[wec] assign[=] call[name[wind_efficiency_curves]][list[[<ast.Constant object at 0x7da1b084e2f0>, <ast.Name object at 0x7da1b084c6a0>]]]
call[name[efficiency_curve]][tuple[[<ast.Name object at 0x7da1b084f190>, <ast.Constant object at 0x7da1b084ec80>]]] assign[=] call[name[wec]][constant[wind_speed]]
call[name[efficiency_curve]][tuple[[<ast.Name object at 0x7da1b084dc30>, <ast.Constant object at 0x7da1b084f2b0>]]] assign[=] call[name[wec]][name[curve_name]]
if compare[call[name[len], parameter[name[curve_names]]] equal[==] constant[1]] begin[:]
return[call[name[efficiency_curve]][call[name[curve_names]][constant[0]]]] | keyword[def] identifier[get_wind_efficiency_curve] ( identifier[curve_name] = literal[string] ):
literal[string]
identifier[possible_curve_names] =[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string] ]
keyword[if] identifier[curve_name] == literal[string] :
identifier[curve_names] = identifier[possible_curve_names]
keyword[elif] identifier[isinstance] ( identifier[curve_name] , identifier[str] ):
identifier[curve_names] =[ identifier[curve_name] ]
keyword[else] :
identifier[curve_names] = identifier[curve_name]
identifier[efficiency_curve] = identifier[pd] . identifier[DataFrame] ( identifier[columns] = identifier[pd] . identifier[MultiIndex] ( identifier[levels] =[[],[]],
identifier[codes] =[[],[]]))
keyword[for] identifier[curve_name] keyword[in] identifier[curve_names] :
keyword[if] identifier[curve_name] . identifier[split] ( literal[string] )[ literal[int] ] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string] . identifier[format] ( identifier[possible_curve_names] ,
identifier[curve_name] ))
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ), literal[string] ,
literal[string] . identifier[format] (
identifier[curve_name] . identifier[split] ( literal[string] )[ literal[int] ]))
identifier[wind_efficiency_curves] = identifier[pd] . identifier[read_csv] ( identifier[path] )
keyword[if] identifier[curve_name] keyword[not] keyword[in] identifier[list] ( identifier[wind_efficiency_curves] ):
identifier[msg] =( literal[string]
literal[string] )
keyword[raise] identifier[ValueError] ( identifier[msg] . identifier[format] ( identifier[curve_name] ,* identifier[possible_curve_names] ))
identifier[wec] = identifier[wind_efficiency_curves] [[ literal[string] , identifier[curve_name] ]]
identifier[efficiency_curve] [ identifier[curve_name] , literal[string] ]= identifier[wec] [ literal[string] ]
identifier[efficiency_curve] [ identifier[curve_name] , literal[string] ]= identifier[wec] [ identifier[curve_name] ]
keyword[if] identifier[len] ( identifier[curve_names] )== literal[int] :
keyword[return] identifier[efficiency_curve] [ identifier[curve_names] [ literal[int] ]]
keyword[else] :
keyword[return] identifier[efficiency_curve] | def get_wind_efficiency_curve(curve_name='all'):
"""
Reads wind efficiency curve(s) specified in `curve_name`.
Parameters
----------
curve_name : str or list
Specifies the curve. Use 'all' to get all curves in a MultiIndex
DataFrame or one of the curve names to retrieve a single curve.
Default: 'all'.
Returns
-------
efficiency_curve : pd.DataFrame
Wind efficiency curve. Contains 'wind_speed' and 'efficiency' columns
with wind speed in m/s and wind efficiency (dimensionless).
If `curve_name` is 'all' or a list of strings, a MultiIndex DataFrame is
returned with curve names in the first level of the columns.
Notes
-----
The wind efficiency curves were generated in the "Dena Netzstudie" [1]_ and
in the work of Kaspar Knorr [2]_. The mean wind efficiency curve is an
average curve of 12 wind farms distributed over Germany ([1]_) or,
respectively, an average of over 2000 wind farms in Germany ([2]_). Curves
with the suffix 'extreme' are wind efficiency curves of single wind farms
that deviate strongly from the respective mean wind efficiency
curve. For more information see [1]_ and [2]_.
References
----------
.. [1] Kohler et.al.: "dena-Netzstudie II. Integration erneuerbarer
Energien in die deutsche Stromversorgung im Zeitraum 2015 – 2020
mit Ausblick 2025.", Deutsche Energie-Agentur GmbH (dena),
Tech. rept., 2010, p. 101
.. [2] Knorr, K.: "Modellierung von raum-zeitlichen Eigenschaften der
Windenergieeinspeisung für wetterdatenbasierte
Windleistungssimulationen". Universität Kassel, Diss., 2016,
p. 124
Examples
--------
.. parsed-literal::
# Example to plot all curves
fig, ax = plt.subplots()
df = get_wind_efficiency_curve(curve_name='all')
for t in df.columns.get_level_values(0).unique():
p = df[t].set_index('wind_speed')['efficiency']
p.name = t
ax = p.plot(ax=ax, legend=True)
plt.show()
"""
possible_curve_names = ['dena_mean', 'knorr_mean', 'dena_extreme1', 'dena_extreme2', 'knorr_extreme1', 'knorr_extreme2', 'knorr_extreme3']
if curve_name == 'all':
curve_names = possible_curve_names # depends on [control=['if'], data=[]]
elif isinstance(curve_name, str):
curve_names = [curve_name] # depends on [control=['if'], data=[]]
else:
curve_names = curve_name
efficiency_curve = pd.DataFrame(columns=pd.MultiIndex(levels=[[], []], codes=[[], []]))
for curve_name in curve_names:
if curve_name.split('_')[0] not in ['dena', 'knorr']:
raise ValueError('`curve_name` must be one of the following: ' + '{} but is {}'.format(possible_curve_names, curve_name)) # depends on [control=['if'], data=[]]
path = os.path.join(os.path.dirname(__file__), 'data', 'wind_efficiency_curves_{}.csv'.format(curve_name.split('_')[0]))
# Read wind efficiency curves from file
wind_efficiency_curves = pd.read_csv(path)
# Raise error if wind efficiency curve specified in 'curve_name' does
# not exist
if curve_name not in list(wind_efficiency_curves):
msg = 'Efficiency curve <{0}> does not exist. Must be one of the following: {1}.'
raise ValueError(msg.format(curve_name, possible_curve_names)) # depends on [control=['if'], data=['curve_name']]
# Get wind efficiency curve and rename column containing efficiency
wec = wind_efficiency_curves[['wind_speed', curve_name]]
efficiency_curve[curve_name, 'wind_speed'] = wec['wind_speed']
efficiency_curve[curve_name, 'efficiency'] = wec[curve_name] # depends on [control=['for'], data=['curve_name']]
if len(curve_names) == 1:
return efficiency_curve[curve_names[0]] # depends on [control=['if'], data=[]]
else:
return efficiency_curve |
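For reference, a minimal sketch of the MultiIndex column layout this function returns, using made-up demo numbers in place of the real dena/knorr CSV data (the curve name and values below are illustrative only):
import pandas as pd

# Two-level columns: (curve name, 'wind_speed'/'efficiency'), as built above.
demo = pd.DataFrame({('demo_mean', 'wind_speed'): [0.0, 5.0, 10.0],
                     ('demo_mean', 'efficiency'): [0.0, 0.90, 0.95]})
print(demo['demo_mean'])  # selecting the first level yields a single curve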
def update(self, volume_id, **kwargs):
"""
update an export
"""
# These arguments are allowed
self.allowed('update', kwargs,
['status', 'instance_id',
'mountpoint', 'ip', 'initiator', 'session_ip',
'session_initiator'])
# Remove parameters that are None
params = self.unused(kwargs)
return self.http_post('/volumes/%s/export' % volume_id, params=params) | def function[update, parameter[self, volume_id]]:
constant[
update an export
]
call[name[self].allowed, parameter[constant[update], name[kwargs], list[[<ast.Constant object at 0x7da2041dbb80>, <ast.Constant object at 0x7da2041dbc10>, <ast.Constant object at 0x7da2041d8670>, <ast.Constant object at 0x7da2041dbd90>, <ast.Constant object at 0x7da2041d8f70>, <ast.Constant object at 0x7da2041db220>, <ast.Constant object at 0x7da2041da560>]]]]
variable[params] assign[=] call[name[self].unused, parameter[name[kwargs]]]
return[call[name[self].http_post, parameter[binary_operation[constant[/volumes/%s/export] <ast.Mod object at 0x7da2590d6920> name[volume_id]]]]] | keyword[def] identifier[update] ( identifier[self] , identifier[volume_id] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[allowed] ( literal[string] , identifier[kwargs] ,
[ literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] ])
identifier[params] = identifier[self] . identifier[unused] ( identifier[kwargs] )
keyword[return] identifier[self] . identifier[http_post] ( literal[string] % identifier[volume_id] , identifier[params] = identifier[params] ) | def update(self, volume_id, **kwargs):
"""
update an export
"""
# These arguments are allowed
self.allowed('update', kwargs, ['status', 'instance_id', 'mountpoint', 'ip', 'initiator', 'session_ip', 'session_initiator'])
# Remove parameters that are None
params = self.unused(kwargs)
return self.http_post('/volumes/%s/export' % volume_id, params=params) |
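The allowed/unused pair boils down to validate-then-filter over keyword arguments. A standalone sketch of that pattern with hypothetical names, not the real client API:
def filter_params(allowed, kwargs):
    # Reject arguments the endpoint does not accept.
    unknown = set(kwargs) - set(allowed)
    if unknown:
        raise ValueError('unexpected parameters: %s' % ', '.join(sorted(unknown)))
    # Drop parameters that are None, mirroring unused().
    return {k: v for k, v in kwargs.items() if v is not None}

print(filter_params(['status', 'ip'], {'status': 'attached', 'ip': None}))
# {'status': 'attached'}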
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
TimeoutError: If the future didn't finish executing before the given
timeout.
exceptions.Exception: If the call raised then that exception will be
raised.
"""
if self._state == self.RUNNING:
self._context.wait_all_futures([self], timeout)
return self.__get_result() | def function[result, parameter[self, timeout]]:
constant[Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
TimeoutError: If the future didn't finish executing before the given
timeout.
exceptions.Exception: If the call raised then that exception will be
raised.
]
if compare[name[self]._state equal[==] name[self].RUNNING] begin[:]
call[name[self]._context.wait_all_futures, parameter[list[[<ast.Name object at 0x7da18ede78e0>]], name[timeout]]]
return[call[name[self].__get_result, parameter[]]] | keyword[def] identifier[result] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_state] == identifier[self] . identifier[RUNNING] :
identifier[self] . identifier[_context] . identifier[wait_all_futures] ([ identifier[self] ], identifier[timeout] )
keyword[return] identifier[self] . identifier[__get_result] () | def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
TimeoutError: If the future didn't finish executing before the given
timeout.
exceptions.Exception: If the call raised then that exception will be
raised.
"""
if self._state == self.RUNNING:
self._context.wait_all_futures([self], timeout) # depends on [control=['if'], data=[]]
return self.__get_result() |
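For comparison, the standard library future exposes the same contract (block until done, honor a timeout, re-raise the call's exception); a runnable check:
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor() as pool:
    fut = pool.submit(sum, [1, 2, 3])
    print(fut.result(timeout=5))  # waits up to 5 s, then prints 6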
def get_geo_index():
"""Get entire index of geographic name (key) and set of associated authors
(value).
"""
_dict = {}
for k, v in AUTHOR_EPITHET.items():
_dict[k] = set(v)
return _dict | def function[get_geo_index, parameter[]]:
constant[Get entire index of geographic name (key) and set of associated authors
(value).
]
variable[_dict] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da2045670a0>, <ast.Name object at 0x7da2045645e0>]]] in starred[call[name[AUTHOR_EPITHET].items, parameter[]]] begin[:]
call[name[_dict]][name[k]] assign[=] call[name[set], parameter[name[v]]]
return[name[_dict]] | keyword[def] identifier[get_geo_index] ():
literal[string]
identifier[_dict] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[AUTHOR_EPITHET] . identifier[items] ():
identifier[_dict] [ identifier[k] ]= identifier[set] ( identifier[v] )
keyword[return] identifier[_dict] | def get_geo_index():
"""Get entire index of geographic name (key) and set of associated authors
(value).
"""
_dict = {}
for (k, v) in AUTHOR_EPITHET.items():
_dict[k] = set(v) # depends on [control=['for'], data=[]]
return _dict |
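The body is a plain dict-of-lists to dict-of-sets conversion; a self-contained demo with invented epithet data:
AUTHOR_EPITHET_DEMO = {'Alexandrinus': ['Philo', 'Philo'], 'Atheniensis': ['Plato']}
index = {k: set(v) for k, v in AUTHOR_EPITHET_DEMO.items()}
print(index['Alexandrinus'])  # duplicates collapse: {'Philo'}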
def build(self, builder):
"""Build XML by appending to builder"""
builder.start("GlobalVariables", {})
make_element(builder, "StudyName", self.name)
make_element(builder, "StudyDescription", self.description)
make_element(builder, "ProtocolName", self.protocol_name)
builder.end("GlobalVariables") | def function[build, parameter[self, builder]]:
constant[Build XML by appending to builder]
call[name[builder].start, parameter[constant[GlobalVariables], dictionary[[], []]]]
call[name[make_element], parameter[name[builder], constant[StudyName], name[self].name]]
call[name[make_element], parameter[name[builder], constant[StudyDescription], name[self].description]]
call[name[make_element], parameter[name[builder], constant[ProtocolName], name[self].protocol_name]]
call[name[builder].end, parameter[constant[GlobalVariables]]] | keyword[def] identifier[build] ( identifier[self] , identifier[builder] ):
literal[string]
identifier[builder] . identifier[start] ( literal[string] ,{})
identifier[make_element] ( identifier[builder] , literal[string] , identifier[self] . identifier[name] )
identifier[make_element] ( identifier[builder] , literal[string] , identifier[self] . identifier[description] )
identifier[make_element] ( identifier[builder] , literal[string] , identifier[self] . identifier[protocol_name] )
identifier[builder] . identifier[end] ( literal[string] ) | def build(self, builder):
"""Build XML by appending to builder"""
builder.start('GlobalVariables', {})
make_element(builder, 'StudyName', self.name)
make_element(builder, 'StudyDescription', self.description)
make_element(builder, 'ProtocolName', self.protocol_name)
builder.end('GlobalVariables') |
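A hedged sketch of the builder contract using xml.etree's TreeBuilder, assuming make_element emits a start/data/end triple for one child tag; the study values are placeholders:
from xml.etree.ElementTree import TreeBuilder, tostring

def make_element(builder, tag, text):
    builder.start(tag, {})
    builder.data(text)
    builder.end(tag)

b = TreeBuilder()
b.start('GlobalVariables', {})
make_element(b, 'StudyName', 'Demo Study')
make_element(b, 'ProtocolName', 'Demo Protocol')
b.end('GlobalVariables')
print(tostring(b.close()).decode())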
def apply(self, q, bindings, fields, distinct=False):
""" Define a set of fields to return for a non-aggregated query. """
info = []
group_by = None
for field in self.parse(fields):
for concept in self.cube.model.match(field):
info.append(concept.ref)
table, column = concept.bind(self.cube)
bindings.append(Binding(table, concept.ref))
if distinct:
if group_by is None:
q = q.group_by(column)
group_by = column
else:
min_column = func.max(column)
min_column = min_column.label(column.name)
column = min_column
q = q.column(column)
if not len(self.results):
# If no fields are requested, return all available fields.
for concept in list(self.cube.model.attributes) + \
list(self.cube.model.measures):
info.append(concept.ref)
table, column = concept.bind(self.cube)
bindings.append(Binding(table, concept.ref))
q = q.column(column)
return info, q, bindings | def function[apply, parameter[self, q, bindings, fields, distinct]]:
constant[ Define a set of fields to return for a non-aggregated query. ]
variable[info] assign[=] list[[]]
variable[group_by] assign[=] constant[None]
for taget[name[field]] in starred[call[name[self].parse, parameter[name[fields]]]] begin[:]
for taget[name[concept]] in starred[call[name[self].cube.model.match, parameter[name[field]]]] begin[:]
call[name[info].append, parameter[name[concept].ref]]
<ast.Tuple object at 0x7da20c76c670> assign[=] call[name[concept].bind, parameter[name[self].cube]]
call[name[bindings].append, parameter[call[name[Binding], parameter[name[table], name[concept].ref]]]]
if name[distinct] begin[:]
if compare[name[group_by] is constant[None]] begin[:]
variable[q] assign[=] call[name[q].group_by, parameter[name[column]]]
variable[group_by] assign[=] name[column]
variable[q] assign[=] call[name[q].column, parameter[name[column]]]
if <ast.UnaryOp object at 0x7da20c76e7a0> begin[:]
for taget[name[concept]] in starred[binary_operation[call[name[list], parameter[name[self].cube.model.attributes]] + call[name[list], parameter[name[self].cube.model.measures]]]] begin[:]
call[name[info].append, parameter[name[concept].ref]]
<ast.Tuple object at 0x7da20c76f2e0> assign[=] call[name[concept].bind, parameter[name[self].cube]]
call[name[bindings].append, parameter[call[name[Binding], parameter[name[table], name[concept].ref]]]]
variable[q] assign[=] call[name[q].column, parameter[name[column]]]
return[tuple[[<ast.Name object at 0x7da2044c1060>, <ast.Name object at 0x7da2044c2980>, <ast.Name object at 0x7da2044c12d0>]]] | keyword[def] identifier[apply] ( identifier[self] , identifier[q] , identifier[bindings] , identifier[fields] , identifier[distinct] = keyword[False] ):
literal[string]
identifier[info] =[]
identifier[group_by] = keyword[None]
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[parse] ( identifier[fields] ):
keyword[for] identifier[concept] keyword[in] identifier[self] . identifier[cube] . identifier[model] . identifier[match] ( identifier[field] ):
identifier[info] . identifier[append] ( identifier[concept] . identifier[ref] )
identifier[table] , identifier[column] = identifier[concept] . identifier[bind] ( identifier[self] . identifier[cube] )
identifier[bindings] . identifier[append] ( identifier[Binding] ( identifier[table] , identifier[concept] . identifier[ref] ))
keyword[if] identifier[distinct] :
keyword[if] identifier[group_by] keyword[is] keyword[None] :
identifier[q] = identifier[q] . identifier[group_by] ( identifier[column] )
identifier[group_by] = identifier[column]
keyword[else] :
identifier[min_column] = identifier[func] . identifier[max] ( identifier[column] )
identifier[min_column] = identifier[min_column] . identifier[label] ( identifier[column] . identifier[name] )
identifier[column] = identifier[min_column]
identifier[q] = identifier[q] . identifier[column] ( identifier[column] )
keyword[if] keyword[not] identifier[len] ( identifier[self] . identifier[results] ):
keyword[for] identifier[concept] keyword[in] identifier[list] ( identifier[self] . identifier[cube] . identifier[model] . identifier[attributes] )+ identifier[list] ( identifier[self] . identifier[cube] . identifier[model] . identifier[measures] ):
identifier[info] . identifier[append] ( identifier[concept] . identifier[ref] )
identifier[table] , identifier[column] = identifier[concept] . identifier[bind] ( identifier[self] . identifier[cube] )
identifier[bindings] . identifier[append] ( identifier[Binding] ( identifier[table] , identifier[concept] . identifier[ref] ))
identifier[q] = identifier[q] . identifier[column] ( identifier[column] )
keyword[return] identifier[info] , identifier[q] , identifier[bindings] | def apply(self, q, bindings, fields, distinct=False):
""" Define a set of fields to return for a non-aggregated query. """
info = []
group_by = None
for field in self.parse(fields):
for concept in self.cube.model.match(field):
info.append(concept.ref)
(table, column) = concept.bind(self.cube)
bindings.append(Binding(table, concept.ref))
if distinct:
if group_by is None:
q = q.group_by(column)
group_by = column # depends on [control=['if'], data=['group_by']]
else:
min_column = func.max(column)
min_column = min_column.label(column.name)
column = min_column # depends on [control=['if'], data=[]]
q = q.column(column) # depends on [control=['for'], data=['concept']] # depends on [control=['for'], data=['field']]
if not len(self.results):
# If no fields are requested, return all available fields.
for concept in list(self.cube.model.attributes) + list(self.cube.model.measures):
info.append(concept.ref)
(table, column) = concept.bind(self.cube)
bindings.append(Binding(table, concept.ref))
q = q.column(column) # depends on [control=['for'], data=['concept']] # depends on [control=['if'], data=[]]
return (info, q, bindings) |
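The labelled-aggregate trick above (take the max of a column but keep its original name) looks like this in SQLAlchemy Core; a minimal sketch assuming SQLAlchemy 1.4+:
from sqlalchemy import column, func, select

c = column('amount')
print(select(func.max(c).label(c.name)))  # SELECT max(amount) AS amount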
def _make_mountpoint(self, casename=None, var_name='mountpoint', suffix='', in_paths=False):
"""Creates a directory that can be used as a mountpoint. The directory is stored in :attr:`mountpoint`,
or the varname as specified by the argument. If in_paths is True, the path is stored in the :attr:`_paths`
attribute instead.
:returns: the mountpoint path
:raises NoMountpointAvailableError: if no mountpoint could be made
"""
parser = self.disk.parser
if parser.mountdir and not os.path.exists(parser.mountdir):
os.makedirs(parser.mountdir)
if parser.pretty:
md = parser.mountdir or tempfile.gettempdir()
case_name = casename or self.disk.parser.casename or \
".".join(os.path.basename(self.disk.paths[0]).split('.')[0:-1]) or \
os.path.basename(self.disk.paths[0])
if self.disk.parser.casename == case_name: # the casename is already in the path in this case
pretty_label = "{0}-{1}".format(self.index, self.get_safe_label() or self.fstype or 'volume')
else:
pretty_label = "{0}-{1}-{2}".format(case_name, self.index,
self.get_safe_label() or self.fstype or 'volume')
if suffix:
pretty_label += "-" + suffix
path = os.path.join(md, pretty_label)
# check if path already exists, otherwise try to find another nice path
if os.path.exists(path):
for i in range(2, 100):
path = os.path.join(md, pretty_label + "-" + str(i))
if not os.path.exists(path):
break
else:
logger.error("Could not find free mountdir.")
raise NoMountpointAvailableError()
# noinspection PyBroadException
try:
os.mkdir(path, 0o777)
if in_paths:
self._paths[var_name] = path
else:
setattr(self, var_name, path)
return path
except Exception:
logger.exception("Could not create mountdir.")
raise NoMountpointAvailableError()
else:
t = tempfile.mkdtemp(prefix='im_' + self.index + '_',
suffix='_' + self.get_safe_label() + ("_" + suffix if suffix else ""),
dir=parser.mountdir)
if in_paths:
self._paths[var_name] = t
else:
setattr(self, var_name, t)
return t | def function[_make_mountpoint, parameter[self, casename, var_name, suffix, in_paths]]:
constant[Creates a directory that can be used as a mountpoint. The directory is stored in :attr:`mountpoint`,
or the varname as specified by the argument. If in_paths is True, the path is stored in the :attr:`_paths`
attribute instead.
:returns: the mountpoint path
:raises NoMountpointAvailableError: if no mountpoint could be made
]
variable[parser] assign[=] name[self].disk.parser
if <ast.BoolOp object at 0x7da1b045f6a0> begin[:]
call[name[os].makedirs, parameter[name[parser].mountdir]]
if name[parser].pretty begin[:]
variable[md] assign[=] <ast.BoolOp object at 0x7da1b045fc70>
variable[case_name] assign[=] <ast.BoolOp object at 0x7da1b045ca60>
if compare[name[self].disk.parser.casename equal[==] name[case_name]] begin[:]
variable[pretty_label] assign[=] call[constant[{0}-{1}].format, parameter[name[self].index, <ast.BoolOp object at 0x7da1b045e260>]]
if name[suffix] begin[:]
<ast.AugAssign object at 0x7da1b045cac0>
variable[path] assign[=] call[name[os].path.join, parameter[name[md], name[pretty_label]]]
if call[name[os].path.exists, parameter[name[path]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[2], constant[100]]]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[md], binary_operation[binary_operation[name[pretty_label] + constant[-]] + call[name[str], parameter[name[i]]]]]]
if <ast.UnaryOp object at 0x7da1b0455a20> begin[:]
break
<ast.Try object at 0x7da1b0454a30> | keyword[def] identifier[_make_mountpoint] ( identifier[self] , identifier[casename] = keyword[None] , identifier[var_name] = literal[string] , identifier[suffix] = literal[string] , identifier[in_paths] = keyword[False] ):
literal[string]
identifier[parser] = identifier[self] . identifier[disk] . identifier[parser]
keyword[if] identifier[parser] . identifier[mountdir] keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[parser] . identifier[mountdir] ):
identifier[os] . identifier[makedirs] ( identifier[parser] . identifier[mountdir] )
keyword[if] identifier[parser] . identifier[pretty] :
identifier[md] = identifier[parser] . identifier[mountdir] keyword[or] identifier[tempfile] . identifier[gettempdir] ()
identifier[case_name] = identifier[casename] keyword[or] identifier[self] . identifier[disk] . identifier[parser] . identifier[casename] keyword[or] literal[string] . identifier[join] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[disk] . identifier[paths] [ literal[int] ]). identifier[split] ( literal[string] )[ literal[int] :- literal[int] ]) keyword[or] identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[disk] . identifier[paths] [ literal[int] ])
keyword[if] identifier[self] . identifier[disk] . identifier[parser] . identifier[casename] == identifier[case_name] :
identifier[pretty_label] = literal[string] . identifier[format] ( identifier[self] . identifier[index] , identifier[self] . identifier[get_safe_label] () keyword[or] identifier[self] . identifier[fstype] keyword[or] literal[string] )
keyword[else] :
identifier[pretty_label] = literal[string] . identifier[format] ( identifier[case_name] , identifier[self] . identifier[index] ,
identifier[self] . identifier[get_safe_label] () keyword[or] identifier[self] . identifier[fstype] keyword[or] literal[string] )
keyword[if] identifier[suffix] :
identifier[pretty_label] += literal[string] + identifier[suffix]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[md] , identifier[pretty_label] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[md] , identifier[pretty_label] + literal[string] + identifier[str] ( identifier[i] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
keyword[break]
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] identifier[NoMountpointAvailableError] ()
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[path] , literal[int] )
keyword[if] identifier[in_paths] :
identifier[self] . identifier[_paths] [ identifier[var_name] ]= identifier[path]
keyword[else] :
identifier[setattr] ( identifier[self] , identifier[var_name] , identifier[path] )
keyword[return] identifier[path]
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] ( literal[string] )
keyword[raise] identifier[NoMountpointAvailableError] ()
keyword[else] :
identifier[t] = identifier[tempfile] . identifier[mkdtemp] ( identifier[prefix] = literal[string] + identifier[self] . identifier[index] + literal[string] ,
identifier[suffix] = literal[string] + identifier[self] . identifier[get_safe_label] ()+( literal[string] + identifier[suffix] keyword[if] identifier[suffix] keyword[else] literal[string] ),
identifier[dir] = identifier[parser] . identifier[mountdir] )
keyword[if] identifier[in_paths] :
identifier[self] . identifier[_paths] [ identifier[var_name] ]= identifier[t]
keyword[else] :
identifier[setattr] ( identifier[self] , identifier[var_name] , identifier[t] )
keyword[return] identifier[t] | def _make_mountpoint(self, casename=None, var_name='mountpoint', suffix='', in_paths=False):
"""Creates a directory that can be used as a mountpoint. The directory is stored in :attr:`mountpoint`,
or the varname as specified by the argument. If in_paths is True, the path is stored in the :attr:`_paths`
attribute instead.
:returns: the mountpoint path
:raises NoMountpointAvailableError: if no mountpoint could be made
"""
parser = self.disk.parser
if parser.mountdir and (not os.path.exists(parser.mountdir)):
os.makedirs(parser.mountdir) # depends on [control=['if'], data=[]]
if parser.pretty:
md = parser.mountdir or tempfile.gettempdir()
case_name = casename or self.disk.parser.casename or '.'.join(os.path.basename(self.disk.paths[0]).split('.')[0:-1]) or os.path.basename(self.disk.paths[0])
if self.disk.parser.casename == case_name: # the casename is already in the path in this case
pretty_label = '{0}-{1}'.format(self.index, self.get_safe_label() or self.fstype or 'volume') # depends on [control=['if'], data=[]]
else:
pretty_label = '{0}-{1}-{2}'.format(case_name, self.index, self.get_safe_label() or self.fstype or 'volume')
if suffix:
pretty_label += '-' + suffix # depends on [control=['if'], data=[]]
path = os.path.join(md, pretty_label)
# check if path already exists, otherwise try to find another nice path
if os.path.exists(path):
for i in range(2, 100):
path = os.path.join(md, pretty_label + '-' + str(i))
if not os.path.exists(path):
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
else:
logger.error('Could not find free mountdir.')
raise NoMountpointAvailableError() # depends on [control=['if'], data=[]]
# noinspection PyBroadException
try:
os.mkdir(path, 0o777)
if in_paths:
self._paths[var_name] = path # depends on [control=['if'], data=[]]
else:
setattr(self, var_name, path)
return path # depends on [control=['try'], data=[]]
except Exception:
logger.exception('Could not create mountdir.')
raise NoMountpointAvailableError() # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
t = tempfile.mkdtemp(prefix='im_' + self.index + '_', suffix='_' + self.get_safe_label() + ('_' + suffix if suffix else ''), dir=parser.mountdir)
if in_paths:
self._paths[var_name] = t # depends on [control=['if'], data=[]]
else:
setattr(self, var_name, t)
return t |
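The non-pretty branch reduces to tempfile.mkdtemp with a prefix and suffix; a standalone illustration in which the index and label values are made up:
import tempfile

path = tempfile.mkdtemp(prefix='im_2_', suffix='_my-label')
print(path)  # e.g. /tmp/im_2_k3j2x9_my-label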
def methods():
"Names of operations provided by containerizers, as a set."
pairs = inspect.getmembers(Containerizer, predicate=inspect.ismethod)
return set(k for k, _ in pairs if k[0:1] != "_") | def function[methods, parameter[]]:
constant[Names of operations provided by containerizers, as a set.]
variable[pairs] assign[=] call[name[inspect].getmembers, parameter[name[Containerizer]]]
return[call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b0be16f0>]]] | keyword[def] identifier[methods] ():
literal[string]
identifier[pairs] = identifier[inspect] . identifier[getmembers] ( identifier[Containerizer] , identifier[predicate] = identifier[inspect] . identifier[ismethod] )
keyword[return] identifier[set] ( identifier[k] keyword[for] identifier[k] , identifier[_] keyword[in] identifier[pairs] keyword[if] identifier[k] [ literal[int] : literal[int] ]!= literal[string] ) | def methods():
"""Names of operations provided by containerizers, as a set."""
pairs = inspect.getmembers(Containerizer, predicate=inspect.ismethod)
return set((k for (k, _) in pairs if k[0:1] != '_')) |
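Note that inspect.ismethod only matches bound methods, so the same inventory over a Python 3 class needs inspect.isfunction; a small demo of the underscore filter:
import inspect

class Demo:
    def launch(self): pass
    def _internal(self): pass

pairs = inspect.getmembers(Demo, predicate=inspect.isfunction)
print({k for k, _ in pairs if k[0:1] != '_'})  # {'launch'}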
def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
"""
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(
model_version=arguments['--version'],
activate=not arguments['--no-activate'],
)
logger.info("Exported model. New version number: {}".format(model_version)) | def function[export_cmd, parameter[argv]]:
constant[Export a model from one model persister to another.
The model persister to export to is supposed to be available in the
configuration file under the 'model_persister_export' key.
Usage:
pld-export [options]
Options:
--version=<v> Export a specific version rather than the active
one.
--no-activate Don't activate the exported model with the
'model_persister_export'.
-h --help Show this screen.
]
variable[arguments] assign[=] call[name[docopt], parameter[name[export_cmd].__doc__]]
variable[model_version] assign[=] call[name[export], parameter[]]
call[name[logger].info, parameter[call[constant[Exported model. New version number: {}].format, parameter[name[model_version]]]]] | keyword[def] identifier[export_cmd] ( identifier[argv] = identifier[sys] . identifier[argv] [ literal[int] :]):
literal[string]
identifier[arguments] = identifier[docopt] ( identifier[export_cmd] . identifier[__doc__] , identifier[argv] = identifier[argv] )
identifier[model_version] = identifier[export] (
identifier[model_version] = identifier[arguments] [ literal[string] ],
identifier[activate] = keyword[not] identifier[arguments] [ literal[string] ],
)
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[model_version] )) | def export_cmd(argv=sys.argv[1:]): # pragma: no cover
"Export a model from one model persister to another.\n\nThe model persister to export to is supposed to be available in the\nconfiguration file under the 'model_persister_export' key.\n\nUsage:\n pld-export [options]\n\nOptions:\n --version=<v> Export a specific version rather than the active\n one.\n\n --no-activate Don't activate the exported model with the\n 'model_persister_export'.\n\n -h --help Show this screen.\n"
arguments = docopt(export_cmd.__doc__, argv=argv)
model_version = export(model_version=arguments['--version'], activate=not arguments['--no-activate'])
logger.info('Exported model. New version number: {}'.format(model_version)) |
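docopt derives the option table directly from the docstring; a minimal parse check with a stripped-down usage string (requires the docopt package; values are illustrative):
from docopt import docopt

usage = """Usage: pld-export [options]

Options:
  --version=<v>  Export a specific version.
  --no-activate  Don't activate the exported model.
"""
print(docopt(usage, argv=['--version', '3']))
# {'--no-activate': False, '--version': '3'}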
def remove_node_by_value(self, value):
"""
Delete all nodes in ``self.node_list`` with the value ``value``.
Args:
value (Any): The value to find and delete owners of.
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.remove_node_by_value('One')
>>> len(graph.node_list)
0
"""
self.node_list = [node for node in self.node_list
if node.value != value]
# Remove links pointing to the deleted node
for node in self.node_list:
node.link_list = [link for link in node.link_list if
link.target.value != value] | def function[remove_node_by_value, parameter[self, value]]:
constant[
Delete all nodes in ``self.node_list`` with the value ``value``.
Args:
value (Any): The value to find and delete owners of.
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.remove_node_by_value('One')
>>> len(graph.node_list)
0
]
name[self].node_list assign[=] <ast.ListComp object at 0x7da18eb54310>
for taget[name[node]] in starred[name[self].node_list] begin[:]
name[node].link_list assign[=] <ast.ListComp object at 0x7da18eb56b60> | keyword[def] identifier[remove_node_by_value] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[node_list] =[ identifier[node] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[node_list]
keyword[if] identifier[node] . identifier[value] != identifier[value] ]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[node_list] :
identifier[node] . identifier[link_list] =[ identifier[link] keyword[for] identifier[link] keyword[in] identifier[node] . identifier[link_list] keyword[if]
identifier[link] . identifier[target] . identifier[value] != identifier[value] ] | def remove_node_by_value(self, value):
"""
Delete all nodes in ``self.node_list`` with the value ``value``.
Args:
value (Any): The value to find and delete owners of.
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.remove_node_by_value('One')
>>> len(graph.node_list)
0
"""
self.node_list = [node for node in self.node_list if node.value != value]
# Remove links pointing to the deleted node
for node in self.node_list:
node.link_list = [link for link in node.link_list if link.target.value != value] # depends on [control=['for'], data=['node']] |
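The same two-pass prune (drop matching nodes, then drop links pointing at them) with plain dicts in place of Node objects:
nodes = [{'value': 'One', 'links': ['Two']}, {'value': 'Two', 'links': ['One']}]
nodes = [n for n in nodes if n['value'] != 'One']
for n in nodes:
    n['links'] = [t for t in n['links'] if t != 'One']
print(nodes)  # [{'value': 'Two', 'links': []}]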
def setup(self, *args, **kwargs):
"""Set parameters for the compiler."""
if self.comp is None:
self.comp = Compiler(*args, **kwargs)
else:
self.comp.setup(*args, **kwargs) | def function[setup, parameter[self]]:
constant[Set parameters for the compiler.]
if compare[name[self].comp is constant[None]] begin[:]
name[self].comp assign[=] call[name[Compiler], parameter[<ast.Starred object at 0x7da20c6e4a90>]] | keyword[def] identifier[setup] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[comp] keyword[is] keyword[None] :
identifier[self] . identifier[comp] = identifier[Compiler] (* identifier[args] ,** identifier[kwargs] )
keyword[else] :
identifier[self] . identifier[comp] . identifier[setup] (* identifier[args] ,** identifier[kwargs] ) | def setup(self, *args, **kwargs):
"""Set parameters for the compiler."""
if self.comp is None:
self.comp = Compiler(*args, **kwargs) # depends on [control=['if'], data=[]]
else:
self.comp.setup(*args, **kwargs) |
def get_cart_deformed_cell(base_cryst, axis=0, size=1):
'''Return the cell deformed along one of the cartesian directions
Creates a new deformed structure. The deformation is based on the
base structure and is performed along a single axis. The axis is
specified as follows: 0,1,2 = x,y,z ; shears: 3,4,5 = yz, xz, xy.
The size of the deformation is in percent and degrees, respectively.
:param base_cryst: structure to be deformed
:param axis: direction of deformation
:param size: size of the deformation
:returns: new, deformed structure
'''
cryst = Atoms(base_cryst)
uc = base_cryst.get_cell()
s = size/100.0
L = diag(ones(3))
if axis < 3:
L[axis, axis] += s
else:
if axis == 3:
L[1, 2] += s
elif axis == 4:
L[0, 2] += s
else:
L[0, 1] += s
uc = dot(uc, L)
cryst.set_cell(uc, scale_atoms=True)
# print(cryst.get_cell())
# print(uc)
return cryst | def function[get_cart_deformed_cell, parameter[base_cryst, axis, size]]:
constant[Return the cell deformed along one of the cartesian directions
Creates new deformed structure. The deformation is based on the
base structure and is performed along single axis. The axis is
specified as follows: 0,1,2 = x,y,z ; sheers: 3,4,5 = yz, xz, xy.
The size of the deformation is in percent and degrees, respectively.
:param base_cryst: structure to be deformed
:param axis: direction of deformation
:param size: size of the deformation
:returns: new, deformed structure
]
variable[cryst] assign[=] call[name[Atoms], parameter[name[base_cryst]]]
variable[uc] assign[=] call[name[base_cryst].get_cell, parameter[]]
variable[s] assign[=] binary_operation[name[size] / constant[100.0]]
variable[L] assign[=] call[name[diag], parameter[call[name[ones], parameter[constant[3]]]]]
if compare[name[axis] less[<] constant[3]] begin[:]
<ast.AugAssign object at 0x7da1b0c67b80>
variable[uc] assign[=] call[name[dot], parameter[name[uc], name[L]]]
call[name[cryst].set_cell, parameter[name[uc]]]
return[name[cryst]] | keyword[def] identifier[get_cart_deformed_cell] ( identifier[base_cryst] , identifier[axis] = literal[int] , identifier[size] = literal[int] ):
literal[string]
identifier[cryst] = identifier[Atoms] ( identifier[base_cryst] )
identifier[uc] = identifier[base_cryst] . identifier[get_cell] ()
identifier[s] = identifier[size] / literal[int]
identifier[L] = identifier[diag] ( identifier[ones] ( literal[int] ))
keyword[if] identifier[axis] < literal[int] :
identifier[L] [ identifier[axis] , identifier[axis] ]+= identifier[s]
keyword[else] :
keyword[if] identifier[axis] == literal[int] :
identifier[L] [ literal[int] , literal[int] ]+= identifier[s]
keyword[elif] identifier[axis] == literal[int] :
identifier[L] [ literal[int] , literal[int] ]+= identifier[s]
keyword[else] :
identifier[L] [ literal[int] , literal[int] ]+= identifier[s]
identifier[uc] = identifier[dot] ( identifier[uc] , identifier[L] )
identifier[cryst] . identifier[set_cell] ( identifier[uc] , identifier[scale_atoms] = keyword[True] )
keyword[return] identifier[cryst] | def get_cart_deformed_cell(base_cryst, axis=0, size=1):
"""Return the cell deformed along one of the cartesian directions
Creates a new deformed structure. The deformation is based on the
base structure and is performed along a single axis. The axis is
specified as follows: 0,1,2 = x,y,z ; shears: 3,4,5 = yz, xz, xy.
The size of the deformation is in percent and degrees, respectively.
:param base_cryst: structure to be deformed
:param axis: direction of deformation
:param size: size of the deformation
:returns: new, deformed structure
"""
cryst = Atoms(base_cryst)
uc = base_cryst.get_cell()
s = size / 100.0
L = diag(ones(3))
if axis < 3:
L[axis, axis] += s # depends on [control=['if'], data=['axis']]
elif axis == 3:
L[1, 2] += s # depends on [control=['if'], data=[]]
elif axis == 4:
L[0, 2] += s # depends on [control=['if'], data=[]]
else:
L[0, 1] += s
uc = dot(uc, L)
cryst.set_cell(uc, scale_atoms=True)
# print(cryst.get_cell())
# print(uc)
return cryst |
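The deformation amounts to right-multiplying the cell matrix by an identity matrix with one perturbed entry; for example, a 1 % xy shear (axis=5) applied to a demo cubic 3 Å cell:
from numpy import diag, dot, ones

L = diag(ones(3))
L[0, 1] += 1 / 100.0          # axis=5 perturbs the xy entry
cell = diag([3.0, 3.0, 3.0])  # demo cubic cell
print(dot(cell, L))           # off-diagonal term 0.03 appears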
def relativize(self, absolute_address, target_region_id=None):
"""
Convert an absolute address to the memory offset in a memory region.
Note that if an address that belongs to a heap region is passed in to a stack region map, it will be converted
to an offset within the closest stack frame, and vice versa for passing a stack address to a heap region map.
Therefore you should only pass in addresses that belong to the same category (stack or non-stack) as this
region map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
"""
if target_region_id is None:
if self.is_stack:
# Get the base address of the stack frame it belongs to
base_address = next(self._address_to_region_id.irange(minimum=absolute_address, reverse=False))
else:
try:
base_address = next(self._address_to_region_id.irange(maximum=absolute_address, reverse=True))
except StopIteration:
# Not found. It belongs to the global region then.
return 'global', absolute_address, None
descriptor = self._address_to_region_id[base_address]
else:
if target_region_id == 'global':
# Just return the absolute address
return 'global', absolute_address, None
if target_region_id not in self._region_id_to_address:
raise SimRegionMapError('Trying to relativize to a non-existent region "%s"' % target_region_id)
descriptor = self._region_id_to_address[target_region_id]
base_address = descriptor.base_address
return descriptor.region_id, absolute_address - base_address, descriptor.related_function_address | def function[relativize, parameter[self, absolute_address, target_region_id]]:
constant[
Convert an absolute address to the memory offset in a memory region.
Note that if an address belongs to heap region is passed in to a stack region map, it will be converted to an
offset included in the closest stack frame, and vice versa for passing a stack address to a heap region.
Therefore you should only pass in address that belongs to the same category (stack or non-stack) of this region
map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
]
if compare[name[target_region_id] is constant[None]] begin[:]
if name[self].is_stack begin[:]
variable[base_address] assign[=] call[name[next], parameter[call[name[self]._address_to_region_id.irange, parameter[]]]]
variable[descriptor] assign[=] call[name[self]._address_to_region_id][name[base_address]]
return[tuple[[<ast.Attribute object at 0x7da20c6c6260>, <ast.BinOp object at 0x7da20c6c5390>, <ast.Attribute object at 0x7da20c6c5a80>]]] | keyword[def] identifier[relativize] ( identifier[self] , identifier[absolute_address] , identifier[target_region_id] = keyword[None] ):
literal[string]
keyword[if] identifier[target_region_id] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[is_stack] :
identifier[base_address] = identifier[next] ( identifier[self] . identifier[_address_to_region_id] . identifier[irange] ( identifier[minimum] = identifier[absolute_address] , identifier[reverse] = keyword[False] ))
keyword[else] :
keyword[try] :
identifier[base_address] = identifier[next] ( identifier[self] . identifier[_address_to_region_id] . identifier[irange] ( identifier[maximum] = identifier[absolute_address] , identifier[reverse] = keyword[True] ))
keyword[except] identifier[StopIteration] :
keyword[return] literal[string] , identifier[absolute_address] , keyword[None]
identifier[descriptor] = identifier[self] . identifier[_address_to_region_id] [ identifier[base_address] ]
keyword[else] :
keyword[if] identifier[target_region_id] == literal[string] :
keyword[return] literal[string] , identifier[absolute_address] , keyword[None]
keyword[if] identifier[target_region_id] keyword[not] keyword[in] identifier[self] . identifier[_region_id_to_address] :
keyword[raise] identifier[SimRegionMapError] ( literal[string] % identifier[target_region_id] )
identifier[descriptor] = identifier[self] . identifier[_region_id_to_address] [ identifier[target_region_id] ]
identifier[base_address] = identifier[descriptor] . identifier[base_address]
keyword[return] identifier[descriptor] . identifier[region_id] , identifier[absolute_address] - identifier[base_address] , identifier[descriptor] . identifier[related_function_address] | def relativize(self, absolute_address, target_region_id=None):
"""
Convert an absolute address to the memory offset in a memory region.
Note that if an address that belongs to a heap region is passed in to a stack region map, it will be converted
to an offset within the closest stack frame, and vice versa for passing a stack address to a heap region map.
Therefore you should only pass in addresses that belong to the same category (stack or non-stack) as this
region map.
:param absolute_address: An absolute memory address
:return: A tuple of the closest region ID, the relative offset, and the related function
address.
"""
if target_region_id is None:
if self.is_stack:
# Get the base address of the stack frame it belongs to
base_address = next(self._address_to_region_id.irange(minimum=absolute_address, reverse=False)) # depends on [control=['if'], data=[]]
else:
try:
base_address = next(self._address_to_region_id.irange(maximum=absolute_address, reverse=True)) # depends on [control=['try'], data=[]]
except StopIteration:
# Not found. It belongs to the global region then.
return ('global', absolute_address, None) # depends on [control=['except'], data=[]]
descriptor = self._address_to_region_id[base_address] # depends on [control=['if'], data=[]]
else:
if target_region_id == 'global':
# Just return the absolute address
return ('global', absolute_address, None) # depends on [control=['if'], data=[]]
if target_region_id not in self._region_id_to_address:
raise SimRegionMapError('Trying to relativize to a non-existent region "%s"' % target_region_id) # depends on [control=['if'], data=['target_region_id']]
descriptor = self._region_id_to_address[target_region_id]
base_address = descriptor.base_address
return (descriptor.region_id, absolute_address - base_address, descriptor.related_function_address) |
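The irange calls suggest the address map is a sortedcontainers SortedDict; the nearest-base lookup for a non-stack map then works like this sketch (addresses and region ids invented):
from sortedcontainers import SortedDict

bases = SortedDict({0x1000: 'heap_a', 0x2000: 'heap_b'})
addr = 0x2345
base = next(bases.irange(maximum=addr, reverse=True))  # closest base <= addr
print(bases[base], hex(addr - base))  # heap_b 0x345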
def drop_schema(self):
"""Drop all gauged tables"""
try:
self.cursor.execute("""
DROP TABLE IF EXISTS gauged_data;
DROP TABLE IF EXISTS gauged_keys;
DROP TABLE IF EXISTS gauged_writer_history;
DROP TABLE IF EXISTS gauged_cache;
DROP TABLE IF EXISTS gauged_statistics;
DROP TABLE IF EXISTS gauged_metadata""")
self.db.commit()
except self.psycopg2.InternalError: # pragma: no cover
self.db.rollback() | def function[drop_schema, parameter[self]]:
constant[Drop all gauged tables]
<ast.Try object at 0x7da1b23302b0> | keyword[def] identifier[drop_schema] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[self] . identifier[cursor] . identifier[execute] ( literal[string] )
identifier[self] . identifier[db] . identifier[commit] ()
keyword[except] identifier[self] . identifier[psycopg2] . identifier[InternalError] :
identifier[self] . identifier[db] . identifier[rollback] () | def drop_schema(self):
"""Drop all gauged tables"""
try:
self.cursor.execute('\n DROP TABLE IF EXISTS gauged_data;\n DROP TABLE IF EXISTS gauged_keys;\n DROP TABLE IF EXISTS gauged_writer_history;\n DROP TABLE IF EXISTS gauged_cache;\n DROP TABLE IF EXISTS gauged_statistics;\n DROP TABLE IF EXISTS gauged_metadata')
self.db.commit() # depends on [control=['try'], data=[]]
except self.psycopg2.InternalError: # pragma: no cover
self.db.rollback() # depends on [control=['except'], data=[]] |
async def pop(self, name, init=False):
'''
Remove a property from a node and return the value
'''
prop = self.form.prop(name)
if prop is None:
if self.snap.strict:
raise s_exc.NoSuchProp(name=name)
await self.snap.warn(f'No Such Property: {name}')
return False
if self.isrunt:
if prop.info.get('ro'):
raise s_exc.IsRuntForm(mesg='Cannot delete read-only props on runt nodes',
form=self.form.full, prop=name)
return await self.snap.core.runRuntPropDel(self, prop)
if not init:
if prop.info.get('ro'):
if self.snap.strict:
raise s_exc.ReadOnlyProp(name=name)
await self.snap.warn(f'Property is read-only: {name}')
return False
curv = self.props.pop(name, s_common.novalu)
if curv is s_common.novalu:
return False
sops = prop.getDelOps(self.buid)
splice = self.snap.splice('prop:del', ndef=self.ndef, prop=prop.name, valu=curv)
await self.snap.stor(sops, [splice])
await prop.wasDel(self, curv) | <ast.AsyncFunctionDef object at 0x7da18eb56620> | keyword[async] keyword[def] identifier[pop] ( identifier[self] , identifier[name] , identifier[init] = keyword[False] ):
literal[string]
identifier[prop] = identifier[self] . identifier[form] . identifier[prop] ( identifier[name] )
keyword[if] identifier[prop] keyword[is] keyword[None] :
keyword[if] identifier[self] . identifier[snap] . identifier[strict] :
keyword[raise] identifier[s_exc] . identifier[NoSuchProp] ( identifier[name] = identifier[name] )
keyword[await] identifier[self] . identifier[snap] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[isrunt] :
keyword[if] identifier[prop] . identifier[info] . identifier[get] ( literal[string] ):
keyword[raise] identifier[s_exc] . identifier[IsRuntForm] ( identifier[mesg] = literal[string] ,
identifier[form] = identifier[self] . identifier[form] . identifier[full] , identifier[prop] = identifier[name] )
keyword[return] keyword[await] identifier[self] . identifier[snap] . identifier[core] . identifier[runRuntPropDel] ( identifier[self] , identifier[prop] )
keyword[if] keyword[not] identifier[init] :
keyword[if] identifier[prop] . identifier[info] . identifier[get] ( literal[string] ):
keyword[if] identifier[self] . identifier[snap] . identifier[strict] :
keyword[raise] identifier[s_exc] . identifier[ReadOnlyProp] ( identifier[name] = identifier[name] )
keyword[await] identifier[self] . identifier[snap] . identifier[warn] ( literal[string] )
keyword[return] keyword[False]
identifier[curv] = identifier[self] . identifier[props] . identifier[pop] ( identifier[name] , identifier[s_common] . identifier[novalu] )
keyword[if] identifier[curv] keyword[is] identifier[s_common] . identifier[novalu] :
keyword[return] keyword[False]
identifier[sops] = identifier[prop] . identifier[getDelOps] ( identifier[self] . identifier[buid] )
identifier[splice] = identifier[self] . identifier[snap] . identifier[splice] ( literal[string] , identifier[ndef] = identifier[self] . identifier[ndef] , identifier[prop] = identifier[prop] . identifier[name] , identifier[valu] = identifier[curv] )
keyword[await] identifier[self] . identifier[snap] . identifier[stor] ( identifier[sops] ,[ identifier[splice] ])
keyword[await] identifier[prop] . identifier[wasDel] ( identifier[self] , identifier[curv] ) | async def pop(self, name, init=False):
"""
Remove a property from a node and return the value
"""
prop = self.form.prop(name)
if prop is None:
if self.snap.strict:
raise s_exc.NoSuchProp(name=name) # depends on [control=['if'], data=[]]
await self.snap.warn(f'No Such Property: {name}')
return False # depends on [control=['if'], data=[]]
if self.isrunt:
if prop.info.get('ro'):
raise s_exc.IsRuntForm(mesg='Cannot delete read-only props on runt nodes', form=self.form.full, prop=name) # depends on [control=['if'], data=[]]
return await self.snap.core.runRuntPropDel(self, prop) # depends on [control=['if'], data=[]]
if not init:
if prop.info.get('ro'):
if self.snap.strict:
raise s_exc.ReadOnlyProp(name=name) # depends on [control=['if'], data=[]]
await self.snap.warn(f'Property is read-only: {name}')
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
curv = self.props.pop(name, s_common.novalu)
if curv is s_common.novalu:
return False # depends on [control=['if'], data=[]]
sops = prop.getDelOps(self.buid)
splice = self.snap.splice('prop:del', ndef=self.ndef, prop=prop.name, valu=curv)
await self.snap.stor(sops, [splice])
await prop.wasDel(self, curv) |
def add_remote(name, location):
'''
Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.add_remote flathub https://flathub.org/repo/flathub.flatpakrepo
'''
ret = {'result': None, 'output': ''}
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' remote-add ' + name + ' ' + location)
if out['retcode'] and out['stderr']:
ret['stderr'] = out['stderr'].strip()
ret['result'] = False
else:
ret['stdout'] = out['stdout'].strip()
ret['result'] = True
return ret | def function[add_remote, parameter[name, location]]:
constant[
Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.add_remote flathub https://flathub.org/repo/flathub.flatpakrepo
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2139c30>, <ast.Constant object at 0x7da1b213a4d0>], [<ast.Constant object at 0x7da1b213aad0>, <ast.Constant object at 0x7da1b2139660>]]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[FLATPAK_BINARY_NAME] + constant[ remote-add ]] + name[name]] + constant[ ]] + name[location]]]]
if <ast.BoolOp object at 0x7da1b213a890> begin[:]
call[name[ret]][constant[stderr]] assign[=] call[call[name[out]][constant[stderr]].strip, parameter[]]
call[name[ret]][constant[result]] assign[=] constant[False]
return[name[ret]] | keyword[def] identifier[add_remote] ( identifier[name] , identifier[location] ):
literal[string]
identifier[ret] ={ literal[string] : keyword[None] , literal[string] : literal[string] }
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[FLATPAK_BINARY_NAME] + literal[string] + identifier[name] + literal[string] + identifier[location] )
keyword[if] identifier[out] [ literal[string] ] keyword[and] identifier[out] [ literal[string] ]:
identifier[ret] [ literal[string] ]= identifier[out] [ literal[string] ]. identifier[strip] ()
identifier[ret] [ literal[string] ]= keyword[False]
keyword[else] :
identifier[ret] [ literal[string] ]= identifier[out] [ literal[string] ]. identifier[strip] ()
identifier[ret] [ literal[string] ]= keyword[True]
keyword[return] identifier[ret] | def add_remote(name, location):
"""
Adds a new location to install flatpak packages from.
Args:
name (str): The repository's name.
location (str): The location of the repository.
Returns:
dict: The ``result`` and ``output``.
CLI Example:
.. code-block:: bash
salt '*' flatpak.add_remote flathub https://flathub.org/repo/flathub.flatpakrepo
"""
ret = {'result': None, 'output': ''}
out = __salt__['cmd.run_all'](FLATPAK_BINARY_NAME + ' remote-add ' + name + ' ' + location)
if out['retcode'] and out['stderr']:
ret['stderr'] = out['stderr'].strip()
ret['result'] = False # depends on [control=['if'], data=[]]
else:
ret['stdout'] = out['stdout'].strip()
ret['result'] = True
return ret |
def print_detailed_traceback(self, space=None, file=None):
"""NOT_RPYTHON: Dump a nice detailed interpreter- and
application-level traceback, useful to debug the interpreter."""
if file is None:
file = sys.stderr
f = io.StringIO()
for i in range(len(self.debug_excs)-1, -1, -1):
print("Traceback (interpreter-level):", file=f)
traceback.print_tb(self.debug_excs[i][2], file=f)
f.seek(0)
debug_print(''.join(['|| ' + line for line in f.readlines()]), file)
if self.debug_excs:
from pypy.tool import tb_server
tb_server.publish_exc(self.debug_excs[-1])
self.print_app_tb_only(file)
print('(application-level)', self.errorstr(space), file=file)
if AUTO_DEBUG:
debug.fire(self) | def function[print_detailed_traceback, parameter[self, space, file]]:
constant[NOT_RPYTHON: Dump a nice detailed interpreter- and
application-level traceback, useful to debug the interpreter.]
if compare[name[file] is constant[None]] begin[:]
variable[file] assign[=] name[sys].stderr
variable[f] assign[=] call[name[io].StringIO, parameter[]]
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[call[name[len], parameter[name[self].debug_excs]] - constant[1]], <ast.UnaryOp object at 0x7da20c6e7520>, <ast.UnaryOp object at 0x7da20c6e4d30>]]] begin[:]
tuple[[<ast.BinOp object at 0x7da20c6e6200>, <ast.Constant object at 0x7da20c6e7940>]]
call[name[traceback].print_tb, parameter[call[call[name[self].debug_excs][name[i]]][constant[2]]]]
call[name[f].seek, parameter[constant[0]]]
call[name[debug_print], parameter[call[constant[].join, parameter[<ast.ListComp object at 0x7da20c6e5480>]], name[file]]]
if name[self].debug_excs begin[:]
from relative_module[pypy.tool] import module[tb_server]
call[name[tb_server].publish_exc, parameter[call[name[self].debug_excs][<ast.UnaryOp object at 0x7da20c6e4c70>]]]
call[name[self].print_app_tb_only, parameter[name[file]]]
tuple[[<ast.BinOp object at 0x7da20c6e6ce0>, <ast.Constant object at 0x7da20c6e5fc0>, <ast.Call object at 0x7da20c6e71c0>]]
if name[AUTO_DEBUG] begin[:]
call[name[debug].fire, parameter[name[self]]] | keyword[def] identifier[print_detailed_traceback] ( identifier[self] , identifier[space] = keyword[None] , identifier[file] = keyword[None] ):
literal[string]
keyword[if] identifier[file] keyword[is] keyword[None] :
identifier[file] = identifier[sys] . identifier[stderr]
identifier[f] = identifier[io] . identifier[StringIO] ()
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[debug_excs] )- literal[int] ,- literal[int] ,- literal[int] ):
identifier[print] >> identifier[f] , literal[string]
identifier[traceback] . identifier[print_tb] ( identifier[self] . identifier[debug_excs] [ identifier[i] ][ literal[int] ], identifier[file] = identifier[f] )
identifier[f] . identifier[seek] ( literal[int] )
identifier[debug_print] ( literal[string] . identifier[join] ([ literal[string] + identifier[line] keyword[for] identifier[line] keyword[in] identifier[f] . identifier[readlines] ()]), identifier[file] )
keyword[if] identifier[self] . identifier[debug_excs] :
keyword[from] identifier[pypy] . identifier[tool] keyword[import] identifier[tb_server]
identifier[tb_server] . identifier[publish_exc] ( identifier[self] . identifier[debug_excs] [- literal[int] ])
identifier[self] . identifier[print_app_tb_only] ( identifier[file] )
identifier[print] >> identifier[file] , literal[string] , identifier[self] . identifier[errorstr] ( identifier[space] )
keyword[if] identifier[AUTO_DEBUG] :
identifier[debug] . identifier[fire] ( identifier[self] ) | def print_detailed_traceback(self, space=None, file=None):
"""NOT_RPYTHON: Dump a nice detailed interpreter- and
application-level traceback, useful to debug the interpreter."""
if file is None:
file = sys.stderr # depends on [control=['if'], data=['file']]
f = io.StringIO()
for i in range(len(self.debug_excs) - 1, -1, -1):
print('Traceback (interpreter-level):', file=f)
traceback.print_tb(self.debug_excs[i][2], file=f) # depends on [control=['for'], data=['i']]
f.seek(0)
debug_print(''.join(['|| ' + line for line in f.readlines()]), file)
if self.debug_excs:
from pypy.tool import tb_server
tb_server.publish_exc(self.debug_excs[-1]) # depends on [control=['if'], data=[]]
self.print_app_tb_only(file)
print('(application-level)', self.errorstr(space), file=file)
if AUTO_DEBUG:
debug.fire(self) # depends on [control=['if'], data=[]] |
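traceback.print_tb accepts any file-like object, so the capture-and-prefix step can be reproduced standalone with io.StringIO:
import io
import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    buf = io.StringIO()
    traceback.print_tb(sys.exc_info()[2], file=buf)
    buf.seek(0)
    print(''.join('|| ' + line for line in buf.readlines()))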
def used_by(self, bundle):
"""
Indicates that this reference is being used by the given bundle.
This method should only be used by the framework.
:param bundle: A bundle using this reference
"""
if bundle is None or bundle is self.__bundle:
# Ignore
return
with self.__usage_lock:
self.__using_bundles.setdefault(bundle, _UsageCounter()).inc() | def function[used_by, parameter[self, bundle]]:
constant[
Indicates that this reference is being used by the given bundle.
This method should only be used by the framework.
:param bundle: A bundle using this reference
]
if <ast.BoolOp object at 0x7da1b039ac50> begin[:]
return[None]
with name[self].__usage_lock begin[:]
call[call[name[self].__using_bundles.setdefault, parameter[name[bundle], call[name[_UsageCounter], parameter[]]]].inc, parameter[]] | keyword[def] identifier[used_by] ( identifier[self] , identifier[bundle] ):
literal[string]
keyword[if] identifier[bundle] keyword[is] keyword[None] keyword[or] identifier[bundle] keyword[is] identifier[self] . identifier[__bundle] :
keyword[return]
keyword[with] identifier[self] . identifier[__usage_lock] :
identifier[self] . identifier[__using_bundles] . identifier[setdefault] ( identifier[bundle] , identifier[_UsageCounter] ()). identifier[inc] () | def used_by(self, bundle):
"""
Indicates that this reference is being used by the given bundle.
This method should only be used by the framework.
:param bundle: A bundle using this reference
"""
if bundle is None or bundle is self.__bundle:
# Ignore
return # depends on [control=['if'], data=[]]
with self.__usage_lock:
self.__using_bundles.setdefault(bundle, _UsageCounter()).inc() # depends on [control=['with'], data=[]] |
def _pretty_message(string, *params):
"""
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
"""
output = textwrap.dedent(string)
# Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
if output.find('\n') != -1:
output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output)
if params:
output = output % params
output = output.strip()
return output | def function[_pretty_message, parameter[string]]:
constant[
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
]
variable[output] assign[=] call[name[textwrap].dedent, parameter[name[string]]]
if compare[call[name[output].find, parameter[constant[
]]] not_equal[!=] <ast.UnaryOp object at 0x7da204347be0>] begin[:]
variable[output] assign[=] call[name[re].sub, parameter[constant[(?<=\S)
(?=[^
\d\*\-=])], constant[ ], name[output]]]
if name[params] begin[:]
variable[output] assign[=] binary_operation[name[output] <ast.Mod object at 0x7da2590d6920> name[params]]
variable[output] assign[=] call[name[output].strip, parameter[]]
return[name[output]] | keyword[def] identifier[_pretty_message] ( identifier[string] ,* identifier[params] ):
literal[string]
identifier[output] = identifier[textwrap] . identifier[dedent] ( identifier[string] )
keyword[if] identifier[output] . identifier[find] ( literal[string] )!=- literal[int] :
identifier[output] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[output] )
keyword[if] identifier[params] :
identifier[output] = identifier[output] % identifier[params]
identifier[output] = identifier[output] . identifier[strip] ()
keyword[return] identifier[output] | def _pretty_message(string, *params):
"""
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
"""
output = textwrap.dedent(string)
# Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
if output.find('\n') != -1:
output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output) # depends on [control=['if'], data=[]]
if params:
output = output % params # depends on [control=['if'], data=[]]
output = output.strip()
return output |
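A minimal usage sketch for the `_pretty_message` row above; it exercises the function exactly as defined (only stdlib `re`/`textwrap` are needed):

```python
# The single newline inside the sentence is unwrapped into a space,
# then the %-style params are interpolated and the result stripped.
msg = _pretty_message(
    '''
    unable to load %s
    because the file is missing
    ''',
    'config.json'
)
print(msg)  # unable to load config.json because the file is missing
```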
def COOKIES(self):
""" Cookies parsed into a dictionary. Signed cookies are NOT decoded
automatically. See :meth:`get_cookie` for details.
"""
raw_dict = SimpleCookie(self.headers.get('Cookie',''))
cookies = {}
for cookie in six.itervalues(raw_dict):
cookies[cookie.key] = cookie.value
return cookies | def function[COOKIES, parameter[self]]:
constant[ Cookies parsed into a dictionary. Signed cookies are NOT decoded
automatically. See :meth:`get_cookie` for details.
]
variable[raw_dict] assign[=] call[name[SimpleCookie], parameter[call[name[self].headers.get, parameter[constant[Cookie], constant[]]]]]
variable[cookies] assign[=] dictionary[[], []]
for taget[name[cookie]] in starred[call[name[six].itervalues, parameter[name[raw_dict]]]] begin[:]
call[name[cookies]][name[cookie].key] assign[=] name[cookie].value
return[name[cookies]] | keyword[def] identifier[COOKIES] ( identifier[self] ):
literal[string]
identifier[raw_dict] = identifier[SimpleCookie] ( identifier[self] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ))
identifier[cookies] ={}
keyword[for] identifier[cookie] keyword[in] identifier[six] . identifier[itervalues] ( identifier[raw_dict] ):
identifier[cookies] [ identifier[cookie] . identifier[key] ]= identifier[cookie] . identifier[value]
keyword[return] identifier[cookies] | def COOKIES(self):
""" Cookies parsed into a dictionary. Signed cookies are NOT decoded
automatically. See :meth:`get_cookie` for details.
"""
raw_dict = SimpleCookie(self.headers.get('Cookie', ''))
cookies = {}
for cookie in six.itervalues(raw_dict):
cookies[cookie.key] = cookie.value # depends on [control=['for'], data=['cookie']]
return cookies |
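For context, the same `SimpleCookie` round-trip the `COOKIES` property performs, shown against a raw header string (Python 3 stdlib; the property itself reads `self.headers` instead):

```python
from http.cookies import SimpleCookie

raw_dict = SimpleCookie('sessionid=abc123; theme=dark')
# Each value is a Morsel carrying .key and .value
cookies = {c.key: c.value for c in raw_dict.values()}
print(cookies)  # {'sessionid': 'abc123', 'theme': 'dark'}
```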
def parse(self, filename):
"""
. reads 1 file
. if there is a compilation error, print a warning
. get root cursor and recurse
. for each STRUCT_DECL, register a new struct type
. for each UNION_DECL, register a new union type
. for each TYPEDEF_DECL, register a new alias/typedef to the underlying type
- underlying type is cursor.type.get_declaration() for Record
. for each VAR_DECL, register a Variable
. for each TYPEREF ??
"""
index = Index.create()
self.tu = index.parse(filename, self.flags, options=self.tu_options)
if not self.tu:
log.warning("unable to load input")
return
if len(self.tu.diagnostics) > 0:
for x in self.tu.diagnostics:
log.warning(x.spelling)
if x.severity > 2:
log.warning("Source code has some error. Please fix.")
log.warning(x.spelling)
# code.interact(local=locals())
break
root = self.tu.cursor
for node in root.get_children():
self.startElement(node)
return | def function[parse, parameter[self, filename]]:
constant[
. reads 1 file
. if there is a compilation error, print a warning
. get root cursor and recurse
. for each STRUCT_DECL, register a new struct type
. for each UNION_DECL, register a new union type
. for each TYPEDEF_DECL, register a new alias/typedef to the underlying type
- underlying type is cursor.type.get_declaration() for Record
. for each VAR_DECL, register a Variable
. for each TYPEREF ??
]
variable[index] assign[=] call[name[Index].create, parameter[]]
name[self].tu assign[=] call[name[index].parse, parameter[name[filename], name[self].flags]]
if <ast.UnaryOp object at 0x7da1b2346b60> begin[:]
call[name[log].warning, parameter[constant[unable to load input]]]
return[None]
if compare[call[name[len], parameter[name[self].tu.diagnostics]] greater[>] constant[0]] begin[:]
for taget[name[x]] in starred[name[self].tu.diagnostics] begin[:]
call[name[log].warning, parameter[name[x].spelling]]
if compare[name[x].severity greater[>] constant[2]] begin[:]
call[name[log].warning, parameter[constant[Source code has some error. Please fix.]]]
call[name[log].warning, parameter[name[x].spelling]]
break
variable[root] assign[=] name[self].tu.cursor
for taget[name[node]] in starred[call[name[root].get_children, parameter[]]] begin[:]
call[name[self].startElement, parameter[name[node]]]
return[None] | keyword[def] identifier[parse] ( identifier[self] , identifier[filename] ):
literal[string]
identifier[index] = identifier[Index] . identifier[create] ()
identifier[self] . identifier[tu] = identifier[index] . identifier[parse] ( identifier[filename] , identifier[self] . identifier[flags] , identifier[options] = identifier[self] . identifier[tu_options] )
keyword[if] keyword[not] identifier[self] . identifier[tu] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[return]
keyword[if] identifier[len] ( identifier[self] . identifier[tu] . identifier[diagnostics] )> literal[int] :
keyword[for] identifier[x] keyword[in] identifier[self] . identifier[tu] . identifier[diagnostics] :
identifier[log] . identifier[warning] ( identifier[x] . identifier[spelling] )
keyword[if] identifier[x] . identifier[severity] > literal[int] :
identifier[log] . identifier[warning] ( literal[string] )
identifier[log] . identifier[warning] ( identifier[x] . identifier[spelling] )
keyword[break]
identifier[root] = identifier[self] . identifier[tu] . identifier[cursor]
keyword[for] identifier[node] keyword[in] identifier[root] . identifier[get_children] ():
identifier[self] . identifier[startElement] ( identifier[node] )
keyword[return] | def parse(self, filename):
"""
. reads 1 file
. if there is a compilation error, print a warning
. get root cursor and recurse
. for each STRUCT_DECL, register a new struct type
. for each UNION_DECL, register a new union type
. for each TYPEDEF_DECL, register a new alias/typedef to the underlying type
- underlying type is cursor.type.get_declaration() for Record
. for each VAR_DECL, register a Variable
. for each TYPEREF ??
"""
index = Index.create()
self.tu = index.parse(filename, self.flags, options=self.tu_options)
if not self.tu:
log.warning('unable to load input')
return # depends on [control=['if'], data=[]]
if len(self.tu.diagnostics) > 0:
for x in self.tu.diagnostics:
log.warning(x.spelling)
if x.severity > 2:
log.warning('Source code has some error. Please fix.')
log.warning(x.spelling)
# code.interact(local=locals())
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['if'], data=[]]
root = self.tu.cursor
for node in root.get_children():
self.startElement(node) # depends on [control=['for'], data=['node']]
return |
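A standalone sketch of the same libclang flow that `parse` wraps, assuming the `clang` Python bindings are installed and can locate libclang; `sample.c` is a hypothetical input file:

```python
from clang.cindex import Index

index = Index.create()
tu = index.parse('sample.c', args=['-std=c11'])
for diag in tu.diagnostics:
    # Severity > 2 corresponds to errors, matching the check above.
    print(diag.severity, diag.spelling)
for node in tu.cursor.get_children():
    print(node.kind, node.spelling)
```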
async def open(self) -> '_BaseAgent':
"""
Context manager entry; open wallet.
For use when keeping agent open across multiple calls.
:return: current object
"""
LOGGER.debug('_BaseAgent.open >>>')
# Do not open pool independently: let relying party decide when to go on-line and off-line
await self.wallet.open()
LOGGER.debug('_BaseAgent.open <<<')
return self | <ast.AsyncFunctionDef object at 0x7da2054a5e70> | keyword[async] keyword[def] identifier[open] ( identifier[self] )-> literal[string] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[await] identifier[self] . identifier[wallet] . identifier[open] ()
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[return] identifier[self] | async def open(self) -> '_BaseAgent':
"""
Context manager entry; open wallet.
For use when keeping agent open across multiple calls.
:return: current object
"""
LOGGER.debug('_BaseAgent.open >>>')
# Do not open pool independently: let relying party decide when to go on-line and off-line
await self.wallet.open()
LOGGER.debug('_BaseAgent.open <<<')
return self |
def getBaseNameScope(cls):
"""
Get the root of the namespace
"""
s = NameScope(False)
s.setLevel(1)
s[0].update(cls._keywords_dict)
return s | def function[getBaseNameScope, parameter[cls]]:
constant[
Get the root of the namespace
]
variable[s] assign[=] call[name[NameScope], parameter[constant[False]]]
call[name[s].setLevel, parameter[constant[1]]]
call[call[name[s]][constant[0]].update, parameter[name[cls]._keywords_dict]]
return[name[s]] | keyword[def] identifier[getBaseNameScope] ( identifier[cls] ):
literal[string]
identifier[s] = identifier[NameScope] ( keyword[False] )
identifier[s] . identifier[setLevel] ( literal[int] )
identifier[s] [ literal[int] ]. identifier[update] ( identifier[cls] . identifier[_keywords_dict] )
keyword[return] identifier[s] | def getBaseNameScope(cls):
"""
Get the root of the namespace
"""
s = NameScope(False)
s.setLevel(1)
s[0].update(cls._keywords_dict)
return s |
def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
"""
if day_option == 'start':
return 1
elif day_option == 'end':
days_in_month = _days_in_month(other)
return days_in_month
elif day_option is None:
# Note: unlike `_shift_month`, _get_day_of_month does not
# allow day_option = None
raise NotImplementedError
else:
raise ValueError(day_option) | def function[_get_day_of_month, parameter[other, day_option]]:
constant[Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
]
if compare[name[day_option] equal[==] constant[start]] begin[:]
return[constant[1]] | keyword[def] identifier[_get_day_of_month] ( identifier[other] , identifier[day_option] ):
literal[string]
keyword[if] identifier[day_option] == literal[string] :
keyword[return] literal[int]
keyword[elif] identifier[day_option] == literal[string] :
identifier[days_in_month] = identifier[_days_in_month] ( identifier[other] )
keyword[return] identifier[days_in_month]
keyword[elif] identifier[day_option] keyword[is] keyword[None] :
keyword[raise] identifier[NotImplementedError]
keyword[else] :
keyword[raise] identifier[ValueError] ( identifier[day_option] ) | def _get_day_of_month(other, day_option):
"""Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
"""
if day_option == 'start':
return 1 # depends on [control=['if'], data=[]]
elif day_option == 'end':
days_in_month = _days_in_month(other)
return days_in_month # depends on [control=['if'], data=[]]
elif day_option is None:
# Note: unlike `_shift_month`, _get_day_of_month does not
# allow day_option = None
raise NotImplementedError # depends on [control=['if'], data=[]]
else:
raise ValueError(day_option) |
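Behavior sketch for `_get_day_of_month`; `_days_in_month` lives elsewhere in the source module, so a calendar-based stand-in is used here and a plain `datetime.date` stands in for the cftime object:

```python
import calendar
import datetime

def _days_in_month(other):
    # Stand-in for the module's helper: days in `other`'s month.
    return calendar.monthrange(other.year, other.month)[1]

d = datetime.date(2000, 2, 10)
print(_get_day_of_month(d, 'start'))  # 1
print(_get_day_of_month(d, 'end'))    # 29 (leap year)
```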
def expand_dates(df, columns=[]):
"""
generate year, month, day features from specified date features
"""
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day)
return df2 | def function[expand_dates, parameter[df, columns]]:
constant[
generate year, month, day features from specified date features
]
variable[columns] assign[=] call[name[df].columns.intersection, parameter[name[columns]]]
variable[df2] assign[=] call[name[df].reindex, parameter[]]
for taget[name[column]] in starred[name[columns]] begin[:]
call[name[df2]][binary_operation[name[column] + constant[_year]]] assign[=] call[call[name[df]][name[column]].apply, parameter[<ast.Lambda object at 0x7da1b23507f0>]]
call[name[df2]][binary_operation[name[column] + constant[_month]]] assign[=] call[call[name[df]][name[column]].apply, parameter[<ast.Lambda object at 0x7da1b2350ca0>]]
call[name[df2]][binary_operation[name[column] + constant[_day]]] assign[=] call[call[name[df]][name[column]].apply, parameter[<ast.Lambda object at 0x7da1b2350af0>]]
return[name[df2]] | keyword[def] identifier[expand_dates] ( identifier[df] , identifier[columns] =[]):
literal[string]
identifier[columns] = identifier[df] . identifier[columns] . identifier[intersection] ( identifier[columns] )
identifier[df2] = identifier[df] . identifier[reindex] ( identifier[columns] = identifier[set] ( identifier[df] . identifier[columns] ). identifier[difference] ( identifier[columns] ))
keyword[for] identifier[column] keyword[in] identifier[columns] :
identifier[df2] [ identifier[column] + literal[string] ]= identifier[df] [ identifier[column] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[year] )
identifier[df2] [ identifier[column] + literal[string] ]= identifier[df] [ identifier[column] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[month] )
identifier[df2] [ identifier[column] + literal[string] ]= identifier[df] [ identifier[column] ]. identifier[apply] ( keyword[lambda] identifier[x] : identifier[x] . identifier[day] )
keyword[return] identifier[df2] | def expand_dates(df, columns=[]):
"""
generate year, month, day features from specified date features
"""
columns = df.columns.intersection(columns)
df2 = df.reindex(columns=set(df.columns).difference(columns))
for column in columns:
df2[column + '_year'] = df[column].apply(lambda x: x.year)
df2[column + '_month'] = df[column].apply(lambda x: x.month)
df2[column + '_day'] = df[column].apply(lambda x: x.day) # depends on [control=['for'], data=['column']]
return df2 |
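Usage sketch for `expand_dates` with a small frame of `datetime.date` values (note the non-date columns come back via `reindex` over a `set`, so their relative order is not guaranteed):

```python
import datetime
import pandas as pd

df = pd.DataFrame({
    'id': [1, 2],
    'opened': [datetime.date(2020, 1, 15), datetime.date(2021, 6, 3)],
})
out = expand_dates(df, columns=['opened'])
print(sorted(out.columns))
# ['id', 'opened_day', 'opened_month', 'opened_year']
```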
def get_file_samples(file_ids):
"""Get TCGA associated sample barcodes for a list of file IDs.
Params
------
file_ids : Iterable
The file IDs.
Returns
-------
`pandas.DataFrame`
DataFrame with `file_id` and `sample_barcode` columns, one row per file ID.
"""
assert isinstance(file_ids, Iterable)
# query TCGA API to get sample barcodes associated with file IDs
payload = {
"filters":json.dumps({
"op":"in",
"content":{
"field":"files.file_id",
"value": list(file_ids),
}
}),
"fields":"file_id,cases.samples.submitter_id",
"size":10000
}
r = requests.post('https://gdc-api.nci.nih.gov/files', data=payload)
j = json.loads(r.content.decode('utf-8'))
file_samples = OrderedDict()
for hit in j['data']['hits']:
file_id = hit['file_id']
assert len(hit['cases']) == 1
case = hit['cases'][0]
assert len(case['samples']) == 1
sample = case['samples'][0]
sample_barcode = sample['submitter_id']
file_samples[file_id] = sample_barcode
df = pd.DataFrame.from_dict(file_samples, orient='index')
df = df.reset_index()
df.columns = ['file_id', 'sample_barcode']
return df | def function[get_file_samples, parameter[file_ids]]:
constant[Get TCGA-associated sample barcodes for a list of file IDs.
Parameters
----------
file_ids : Iterable
The file IDs.
Returns
-------
`pandas.DataFrame`
DataFrame with `file_id` and `sample_barcode` columns, one row per file ID.
]
assert[call[name[isinstance], parameter[name[file_ids], name[Iterable]]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da18f58f5e0>, <ast.Constant object at 0x7da18f58ec20>, <ast.Constant object at 0x7da18f58d8d0>], [<ast.Call object at 0x7da18f58c490>, <ast.Constant object at 0x7da18f58de40>, <ast.Constant object at 0x7da18f58e4d0>]]
variable[r] assign[=] call[name[requests].post, parameter[constant[https://gdc-api.nci.nih.gov/files]]]
variable[j] assign[=] call[name[json].loads, parameter[call[name[r].content.decode, parameter[constant[utf-8]]]]]
variable[file_samples] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[hit]] in starred[call[call[name[j]][constant[data]]][constant[hits]]] begin[:]
variable[file_id] assign[=] call[name[hit]][constant[file_id]]
assert[compare[call[name[len], parameter[call[name[hit]][constant[cases]]]] equal[==] constant[1]]]
variable[case] assign[=] call[call[name[hit]][constant[cases]]][constant[0]]
assert[compare[call[name[len], parameter[call[name[case]][constant[samples]]]] equal[==] constant[1]]]
variable[sample] assign[=] call[call[name[case]][constant[samples]]][constant[0]]
variable[sample_barcode] assign[=] call[name[sample]][constant[submitter_id]]
call[name[file_samples]][name[file_id]] assign[=] name[sample_barcode]
variable[df] assign[=] call[name[pd].DataFrame.from_dict, parameter[name[file_samples]]]
variable[df] assign[=] call[name[df].reset_index, parameter[]]
name[df].columns assign[=] list[[<ast.Constant object at 0x7da18f810c40>, <ast.Constant object at 0x7da18f813f40>]]
return[name[df]] | keyword[def] identifier[get_file_samples] ( identifier[file_ids] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[file_ids] , identifier[Iterable] )
identifier[payload] ={
literal[string] : identifier[json] . identifier[dumps] ({
literal[string] : literal[string] ,
literal[string] :{
literal[string] : literal[string] ,
literal[string] : identifier[list] ( identifier[file_ids] ),
}
}),
literal[string] : literal[string] ,
literal[string] : literal[int]
}
identifier[r] = identifier[requests] . identifier[post] ( literal[string] , identifier[data] = identifier[payload] )
identifier[j] = identifier[json] . identifier[loads] ( identifier[r] . identifier[content] . identifier[decode] ( literal[string] ))
identifier[file_samples] = identifier[OrderedDict] ()
keyword[for] identifier[hit] keyword[in] identifier[j] [ literal[string] ][ literal[string] ]:
identifier[file_id] = identifier[hit] [ literal[string] ]
keyword[assert] identifier[len] ( identifier[hit] [ literal[string] ])== literal[int]
identifier[case] = identifier[hit] [ literal[string] ][ literal[int] ]
keyword[assert] identifier[len] ( identifier[case] [ literal[string] ])== literal[int]
identifier[sample] = identifier[case] [ literal[string] ][ literal[int] ]
identifier[sample_barcode] = identifier[sample] [ literal[string] ]
identifier[file_samples] [ identifier[file_id] ]= identifier[sample_barcode]
identifier[df] = identifier[pd] . identifier[DataFrame] . identifier[from_dict] ( identifier[file_samples] , identifier[orient] = literal[string] )
identifier[df] = identifier[df] . identifier[reset_index] ()
identifier[df] . identifier[columns] =[ literal[string] , literal[string] ]
keyword[return] identifier[df] | def get_file_samples(file_ids):
"""Get TCGA associated sample barcodes for a list of file IDs.
Params
------
file_ids : Iterable
The file IDs.
Returns
-------
`pandas.DataFrame`
DataFrame with `file_id` and `sample_barcode` columns, one row per file ID.
"""
assert isinstance(file_ids, Iterable)
# query TCGA API to get sample barcodes associated with file IDs
payload = {'filters': json.dumps({'op': 'in', 'content': {'field': 'files.file_id', 'value': list(file_ids)}}), 'fields': 'file_id,cases.samples.submitter_id', 'size': 10000}
r = requests.post('https://gdc-api.nci.nih.gov/files', data=payload)
j = json.loads(r.content.decode('utf-8'))
file_samples = OrderedDict()
for hit in j['data']['hits']:
file_id = hit['file_id']
assert len(hit['cases']) == 1
case = hit['cases'][0]
assert len(case['samples']) == 1
sample = case['samples'][0]
sample_barcode = sample['submitter_id']
file_samples[file_id] = sample_barcode # depends on [control=['for'], data=['hit']]
df = pd.DataFrame.from_dict(file_samples, orient='index')
df = df.reset_index()
df.columns = ['file_id', 'sample_barcode']
return df |
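For reference, the GDC files endpoint expects the `filters` field as a JSON string; the payload built above serializes like this (hypothetical file IDs, no live request made):

```python
import json

filters = json.dumps({
    'op': 'in',
    'content': {'field': 'files.file_id', 'value': ['abc-123', 'def-456']},
})
print(filters)
# {"op": "in", "content": {"field": "files.file_id", "value": ["abc-123", "def-456"]}}
```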
def is_namedtuple(type_: Type[Any]) -> bool:
'''
Generated with typing.NamedTuple
'''
return _issubclass(type_, tuple) and hasattr(type_, '_field_types') and hasattr(type_, '_fields') | def function[is_namedtuple, parameter[type_]]:
constant[
Generated with typing.NamedTuple
]
return[<ast.BoolOp object at 0x7da20c993190>] | keyword[def] identifier[is_namedtuple] ( identifier[type_] : identifier[Type] [ identifier[Any] ])-> identifier[bool] :
literal[string]
keyword[return] identifier[_issubclass] ( identifier[type_] , identifier[tuple] ) keyword[and] identifier[hasattr] ( identifier[type_] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[type_] , literal[string] ) | def is_namedtuple(type_: Type[Any]) -> bool:
"""
Generated with typing.NamedTuple
"""
return _issubclass(type_, tuple) and hasattr(type_, '_field_types') and hasattr(type_, '_fields') |
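A quick check of `is_namedtuple`, treating everything as one module; note it keys on `_field_types`, which `typing.NamedTuple` only sets on Python 3.8 and earlier, so newer interpreters return False. The module's `_issubclass` wrapper is not shown above, so a guarded stand-in is assumed:

```python
from typing import NamedTuple

def _issubclass(a, b):
    # Stand-in for the module's guarded issubclass wrapper.
    return isinstance(a, type) and issubclass(a, b)

class Point(NamedTuple):
    x: int
    y: int

print(is_namedtuple(Point))  # True on Python <= 3.8
print(is_namedtuple(tuple))  # False
```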
def get_user_agent():
""" Obtain the default user agent string sent to the server after
a successful handshake.
"""
from sys import platform, version_info
template = "neobolt/{} Python/{}.{}.{}-{}-{} ({})"
fields = (version,) + tuple(version_info) + (platform,)
return template.format(*fields) | def function[get_user_agent, parameter[]]:
constant[ Obtain the default user agent string sent to the server after
a successful handshake.
]
from relative_module[sys] import module[platform], module[version_info]
variable[template] assign[=] constant[neobolt/{} Python/{}.{}.{}-{}-{} ({})]
variable[fields] assign[=] binary_operation[binary_operation[tuple[[<ast.Name object at 0x7da1b26acaf0>]] + call[name[tuple], parameter[name[version_info]]]] + tuple[[<ast.Name object at 0x7da1b26aee90>]]]
return[call[name[template].format, parameter[<ast.Starred object at 0x7da1b26ad570>]]] | keyword[def] identifier[get_user_agent] ():
literal[string]
keyword[from] identifier[sys] keyword[import] identifier[platform] , identifier[version_info]
identifier[template] = literal[string]
identifier[fields] =( identifier[version] ,)+ identifier[tuple] ( identifier[version_info] )+( identifier[platform] ,)
keyword[return] identifier[template] . identifier[format] (* identifier[fields] ) | def get_user_agent():
""" Obtain the default user agent string sent to the server after
a successful handshake.
"""
from sys import platform, version_info
template = 'neobolt/{} Python/{}.{}.{}-{}-{} ({})'
fields = (version,) + tuple(version_info) + (platform,)
return template.format(*fields) |
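Illustration of the template used by `get_user_agent`; `version` here is an assumed package version string (the real module defines it at import time):

```python
from sys import platform, version_info

version = '1.7.4'  # assumed; defined by the real module
template = "neobolt/{} Python/{}.{}.{}-{}-{} ({})"
# version_info contributes (major, minor, micro, releaselevel, serial)
fields = (version,) + tuple(version_info) + (platform,)
print(template.format(*fields))
# e.g. neobolt/1.7.4 Python/3.11.4-final-0 (linux)
```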
def setTimescale( self, timescale ):
"""
Sets the timescale value for this widget to the inputted value.
:param timescale | <XGanttWidget.Timescale>
"""
self._timescale = timescale
# show hour/minute scale
if timescale == XGanttWidget.Timescale.Minute:
self._cellWidth = 60 # (60 seconds)
self._dateStart = QDate.currentDate()
self._timeStart = QTime(0, 0, 0)
self._dateEnd = QDate.currentDate()
self._timeEnd = QTime(23, 59, 59)
elif timescale == XGanttWidget.Timescale.Hour:
self._cellWidth = 30 # (60 seconds / 2.0)
self._dateStart = QDate.currentDate()
self._timeStart = QTime(0, 0, 0)
self._dateEnd = QDate.currentDate()
self._timeEnd = QTime(23, 59, 59)
# show day/hour scale
elif timescale == XGanttWidget.Timescale.Day:
self._cellWidth = 30 # (60 minutes / 2.0)
self._dateStart = QDate.currentDate().addDays(-7)
self._timeStart = QTime(0, 0, 0)
self._dateEnd = QDate.currentDate().addDays(7)
self._timeEnd = QTime(23, 59, 59) | def function[setTimescale, parameter[self, timescale]]:
constant[
Sets the timescale value for this widget to the inputted value.
:param timescale | <XGanttWidget.Timescale>
]
name[self]._timescale assign[=] name[timescale]
if compare[name[timescale] equal[==] name[XGanttWidget].Timescale.Minute] begin[:]
name[self]._cellWidth assign[=] constant[60]
name[self]._dateStart assign[=] call[name[QDate].currentDate, parameter[]]
name[self]._timeStart assign[=] call[name[QTime], parameter[constant[0], constant[0], constant[0]]]
name[self]._dateEnd assign[=] call[name[QDate].currentDate, parameter[]]
name[self]._timeEnd assign[=] call[name[QTime], parameter[constant[23], constant[59], constant[59]]] | keyword[def] identifier[setTimescale] ( identifier[self] , identifier[timescale] ):
literal[string]
identifier[self] . identifier[_timescale] = identifier[timescale]
keyword[if] identifier[timescale] == identifier[XGanttWidget] . identifier[Timescale] . identifier[Minute] :
identifier[self] . identifier[_cellWidth] = literal[int]
identifier[self] . identifier[_dateStart] = identifier[QDate] . identifier[currentDate] ()
identifier[self] . identifier[_timeStart] = identifier[QTime] ( literal[int] , literal[int] , literal[int] )
identifier[self] . identifier[_dateEnd] = identifier[QDate] . identifier[currentDate] ()
identifier[self] . identifier[_timeEnd] = identifier[QTime] ( literal[int] , literal[int] , literal[int] )
keyword[elif] identifier[timescale] == identifier[XGanttWidget] . identifier[Timescale] . identifier[Hour] :
identifier[self] . identifier[_cellWidth] = literal[int]
identifier[self] . identifier[_dateStart] = identifier[QDate] . identifier[currentDate] ()
identifier[self] . identifier[_timeStart] = identifier[QTime] ( literal[int] , literal[int] , literal[int] )
identifier[self] . identifier[_dateEnd] = identifier[QDate] . identifier[currentDate] ()
identifier[self] . identifier[_timeEnd] = identifier[QTime] ( literal[int] , literal[int] , literal[int] )
keyword[elif] identifier[timescale] == identifier[XGanttWidget] . identifier[Timescale] . identifier[Day] :
identifier[self] . identifier[_cellWidth] = literal[int]
identifier[self] . identifier[_dateStart] = identifier[QDate] . identifier[currentDate] (). identifier[addDays] (- literal[int] )
identifier[self] . identifier[_timeStart] = identifier[QTime] ( literal[int] , literal[int] , literal[int] )
identifier[self] . identifier[_dateEnd] = identifier[QDate] . identifier[currentDate] (). identifier[addDays] ( literal[int] )
identifier[self] . identifier[_timeEnd] = identifier[QTime] ( literal[int] , literal[int] , literal[int] ) | def setTimescale(self, timescale):
"""
Sets the timescale value for this widget to the inputted value.
:param timescale | <XGanttWidget.Timescale>
"""
self._timescale = timescale # show hour/minute scale
if timescale == XGanttWidget.Timescale.Minute:
self._cellWidth = 60 # (60 seconds)
self._dateStart = QDate.currentDate()
self._timeStart = QTime(0, 0, 0)
self._dateEnd = QDate.currentDate()
self._timeEnd = QTime(23, 59, 59) # depends on [control=['if'], data=[]]
elif timescale == XGanttWidget.Timescale.Hour:
self._cellWidth = 30 # (60 seconds / 2.0)
self._dateStart = QDate.currentDate()
self._timeStart = QTime(0, 0, 0)
self._dateEnd = QDate.currentDate()
self._timeEnd = QTime(23, 59, 59) # depends on [control=['if'], data=[]] # show day/hour scale
elif timescale == XGanttWidget.Timescale.Day:
self._cellWidth = 30 # (60 minutes / 2.0)
self._dateStart = QDate.currentDate().addDays(-7)
self._timeStart = QTime(0, 0, 0)
self._dateEnd = QDate.currentDate().addDays(7)
self._timeEnd = QTime(23, 59, 59) # depends on [control=['if'], data=[]] |
def _normalize_custom_param_name(name):
"""Replace curved quotes with straight quotes in a custom parameter name.
These should be the only keys with problematic (non-ascii) characters,
since they can be user-generated.
"""
replacements = (("\u2018", "'"), ("\u2019", "'"), ("\u201C", '"'), ("\u201D", '"'))
for orig, replacement in replacements:
name = name.replace(orig, replacement)
return name | def function[_normalize_custom_param_name, parameter[name]]:
constant[Replace curved quotes with straight quotes in a custom parameter name.
These should be the only keys with problematic (non-ascii) characters,
since they can be user-generated.
]
variable[replacements] assign[=] tuple[[<ast.Tuple object at 0x7da1b03caf50>, <ast.Tuple object at 0x7da1b03c8c10>, <ast.Tuple object at 0x7da1b03c9330>, <ast.Tuple object at 0x7da1b03c9150>]]
for taget[tuple[[<ast.Name object at 0x7da1b03ca680>, <ast.Name object at 0x7da1b03ca860>]]] in starred[name[replacements]] begin[:]
variable[name] assign[=] call[name[name].replace, parameter[name[orig], name[replacement]]]
return[name[name]] | keyword[def] identifier[_normalize_custom_param_name] ( identifier[name] ):
literal[string]
identifier[replacements] =(( literal[string] , literal[string] ),( literal[string] , literal[string] ),( literal[string] , literal[string] ),( literal[string] , literal[string] ))
keyword[for] identifier[orig] , identifier[replacement] keyword[in] identifier[replacements] :
identifier[name] = identifier[name] . identifier[replace] ( identifier[orig] , identifier[replacement] )
keyword[return] identifier[name] | def _normalize_custom_param_name(name):
"""Replace curved quotes with straight quotes in a custom parameter name.
These should be the only keys with problematic (non-ascii) characters,
since they can be user-generated.
"""
replacements = (('‘', "'"), ('’', "'"), ('“', '"'), ('”', '"'))
for (orig, replacement) in replacements:
name = name.replace(orig, replacement) # depends on [control=['for'], data=[]]
return name |
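Example for `_normalize_custom_param_name`, straightening the four curly-quote code points handled above:

```python
name = _normalize_custom_param_name('\u201ccampaign\u201d \u2018name\u2019')
print(name)  # "campaign" 'name'
```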
def begin_x(self):
"""
Return the X-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
x, cx, flipH = cxnSp.x, cxnSp.cx, cxnSp.flipH
begin_x = x+cx if flipH else x
return Emu(begin_x) | def function[begin_x, parameter[self]]:
constant[
Return the X-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
]
variable[cxnSp] assign[=] name[self]._element
<ast.Tuple object at 0x7da18ede4310> assign[=] tuple[[<ast.Attribute object at 0x7da18ede75b0>, <ast.Attribute object at 0x7da18ede5d50>, <ast.Attribute object at 0x7da18ede4790>]]
variable[begin_x] assign[=] <ast.IfExp object at 0x7da18ede7280>
return[call[name[Emu], parameter[name[begin_x]]]] | keyword[def] identifier[begin_x] ( identifier[self] ):
literal[string]
identifier[cxnSp] = identifier[self] . identifier[_element]
identifier[x] , identifier[cx] , identifier[flipH] = identifier[cxnSp] . identifier[x] , identifier[cxnSp] . identifier[cx] , identifier[cxnSp] . identifier[flipH]
identifier[begin_x] = identifier[x] + identifier[cx] keyword[if] identifier[flipH] keyword[else] identifier[x]
keyword[return] identifier[Emu] ( identifier[begin_x] ) | def begin_x(self):
"""
Return the X-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
(x, cx, flipH) = (cxnSp.x, cxnSp.cx, cxnSp.flipH)
begin_x = x + cx if flipH else x
return Emu(begin_x) |
def getTypedValueOrException(self, row):
'Returns the properly-typed value for the given row at this column, or an Exception object.'
return wrapply(self.type, wrapply(self.getValue, row)) | def function[getTypedValueOrException, parameter[self, row]]:
constant[Returns the properly-typed value for the given row at this column, or an Exception object.]
return[call[name[wrapply], parameter[name[self].type, call[name[wrapply], parameter[name[self].getValue, name[row]]]]]] | keyword[def] identifier[getTypedValueOrException] ( identifier[self] , identifier[row] ):
literal[string]
keyword[return] identifier[wrapply] ( identifier[self] . identifier[type] , identifier[wrapply] ( identifier[self] . identifier[getValue] , identifier[row] )) | def getTypedValueOrException(self, row):
"""Returns the properly-typed value for the given row at this column, or an Exception object."""
return wrapply(self.type, wrapply(self.getValue, row)) |
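`wrapply` is an external helper not shown in this row; a minimal stand-in consistent with the docstring (apply a function but hand back the exception object instead of raising, passing through a prior failure) might look like:

```python
def wrapply(func, *args):
    # Pass through a failure from an earlier wrapply in the chain.
    if args and isinstance(args[0], Exception):
        return args[0]
    try:
        return func(*args)
    except Exception as e:
        return e
```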
def get_output(script, expanded):
"""Get output of the script.
:param script: Console script.
:type script: str
:param expanded: Console script with expanded aliases.
:type expanded: str
:rtype: str
"""
if shell_logger.is_available():
return shell_logger.get_output(script)
if settings.instant_mode:
return read_log.get_output(script)
else:
return rerun.get_output(script, expanded) | def function[get_output, parameter[script, expanded]]:
constant[Get output of the script.
:param script: Console script.
:type script: str
:param expanded: Console script with expanded aliases.
:type expanded: str
:rtype: str
]
if call[name[shell_logger].is_available, parameter[]] begin[:]
return[call[name[shell_logger].get_output, parameter[name[script]]]]
if name[settings].instant_mode begin[:]
return[call[name[read_log].get_output, parameter[name[script]]]] | keyword[def] identifier[get_output] ( identifier[script] , identifier[expanded] ):
literal[string]
keyword[if] identifier[shell_logger] . identifier[is_available] ():
keyword[return] identifier[shell_logger] . identifier[get_output] ( identifier[script] )
keyword[if] identifier[settings] . identifier[instant_mode] :
keyword[return] identifier[read_log] . identifier[get_output] ( identifier[script] )
keyword[else] :
keyword[return] identifier[rerun] . identifier[get_output] ( identifier[script] , identifier[expanded] ) | def get_output(script, expanded):
"""Get output of the script.
:param script: Console script.
:type script: str
:param expanded: Console script with expanded aliases.
:type expanded: str
:rtype: str
"""
if shell_logger.is_available():
return shell_logger.get_output(script) # depends on [control=['if'], data=[]]
if settings.instant_mode:
return read_log.get_output(script) # depends on [control=['if'], data=[]]
else:
return rerun.get_output(script, expanded) |
def store_node_label_meta(self, x, y, tx, ty, rot):
"""
This function stores coordinate-related metadata for a node
This function should not be called by the user
:param x: x location of node label or number
:type x: np.float64
:param y: y location of node label or number
:type y: np.float64
:param tx: text location x of node label (numbers)
:type tx: np.float64
:param ty: text location y of node label (numbers)
:type ty: np.float64
:param rot: rotation angle of the text (rotation)
:type rot: float
"""
# Store computed values
self.node_label_coords["x"].append(x)
self.node_label_coords["y"].append(y)
self.node_label_coords["tx"].append(tx)
self.node_label_coords["ty"].append(ty)
# Computes the text alignment for x
if x == 0:
self.node_label_aligns["has"].append("center")
elif x > 0:
self.node_label_aligns["has"].append("left")
else:
self.node_label_aligns["has"].append("right")
# Computes the text alignment for y
if self.node_label_layout == "rotate" or y == 0:
self.node_label_aligns["vas"].append("center")
elif y > 0:
self.node_label_aligns["vas"].append("bottom")
else:
self.node_label_aligns["vas"].append("top")
self.node_label_rotation.append(rot) | def function[store_node_label_meta, parameter[self, x, y, tx, ty, rot]]:
constant[
This function stores coordinate-related metadata for a node
This function should not be called by the user
:param x: x location of node label or number
:type x: np.float64
:param y: y location of node label or number
:type y: np.float64
:param tx: text location x of node label (numbers)
:type tx: np.float64
:param ty: text location y of node label (numbers)
:type ty: np.float64
:param rot: rotation angle of the text (rotation)
:type rot: float
]
call[call[name[self].node_label_coords][constant[x]].append, parameter[name[x]]]
call[call[name[self].node_label_coords][constant[y]].append, parameter[name[y]]]
call[call[name[self].node_label_coords][constant[tx]].append, parameter[name[tx]]]
call[call[name[self].node_label_coords][constant[ty]].append, parameter[name[ty]]]
if compare[name[x] equal[==] constant[0]] begin[:]
call[call[name[self].node_label_aligns][constant[has]].append, parameter[constant[center]]]
if <ast.BoolOp object at 0x7da1b1d99a80> begin[:]
call[call[name[self].node_label_aligns][constant[vas]].append, parameter[constant[center]]]
call[name[self].node_label_rotation.append, parameter[name[rot]]] | keyword[def] identifier[store_node_label_meta] ( identifier[self] , identifier[x] , identifier[y] , identifier[tx] , identifier[ty] , identifier[rot] ):
literal[string]
identifier[self] . identifier[node_label_coords] [ literal[string] ]. identifier[append] ( identifier[x] )
identifier[self] . identifier[node_label_coords] [ literal[string] ]. identifier[append] ( identifier[y] )
identifier[self] . identifier[node_label_coords] [ literal[string] ]. identifier[append] ( identifier[tx] )
identifier[self] . identifier[node_label_coords] [ literal[string] ]. identifier[append] ( identifier[ty] )
keyword[if] identifier[x] == literal[int] :
identifier[self] . identifier[node_label_aligns] [ literal[string] ]. identifier[append] ( literal[string] )
keyword[elif] identifier[x] > literal[int] :
identifier[self] . identifier[node_label_aligns] [ literal[string] ]. identifier[append] ( literal[string] )
keyword[else] :
identifier[self] . identifier[node_label_aligns] [ literal[string] ]. identifier[append] ( literal[string] )
keyword[if] identifier[self] . identifier[node_label_layout] == literal[string] keyword[or] identifier[y] == literal[int] :
identifier[self] . identifier[node_label_aligns] [ literal[string] ]. identifier[append] ( literal[string] )
keyword[elif] identifier[y] > literal[int] :
identifier[self] . identifier[node_label_aligns] [ literal[string] ]. identifier[append] ( literal[string] )
keyword[else] :
identifier[self] . identifier[node_label_aligns] [ literal[string] ]. identifier[append] ( literal[string] )
identifier[self] . identifier[node_label_rotation] . identifier[append] ( identifier[rot] ) | def store_node_label_meta(self, x, y, tx, ty, rot):
"""
This function stores coordinate-related metadata for a node
This function should not be called by the user
:param x: x location of node label or number
:type x: np.float64
:param y: y location of node label or number
:type y: np.float64
:param tx: text location x of node label (numbers)
:type tx: np.float64
:param ty: text location y of node label (numbers)
:type ty: np.float64
:param rot: rotation angle of the text (rotation)
:type rot: float
"""
# Store computed values
self.node_label_coords['x'].append(x)
self.node_label_coords['y'].append(y)
self.node_label_coords['tx'].append(tx)
self.node_label_coords['ty'].append(ty)
# Computes the text alignment for x
if x == 0:
self.node_label_aligns['has'].append('center') # depends on [control=['if'], data=[]]
elif x > 0:
self.node_label_aligns['has'].append('left') # depends on [control=['if'], data=[]]
else:
self.node_label_aligns['has'].append('right')
# Computes the text alignment for y
if self.node_label_layout == 'rotate' or y == 0:
self.node_label_aligns['vas'].append('center') # depends on [control=['if'], data=[]]
elif y > 0:
self.node_label_aligns['vas'].append('bottom') # depends on [control=['if'], data=[]]
else:
self.node_label_aligns['vas'].append('top')
self.node_label_rotation.append(rot) |
def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](
error.args[1]['message'])
raise error
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response')
return json.loads(http_response.body.decode('utf-8')) | def function[_process_response, parameter[response]]:
constant[Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
]
variable[error] assign[=] call[name[response].exception, parameter[]]
if name[error] begin[:]
if call[name[isinstance], parameter[name[error], name[aws_exceptions].AWSError]] begin[:]
if compare[call[call[name[error].args][constant[1]]][constant[type]] in name[exceptions].MAP] begin[:]
<ast.Raise object at 0x7da20e74bc70>
<ast.Raise object at 0x7da20e7483a0>
variable[http_response] assign[=] call[name[response].result, parameter[]]
if <ast.BoolOp object at 0x7da20e748820> begin[:]
<ast.Raise object at 0x7da20e74baf0>
return[call[name[json].loads, parameter[call[name[http_response].body.decode, parameter[constant[utf-8]]]]]] | keyword[def] identifier[_process_response] ( identifier[response] ):
literal[string]
identifier[error] = identifier[response] . identifier[exception] ()
keyword[if] identifier[error] :
keyword[if] identifier[isinstance] ( identifier[error] , identifier[aws_exceptions] . identifier[AWSError] ):
keyword[if] identifier[error] . identifier[args] [ literal[int] ][ literal[string] ] keyword[in] identifier[exceptions] . identifier[MAP] :
keyword[raise] identifier[exceptions] . identifier[MAP] [ identifier[error] . identifier[args] [ literal[int] ][ literal[string] ]](
identifier[error] . identifier[args] [ literal[int] ][ literal[string] ])
keyword[raise] identifier[error]
identifier[http_response] = identifier[response] . identifier[result] ()
keyword[if] keyword[not] identifier[http_response] keyword[or] keyword[not] identifier[http_response] . identifier[body] :
keyword[raise] identifier[exceptions] . identifier[DynamoDBException] ( literal[string] )
keyword[return] identifier[json] . identifier[loads] ( identifier[http_response] . identifier[body] . identifier[decode] ( literal[string] )) | def _process_response(response):
"""Process the raw AWS response, returning either the mapped exception
or deserialized response.
:param tornado.concurrent.Future response: The request future
:rtype: dict or list
:raises: sprockets_dynamodb.exceptions.DynamoDBException
"""
error = response.exception()
if error:
if isinstance(error, aws_exceptions.AWSError):
if error.args[1]['type'] in exceptions.MAP:
raise exceptions.MAP[error.args[1]['type']](error.args[1]['message']) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
raise error # depends on [control=['if'], data=[]]
http_response = response.result()
if not http_response or not http_response.body:
raise exceptions.DynamoDBException('empty response') # depends on [control=['if'], data=[]]
return json.loads(http_response.body.decode('utf-8')) |
def max_bipartite_matching2(bigraph):
"""Bipartie maximum matching
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:comment: U and V can have different cardinalities
:returns: matching list, match[v] == u iff (u, v) in matching
:complexity: `O(|V|*|E|)`
"""
nU = len(bigraph)
# the following line works only in Python version ≥ 2.5
# nV = max(max(adjlist, default=-1) for adjlist in bigraph) + 1
nV = 0
for adjlist in bigraph:
for v in adjlist:
if v + 1 > nV:
nV = v + 1
match = [None] * nV
for u in range(nU):
augment(u, bigraph, [False] * nV, match)
return match | def function[max_bipartite_matching2, parameter[bigraph]]:
constant[Bipartite maximum matching
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:comment: U and V can have different cardinalities
:returns: matching list, match[v] == u iff (u, v) in matching
:complexity: `O(|V|*|E|)`
]
variable[nU] assign[=] call[name[len], parameter[name[bigraph]]]
variable[nV] assign[=] constant[0]
for taget[name[adjlist]] in starred[name[bigraph]] begin[:]
for taget[name[v]] in starred[name[adjlist]] begin[:]
if compare[binary_operation[name[v] + constant[1]] greater[>] name[nV]] begin[:]
variable[nV] assign[=] binary_operation[name[v] + constant[1]]
variable[match] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b07cd2d0>]] * name[nV]]
for taget[name[u]] in starred[call[name[range], parameter[name[nU]]]] begin[:]
call[name[augment], parameter[name[u], name[bigraph], binary_operation[list[[<ast.Constant object at 0x7da1b07cf130>]] * name[nV]], name[match]]]
return[name[match]] | keyword[def] identifier[max_bipartite_matching2] ( identifier[bigraph] ):
literal[string]
identifier[nU] = identifier[len] ( identifier[bigraph] )
identifier[nV] = literal[int]
keyword[for] identifier[adjlist] keyword[in] identifier[bigraph] :
keyword[for] identifier[v] keyword[in] identifier[adjlist] :
keyword[if] identifier[v] + literal[int] > identifier[nV] :
identifier[nV] = identifier[v] + literal[int]
identifier[match] =[ keyword[None] ]* identifier[nV]
keyword[for] identifier[u] keyword[in] identifier[range] ( identifier[nU] ):
identifier[augment] ( identifier[u] , identifier[bigraph] ,[ keyword[False] ]* identifier[nV] , identifier[match] )
keyword[return] identifier[match] | def max_bipartite_matching2(bigraph):
"""Bipartie maximum matching
:param bigraph: adjacency list, index = vertex in U,
value = neighbor list in V
:comment: U and V can have different cardinalities
:returns: matching list, match[v] == u iff (u, v) in matching
:complexity: `O(|V|*|E|)`
"""
nU = len(bigraph)
# the following line works only in Python version ≥ 2.5
# nV = max(max(adjlist, default=-1) for adjlist in bigraph) + 1
nV = 0
for adjlist in bigraph:
for v in adjlist:
if v + 1 > nV:
nV = v + 1 # depends on [control=['if'], data=['nV']] # depends on [control=['for'], data=['v']] # depends on [control=['for'], data=['adjlist']]
match = [None] * nV
for u in range(nU):
augment(u, bigraph, [False] * nV, match) # depends on [control=['for'], data=['u']]
return match |
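`augment` is defined elsewhere in the source module; a standard Kuhn augmenting-path version is sketched below so the row above can be run end to end:

```python
def augment(u, bigraph, visit, match):
    # Try to find an augmenting path from U-vertex u (Kuhn's algorithm).
    for v in bigraph[u]:
        if not visit[v]:
            visit[v] = True
            if match[v] is None or augment(match[v], bigraph, visit, match):
                match[v] = u
                return True
    return False

bigraph = [[0, 1], [0], [2]]   # U = {0, 1, 2}; adjacency lists into V
print(max_bipartite_matching2(bigraph))  # [1, 0, 2]
```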
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._attachment_public_uuid is not None:
return False
if self._content_type is not None:
return False
if self._height is not None:
return False
if self._width is not None:
return False
return True | def function[is_all_field_none, parameter[self]]:
constant[
:rtype: bool
]
if compare[name[self]._attachment_public_uuid is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._content_type is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._height is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._width is_not constant[None]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_all_field_none] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_attachment_public_uuid] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_content_type] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_height] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_width] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_all_field_none(self):
"""
:rtype: bool
"""
if self._attachment_public_uuid is not None:
return False # depends on [control=['if'], data=[]]
if self._content_type is not None:
return False # depends on [control=['if'], data=[]]
if self._height is not None:
return False # depends on [control=['if'], data=[]]
if self._width is not None:
return False # depends on [control=['if'], data=[]]
return True |
def getmessage(self) -> str:
""" parse self into unicode string as message content """
image = {}
for key, default in vars(self.__class__).items():
if not key.startswith('_') and key !='' and (not key in vars(QueueMessage).items()):
if isinstance(default, datetime.date):
image[key] = safe_cast(getattr(self, key, default), str, dformat=self._dateformat)
if isinstance(default, datetime.datetime):
image[key] = safe_cast(getattr(self, key, default), str, dformat=self._datetimeformat)
else:
image[key] = getattr(self, key, default)
return str(image) | def function[getmessage, parameter[self]]:
constant[ Parse self into a unicode string as message content ]
variable[image] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da207f994b0>, <ast.Name object at 0x7da207f9ab00>]]] in starred[call[call[name[vars], parameter[name[self].__class__]].items, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da207f99540> begin[:]
if call[name[isinstance], parameter[name[default], name[datetime].date]] begin[:]
call[name[image]][name[key]] assign[=] call[name[safe_cast], parameter[call[name[getattr], parameter[name[self], name[key], name[default]]], name[str]]]
if call[name[isinstance], parameter[name[default], name[datetime].datetime]] begin[:]
call[name[image]][name[key]] assign[=] call[name[safe_cast], parameter[call[name[getattr], parameter[name[self], name[key], name[default]]], name[str]]]
return[call[name[str], parameter[name[image]]]] | keyword[def] identifier[getmessage] ( identifier[self] )-> identifier[str] :
literal[string]
identifier[image] ={}
keyword[for] identifier[key] , identifier[default] keyword[in] identifier[vars] ( identifier[self] . identifier[__class__] ). identifier[items] ():
keyword[if] keyword[not] identifier[key] . identifier[startswith] ( literal[string] ) keyword[and] identifier[key] != literal[string] keyword[and] ( keyword[not] identifier[key] keyword[in] identifier[vars] ( identifier[QueueMessage] ). identifier[items] ()):
keyword[if] identifier[isinstance] ( identifier[default] , identifier[datetime] . identifier[date] ):
identifier[image] [ identifier[key] ]= identifier[safe_cast] ( identifier[getattr] ( identifier[self] , identifier[key] , identifier[default] ), identifier[str] , identifier[dformat] = identifier[self] . identifier[_dateformat] )
keyword[if] identifier[isinstance] ( identifier[default] , identifier[datetime] . identifier[datetime] ):
identifier[image] [ identifier[key] ]= identifier[safe_cast] ( identifier[getattr] ( identifier[self] , identifier[key] , identifier[default] ), identifier[str] , identifier[dformat] = identifier[self] . identifier[_datetimeformat] )
keyword[else] :
identifier[image] [ identifier[key] ]= identifier[getattr] ( identifier[self] , identifier[key] , identifier[default] )
keyword[return] identifier[str] ( identifier[image] ) | def getmessage(self) -> str:
""" parse self into unicode string as message content """
image = {}
for (key, default) in vars(self.__class__).items():
if not key.startswith('_') and key != '' and (not key in vars(QueueMessage).items()):
if isinstance(default, datetime.date):
image[key] = safe_cast(getattr(self, key, default), str, dformat=self._dateformat) # depends on [control=['if'], data=[]]
if isinstance(default, datetime.datetime):
image[key] = safe_cast(getattr(self, key, default), str, dformat=self._datetimeformat) # depends on [control=['if'], data=[]]
else:
image[key] = getattr(self, key, default) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return str(image) |
def _parse_attributes(self, attributes):
""" Ensure compliance with the spec's attributes section
Specifically, the attributes object of the single resource
object. This contains the key / values to be mapped to the
model.
:param attributes:
dict JSON API attributes object
"""
link = 'jsonapi.org/format/#document-resource-object-attributes'
if not isinstance(attributes, dict):
self.fail('The JSON API resource object attributes key MUST '
'be a hash.', link)
elif 'id' in attributes or 'type' in attributes:
self.fail('A field name of `id` or `type` is not allowed in '
'the attributes object. They should be top-level '
'keys.', link) | def function[_parse_attributes, parameter[self, attributes]]:
constant[ Ensure compliance with the spec's attributes section
Specifically, the attributes object of the single resource
object. This contains the key / values to be mapped to the
model.
:param attributes:
dict JSON API attributes object
]
variable[link] assign[=] constant[jsonapi.org/format/#document-resource-object-attributes]
if <ast.UnaryOp object at 0x7da18fe93310> begin[:]
call[name[self].fail, parameter[constant[The JSON API resource object attributes key MUST be a hash.], name[link]]] | keyword[def] identifier[_parse_attributes] ( identifier[self] , identifier[attributes] ):
literal[string]
identifier[link] = literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[attributes] , identifier[dict] ):
identifier[self] . identifier[fail] ( literal[string]
literal[string] , identifier[link] )
keyword[elif] literal[string] keyword[in] identifier[attributes] keyword[or] literal[string] keyword[in] identifier[attributes] :
identifier[self] . identifier[fail] ( literal[string]
literal[string]
literal[string] , identifier[link] ) | def _parse_attributes(self, attributes):
""" Ensure compliance with the spec's attributes section
Specifically, the attributes object of the single resource
object. This contains the key / values to be mapped to the
model.
:param attributes:
dict JSON API attributes object
"""
link = 'jsonapi.org/format/#document-resource-object-attributes'
if not isinstance(attributes, dict):
self.fail('The JSON API resource object attributes key MUST be a hash.', link) # depends on [control=['if'], data=[]]
elif 'id' in attributes or 'type' in attributes:
self.fail('A field name of `id` or `type` is not allowed in the attributes object. They should be top-level keys.', link) # depends on [control=['if'], data=[]] |
def get_scope_path(self, scope_separator="::"):
"""
Generate a string that represents this component's declaration namespace
scope.
Parameters
----------
scope_separator: str
Override the separator between namespace scopes
"""
if self.parent_scope is None:
return ""
elif isinstance(self.parent_scope, Root):
return ""
else:
parent_path = self.parent_scope.get_scope_path(scope_separator)
if parent_path:
return(
parent_path
+ scope_separator
+ self.parent_scope.type_name
)
else:
return self.parent_scope.type_name | def function[get_scope_path, parameter[self, scope_separator]]:
constant[
Generate a string that represents this component's declaration namespace
scope.
Parameters
----------
scope_separator: str
Override the separator between namespace scopes
]
if compare[name[self].parent_scope is constant[None]] begin[:]
return[constant[]] | keyword[def] identifier[get_scope_path] ( identifier[self] , identifier[scope_separator] = literal[string] ):
literal[string]
keyword[if] identifier[self] . identifier[parent_scope] keyword[is] keyword[None] :
keyword[return] literal[string]
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[parent_scope] , identifier[Root] ):
keyword[return] literal[string]
keyword[else] :
identifier[parent_path] = identifier[self] . identifier[parent_scope] . identifier[get_scope_path] ( identifier[scope_separator] )
keyword[if] identifier[parent_path] :
keyword[return] (
identifier[parent_path]
+ identifier[scope_separator]
+ identifier[self] . identifier[parent_scope] . identifier[type_name]
)
keyword[else] :
keyword[return] identifier[self] . identifier[parent_scope] . identifier[type_name] | def get_scope_path(self, scope_separator='::'):
"""
Generate a string that represents this component's declaration namespace
scope.
Parameters
----------
scope_separator: str
Override the separator between namespace scopes
"""
if self.parent_scope is None:
return '' # depends on [control=['if'], data=[]]
elif isinstance(self.parent_scope, Root):
return '' # depends on [control=['if'], data=[]]
else:
parent_path = self.parent_scope.get_scope_path(scope_separator)
if parent_path:
return parent_path + scope_separator + self.parent_scope.type_name # depends on [control=['if'], data=[]]
else:
return self.parent_scope.type_name |
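A self-contained mock showing the recursion (Root and the Node class below are illustrative stand-ins for the real component model):

class Root:
    pass

class Node:
    def __init__(self, type_name, parent_scope):
        self.type_name = type_name
        self.parent_scope = parent_scope

    def get_scope_path(self, scope_separator="::"):
        # Same logic as above: stop at Root, otherwise prepend the parent path.
        if self.parent_scope is None or isinstance(self.parent_scope, Root):
            return ""
        parent_path = self.parent_scope.get_scope_path(scope_separator)
        if parent_path:
            return parent_path + scope_separator + self.parent_scope.type_name
        return self.parent_scope.type_name

top = Node("top", Root())
reg = Node("regblock", top)
fld = Node("field", reg)
print(fld.get_scope_path())   # -> "top::regblock"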
def from_raw(self, raw: RawScalar) -> Optional[ScalarValue]:
"""Return a cooked value of the receiver type.
Args:
raw: Raw value obtained from JSON parser.
"""
if isinstance(raw, str):
return raw | def function[from_raw, parameter[self, raw]]:
constant[Return a cooked value of the receiver type.
Args:
raw: Raw value obtained from JSON parser.
]
if call[name[isinstance], parameter[name[raw], name[str]]] begin[:]
return[name[raw]] | keyword[def] identifier[from_raw] ( identifier[self] , identifier[raw] : identifier[RawScalar] )-> identifier[Optional] [ identifier[ScalarValue] ]:
literal[string]
keyword[if] identifier[isinstance] ( identifier[raw] , identifier[str] ):
keyword[return] identifier[raw] | def from_raw(self, raw: RawScalar) -> Optional[ScalarValue]:
"""Return a cooked value of the receiver type.
Args:
raw: Raw value obtained from JSON parser.
"""
if isinstance(raw, str):
return raw # depends on [control=['if'], data=[]] |
def _index_by_name(self, name):
""":return: index of an item with name, or -1 if not found"""
for i, t in enumerate(self._cache):
if t[2] == name:
return i
# END found item
# END for each item in cache
return -1 | def function[_index_by_name, parameter[self, name]]:
constant[:return: index of an item with name, or -1 if not found]
for taget[tuple[[<ast.Name object at 0x7da1b22482b0>, <ast.Name object at 0x7da1b224ab90>]]] in starred[call[name[enumerate], parameter[name[self]._cache]]] begin[:]
if compare[call[name[t]][constant[2]] equal[==] name[name]] begin[:]
return[name[i]]
return[<ast.UnaryOp object at 0x7da1b2248760>] | keyword[def] identifier[_index_by_name] ( identifier[self] , identifier[name] ):
literal[string]
keyword[for] identifier[i] , identifier[t] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_cache] ):
keyword[if] identifier[t] [ literal[int] ]== identifier[name] :
keyword[return] identifier[i]
keyword[return] - literal[int] | def _index_by_name(self, name):
""":return: index of an item with name, or -1 if not found"""
for (i, t) in enumerate(self._cache):
if t[2] == name:
return i # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# END found item
# END for each item in cache
return -1 |
def tabulate(self):
"""Return a simplified version of the spectrum.
A composite spectrum can become overly complicated when it
has too many components and sub-components. This method
copies the following into a simple (tabulated) source spectrum:
* Name
* Wavelength array and unit
* Flux array and unit
Returns
-------
sp : `ArraySourceSpectrum`
Tabulated source spectrum.
"""
sp = ArraySourceSpectrum(wave=self.wave,
flux=self.flux,
waveunits=self.waveunits,
fluxunits=self.fluxunits,
name='%s (tabulated)' % self.name)
return sp | def function[tabulate, parameter[self]]:
constant[Return a simplified version of the spectrum.
A composite spectrum can become overly complicated when it
has too many components and sub-components. This method
copies the following into a simple (tabulated) source spectrum:
* Name
* Wavelength array and unit
* Flux array and unit
Returns
-------
sp : `ArraySourceSpectrum`
Tabulated source spectrum.
]
variable[sp] assign[=] call[name[ArraySourceSpectrum], parameter[]]
return[name[sp]] | keyword[def] identifier[tabulate] ( identifier[self] ):
literal[string]
identifier[sp] = identifier[ArraySourceSpectrum] ( identifier[wave] = identifier[self] . identifier[wave] ,
identifier[flux] = identifier[self] . identifier[flux] ,
identifier[waveunits] = identifier[self] . identifier[waveunits] ,
identifier[fluxunits] = identifier[self] . identifier[fluxunits] ,
identifier[name] = literal[string] % identifier[self] . identifier[name] )
keyword[return] identifier[sp] | def tabulate(self):
"""Return a simplified version of the spectrum.
A composite spectrum can become overly complicated when it
has too many components and sub-components. This method
copies the following into a simple (tabulated) source spectrum:
* Name
* Wavelength array and unit
* Flux array and unit
Returns
-------
sp : `ArraySourceSpectrum`
Tabulated source spectrum.
"""
sp = ArraySourceSpectrum(wave=self.wave, flux=self.flux, waveunits=self.waveunits, fluxunits=self.fluxunits, name='%s (tabulated)' % self.name)
return sp |
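A usage sketch, assuming this method belongs to pysynphot's CompositeSourceSpectrum (the spectra below are illustrative):

import pysynphot as S

bb = S.BlackBody(5000)
pl = S.PowerLaw(10000, -2)
composite = bb + pl            # composite spectrum with two components
flat = composite.tabulate()    # plain ArraySourceSpectrum copy
print(flat.name)               # original name with ' (tabulated)' appended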
def transform(self, Y):
r"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
Y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1].
"""
check_is_fitted(self, 'X_fit_')
n_samples_x, n_features = self.X_fit_.shape
Y = numpy.asarray(Y)
if Y.shape[1] != n_features:
raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1]))
n_samples_y = Y.shape[0]
mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64),
self.X_fit_[:, self._numeric_columns].astype(numpy.float64),
self._numeric_ranges, mat)
if len(self._nominal_columns) > 0:
_nominal_kernel(Y[:, self._nominal_columns],
self.X_fit_[:, self._nominal_columns],
mat)
mat /= n_features
return mat | def function[transform, parameter[self, Y]]:
constant[Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
Y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\_)
Kernel matrix. Values are normalized to lie within [0, 1].
]
call[name[check_is_fitted], parameter[name[self], constant[X_fit_]]]
<ast.Tuple object at 0x7da1b16ab040> assign[=] name[self].X_fit_.shape
variable[Y] assign[=] call[name[numpy].asarray, parameter[name[Y]]]
if compare[call[name[Y].shape][constant[1]] not_equal[!=] name[n_features]] begin[:]
<ast.Raise object at 0x7da1b1726c20>
variable[n_samples_y] assign[=] call[name[Y].shape][constant[0]]
variable[mat] assign[=] call[name[numpy].zeros, parameter[tuple[[<ast.Name object at 0x7da1b1726140>, <ast.Name object at 0x7da1b1726170>]]]]
call[name[continuous_ordinal_kernel_with_ranges], parameter[call[call[name[Y]][tuple[[<ast.Slice object at 0x7da1b1725f90>, <ast.Attribute object at 0x7da1b1725d20>]]].astype, parameter[name[numpy].float64]], call[call[name[self].X_fit_][tuple[[<ast.Slice object at 0x7da1b1725ed0>, <ast.Attribute object at 0x7da1b1726b60>]]].astype, parameter[name[numpy].float64]], name[self]._numeric_ranges, name[mat]]]
if compare[call[name[len], parameter[name[self]._nominal_columns]] greater[>] constant[0]] begin[:]
call[name[_nominal_kernel], parameter[call[name[Y]][tuple[[<ast.Slice object at 0x7da1b1727130>, <ast.Attribute object at 0x7da1b17242b0>]]], call[name[self].X_fit_][tuple[[<ast.Slice object at 0x7da1b1725960>, <ast.Attribute object at 0x7da1b1725540>]]], name[mat]]]
<ast.AugAssign object at 0x7da1b1725660>
return[name[mat]] | keyword[def] identifier[transform] ( identifier[self] , identifier[Y] ):
literal[string]
identifier[check_is_fitted] ( identifier[self] , literal[string] )
identifier[n_samples_x] , identifier[n_features] = identifier[self] . identifier[X_fit_] . identifier[shape]
identifier[Y] = identifier[numpy] . identifier[asarray] ( identifier[Y] )
keyword[if] identifier[Y] . identifier[shape] [ literal[int] ]!= identifier[n_features] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[n_features] , identifier[Y] . identifier[shape] [ literal[int] ]))
identifier[n_samples_y] = identifier[Y] . identifier[shape] [ literal[int] ]
identifier[mat] = identifier[numpy] . identifier[zeros] (( identifier[n_samples_y] , identifier[n_samples_x] ), identifier[dtype] = identifier[float] )
identifier[continuous_ordinal_kernel_with_ranges] ( identifier[Y] [:, identifier[self] . identifier[_numeric_columns] ]. identifier[astype] ( identifier[numpy] . identifier[float64] ),
identifier[self] . identifier[X_fit_] [:, identifier[self] . identifier[_numeric_columns] ]. identifier[astype] ( identifier[numpy] . identifier[float64] ),
identifier[self] . identifier[_numeric_ranges] , identifier[mat] )
keyword[if] identifier[len] ( identifier[self] . identifier[_nominal_columns] )> literal[int] :
identifier[_nominal_kernel] ( identifier[Y] [:, identifier[self] . identifier[_nominal_columns] ],
identifier[self] . identifier[X_fit_] [:, identifier[self] . identifier[_nominal_columns] ],
identifier[mat] )
identifier[mat] /= identifier[n_features]
keyword[return] identifier[mat] | def transform(self, Y):
"""Compute all pairwise distances between `self.X_fit_` and `Y`.
Parameters
----------
Y : array-like, shape = (n_samples_y, n_features)
Returns
-------
kernel : ndarray, shape = (n_samples_y, n_samples_X_fit\\_)
Kernel matrix. Values are normalized to lie within [0, 1].
"""
check_is_fitted(self, 'X_fit_')
(n_samples_x, n_features) = self.X_fit_.shape
Y = numpy.asarray(Y)
if Y.shape[1] != n_features:
raise ValueError('expected array with %d features, but got %d' % (n_features, Y.shape[1])) # depends on [control=['if'], data=['n_features']]
n_samples_y = Y.shape[0]
mat = numpy.zeros((n_samples_y, n_samples_x), dtype=float)
continuous_ordinal_kernel_with_ranges(Y[:, self._numeric_columns].astype(numpy.float64), self.X_fit_[:, self._numeric_columns].astype(numpy.float64), self._numeric_ranges, mat)
if len(self._nominal_columns) > 0:
_nominal_kernel(Y[:, self._nominal_columns], self.X_fit_[:, self._nominal_columns], mat) # depends on [control=['if'], data=[]]
mat /= n_features
return mat |
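This matches the shape of scikit-survival's ClinicalKernelTransform; a usage sketch under that assumption, with illustrative data:

import pandas as pd
from sksurv.kernels import ClinicalKernelTransform

X = pd.DataFrame({'age': [61.0, 52.0, 45.0],
                  'stage': pd.Categorical(['I', 'II', 'I'])})
kernel = ClinicalKernelTransform().fit(X)   # stores the samples as kernel.X_fit_
K = kernel.transform(X.values)              # pairwise kernel matrix, shape (3, 3), in [0, 1]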
def illumg(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint):
"""
Find the illumination angles (phase, incidence, and
emission) at a specified surface point of a target body.
The surface of the target body may be represented by a triaxial
ellipsoid or by topographic data provided by DSK files.
The illumination source is a specified ephemeris object.
:param method: Computation method.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumg_c.html
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
:rtype: tuple
"""
method = stypes.stringToCharP(method)
target = stypes.stringToCharP(target)
ilusrc = stypes.stringToCharP(ilusrc)
et = ctypes.c_double(et)
fixref = stypes.stringToCharP(fixref)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
spoint = stypes.toDoubleVector(spoint)
trgepc = ctypes.c_double(0)
srfvec = stypes.emptyDoubleVector(3)
phase = ctypes.c_double(0)
incdnc = ctypes.c_double(0)
emissn = ctypes.c_double(0)
libspice.illumg_c(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint,
ctypes.byref(trgepc), srfvec, ctypes.byref(phase),
ctypes.byref(incdnc), ctypes.byref(emissn))
return trgepc.value, stypes.cVectorToPython(srfvec), \
phase.value, incdnc.value, emissn.value | def function[illumg, parameter[method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint]]:
constant[
Find the illumination angles (phase, incidence, and
emission) at a specified surface point of a target body.
The surface of the target body may be represented by a triaxial
ellipsoid or by topographic data provided by DSK files.
The illumination source is a specified ephemeris object.
:param method: Computation method.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumg_c.html
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
:rtype: tuple
]
variable[method] assign[=] call[name[stypes].stringToCharP, parameter[name[method]]]
variable[target] assign[=] call[name[stypes].stringToCharP, parameter[name[target]]]
variable[ilusrc] assign[=] call[name[stypes].stringToCharP, parameter[name[ilusrc]]]
variable[et] assign[=] call[name[ctypes].c_double, parameter[name[et]]]
variable[fixref] assign[=] call[name[stypes].stringToCharP, parameter[name[fixref]]]
variable[abcorr] assign[=] call[name[stypes].stringToCharP, parameter[name[abcorr]]]
variable[obsrvr] assign[=] call[name[stypes].stringToCharP, parameter[name[obsrvr]]]
variable[spoint] assign[=] call[name[stypes].toDoubleVector, parameter[name[spoint]]]
variable[trgepc] assign[=] call[name[ctypes].c_double, parameter[constant[0]]]
variable[srfvec] assign[=] call[name[stypes].emptyDoubleVector, parameter[constant[3]]]
variable[phase] assign[=] call[name[ctypes].c_double, parameter[constant[0]]]
variable[incdnc] assign[=] call[name[ctypes].c_double, parameter[constant[0]]]
variable[emissn] assign[=] call[name[ctypes].c_double, parameter[constant[0]]]
call[name[libspice].illumg_c, parameter[name[method], name[target], name[ilusrc], name[et], name[fixref], name[abcorr], name[obsrvr], name[spoint], call[name[ctypes].byref, parameter[name[trgepc]]], name[srfvec], call[name[ctypes].byref, parameter[name[phase]]], call[name[ctypes].byref, parameter[name[incdnc]]], call[name[ctypes].byref, parameter[name[emissn]]]]]
return[tuple[[<ast.Attribute object at 0x7da18f09d690>, <ast.Call object at 0x7da18f09df30>, <ast.Attribute object at 0x7da18f09ea70>, <ast.Attribute object at 0x7da18f09dba0>, <ast.Attribute object at 0x7da18f09cc40>]]] | keyword[def] identifier[illumg] ( identifier[method] , identifier[target] , identifier[ilusrc] , identifier[et] , identifier[fixref] , identifier[abcorr] , identifier[obsrvr] , identifier[spoint] ):
literal[string]
identifier[method] = identifier[stypes] . identifier[stringToCharP] ( identifier[method] )
identifier[target] = identifier[stypes] . identifier[stringToCharP] ( identifier[target] )
identifier[ilusrc] = identifier[stypes] . identifier[stringToCharP] ( identifier[ilusrc] )
identifier[et] = identifier[ctypes] . identifier[c_double] ( identifier[et] )
identifier[fixref] = identifier[stypes] . identifier[stringToCharP] ( identifier[fixref] )
identifier[abcorr] = identifier[stypes] . identifier[stringToCharP] ( identifier[abcorr] )
identifier[obsrvr] = identifier[stypes] . identifier[stringToCharP] ( identifier[obsrvr] )
identifier[spoint] = identifier[stypes] . identifier[toDoubleVector] ( identifier[spoint] )
identifier[trgepc] = identifier[ctypes] . identifier[c_double] ( literal[int] )
identifier[srfvec] = identifier[stypes] . identifier[emptyDoubleVector] ( literal[int] )
identifier[phase] = identifier[ctypes] . identifier[c_double] ( literal[int] )
identifier[incdnc] = identifier[ctypes] . identifier[c_double] ( literal[int] )
identifier[emissn] = identifier[ctypes] . identifier[c_double] ( literal[int] )
identifier[libspice] . identifier[illumg_c] ( identifier[method] , identifier[target] , identifier[ilusrc] , identifier[et] , identifier[fixref] , identifier[abcorr] , identifier[obsrvr] , identifier[spoint] ,
identifier[ctypes] . identifier[byref] ( identifier[trgepc] ), identifier[srfvec] , identifier[ctypes] . identifier[byref] ( identifier[phase] ),
identifier[ctypes] . identifier[byref] ( identifier[incdnc] ), identifier[ctypes] . identifier[byref] ( identifier[emissn] ))
keyword[return] identifier[trgepc] . identifier[value] , identifier[stypes] . identifier[cVectorToPython] ( identifier[srfvec] ), identifier[phase] . identifier[value] , identifier[incdnc] . identifier[value] , identifier[emissn] . identifier[value] | def illumg(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint):
"""
Find the illumination angles (phase, incidence, and
emission) at a specified surface point of a target body.
The surface of the target body may be represented by a triaxial
ellipsoid or by topographic data provided by DSK files.
The illumination source is a specified ephemeris object.
:param method: Computation method.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumg_c.html
:type method: str
:param target: Name of target body.
:type target: str
:param ilusrc: Name of illumination source.
:type ilusrc: str
:param et: Epoch in ephemeris seconds past J2000.
:type et: float
:param fixref: Body-fixed, body-centered target body frame.
:type fixref: str
:param abcorr: Desired aberration correction.
:type abcorr: str
:param obsrvr: Name of observing body.
:type obsrvr: str
:param spoint: Body-fixed coordinates of a target surface point.
:type spoint: 3-Element Array of floats
:return: Target surface point epoch, Vector from observer to target
surface point, Phase angle at the surface point, Source incidence
angle at the surface point, Emission angle at the surface point,
:rtype: tuple
"""
method = stypes.stringToCharP(method)
target = stypes.stringToCharP(target)
ilusrc = stypes.stringToCharP(ilusrc)
et = ctypes.c_double(et)
fixref = stypes.stringToCharP(fixref)
abcorr = stypes.stringToCharP(abcorr)
obsrvr = stypes.stringToCharP(obsrvr)
spoint = stypes.toDoubleVector(spoint)
trgepc = ctypes.c_double(0)
srfvec = stypes.emptyDoubleVector(3)
phase = ctypes.c_double(0)
incdnc = ctypes.c_double(0)
emissn = ctypes.c_double(0)
libspice.illumg_c(method, target, ilusrc, et, fixref, abcorr, obsrvr, spoint, ctypes.byref(trgepc), srfvec, ctypes.byref(phase), ctypes.byref(incdnc), ctypes.byref(emissn))
return (trgepc.value, stypes.cVectorToPython(srfvec), phase.value, incdnc.value, emissn.value) |
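A usage sketch assuming SpiceyPy, with the usual kernels (leapseconds, PCK, planetary ephemerides) already furnished; the meta-kernel name is a placeholder:

import spiceypy as spice

spice.furnsh('meta_kernel.tm')   # assumed kernel set
et = spice.str2et('2020-01-01T00:00:00')
# A surface point to evaluate, here the sub-observer point:
spoint, _, _ = spice.subpnt('NEAR POINT/ELLIPSOID', 'MARS', et,
                            'IAU_MARS', 'CN+S', 'EARTH')
trgepc, srfvec, phase, incdnc, emissn = spice.illumg(
    'ELLIPSOID', 'MARS', 'SUN', et, 'IAU_MARS', 'CN+S', 'EARTH', spoint)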
def bin(self, *columns, **vargs):
"""Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths (they are documented under Args below).
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.
"""
if columns:
self = self.select(*columns)
if 'normed' in vargs:
vargs.setdefault('density', vargs.pop('normed'))
density = vargs.get('density', False)
tag = 'density' if density else 'count'
cols = list(self._columns.values())
_, bins = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
counts, _ = np.histogram(self[label], bins=bins, density=density)
binned[label + ' ' + tag] = np.append(counts, 0)
return binned | def function[bin, parameter[self]]:
constant[Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths (they are documented under Args below).
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.
]
if name[columns] begin[:]
variable[self] assign[=] call[name[self].select, parameter[<ast.Starred object at 0x7da18bcc98a0>]]
if compare[constant[normed] in name[vargs]] begin[:]
call[name[vargs].setdefault, parameter[constant[density], call[name[vargs].pop, parameter[constant[normed]]]]]
variable[density] assign[=] call[name[vargs].get, parameter[constant[density], constant[False]]]
variable[tag] assign[=] <ast.IfExp object at 0x7da18bccb040>
variable[cols] assign[=] call[name[list], parameter[call[name[self]._columns.values, parameter[]]]]
<ast.Tuple object at 0x7da18bcc8a60> assign[=] call[name[np].histogram, parameter[name[cols]]]
variable[binned] assign[=] call[call[call[name[type], parameter[name[self]]], parameter[]].with_column, parameter[constant[bin], name[bins]]]
for taget[name[label]] in starred[name[self].labels] begin[:]
<ast.Tuple object at 0x7da2047e95a0> assign[=] call[name[np].histogram, parameter[call[name[self]][name[label]]]]
call[name[binned]][binary_operation[binary_operation[name[label] + constant[ ]] + name[tag]]] assign[=] call[name[np].append, parameter[name[counts], constant[0]]]
return[name[binned]] | keyword[def] identifier[bin] ( identifier[self] ,* identifier[columns] ,** identifier[vargs] ):
literal[string]
keyword[if] identifier[columns] :
identifier[self] = identifier[self] . identifier[select] (* identifier[columns] )
keyword[if] literal[string] keyword[in] identifier[vargs] :
identifier[vargs] . identifier[setdefault] ( literal[string] , identifier[vargs] . identifier[pop] ( literal[string] ))
identifier[density] = identifier[vargs] . identifier[get] ( literal[string] , keyword[False] )
identifier[tag] = literal[string] keyword[if] identifier[density] keyword[else] literal[string]
identifier[cols] = identifier[list] ( identifier[self] . identifier[_columns] . identifier[values] ())
identifier[_] , identifier[bins] = identifier[np] . identifier[histogram] ( identifier[cols] ,** identifier[vargs] )
identifier[binned] = identifier[type] ( identifier[self] )(). identifier[with_column] ( literal[string] , identifier[bins] )
keyword[for] identifier[label] keyword[in] identifier[self] . identifier[labels] :
identifier[counts] , identifier[_] = identifier[np] . identifier[histogram] ( identifier[self] [ identifier[label] ], identifier[bins] = identifier[bins] , identifier[density] = identifier[density] )
identifier[binned] [ identifier[label] + literal[string] + identifier[tag] ]= identifier[np] . identifier[append] ( identifier[counts] , literal[int] )
keyword[return] identifier[binned] | def bin(self, *columns, **vargs):
"""Group values by bin and compute counts per bin by column.
By default, bins are chosen to contain all values in all columns. The
following named arguments from numpy.histogram can be applied to
specialize bin widths:
If the original table has n columns, the resulting binned table has
n+1 columns, where column 0 contains the lower bound of each bin.
Args:
``columns`` (str or int): Labels or indices of columns to be
binned. If empty, all columns are binned.
``bins`` (int or sequence of scalars): If bins is an int,
it defines the number of equal-width bins in the given range
(10, by default). If bins is a sequence, it defines the bin
edges, including the rightmost edge, allowing for non-uniform
bin widths.
``range`` ((float, float)): The lower and upper range of
the bins. If not provided, range contains all values in the
table. Values outside the range are ignored.
``density`` (bool): If False, the result will contain the number of
samples in each bin. If True, the result is the value of the
probability density function at the bin, normalized such that
the integral over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability mass function.
"""
if columns:
self = self.select(*columns) # depends on [control=['if'], data=[]]
if 'normed' in vargs:
vargs.setdefault('density', vargs.pop('normed')) # depends on [control=['if'], data=['vargs']]
density = vargs.get('density', False)
tag = 'density' if density else 'count'
cols = list(self._columns.values())
(_, bins) = np.histogram(cols, **vargs)
binned = type(self)().with_column('bin', bins)
for label in self.labels:
(counts, _) = np.histogram(self[label], bins=bins, density=density)
binned[label + ' ' + tag] = np.append(counts, 0) # depends on [control=['for'], data=['label']]
return binned |
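A usage sketch assuming the datascience package's Table, where a bin method of this shape lives:

import numpy as np
from datascience import Table

t = Table().with_column('value', np.array([1, 2, 2, 3, 8]))
binned = t.bin('value', bins=np.arange(0, 11, 5))
# 'bin' column: [0, 5, 10]; 'value count' column: [4, 1, 0]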
def heterozygosity_expected(af, ploidy, fill=np.nan):
"""Calculate the expected rate of heterozygosity for each variant
under Hardy-Weinberg equilibrium.
Parameters
----------
af : array_like, float, shape (n_variants, n_alleles)
Allele frequencies array.
ploidy : int
Sample ploidy.
fill : float, optional
Use this value for variants where allele frequencies do not sum to 1.
Returns
-------
he : ndarray, float, shape (n_variants,)
Expected heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> af = g.count_alleles().to_frequencies()
>>> allel.heterozygosity_expected(af, ploidy=2)
array([0. , 0.5 , 0.66666667, 0.375 ])
"""
# check inputs
af = asarray_ndim(af, 2)
# calculate expected heterozygosity
out = 1 - np.sum(np.power(af, ploidy), axis=1)
# fill values where allele frequencies could not be calculated
af_sum = np.sum(af, axis=1)
with ignore_invalid():
out[(af_sum < 1) | np.isnan(af_sum)] = fill
return out | def function[heterozygosity_expected, parameter[af, ploidy, fill]]:
constant[Calculate the expected rate of heterozygosity for each variant
under Hardy-Weinberg equilibrium.
Parameters
----------
af : array_like, float, shape (n_variants, n_alleles)
Allele frequencies array.
ploidy : int
Sample ploidy.
fill : float, optional
Use this value for variants where allele frequencies do not sum to 1.
Returns
-------
he : ndarray, float, shape (n_variants,)
Expected heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> af = g.count_alleles().to_frequencies()
>>> allel.heterozygosity_expected(af, ploidy=2)
array([0. , 0.5 , 0.66666667, 0.375 ])
]
variable[af] assign[=] call[name[asarray_ndim], parameter[name[af], constant[2]]]
variable[out] assign[=] binary_operation[constant[1] - call[name[np].sum, parameter[call[name[np].power, parameter[name[af], name[ploidy]]]]]]
variable[af_sum] assign[=] call[name[np].sum, parameter[name[af]]]
with call[name[ignore_invalid], parameter[]] begin[:]
call[name[out]][binary_operation[compare[name[af_sum] less[<] constant[1]] <ast.BitOr object at 0x7da2590d6aa0> call[name[np].isnan, parameter[name[af_sum]]]]] assign[=] name[fill]
return[name[out]] | keyword[def] identifier[heterozygosity_expected] ( identifier[af] , identifier[ploidy] , identifier[fill] = identifier[np] . identifier[nan] ):
literal[string]
identifier[af] = identifier[asarray_ndim] ( identifier[af] , literal[int] )
identifier[out] = literal[int] - identifier[np] . identifier[sum] ( identifier[np] . identifier[power] ( identifier[af] , identifier[ploidy] ), identifier[axis] = literal[int] )
identifier[af_sum] = identifier[np] . identifier[sum] ( identifier[af] , identifier[axis] = literal[int] )
keyword[with] identifier[ignore_invalid] ():
identifier[out] [( identifier[af_sum] < literal[int] )| identifier[np] . identifier[isnan] ( identifier[af_sum] )]= identifier[fill]
keyword[return] identifier[out] | def heterozygosity_expected(af, ploidy, fill=np.nan):
"""Calculate the expected rate of heterozygosity for each variant
under Hardy-Weinberg equilibrium.
Parameters
----------
af : array_like, float, shape (n_variants, n_alleles)
Allele frequencies array.
ploidy : int
Sample ploidy.
fill : float, optional
Use this value for variants where allele frequencies do not sum to 1.
Returns
-------
he : ndarray, float, shape (n_variants,)
Expected heterozygosity
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]],
... [[0, 0], [0, 1], [1, 1]],
... [[0, 0], [1, 1], [2, 2]],
... [[1, 1], [1, 2], [-1, -1]]])
>>> af = g.count_alleles().to_frequencies()
>>> allel.heterozygosity_expected(af, ploidy=2)
array([0. , 0.5 , 0.66666667, 0.375 ])
"""
# check inputs
af = asarray_ndim(af, 2)
# calculate expected heterozygosity
out = 1 - np.sum(np.power(af, ploidy), axis=1)
# fill values where allele frequencies could not be calculated
af_sum = np.sum(af, axis=1)
with ignore_invalid():
out[(af_sum < 1) | np.isnan(af_sum)] = fill # depends on [control=['with'], data=[]]
return out |
def within_bounding_box(self, limits):
"""
Selects the earthquakes within a bounding box.
:parameter limits:
A list or a numpy array with four elements in the following order:
- min x (longitude)
- min y (latitude)
- max x (longitude)
- max y (latitude)
:returns:
Returns a :class:`hmtk.seismicity.catalogue.Catalogue` instance
"""
is_valid = np.logical_and(
self.catalogue.data['longitude'] >= limits[0],
np.logical_and(self.catalogue.data['longitude'] <= limits[2],
np.logical_and(
self.catalogue.data['latitude'] >= limits[1],
self.catalogue.data['latitude'] <= limits[3])))
return self.select_catalogue(is_valid) | def function[within_bounding_box, parameter[self, limits]]:
constant[
Selects the earthquakes within a bounding box.
:parameter limits:
A list or a numpy array with four elements in the following order:
- min x (longitude)
- min y (latitude)
- max x (longitude)
- max y (latitude)
:returns:
Returns a :class:`hmtk.seismicity.catalogue.Catalogue` instance
]
variable[is_valid] assign[=] call[name[np].logical_and, parameter[compare[call[name[self].catalogue.data][constant[longitude]] greater_or_equal[>=] call[name[limits]][constant[0]]], call[name[np].logical_and, parameter[compare[call[name[self].catalogue.data][constant[longitude]] less_or_equal[<=] call[name[limits]][constant[2]]], call[name[np].logical_and, parameter[compare[call[name[self].catalogue.data][constant[latitude]] greater_or_equal[>=] call[name[limits]][constant[1]]], compare[call[name[self].catalogue.data][constant[latitude]] less_or_equal[<=] call[name[limits]][constant[3]]]]]]]]]
return[call[name[self].select_catalogue, parameter[name[is_valid]]]] | keyword[def] identifier[within_bounding_box] ( identifier[self] , identifier[limits] ):
literal[string]
identifier[is_valid] = identifier[np] . identifier[logical_and] (
identifier[self] . identifier[catalogue] . identifier[data] [ literal[string] ]>= identifier[limits] [ literal[int] ],
identifier[np] . identifier[logical_and] ( identifier[self] . identifier[catalogue] . identifier[data] [ literal[string] ]<= identifier[limits] [ literal[int] ],
identifier[np] . identifier[logical_and] (
identifier[self] . identifier[catalogue] . identifier[data] [ literal[string] ]>= identifier[limits] [ literal[int] ],
identifier[self] . identifier[catalogue] . identifier[data] [ literal[string] ]<= identifier[limits] [ literal[int] ])))
keyword[return] identifier[self] . identifier[select_catalogue] ( identifier[is_valid] ) | def within_bounding_box(self, limits):
"""
Selects the earthquakes within a bounding box.
:parameter limits:
A list or a numpy array with four elements in the following order:
- min x (longitude)
- min y (latitude)
- max x (longitude)
- max y (latitude)
:returns:
Returns a :class:`hmtk.seismicity.catalogue.Catalogue` instance
"""
is_valid = np.logical_and(self.catalogue.data['longitude'] >= limits[0], np.logical_and(self.catalogue.data['longitude'] <= limits[2], np.logical_and(self.catalogue.data['latitude'] >= limits[1], self.catalogue.data['latitude'] <= limits[3])))
return self.select_catalogue(is_valid) |
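A usage sketch; the selector owning this method is assumed to be an hmtk catalogue selector built elsewhere:

import numpy as np

# selector = CatalogueSelector(catalogue)              # constructed elsewhere
limits = np.array([-125.0, 32.0, -114.0, 42.0])        # min lon, min lat, max lon, max lat
sub_catalogue = selector.within_bounding_box(limits)   # events inside the box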
def _classify_no_operation(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify no-operation gadgets.
"""
# TODO: Flags should be taken into account
matches = []
# Check that registers didn't change their value.
regs_changed = any(regs_init[r] != regs_fini[r] for r in regs_init)
# Check that flags didn't change their value.
flags_changed = False
# Check that memory didn't change.
mem_changed = mem_fini.get_write_count() != 0
if not regs_changed and not flags_changed and not mem_changed:
matches.append({
"op": "nop",
})
return matches | def function[_classify_no_operation, parameter[self, regs_init, regs_fini, mem_fini, written_regs, read_regs]]:
constant[Classify no-operation gadgets.
]
variable[matches] assign[=] list[[]]
variable[regs_changed] assign[=] call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b26aead0>]]
variable[flags_changed] assign[=] constant[False]
variable[mem_changed] assign[=] compare[call[name[mem_fini].get_write_count, parameter[]] not_equal[!=] constant[0]]
if <ast.BoolOp object at 0x7da1b26afb20> begin[:]
call[name[matches].append, parameter[dictionary[[<ast.Constant object at 0x7da1b26aea70>], [<ast.Constant object at 0x7da1b26ac9a0>]]]]
return[name[matches]] | keyword[def] identifier[_classify_no_operation] ( identifier[self] , identifier[regs_init] , identifier[regs_fini] , identifier[mem_fini] , identifier[written_regs] , identifier[read_regs] ):
literal[string]
identifier[matches] =[]
identifier[regs_changed] = identifier[any] ( identifier[regs_init] [ identifier[r] ]!= identifier[regs_fini] [ identifier[r] ] keyword[for] identifier[r] keyword[in] identifier[regs_init] )
identifier[flags_changed] = keyword[False]
identifier[mem_changed] = identifier[mem_fini] . identifier[get_write_count] ()!= literal[int]
keyword[if] keyword[not] identifier[regs_changed] keyword[and] keyword[not] identifier[flags_changed] keyword[and] keyword[not] identifier[mem_changed] :
identifier[matches] . identifier[append] ({
literal[string] : literal[string] ,
})
keyword[return] identifier[matches] | def _classify_no_operation(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
"""Classify no-operation gadgets.
"""
# TODO: Flags should be taken into account
matches = []
# Check that registers didn't change their value.
regs_changed = any((regs_init[r] != regs_fini[r] for r in regs_init))
# Check that flags didn't change their value.
flags_changed = False
# Check that memory didn't change.
mem_changed = mem_fini.get_write_count() != 0
if not regs_changed and (not flags_changed) and (not mem_changed):
matches.append({'op': 'nop'}) # depends on [control=['if'], data=[]]
return matches |
def has_cache(self):
"""Intended to be called before any call that might access the
cache. If the cache is not enabled, then returns False,
otherwise the cache is built if needed and returns True."""
if not self.cache_enabled:
return False
if self._cache is None:
self.build_cache()
return True | def function[has_cache, parameter[self]]:
constant[Intended to be called before any call that might access the
cache. If the cache is not enabled, then returns False,
otherwise the cache is built if needed and returns True.]
if <ast.UnaryOp object at 0x7da2047e9150> begin[:]
return[constant[False]]
if compare[name[self]._cache is constant[None]] begin[:]
call[name[self].build_cache, parameter[]]
return[constant[True]] | keyword[def] identifier[has_cache] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[cache_enabled] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_cache] keyword[is] keyword[None] :
identifier[self] . identifier[build_cache] ()
keyword[return] keyword[True] | def has_cache(self):
"""Intended to be called before any call that might access the
cache. If the cache is not enabled, then returns False,
otherwise the cache is built if needed and returns True."""
if not self.cache_enabled:
return False # depends on [control=['if'], data=[]]
if self._cache is None:
self.build_cache() # depends on [control=['if'], data=[]]
return True |
def set_elems(self, subs, vals):
"""set_elems(subs, vals) sets the array elements specified by the list
of subscript values subs (each element of subs is a tuple of
subscripts identifying an array element) to the corresponding value
in vals."""
if isinstance(vals, (int, float)):
# if vals is a scalar, extend it to a list of appropriate length
vals = [vals] * len(subs)
for i in range(len(subs)):
self.set_(subs[i], vals[i]) | def function[set_elems, parameter[self, subs, vals]]:
constant[set_elems(subs, vals) sets the array elements specified by the list
of subscript values subs (each element of subs is a tuple of
subscripts identifying an array element) to the corresponding value
in vals.]
if call[name[isinstance], parameter[name[vals], tuple[[<ast.Name object at 0x7da2041dbee0>, <ast.Name object at 0x7da2041d8070>]]]] begin[:]
variable[vals] assign[=] binary_operation[list[[<ast.Name object at 0x7da2041d9450>]] * call[name[len], parameter[name[subs]]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[subs]]]]]] begin[:]
call[name[self].set_, parameter[call[name[subs]][name[i]], call[name[vals]][name[i]]]] | keyword[def] identifier[set_elems] ( identifier[self] , identifier[subs] , identifier[vals] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[vals] ,( identifier[int] , identifier[float] )):
identifier[vals] =[ identifier[vals] ]* identifier[len] ( identifier[subs] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[subs] )):
identifier[self] . identifier[set_] ( identifier[subs] [ identifier[i] ], identifier[vals] [ identifier[i] ]) | def set_elems(self, subs, vals):
"""set_elems(subs, vals) sets the array elements specified by the list
of subscript values subs (each element of subs is a tuple of
subscripts identifying an array element) to the corresponding value
in vals."""
if isinstance(vals, (int, float)):
# if vals is a scalar, extend it to a list of appropriate length
vals = [vals] * len(subs) # depends on [control=['if'], data=[]]
for i in range(len(subs)):
self.set_(subs[i], vals[i]) # depends on [control=['for'], data=['i']] |
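A self-contained illustration of the scalar-broadcast behaviour (Grid is a mock, not the original class):

class Grid:
    def __init__(self):
        self.cells = {}

    def set_(self, sub, val):
        self.cells[sub] = val

    def set_elems(self, subs, vals):
        # Same contract as above: a scalar is repeated for every subscript.
        if isinstance(vals, (int, float)):
            vals = [vals] * len(subs)
        for i in range(len(subs)):
            self.set_(subs[i], vals[i])

g = Grid()
g.set_elems([(0, 0), (1, 1)], 7)         # scalar broadcast to both cells
g.set_elems([(0, 1), (1, 0)], [1, 2])    # one value per subscript
print(g.cells)   # {(0, 0): 7, (1, 1): 7, (0, 1): 1, (1, 0): 2}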
def reset(self):
""" Add a RESET message to the outgoing queue, send
it and consume all remaining messages.
"""
def fail(metadata):
raise ProtocolError("RESET failed %r" % metadata)
log_debug("[#%04X] C: RESET", self.local_port)
self._append(b"\x0F", response=Response(self, on_failure=fail))
self.sync() | def function[reset, parameter[self]]:
constant[ Add a RESET message to the outgoing queue, send
it and consume all remaining messages.
]
def function[fail, parameter[metadata]]:
<ast.Raise object at 0x7da207f01f30>
call[name[log_debug], parameter[constant[[#%04X] C: RESET], name[self].local_port]]
call[name[self]._append, parameter[constant[b'\x0f']]]
call[name[self].sync, parameter[]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
keyword[def] identifier[fail] ( identifier[metadata] ):
keyword[raise] identifier[ProtocolError] ( literal[string] % identifier[metadata] )
identifier[log_debug] ( literal[string] , identifier[self] . identifier[local_port] )
identifier[self] . identifier[_append] ( literal[string] , identifier[response] = identifier[Response] ( identifier[self] , identifier[on_failure] = identifier[fail] ))
identifier[self] . identifier[sync] () | def reset(self):
""" Add a RESET message to the outgoing queue, send
it and consume all remaining messages.
"""
def fail(metadata):
raise ProtocolError('RESET failed %r' % metadata)
log_debug('[#%04X] C: RESET', self.local_port)
self._append(b'\x0f', response=Response(self, on_failure=fail))
self.sync() |
def _fetch_dimensions(self, dataset):
""" Declaring available dimensions like this is not mandatory,
but nice, especially if they differ from dataset to dataset.
If you are using a built in datatype, you can specify the dialect
you are expecting, to have values normalized. This scraper will
look for Swedish month names (e.g. 'Januari'), but return them
according to the Statscraper standard ('january').
"""
yield Dimension(u"region",
label="municipality or county",
datatype="region",
dialect="arbetsmiljoverket")
yield Dimension(u"period",
label="Year or month") | def function[_fetch_dimensions, parameter[self, dataset]]:
constant[ Declaring available dimensions like this is not mandatory,
but nice, especially if they differ from dataset to dataset.
If you are using a built in datatype, you can specify the dialect
you are expecting, to have values normalized. This scraper will
look for Swedish month names (e.g. 'Januari'), but return them
according to the Statscraper standard ('january').
]
<ast.Yield object at 0x7da2054a45e0>
<ast.Yield object at 0x7da2054a7940> | keyword[def] identifier[_fetch_dimensions] ( identifier[self] , identifier[dataset] ):
literal[string]
keyword[yield] identifier[Dimension] ( literal[string] ,
identifier[label] = literal[string] ,
identifier[datatype] = literal[string] ,
identifier[dialect] = literal[string] )
keyword[yield] identifier[Dimension] ( literal[string] ,
identifier[label] = literal[string] ) | def _fetch_dimensions(self, dataset):
""" Declaring available dimensions like this is not mandatory,
but nice, especially if they differ from dataset to dataset.
If you are using a built in datatype, you can specify the dialect
you are expecting, to have values normalized. This scraper will
look for Swedish month names (e.g. 'Januari'), but return them
according to the Statscraper standard ('january').
"""
yield Dimension(u'region', label='municipality or county', datatype='region', dialect='arbetsmiljoverket')
yield Dimension(u'period', label='Year or month') |
def _srels_for(phys_reader, source_uri):
"""
Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*.
"""
rels_xml = phys_reader.rels_xml_for(source_uri)
return _SerializedRelationshipCollection.load_from_xml(
source_uri.baseURI, rels_xml) | def function[_srels_for, parameter[phys_reader, source_uri]]:
constant[
Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*.
]
variable[rels_xml] assign[=] call[name[phys_reader].rels_xml_for, parameter[name[source_uri]]]
return[call[name[_SerializedRelationshipCollection].load_from_xml, parameter[name[source_uri].baseURI, name[rels_xml]]]] | keyword[def] identifier[_srels_for] ( identifier[phys_reader] , identifier[source_uri] ):
literal[string]
identifier[rels_xml] = identifier[phys_reader] . identifier[rels_xml_for] ( identifier[source_uri] )
keyword[return] identifier[_SerializedRelationshipCollection] . identifier[load_from_xml] (
identifier[source_uri] . identifier[baseURI] , identifier[rels_xml] ) | def _srels_for(phys_reader, source_uri):
"""
Return |_SerializedRelationshipCollection| instance populated with
relationships for source identified by *source_uri*.
"""
rels_xml = phys_reader.rels_xml_for(source_uri)
return _SerializedRelationshipCollection.load_from_xml(source_uri.baseURI, rels_xml) |
def writeGlyph(self,
name,
unicodes=None,
location=None,
masters=None,
note=None,
mute=False,
):
""" Add a new glyph to the current instance.
* name: the glyph name. Required.
* unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters.
* location: a design space location for this glyph if it needs to be different from the instance location.
* masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance.
* note: a note for this glyph
* mute: if this glyph is muted. None of the other attributes matter if this one is true.
"""
if self.currentInstance is None:
return
glyphElement = ET.Element('glyph')
if mute:
glyphElement.attrib['mute'] = "1"
if unicodes is not None:
glyphElement.attrib['unicode'] = " ".join([hex(u) for u in unicodes])
if location is not None:
locationElement = self._makeLocationElement(location)
glyphElement.append(locationElement)
if name is not None:
glyphElement.attrib['name'] = name
if note is not None:
noteElement = ET.Element('note')
noteElement.text = note
glyphElement.append(noteElement)
if masters is not None:
mastersElement = ET.Element("masters")
for glyphName, masterName, location in masters:
masterElement = ET.Element("master")
if glyphName is not None:
masterElement.attrib['glyphname'] = glyphName
masterElement.attrib['source'] = masterName
if location is not None:
locationElement = self._makeLocationElement(location)
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
if self.currentInstance.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
self.currentInstance.append(glyphsElement)
else:
glyphsElement = self.currentInstance.findall('.glyphs')[0]
glyphsElement.append(glyphElement) | def function[writeGlyph, parameter[self, name, unicodes, location, masters, note, mute]]:
constant[ Add a new glyph to the current instance.
* name: the glyph name. Required.
* unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters.
* location: a design space location for this glyph if it needs to be different from the instance location.
* masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance.
* note: a note for this glyph
* mute: if this glyph is muted. None of the other attributes matter if this one is true.
]
if compare[name[self].currentInstance is constant[None]] begin[:]
return[None]
variable[glyphElement] assign[=] call[name[ET].Element, parameter[constant[glyph]]]
if name[mute] begin[:]
call[name[glyphElement].attrib][constant[mute]] assign[=] constant[1]
if compare[name[unicodes] is_not constant[None]] begin[:]
call[name[glyphElement].attrib][constant[unicode]] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da2047e95d0>]]
if compare[name[location] is_not constant[None]] begin[:]
variable[locationElement] assign[=] call[name[self]._makeLocationElement, parameter[name[location]]]
call[name[glyphElement].append, parameter[name[locationElement]]]
if compare[name[name] is_not constant[None]] begin[:]
call[name[glyphElement].attrib][constant[name]] assign[=] name[name]
if compare[name[note] is_not constant[None]] begin[:]
variable[noteElement] assign[=] call[name[ET].Element, parameter[constant[note]]]
name[noteElement].text assign[=] name[note]
call[name[glyphElement].append, parameter[name[noteElement]]]
if compare[name[masters] is_not constant[None]] begin[:]
variable[mastersElement] assign[=] call[name[ET].Element, parameter[constant[masters]]]
for taget[tuple[[<ast.Name object at 0x7da2047ead70>, <ast.Name object at 0x7da2047e88b0>, <ast.Name object at 0x7da2047e9ae0>]]] in starred[name[masters]] begin[:]
variable[masterElement] assign[=] call[name[ET].Element, parameter[constant[master]]]
if compare[name[glyphName] is_not constant[None]] begin[:]
call[name[masterElement].attrib][constant[glyphname]] assign[=] name[glyphName]
call[name[masterElement].attrib][constant[source]] assign[=] name[masterName]
if compare[name[location] is_not constant[None]] begin[:]
variable[locationElement] assign[=] call[name[self]._makeLocationElement, parameter[name[location]]]
call[name[masterElement].append, parameter[name[locationElement]]]
call[name[mastersElement].append, parameter[name[masterElement]]]
call[name[glyphElement].append, parameter[name[mastersElement]]]
if compare[call[name[self].currentInstance.findall, parameter[constant[.glyphs]]] equal[==] list[[]]] begin[:]
variable[glyphsElement] assign[=] call[name[ET].Element, parameter[constant[glyphs]]]
call[name[self].currentInstance.append, parameter[name[glyphsElement]]]
call[name[glyphsElement].append, parameter[name[glyphElement]]] | keyword[def] identifier[writeGlyph] ( identifier[self] ,
identifier[name] ,
identifier[unicodes] = keyword[None] ,
identifier[location] = keyword[None] ,
identifier[masters] = keyword[None] ,
identifier[note] = keyword[None] ,
identifier[mute] = keyword[False] ,
):
literal[string]
keyword[if] identifier[self] . identifier[currentInstance] keyword[is] keyword[None] :
keyword[return]
identifier[glyphElement] = identifier[ET] . identifier[Element] ( literal[string] )
keyword[if] identifier[mute] :
identifier[glyphElement] . identifier[attrib] [ literal[string] ]= literal[string]
keyword[if] identifier[unicodes] keyword[is] keyword[not] keyword[None] :
identifier[glyphElement] . identifier[attrib] [ literal[string] ]= literal[string] . identifier[join] ([ identifier[hex] ( identifier[u] ) keyword[for] identifier[u] keyword[in] identifier[unicodes] ])
keyword[if] identifier[location] keyword[is] keyword[not] keyword[None] :
identifier[locationElement] = identifier[self] . identifier[_makeLocationElement] ( identifier[location] )
identifier[glyphElement] . identifier[append] ( identifier[locationElement] )
keyword[if] identifier[name] keyword[is] keyword[not] keyword[None] :
identifier[glyphElement] . identifier[attrib] [ literal[string] ]= identifier[name]
keyword[if] identifier[note] keyword[is] keyword[not] keyword[None] :
identifier[noteElement] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[noteElement] . identifier[text] = identifier[note]
identifier[glyphElement] . identifier[append] ( identifier[noteElement] )
keyword[if] identifier[masters] keyword[is] keyword[not] keyword[None] :
identifier[mastersElement] = identifier[ET] . identifier[Element] ( literal[string] )
keyword[for] identifier[glyphName] , identifier[masterName] , identifier[location] keyword[in] identifier[masters] :
identifier[masterElement] = identifier[ET] . identifier[Element] ( literal[string] )
keyword[if] identifier[glyphName] keyword[is] keyword[not] keyword[None] :
identifier[masterElement] . identifier[attrib] [ literal[string] ]= identifier[glyphName]
identifier[masterElement] . identifier[attrib] [ literal[string] ]= identifier[masterName]
keyword[if] identifier[location] keyword[is] keyword[not] keyword[None] :
identifier[locationElement] = identifier[self] . identifier[_makeLocationElement] ( identifier[location] )
identifier[masterElement] . identifier[append] ( identifier[locationElement] )
identifier[mastersElement] . identifier[append] ( identifier[masterElement] )
identifier[glyphElement] . identifier[append] ( identifier[mastersElement] )
keyword[if] identifier[self] . identifier[currentInstance] . identifier[findall] ( literal[string] )==[]:
identifier[glyphsElement] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[self] . identifier[currentInstance] . identifier[append] ( identifier[glyphsElement] )
keyword[else] :
identifier[glyphsElement] = identifier[self] . identifier[currentInstance] . identifier[findall] ( literal[string] )[ literal[int] ]
identifier[glyphsElement] . identifier[append] ( identifier[glyphElement] ) | def writeGlyph(self, name, unicodes=None, location=None, masters=None, note=None, mute=False):
""" Add a new glyph to the current instance.
* name: the glyph name. Required.
* unicodes: unicode values for this glyph if it needs to be different from the unicode values associated with this glyph name in the masters.
* location: a design space location for this glyph if it needs to be different from the instance location.
* masters: a list of masters and locations for this glyph if they need to be different from the masters specified for this instance.
* note: a note for this glyph
* mute: if this glyph is muted. None of the other attributes matter if this one is true.
"""
if self.currentInstance is None:
return # depends on [control=['if'], data=[]]
glyphElement = ET.Element('glyph')
if mute:
glyphElement.attrib['mute'] = '1' # depends on [control=['if'], data=[]]
if unicodes is not None:
glyphElement.attrib['unicode'] = ' '.join([hex(u) for u in unicodes]) # depends on [control=['if'], data=['unicodes']]
if location is not None:
locationElement = self._makeLocationElement(location)
glyphElement.append(locationElement) # depends on [control=['if'], data=['location']]
if name is not None:
glyphElement.attrib['name'] = name # depends on [control=['if'], data=['name']]
if note is not None:
noteElement = ET.Element('note')
noteElement.text = note
glyphElement.append(noteElement) # depends on [control=['if'], data=['note']]
if masters is not None:
mastersElement = ET.Element('masters')
for (glyphName, masterName, location) in masters:
masterElement = ET.Element('master')
if glyphName is not None:
masterElement.attrib['glyphname'] = glyphName # depends on [control=['if'], data=['glyphName']]
masterElement.attrib['source'] = masterName
if location is not None:
locationElement = self._makeLocationElement(location)
masterElement.append(locationElement) # depends on [control=['if'], data=['location']]
mastersElement.append(masterElement) # depends on [control=['for'], data=[]]
glyphElement.append(mastersElement) # depends on [control=['if'], data=['masters']]
if self.currentInstance.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
self.currentInstance.append(glyphsElement) # depends on [control=['if'], data=[]]
else:
glyphsElement = self.currentInstance.findall('.glyphs')[0]
glyphsElement.append(glyphElement) |
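A usage sketch; writer is assumed to be a designspace instance writer on which an instance has already been started, so currentInstance is set:

writer.writeGlyph(
    'a.alt',
    unicodes=[0x0061],
    location=dict(weight=0.7),
    masters=[('a', 'master_light', None),    # (glyphName, masterName, location)
             ('a', 'master_bold', None)],
    note="alternate interpolated from the plain 'a' masters",
)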
def plotBrightLimitInV(gBright, pdf=False, png=False):
"""
Plot the bright limit of Gaia in V as a function of (V-I).
Parameters
----------
gBright - The bright limit of Gaia in G
"""
vmini=np.linspace(0.0,6.0,1001)
gminv=gminvFromVmini(vmini)
vBright=gBright-gminv
fig=plt.figure(figsize=(10,6.5))
plt.plot(vmini,vBright,'b-')
plt.xlabel('$(V-I)$')
plt.ylabel('Bright limit of Gaia in $V$')
plt.xlim(0,6)
plt.ylim(5,11)
plt.grid(which='both')
plt.title("Bright limit in $G$: {0}".format(gBright))
if (pdf):
plt.savefig('VBandBrightLimit.pdf')
elif (png):
plt.savefig('VBandBrightLimit.png')
else:
plt.show() | def function[plotBrightLimitInV, parameter[gBright, pdf, png]]:
constant[
Plot the bright limit of Gaia in V as a function of (V-I).
Parameters
----------
gBright - The bright limit of Gaia in G
]
variable[vmini] assign[=] call[name[np].linspace, parameter[constant[0.0], constant[6.0], constant[1001]]]
variable[gminv] assign[=] call[name[gminvFromVmini], parameter[name[vmini]]]
variable[vBright] assign[=] binary_operation[name[gBright] - name[gminv]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[plt].plot, parameter[name[vmini], name[vBright], constant[b-]]]
call[name[plt].xlabel, parameter[constant[$(V-I)$]]]
call[name[plt].ylabel, parameter[constant[Bright limit of Gaia in $V$]]]
call[name[plt].xlim, parameter[constant[0], constant[6]]]
call[name[plt].ylim, parameter[constant[5], constant[11]]]
call[name[plt].grid, parameter[]]
call[name[plt].title, parameter[call[constant[Bright limit in $G$: {0}].format, parameter[name[gBright]]]]]
if name[pdf] begin[:]
call[name[plt].savefig, parameter[constant[VBandBrightLimit.pdf]]] | keyword[def] identifier[plotBrightLimitInV] ( identifier[gBright] , identifier[pdf] = keyword[False] , identifier[png] = keyword[False] ):
literal[string]
identifier[vmini] = identifier[np] . identifier[linspace] ( literal[int] , literal[int] , literal[int] )
identifier[gminv] = identifier[gminvFromVmini] ( identifier[vmini] )
identifier[vBright] = identifier[gBright] - identifier[gminv]
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ))
identifier[plt] . identifier[plot] ( identifier[vmini] , identifier[vBright] , literal[string] )
identifier[plt] . identifier[xlabel] ( literal[string] )
identifier[plt] . identifier[ylabel] ( literal[string] )
identifier[plt] . identifier[xlim] ( literal[int] , literal[int] )
identifier[plt] . identifier[ylim] ( literal[int] , literal[int] )
identifier[plt] . identifier[grid] ( identifier[which] = literal[string] )
identifier[plt] . identifier[title] ( literal[string] . identifier[format] ( identifier[gBright] ))
keyword[if] ( identifier[pdf] ):
identifier[plt] . identifier[savefig] ( literal[string] )
keyword[elif] ( identifier[png] ):
identifier[plt] . identifier[savefig] ( literal[string] )
keyword[else] :
identifier[plt] . identifier[show] () | def plotBrightLimitInV(gBright, pdf=False, png=False):
"""
Plot the bright limit of Gaia in V as a function of (V-I).
Parameters
----------
gBright - The bright limit of Gaia in G
"""
vmini = np.linspace(0.0, 6.0, 1001)
gminv = gminvFromVmini(vmini)
vBright = gBright - gminv
fig = plt.figure(figsize=(10, 6.5))
plt.plot(vmini, vBright, 'b-')
plt.xlabel('$(V-I)$')
plt.ylabel('Bright limit of Gaia in $V$')
plt.xlim(0, 6)
plt.ylim(5, 11)
plt.grid(which='both')
plt.title('Bright limit in $G$: {0}'.format(gBright))
if pdf:
plt.savefig('VBandBrightLimit.pdf') # depends on [control=['if'], data=[]]
elif png:
plt.savefig('VBandBrightLimit.png') # depends on [control=['if'], data=[]]
else:
plt.show() |
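plotBrightLimitInV calls an external gminvFromVmini(vmini) that is not part of this excerpt; the runnable sketch below substitutes a deliberately simplistic placeholder relation just to show the call pattern.
# Runnable sketch; gminvFromVmini here is a placeholder, not the real
# photometric relation used by the function above.
import numpy as np
import matplotlib
matplotlib.use('Agg')                      # headless backend so savefig works anywhere
import matplotlib.pyplot as plt

def gminvFromVmini(vmini):
    return -0.02 - 0.2 * vmini             # fake linear G-V relation (assumption)

gBright = 5.7
vmini = np.linspace(0.0, 6.0, 1001)
plt.plot(vmini, gBright - gminvFromVmini(vmini), 'b-')
plt.savefig('VBandBrightLimit.png')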
def _get_concatenation(extractors, text, *, ignore_whitespace=True):
"""Returns a concatenation ParseNode whose children are the nodes returned by each of the
methods in the extractors enumerable.
If ignore_whitespace is True, whitespace will be ignored and then attached to the child it
preceded.
"""
ignored_ws, use_text = _split_ignored(text, ignore_whitespace)
extractor, *remaining = extractors
child = _call_extractor(extractor, use_text)
child.add_ignored(ignored_ws)
# TODO: Should I set node.position = -len(text) for the case that ignored whitespace will cause
# the first child's position to not be the whitespace, and therefore the concatenation's
# position will be the first non-whitespace? I think not, but I'm adding this note in
# case that causes an issue I'm not seeing at the moment.
node = ParseNode(ParseNodeType.concatenation, children=[child])
if remaining:
# child.consumed will include ignored whitespace, so we base the text we pass on on text rather
# than use_text.
return node.merged(_get_concatenation(remaining,
text[child.consumed:],
ignore_whitespace=ignore_whitespace))
else:
return node | def function[_get_concatenation, parameter[extractors, text]]:
constant[Returns a concatenation ParseNode whose children are the nodes returned by each of the
methods in the extractors enumerable.
If ignore_whitespace is True, whitespace will be ignored and then attached to the child it
preceded.
]
<ast.Tuple object at 0x7da1b013c3a0> assign[=] call[name[_split_ignored], parameter[name[text], name[ignore_whitespace]]]
<ast.Tuple object at 0x7da1b013d6c0> assign[=] name[extractors]
variable[child] assign[=] call[name[_call_extractor], parameter[name[extractor], name[use_text]]]
call[name[child].add_ignored, parameter[name[ignored_ws]]]
variable[node] assign[=] call[name[ParseNode], parameter[name[ParseNodeType].concatenation]]
if name[remaining] begin[:]
return[call[name[node].merged, parameter[call[name[_get_concatenation], parameter[name[remaining], call[name[text]][<ast.Slice object at 0x7da1b013c640>]]]]]] | keyword[def] identifier[_get_concatenation] ( identifier[extractors] , identifier[text] ,*, identifier[ignore_whitespace] = keyword[True] ):
literal[string]
identifier[ignored_ws] , identifier[use_text] = identifier[_split_ignored] ( identifier[text] , identifier[ignore_whitespace] )
identifier[extractor] ,* identifier[remaining] = identifier[extractors]
identifier[child] = identifier[_call_extractor] ( identifier[extractor] , identifier[use_text] )
identifier[child] . identifier[add_ignored] ( identifier[ignored_ws] )
identifier[node] = identifier[ParseNode] ( identifier[ParseNodeType] . identifier[concatenation] , identifier[children] =[ identifier[child] ])
keyword[if] identifier[remaining] :
keyword[return] identifier[node] . identifier[merged] ( identifier[_get_concatenation] ( identifier[remaining] ,
identifier[text] [ identifier[child] . identifier[consumed] :],
identifier[ignore_whitespace] = identifier[ignore_whitespace] ))
keyword[else] :
keyword[return] identifier[node] | def _get_concatenation(extractors, text, *, ignore_whitespace=True):
"""Returns a concatenation ParseNode whose children are the nodes returned by each of the
methods in the extractors enumerable.
If ignore_whitespace is True, whitespace will be ignored and then attached to the child it
preceded.
"""
(ignored_ws, use_text) = _split_ignored(text, ignore_whitespace)
(extractor, *remaining) = extractors
child = _call_extractor(extractor, use_text)
child.add_ignored(ignored_ws)
# TODO: Should I set node.position = -len(text) for the case that ignored whitespace will cause
# the first child's position to not be the whitespace, and therefore the concatenation's
# position will be the first non-whitespace? I think not, but I'm adding this note in
# case that causes an issue I'm not seeing at the moment.
node = ParseNode(ParseNodeType.concatenation, children=[child])
if remaining:
# child.consumed will include ignored whitespace, so we base the text we pass on on text rather
# than use_text.
return node.merged(_get_concatenation(remaining, text[child.consumed:], ignore_whitespace=ignore_whitespace)) # depends on [control=['if'], data=[]]
else:
return node |
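The head/tail recursion in _get_concatenation threads the remaining text through child.consumed; the toy below reproduces that pattern with trivial stand-ins for ParseNode, _split_ignored and _call_extractor.
# Toy of the recursion pattern only; ParseNode and the helpers are simplified.
def split_ignored(text):
    stripped = text.lstrip()
    return text[:len(text) - len(stripped)], stripped

def concatenate(extractors, text):
    ws, rest = split_ignored(text)
    extractor, *remaining = extractors
    value, consumed = extractor(rest)      # each extractor returns (value, chars used)
    consumed += len(ws)                    # ignored whitespace still counts as consumed
    children = [value]
    if remaining:
        children += concatenate(remaining, text[consumed:])
    return children

word = lambda s: (s.split()[0], len(s.split()[0]))
print(concatenate([word, word, word], "  foo bar baz"))   # ['foo', 'bar', 'baz']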
def _ars_to_proxies(ars):
"""wait for async results and return proxy objects
Args:
ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.
Returns:
Remote* proxy object (or list of them)
"""
if (isinstance(ars, Remote) or
isinstance(ars, numbers.Number) or
ars is None):
return ars
elif isinstance(ars, collections.Sequence):
res = []
for i in range(len(ars)):
res.append(_ars_to_proxies(ars[i]))
return res
elif isinstance(ars, ipyparallel.AsyncResult):
ref = ars.r
ObClass = ref.type
if ObClass in distob.engine.proxy_types:
RemoteClass = distob.engine.proxy_types[ObClass]
else:
RemoteClass = type(
'Remote' + ObClass.__name__, (Remote, ObClass), dict())
RemoteClass = proxy_methods(ObClass)(RemoteClass)
proxy_obj = RemoteClass(ref)
return proxy_obj
else:
raise DistobTypeError('Unpacking ars: unexpected type %s' % type(ars)) | def function[_ars_to_proxies, parameter[ars]]:
constant[wait for async results and return proxy objects
Args:
ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.
Returns:
Remote* proxy object (or list of them)
]
if <ast.BoolOp object at 0x7da1afea5f90> begin[:]
return[name[ars]] | keyword[def] identifier[_ars_to_proxies] ( identifier[ars] ):
literal[string]
keyword[if] ( identifier[isinstance] ( identifier[ars] , identifier[Remote] ) keyword[or]
identifier[isinstance] ( identifier[ars] , identifier[numbers] . identifier[Number] ) keyword[or]
identifier[ars] keyword[is] keyword[None] ):
keyword[return] identifier[ars]
keyword[elif] identifier[isinstance] ( identifier[ars] , identifier[collections] . identifier[Sequence] ):
identifier[res] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[ars] )):
identifier[res] . identifier[append] ( identifier[_ars_to_proxies] ( identifier[ars] [ identifier[i] ]))
keyword[return] identifier[res]
keyword[elif] identifier[isinstance] ( identifier[ars] , identifier[ipyparallel] . identifier[AsyncResult] ):
identifier[ref] = identifier[ars] . identifier[r]
identifier[ObClass] = identifier[ref] . identifier[type]
keyword[if] identifier[ObClass] keyword[in] identifier[distob] . identifier[engine] . identifier[proxy_types] :
identifier[RemoteClass] = identifier[distob] . identifier[engine] . identifier[proxy_types] [ identifier[ObClass] ]
keyword[else] :
identifier[RemoteClass] = identifier[type] (
literal[string] + identifier[ObClass] . identifier[__name__] ,( identifier[Remote] , identifier[ObClass] ), identifier[dict] ())
identifier[RemoteClass] = identifier[proxy_methods] ( identifier[ObClass] )( identifier[RemoteClass] )
identifier[proxy_obj] = identifier[RemoteClass] ( identifier[ref] )
keyword[return] identifier[proxy_obj]
keyword[else] :
keyword[raise] identifier[DistobTypeError] ( literal[string] % identifier[type] ( identifier[ars] )) | def _ars_to_proxies(ars):
"""wait for async results and return proxy objects
Args:
ars: AsyncResult (or sequence of AsyncResults), each result type ``Ref``.
Returns:
Remote* proxy object (or list of them)
"""
if isinstance(ars, Remote) or isinstance(ars, numbers.Number) or ars is None:
return ars # depends on [control=['if'], data=[]]
elif isinstance(ars, collections.Sequence):
res = []
for i in range(len(ars)):
res.append(_ars_to_proxies(ars[i])) # depends on [control=['for'], data=['i']]
return res # depends on [control=['if'], data=[]]
elif isinstance(ars, ipyparallel.AsyncResult):
ref = ars.r
ObClass = ref.type
if ObClass in distob.engine.proxy_types:
RemoteClass = distob.engine.proxy_types[ObClass] # depends on [control=['if'], data=['ObClass']]
else:
RemoteClass = type('Remote' + ObClass.__name__, (Remote, ObClass), dict())
RemoteClass = proxy_methods(ObClass)(RemoteClass)
proxy_obj = RemoteClass(ref)
return proxy_obj # depends on [control=['if'], data=[]]
else:
raise DistobTypeError('Unpacking ars: unexpected type %s' % type(ars)) |
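The notable move in _ars_to_proxies is minting Remote* proxy classes on the fly with type(); below is a self-contained sketch of that pattern, with distob's Remote base and proxy_types cache replaced by trivial stand-ins.
# Dynamic proxy-class creation via type(), as in the elif branch above.
class Remote:
    def __init__(self, ref):
        self._ref = ref

_proxy_types = {}                          # stands in for distob.engine.proxy_types

def make_remote_class(ob_class):
    if ob_class not in _proxy_types:
        _proxy_types[ob_class] = type('Remote' + ob_class.__name__,
                                      (Remote, ob_class), dict())
    return _proxy_types[ob_class]

proxy = make_remote_class(list)('some-ref')
print(type(proxy).__name__, isinstance(proxy, list))   # Remotelist True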
def htmlCtxtUseOptions(self, options):
"""Applies the options to the parser context """
ret = libxml2mod.htmlCtxtUseOptions(self._o, options)
return ret | def function[htmlCtxtUseOptions, parameter[self, options]]:
constant[Applies the options to the parser context ]
variable[ret] assign[=] call[name[libxml2mod].htmlCtxtUseOptions, parameter[name[self]._o, name[options]]]
return[name[ret]] | keyword[def] identifier[htmlCtxtUseOptions] ( identifier[self] , identifier[options] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[htmlCtxtUseOptions] ( identifier[self] . identifier[_o] , identifier[options] )
keyword[return] identifier[ret] | def htmlCtxtUseOptions(self, options):
"""Applies the options to the parser context """
ret = libxml2mod.htmlCtxtUseOptions(self._o, options)
return ret |
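A hedged usage note for htmlCtxtUseOptions: the options argument is an OR-ed mask of libxml2's HTML_PARSE_* flags; parser-context construction is outside this excerpt, so only the call shape is sketched.
# Hypothetical call shape (ctxt construction not shown here):
# ctxt.htmlCtxtUseOptions(libxml2.HTML_PARSE_NOERROR | libxml2.HTML_PARSE_NOWARNING)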
def do_cat(self, line):
"""cat FILENAME...
Concatenates files and sends to stdout.
"""
# note: when we get around to supporting cat from stdin, we'll need
# to write stdin to a temp file, and then copy the file
# since we need to know the filesize when copying to the pyboard.
args = self.line_to_args(line)
for filename in args:
filename = resolve_path(filename)
mode = auto(get_mode, filename)
if not mode_exists(mode):
print_err("Cannot access '%s': No such file" % filename)
continue
if not mode_isfile(mode):
print_err("'%s': is not a file" % filename)
continue
cat(filename, self.stdout) | def function[do_cat, parameter[self, line]]:
constant[cat FILENAME...
Concatenates files and sends to stdout.
]
variable[args] assign[=] call[name[self].line_to_args, parameter[name[line]]]
for taget[name[filename]] in starred[name[args]] begin[:]
variable[filename] assign[=] call[name[resolve_path], parameter[name[filename]]]
variable[mode] assign[=] call[name[auto], parameter[name[get_mode], name[filename]]]
if <ast.UnaryOp object at 0x7da2044c0be0> begin[:]
call[name[print_err], parameter[binary_operation[constant[Cannot access '%s': No such file] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
continue
if <ast.UnaryOp object at 0x7da2044c1b10> begin[:]
call[name[print_err], parameter[binary_operation[constant['%s': is not a file] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
continue
call[name[cat], parameter[name[filename], name[self].stdout]] | keyword[def] identifier[do_cat] ( identifier[self] , identifier[line] ):
literal[string]
identifier[args] = identifier[self] . identifier[line_to_args] ( identifier[line] )
keyword[for] identifier[filename] keyword[in] identifier[args] :
identifier[filename] = identifier[resolve_path] ( identifier[filename] )
identifier[mode] = identifier[auto] ( identifier[get_mode] , identifier[filename] )
keyword[if] keyword[not] identifier[mode_exists] ( identifier[mode] ):
identifier[print_err] ( literal[string] % identifier[filename] )
keyword[continue]
keyword[if] keyword[not] identifier[mode_isfile] ( identifier[mode] ):
identifier[print_err] ( literal[string] % identifier[filename] )
keyword[continue]
identifier[cat] ( identifier[filename] , identifier[self] . identifier[stdout] ) | def do_cat(self, line):
"""cat FILENAME...
Concatenates files and sends to stdout.
"""
# note: when we get around to supporting cat from stdin, we'll need
# to write stdin to a temp file, and then copy the file
# since we need to know the filesize when copying to the pyboard.
args = self.line_to_args(line)
for filename in args:
filename = resolve_path(filename)
mode = auto(get_mode, filename)
if not mode_exists(mode):
print_err("Cannot access '%s': No such file" % filename)
continue # depends on [control=['if'], data=[]]
if not mode_isfile(mode):
print_err("'%s': is not a file" % filename)
continue # depends on [control=['if'], data=[]]
cat(filename, self.stdout) # depends on [control=['for'], data=['filename']] |
def get_contact_by_email(self, email):
""" Returns a Contact by it's email
:param email: email to get contact for
:return: Contact for specified email
:rtype: Contact
"""
if not email:
return None
email = email.strip()
query = self.q().any(collection='email_addresses', attribute='address',
word=email, operation='eq')
contacts = self.get_contacts(limit=1, query=query)
return contacts[0] if contacts else None | def function[get_contact_by_email, parameter[self, email]]:
constant[ Returns a Contact by its email
:param email: email to get contact for
:return: Contact for specified email
:rtype: Contact
]
if <ast.UnaryOp object at 0x7da1b1b0f790> begin[:]
return[constant[None]]
variable[email] assign[=] call[name[email].strip, parameter[]]
variable[query] assign[=] call[call[name[self].q, parameter[]].any, parameter[]]
variable[contacts] assign[=] call[name[self].get_contacts, parameter[]]
return[<ast.IfExp object at 0x7da1b1c7ef50>] | keyword[def] identifier[get_contact_by_email] ( identifier[self] , identifier[email] ):
literal[string]
keyword[if] keyword[not] identifier[email] :
keyword[return] keyword[None]
identifier[email] = identifier[email] . identifier[strip] ()
identifier[query] = identifier[self] . identifier[q] (). identifier[any] ( identifier[collection] = literal[string] , identifier[attribute] = literal[string] ,
identifier[word] = identifier[email] , identifier[operation] = literal[string] )
identifier[contacts] = identifier[self] . identifier[get_contacts] ( identifier[limit] = literal[int] , identifier[query] = identifier[query] )
keyword[return] identifier[contacts] [ literal[int] ] keyword[if] identifier[contacts] keyword[else] keyword[None] | def get_contact_by_email(self, email):
""" Returns a Contact by it's email
:param email: email to get contact for
:return: Contact for specified email
:rtype: Contact
"""
if not email:
return None # depends on [control=['if'], data=[]]
email = email.strip()
query = self.q().any(collection='email_addresses', attribute='address', word=email, operation='eq')
contacts = self.get_contacts(limit=1, query=query)
return contacts[0] if contacts else None |
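A hedged usage sketch for get_contact_by_email; it assumes a python-O365 style address book reached from an authenticated Account, which this excerpt does not show.
# Hypothetical usage (account/folder setup is outside this excerpt):
# folder = account.address_book()
# contact = folder.get_contact_by_email('ada.lovelace@example.com')
#
# The query built above plausibly maps to an OData filter of roughly this shape:
# $filter=emailAddresses/any(a: a/address eq 'ada.lovelace@example.com')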
def get_serializer(name):
'''
Return the serialize function.
'''
try:
log.debug('Using %s as serializer', name)
return SERIALIZER_LOOKUP[name]
except KeyError:
msg = 'Serializer {} is not available'.format(name)
log.error(msg, exc_info=True)
raise InvalidSerializerException(msg) | def function[get_serializer, parameter[name]]:
constant[
Return the serialize function.
]
<ast.Try object at 0x7da1b157dc00> | keyword[def] identifier[get_serializer] ( identifier[name] ):
literal[string]
keyword[try] :
identifier[log] . identifier[debug] ( literal[string] , identifier[name] )
keyword[return] identifier[SERIALIZER_LOOKUP] [ identifier[name] ]
keyword[except] identifier[KeyError] :
identifier[msg] = literal[string] . identifier[format] ( identifier[name] )
identifier[log] . identifier[error] ( identifier[msg] , identifier[exc_info] = keyword[True] )
keyword[raise] identifier[InvalidSerializerException] ( identifier[msg] ) | def get_serializer(name):
"""
Return the serialize function.
"""
try:
log.debug('Using %s as serializer', name)
return SERIALIZER_LOOKUP[name] # depends on [control=['try'], data=[]]
except KeyError:
msg = 'Serializer {} is not available'.format(name)
log.error(msg, exc_info=True)
raise InvalidSerializerException(msg) # depends on [control=['except'], data=[]] |
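SERIALIZER_LOOKUP itself is not shown in this excerpt; the sketch below assumes it is a plain name-to-callable registry and reproduces the lookup-or-raise behaviour with standard-library serializers.
# Self-contained sketch; SERIALIZER_LOOKUP's real contents are an assumption.
import json
import pickle

class InvalidSerializerException(Exception):
    pass

SERIALIZER_LOOKUP = {
    'json': json.dumps,
    'pickle': pickle.dumps,
}

def get_serializer(name):
    try:
        return SERIALIZER_LOOKUP[name]
    except KeyError:
        raise InvalidSerializerException('Serializer {} is not available'.format(name))

print(get_serializer('json')({'a': 1}))    # {"a": 1}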
def delete_entity(self, partition_key, row_key,
if_match='*'):
'''
Adds a delete entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.delete_entity` for more
information on deletes.
The operation will not be executed until the batch is committed.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The delete operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional delete, set If-Match to the wildcard character (*).
'''
request = _delete_entity(partition_key, row_key, if_match)
self._add_to_batch(partition_key, row_key, request) | def function[delete_entity, parameter[self, partition_key, row_key, if_match]]:
constant[
Adds a delete entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.delete_entity` for more
information on deletes.
The operation will not be executed until the batch is committed.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The delete operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional delete, set If-Match to the wildcard character (*).
]
variable[request] assign[=] call[name[_delete_entity], parameter[name[partition_key], name[row_key], name[if_match]]]
call[name[self]._add_to_batch, parameter[name[partition_key], name[row_key], name[request]]] | keyword[def] identifier[delete_entity] ( identifier[self] , identifier[partition_key] , identifier[row_key] ,
identifier[if_match] = literal[string] ):
literal[string]
identifier[request] = identifier[_delete_entity] ( identifier[partition_key] , identifier[row_key] , identifier[if_match] )
identifier[self] . identifier[_add_to_batch] ( identifier[partition_key] , identifier[row_key] , identifier[request] ) | def delete_entity(self, partition_key, row_key, if_match='*'):
"""
Adds a delete entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.delete_entity` for more
information on deletes.
The operation will not be executed until the batch is committed.
:param str partition_key:
The PartitionKey of the entity.
:param str row_key:
The RowKey of the entity.
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The delete operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional delete, set If-Match to the wildcard character (*).
"""
request = _delete_entity(partition_key, row_key, if_match)
self._add_to_batch(partition_key, row_key, request) |
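A usage sketch assuming the legacy azure-storage table SDK this batch method appears to belong to; the class and method names below should be checked against the installed SDK version.
# Hypothetical usage (credentials and table names are illustrative):
# from azure.storage.table import TableService, TableBatch
# service = TableService(account_name='acct', account_key='key')
# batch = TableBatch()
# batch.delete_entity('tasks', '001')                  # PartitionKey, RowKey
# batch.delete_entity('tasks', '002', if_match='...')  # conditional delete via ETag
# service.commit_batch('tasktable', batch)             # nothing runs until commit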
def vwap(bars):
"""
calculate vwap of entire time series
(input must be a pandas DataFrame with 'high', 'low', 'close' and 'volume' columns)
bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
"""
typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values
volume = bars['volume'].values
return pd.Series(index=bars.index,
data=np.cumsum(volume * typical) / np.cumsum(volume)) | def function[vwap, parameter[bars]]:
constant[
calculate vwap of entire time series
(input must be a pandas DataFrame with 'high', 'low', 'close' and 'volume' columns)
bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
]
variable[typical] assign[=] binary_operation[binary_operation[binary_operation[call[name[bars]][constant[high]] + call[name[bars]][constant[low]]] + call[name[bars]][constant[close]]] / constant[3]].values
variable[volume] assign[=] call[name[bars]][constant[volume]].values
return[call[name[pd].Series, parameter[]]] | keyword[def] identifier[vwap] ( identifier[bars] ):
literal[string]
identifier[typical] =(( identifier[bars] [ literal[string] ]+ identifier[bars] [ literal[string] ]+ identifier[bars] [ literal[string] ])/ literal[int] ). identifier[values]
identifier[volume] = identifier[bars] [ literal[string] ]. identifier[values]
keyword[return] identifier[pd] . identifier[Series] ( identifier[index] = identifier[bars] . identifier[index] ,
identifier[data] = identifier[np] . identifier[cumsum] ( identifier[volume] * identifier[typical] )/ identifier[np] . identifier[cumsum] ( identifier[volume] )) | def vwap(bars):
"""
calculate vwap of entire time series
(input must be a pandas DataFrame with 'high', 'low', 'close' and 'volume' columns)
bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
"""
typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values
volume = bars['volume'].values
return pd.Series(index=bars.index, data=np.cumsum(volume * typical) / np.cumsum(volume)) |
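A small worked example of the cumulative VWAP above; the bar values are made up.
# Worked example: cumulative VWAP over three made-up bars.
import numpy as np
import pandas as pd

bars = pd.DataFrame({
    'high':   [11.0, 12.0, 13.0],
    'low':    [ 9.0, 10.0, 11.0],
    'close':  [10.0, 11.0, 12.0],
    'volume': [100,  200,  100],
})
typical = (bars['high'] + bars['low'] + bars['close']) / 3    # 10, 11, 12
vwap = pd.Series(index=bars.index,
                 data=np.cumsum(bars['volume'] * typical) / np.cumsum(bars['volume']))
print(vwap.tolist())   # [10.0, 10.666..., 11.0]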
def checkMultipleFiles(input):
""" Evaluates the input to determine whether there is 1 or more than 1 valid input file.
"""
f,i,o,a=buildFileList(input)
return len(f) > 1 | def function[checkMultipleFiles, parameter[input]]:
constant[ Evaluates the input to determine whether there is 1 or more than 1 valid input file.
]
<ast.Tuple object at 0x7da1b1a7fd60> assign[=] call[name[buildFileList], parameter[name[input]]]
return[compare[call[name[len], parameter[name[f]]] greater[>] constant[1]]] | keyword[def] identifier[checkMultipleFiles] ( identifier[input] ):
literal[string]
identifier[f] , identifier[i] , identifier[o] , identifier[a] = identifier[buildFileList] ( identifier[input] )
keyword[return] identifier[len] ( identifier[f] )> literal[int] | def checkMultipleFiles(input):
""" Evaluates the input to determine whether there is 1 or more than 1 valid input file.
"""
(f, i, o, a) = buildFileList(input)
return len(f) > 1 |
def send_http_request_with_form_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_form_parameters(context)
send_http_request(context, method) | def function[send_http_request_with_form_parameters, parameter[context, method]]:
constant[
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
]
call[name[safe_add_http_request_context_to_behave_context], parameter[name[context]]]
call[name[set_form_parameters], parameter[name[context]]]
call[name[send_http_request], parameter[name[context], name[method]]] | keyword[def] identifier[send_http_request_with_form_parameters] ( identifier[context] , identifier[method] ):
literal[string]
identifier[safe_add_http_request_context_to_behave_context] ( identifier[context] )
identifier[set_form_parameters] ( identifier[context] )
identifier[send_http_request] ( identifier[context] , identifier[method] ) | def send_http_request_with_form_parameters(context, method):
"""
Parameters:
+-------------+--------------+
| param_name | param_value |
+=============+==============+
| param1 | value1 |
+-------------+--------------+
| param2 | value2 |
+-------------+--------------+
"""
safe_add_http_request_context_to_behave_context(context)
set_form_parameters(context)
send_http_request(context, method) |
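This step expects a behave table on the context; the Gherkin side it would consume is sketched in the comments below, together with a purely hypothetical stub of set_form_parameters (its real body is not in this excerpt).
# Feature-file side that would feed this step (illustration only):
#
#   When I send a POST request with form parameters
#     | param_name | param_value |
#     | username   | ada         |
#     | active     | true        |
#
def set_form_parameters(context):          # hypothetical stub, not the real helper
    context.form_parameters = {
        row['param_name']: row['param_value'] for row in context.table
    }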
def _model_error_corr(self, catchment1, catchment2):
"""
Return model error correlation between subject catchment and other catchment.
Methodology source: Kjeldsen & Jones, 2009, table 3
:param catchment1: catchment to calculate error correlation with
:type catchment1: :class:`Catchment`
:param catchment2: catchment to calculate error correlation with
:type catchment2: :class:`Catchment`
:return: correlation coefficient, r
:rtype: float
"""
dist = catchment1.distance_to(catchment2)
return self._dist_corr(dist, 0.3998, 0.0283, 0.9494) | def function[_model_error_corr, parameter[self, catchment1, catchment2]]:
constant[
Return model error correlation between subject catchment and other catchment.
Methodology source: Kjeldsen & Jones, 2009, table 3
:param catchment1: catchment to calculate error correlation with
:type catchment1: :class:`Catchment`
:param catchment2: catchment to calculate error correlation with
:type catchment2: :class:`Catchment`
:return: correlation coefficient, r
:rtype: float
]
variable[dist] assign[=] call[name[catchment1].distance_to, parameter[name[catchment2]]]
return[call[name[self]._dist_corr, parameter[name[dist], constant[0.3998], constant[0.0283], constant[0.9494]]]] | keyword[def] identifier[_model_error_corr] ( identifier[self] , identifier[catchment1] , identifier[catchment2] ):
literal[string]
identifier[dist] = identifier[catchment1] . identifier[distance_to] ( identifier[catchment2] )
keyword[return] identifier[self] . identifier[_dist_corr] ( identifier[dist] , literal[int] , literal[int] , literal[int] ) | def _model_error_corr(self, catchment1, catchment2):
"""
Return model error correlation between subject catchment and other catchment.
Methodology source: Kjeldsen & Jones, 2009, table 3
:param catchment1: catchment to calculate error correlation with
:type catchment1: :class:`Catchment`
:param catchment2: catchment to calculate error correlation with
:type catchment2: :class:`Catchment`
:return: correlation coefficient, r
:rtype: float
"""
dist = catchment1.distance_to(catchment2)
return self._dist_corr(dist, 0.3998, 0.0283, 0.9494) |
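The three constants above are handed to _dist_corr, which sits outside this excerpt; a Kjeldsen & Jones (2009) style distance-decay suggests a two-exponential form, sketched below as a plausible but unverified reading.
# Plausible shape of _dist_corr, NOT verified against the real source:
# r(d) = phi1 * exp(-phi2 * d) + (1 - phi1) * exp(-phi3 * d)
from math import exp

def dist_corr(dist, phi1, phi2, phi3):
    return phi1 * exp(-phi2 * dist) + (1 - phi1) * exp(-phi3 * dist)

print(dist_corr(0, 0.3998, 0.0283, 0.9494))    # 1.0 at zero distance
print(dist_corr(50, 0.3998, 0.0283, 0.9494))   # decays towards 0 with distance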
def derive_temporalnetwork(data, params):
"""
Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the correlation coefficient
for each time point.
Parameters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary parameters
====================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method-specific parameters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination separated by a + (e.g., "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputting the data in the correct way.
analysis_id : str or int
add to identify specific analysis. Generated report will be placed in './report/' + analysis_id + '/derivation_report.html'
report : bool
False by default. If True, a report is saved in ./report/[analysis_id]/derivation_report.html
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Distance metric calculates 1/Distance metric weights, and scales between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2]; a window size of 6 entails [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5])
Given x params['distribution_params'] contains the remaining parameters.
e.g. normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5].
This will lead to a gaussian with its peak at in the middle of each window with a standard deviation of 5.
Instead, if we set params['distribution_params'] to [10,5], this will lead to a half gaussian with its peak at the final time point with a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC*W). If weightby is selected, do not standardize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC+W). If weightby is selected, do not standardize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report*
"""
report = {}
if 'dimord' not in params.keys():
params['dimord'] = 'node,time'
if 'report' not in params.keys():
params['report'] = False
if 'analysis_id' not in params.keys():
params['analysis_id'] = ''
if 'postpro' not in params.keys():
params['postpro'] = 'no'
if params['report'] == 'yes' or params['report'] == True:
if 'analysis_id' not in params.keys():
params['analysis_id'] = ''
if 'report_path' not in params.keys():
params['report_path'] = './report/' + params['analysis_id']
if 'report_filename' not in params.keys():
params['report_filename'] = 'derivation_report.html'
if params['dimord'] == 'node,time':
data = data.transpose()
if isinstance(params['method'], str):
if params['method'] == 'jackknife':
weights, report = _weightfun_jackknife(data.shape[0], report)
relation = 'weight'
elif params['method'] == 'sliding window' or params['method'] == 'slidingwindow':
weights, report = _weightfun_sliding_window(
data.shape[0], params, report)
relation = 'weight'
elif params['method'] == 'tapered sliding window' or params['method'] == 'taperedslidingwindow':
weights, report = _weightfun_tapered_sliding_window(
data.shape[0], params, report)
relation = 'weight'
elif params['method'] == 'distance' or params['method'] == "spatial distance" or params['method'] == "node distance" or params['method'] == "nodedistance" or params['method'] == "spatialdistance":
weights, report = _weightfun_spatial_distance(data, params, report)
relation = 'weight'
elif params['method'] == 'mtd' or params['method'] == 'multiply temporal derivative' or params['method'] == 'multiplytemporalderivative' or params['method'] == 'temporal derivative' or params['method'] == "temporalderivative":
R, report = _temporal_derivative(data, params, report)
relation = 'coupling'
else:
raise ValueError(
'Unrecognized method. See derive_with_weighted_pearson documentation for predefined methods or enter own weight matrix')
else:
try:
weights = np.array(params['method'])
relation = 'weight'
except:
raise ValueError(
'Unrecognized method. See documentation for predefined methods')
if weights.shape[0] != weights.shape[1]:
raise ValueError("weight matrix should be square")
if weights.shape[0] != data.shape[0]:
raise ValueError("weight matrix must equal number of time points")
if relation == 'weight':
# Loop over each weight vector and calculate pearson correlation.
# Note, should see if this can be made quicker in future.
R = np.array(
[DescrStatsW(data, weights[i, :]).corrcoef for i in range(0, weights.shape[0])])
# Make node,node,time
R = R.transpose([1, 2, 0])
# Correct jackknife direction
if params['method'] == 'jackknife':
# Correct inversion
R = R * -1
jc_z = 0
if 'weight-var' in params.keys():
R = np.transpose(R, [2, 0, 1])
R = (R - R.mean(axis=0)) / R.std(axis=0)
jc_z = 1
R = R * params['weight-var']
R = R.transpose([1, 2, 0])
if 'weight-mean' in params.keys():
R = np.transpose(R, [2, 0, 1])
if jc_z == 0:
R = (R - R.mean(axis=0)) / R.std(axis=0)
R = R + params['weight-mean']
R = np.transpose(R, [1, 2, 0])
R = set_diagonal(R, 1)
if params['postpro'] != 'no':
R, report = postpro_pipeline(
R, params['postpro'], report)
R = set_diagonal(R, 1)
if params['report'] == 'yes' or params['report'] == True:
gen_report(report, params['report_path'], params['report_filename'])
return R | def function[derive_temporalnetwork, parameter[data, params]]:
constant[
Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the correlation coefficient
for each time point.
Parameters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary parameters
====================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method-specific parameters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination separated by a + (e.g., "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputting the data in the correct way.
analysis_id : str or int
add to identify specific analysis. Generated report will be placed in './report/' + analysis_id + '/derivation_report.html'
report : bool
False by default. If True, a report is saved in ./report/[analysis_id]/derivation_report.html
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Distance metric calculates 1/Distance metric weights, and scales between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2]; a window size of 6 entails [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5])
Given x params['distribution_params'] contains the remaining parameters.
e.g. normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5].
This will lead to a gaussian with its peak at in the middle of each window with a standard deviation of 5.
Instead, if we set params['distribution_params'] to [10,5], this will lead to a half gaussian with its peak at the final time point with a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC*W). If weightby is selected, do not standardize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC+W). If weightby is selected, do not standardize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report*
]
variable[report] assign[=] dictionary[[], []]
if compare[constant[dimord] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[dimord]] assign[=] constant[node,time]
if compare[constant[report] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[report]] assign[=] constant[False]
if compare[constant[analysis_id] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[analysis_id]] assign[=] constant[]
if compare[constant[postpro] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[postpro]] assign[=] constant[no]
if <ast.BoolOp object at 0x7da207f00f40> begin[:]
if compare[constant[analysis_id] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[analysis_id]] assign[=] constant[]
if compare[constant[report_path] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[report_path]] assign[=] binary_operation[constant[./report/] + call[name[params]][constant[analysis_id]]]
if compare[constant[report_filename] <ast.NotIn object at 0x7da2590d7190> call[name[params].keys, parameter[]]] begin[:]
call[name[params]][constant[report_filename]] assign[=] constant[derivation_report.html]
if compare[call[name[params]][constant[dimord]] equal[==] constant[node,time]] begin[:]
variable[data] assign[=] call[name[data].transpose, parameter[]]
if call[name[isinstance], parameter[call[name[params]][constant[method]], name[str]]] begin[:]
if compare[call[name[params]][constant[method]] equal[==] constant[jackknife]] begin[:]
<ast.Tuple object at 0x7da207f022c0> assign[=] call[name[_weightfun_jackknife], parameter[call[name[data].shape][constant[0]], name[report]]]
variable[relation] assign[=] constant[weight]
if compare[name[relation] equal[==] constant[weight]] begin[:]
variable[R] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da204565e40>]]
variable[R] assign[=] call[name[R].transpose, parameter[list[[<ast.Constant object at 0x7da204564f40>, <ast.Constant object at 0x7da204564df0>, <ast.Constant object at 0x7da204564550>]]]]
if compare[call[name[params]][constant[method]] equal[==] constant[jackknife]] begin[:]
variable[R] assign[=] binary_operation[name[R] * <ast.UnaryOp object at 0x7da2045655d0>]
variable[jc_z] assign[=] constant[0]
if compare[constant[weight-var] in call[name[params].keys, parameter[]]] begin[:]
variable[R] assign[=] call[name[np].transpose, parameter[name[R], list[[<ast.Constant object at 0x7da204566110>, <ast.Constant object at 0x7da204567d90>, <ast.Constant object at 0x7da204566710>]]]]
variable[R] assign[=] binary_operation[binary_operation[name[R] - call[name[R].mean, parameter[]]] / call[name[R].std, parameter[]]]
variable[jc_z] assign[=] constant[1]
variable[R] assign[=] binary_operation[name[R] * call[name[params]][constant[weight-var]]]
variable[R] assign[=] call[name[R].transpose, parameter[list[[<ast.Constant object at 0x7da2045672b0>, <ast.Constant object at 0x7da204567700>, <ast.Constant object at 0x7da204566b60>]]]]
if compare[constant[weight-mean] in call[name[params].keys, parameter[]]] begin[:]
variable[R] assign[=] call[name[np].transpose, parameter[name[R], list[[<ast.Constant object at 0x7da2045654e0>, <ast.Constant object at 0x7da204564490>, <ast.Constant object at 0x7da204564130>]]]]
if compare[name[jc_z] equal[==] constant[0]] begin[:]
variable[R] assign[=] binary_operation[binary_operation[name[R] - call[name[R].mean, parameter[]]] / call[name[R].std, parameter[]]]
variable[R] assign[=] binary_operation[name[R] + call[name[params]][constant[weight-mean]]]
variable[R] assign[=] call[name[np].transpose, parameter[name[R], list[[<ast.Constant object at 0x7da20e7497b0>, <ast.Constant object at 0x7da20e74baf0>, <ast.Constant object at 0x7da20e74b3a0>]]]]
variable[R] assign[=] call[name[set_diagonal], parameter[name[R], constant[1]]]
if compare[call[name[params]][constant[postpro]] not_equal[!=] constant[no]] begin[:]
<ast.Tuple object at 0x7da20e74b2e0> assign[=] call[name[postpro_pipeline], parameter[name[R], call[name[params]][constant[postpro]], name[report]]]
variable[R] assign[=] call[name[set_diagonal], parameter[name[R], constant[1]]]
if <ast.BoolOp object at 0x7da20e74b370> begin[:]
call[name[gen_report], parameter[name[report], call[name[params]][constant[report_path]], call[name[params]][constant[report_filename]]]]
return[name[R]] | keyword[def] identifier[derive_temporalnetwork] ( identifier[data] , identifier[params] ):
literal[string]
identifier[report] ={}
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= keyword[False]
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= literal[string]
keyword[if] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== keyword[True] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= literal[string] + identifier[params] [ literal[string] ]
keyword[if] literal[string] keyword[not] keyword[in] identifier[params] . identifier[keys] ():
identifier[params] [ literal[string] ]= literal[string]
keyword[if] identifier[params] [ literal[string] ]== literal[string] :
identifier[data] = identifier[data] . identifier[transpose] ()
keyword[if] identifier[isinstance] ( identifier[params] [ literal[string] ], identifier[str] ):
keyword[if] identifier[params] [ literal[string] ]== literal[string] :
identifier[weights] , identifier[report] = identifier[_weightfun_jackknife] ( identifier[data] . identifier[shape] [ literal[int] ], identifier[report] )
identifier[relation] = literal[string]
keyword[elif] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] :
identifier[weights] , identifier[report] = identifier[_weightfun_sliding_window] (
identifier[data] . identifier[shape] [ literal[int] ], identifier[params] , identifier[report] )
identifier[relation] = literal[string]
keyword[elif] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] :
identifier[weights] , identifier[report] = identifier[_weightfun_tapered_sliding_window] (
identifier[data] . identifier[shape] [ literal[int] ], identifier[params] , identifier[report] )
identifier[relation] = literal[string]
keyword[elif] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] :
identifier[weights] , identifier[report] = identifier[_weightfun_spatial_distance] ( identifier[data] , identifier[params] , identifier[report] )
identifier[relation] = literal[string]
keyword[elif] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== literal[string] :
identifier[R] , identifier[report] = identifier[_temporal_derivative] ( identifier[data] , identifier[params] , identifier[report] )
identifier[relation] = literal[string]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[else] :
keyword[try] :
identifier[weights] = identifier[np] . identifier[array] ( identifier[params] [ literal[string] ])
identifier[relation] = literal[string]
keyword[except] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] identifier[weights] . identifier[shape] [ literal[int] ]!= identifier[weights] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[weights] . identifier[shape] [ literal[int] ]!= identifier[data] . identifier[shape] [ literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[relation] == literal[string] :
identifier[R] = identifier[np] . identifier[array] (
[ identifier[DescrStatsW] ( identifier[data] , identifier[weights] [ identifier[i] ,:]). identifier[corrcoef] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[weights] . identifier[shape] [ literal[int] ])])
identifier[R] = identifier[R] . identifier[transpose] ([ literal[int] , literal[int] , literal[int] ])
keyword[if] identifier[params] [ literal[string] ]== literal[string] :
identifier[R] = identifier[R] *- literal[int]
identifier[jc_z] = literal[int]
keyword[if] literal[string] keyword[in] identifier[params] . identifier[keys] ():
identifier[R] = identifier[np] . identifier[transpose] ( identifier[R] ,[ literal[int] , literal[int] , literal[int] ])
identifier[R] =( identifier[R] - identifier[R] . identifier[mean] ( identifier[axis] = literal[int] ))/ identifier[R] . identifier[std] ( identifier[axis] = literal[int] )
identifier[jc_z] = literal[int]
identifier[R] = identifier[R] * identifier[params] [ literal[string] ]
identifier[R] = identifier[R] . identifier[transpose] ([ literal[int] , literal[int] , literal[int] ])
keyword[if] literal[string] keyword[in] identifier[params] . identifier[keys] ():
identifier[R] = identifier[np] . identifier[transpose] ( identifier[R] ,[ literal[int] , literal[int] , literal[int] ])
keyword[if] identifier[jc_z] == literal[int] :
identifier[R] =( identifier[R] - identifier[R] . identifier[mean] ( identifier[axis] = literal[int] ))/ identifier[R] . identifier[std] ( identifier[axis] = literal[int] )
identifier[R] = identifier[R] + identifier[params] [ literal[string] ]
identifier[R] = identifier[np] . identifier[transpose] ( identifier[R] ,[ literal[int] , literal[int] , literal[int] ])
identifier[R] = identifier[set_diagonal] ( identifier[R] , literal[int] )
keyword[if] identifier[params] [ literal[string] ]!= literal[string] :
identifier[R] , identifier[report] = identifier[postpro_pipeline] (
identifier[R] , identifier[params] [ literal[string] ], identifier[report] )
identifier[R] = identifier[set_diagonal] ( identifier[R] , literal[int] )
keyword[if] identifier[params] [ literal[string] ]== literal[string] keyword[or] identifier[params] [ literal[string] ]== keyword[True] :
identifier[gen_report] ( identifier[report] , identifier[params] [ literal[string] ], identifier[params] [ literal[string] ])
keyword[return] identifier[R] | def derive_temporalnetwork(data, params):
"""
Derives connectivity from the data. A lot of data is inherently built with edges
(e.g. communication between two individuals).
However other networks are derived from the covariance of time series
(e.g. brain networks between two regions).
Covariance based metrics deriving time-resolved networks can be done in multiple ways.
There are other methods apart from covariance based.
Derive a weight vector for each time point and then the correlation coefficient
for each time point.
Parameters
----------
data : array
Time series data to perform connectivity derivation on. (Default dimensions are: (time as rows, nodes as columns). Change params{'dimord'} if you want it the other way (see below).
params : dict
Parameters for each method (see below).
Necessary parameters
====================
method : str
method: "distance","slidingwindow", "taperedslidingwindow",
"jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time.
**Different methods have method-specific parameters (see below)**
Params for all methods (optional)
=================================
postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize"
and any combination separated by a + (e.g., "fisher+boxcox").
See postpro_pipeline for more information.
dimord : str
Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputting the data in the correct way.
analysis_id : str or int
add to identify specific analysis. Generated report will be placed in './report/' + analysis_id + '/derivation_report.html'
report : bool
False by default. If True, a report is saved in ./report/[analysis_id]/derivation_report.html
report_path : str
String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html
Methods specific parameters
===========================
method == "distance"
~~~~~~~~~~~~~~~~~~~
Distance metric calculates 1/Distance metric weights, and scales between 0 and 1.
W[t,t] is excluded from the scaling and then set to 1.
params['distance']: str
Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info
When method == "slidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "taperedslidingwindow"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
params['distribution'] : str
Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html
params['distribution_params'] : list
Each parameter, excluding the data "x" (in their scipy function order) to generate pdf.
NOTE
!!!!!!!!!!
The data x should be considered to be centered at 0 and have a length of window size.
(i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2]; a window size of 6 entails [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5])
Given x params['distribution_params'] contains the remaining parameters.
e.g. normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std.
This means that the mean and std have to be provided in distribution_params.
Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5].
This will lead to a gaussian with its peak at in the middle of each window with a standard deviation of 5.
Instead, if we set params['distribution_params'] to [10,5], this will lead to a half gaussian with its peak at the final time point with a standard deviation of 5.
When method == "temporalderivative"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
params['windowsize'] : int
Size of window.
When method == "jackknife"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
No parameters are necessary.
Optional parameters:
params['weight-var'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC*W). If weightby is selected, do not standardize in postpro.
params['weight-mean'] : array, (optional)
NxN array to weight the JC estimates (standardized-JC+W). If weightby is selected, do not standardize in postpro.
Returns
-------
G : array
Connectivity estimates (nodes x nodes x time)
READ MORE
---------
About the general weighted pearson approach used for most methods, see:
Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity.
Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057)
SEE ALSO
--------
*postpro_pipeline*, *gen_report*
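Examples
--------
A minimal sketch of a full call using the sliding window method. The function name follows the error message in the implementation and may differ in your version; data and window length are illustrative:

>>> import numpy as np
>>> data = np.random.randn(5, 100)  # 5 nodes, 100 time points (dimord 'node,time')
>>> params = {'method': 'slidingwindow', 'windowsize': 10}
>>> G = derive_with_weighted_pearson(data, params)
>>> # G.shape is (node, node, windows), e.g. (5, 5, 91) with one window per start position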
"""
report = {}
if 'dimord' not in params.keys():
    params['dimord'] = 'node,time'
if 'report' not in params.keys():
    params['report'] = False
if 'analysis_id' not in params.keys():
    params['analysis_id'] = ''
if 'postpro' not in params.keys():
    params['postpro'] = 'no'
if params['report'] == 'yes' or params['report'] == True:
    if 'analysis_id' not in params.keys():
        params['analysis_id'] = ''
    if 'report_path' not in params.keys():
        params['report_path'] = './report/' + params['analysis_id']
    if 'report_filename' not in params.keys():
        params['report_filename'] = 'derivation_report.html'
if params['dimord'] == 'node,time':
    data = data.transpose()
if isinstance(params['method'], str):
    if params['method'] == 'jackknife':
        weights, report = _weightfun_jackknife(data.shape[0], report)
        relation = 'weight'
    elif params['method'] in ('sliding window', 'slidingwindow'):
        weights, report = _weightfun_sliding_window(
            data.shape[0], params, report)
        relation = 'weight'
    elif params['method'] in ('tapered sliding window', 'taperedslidingwindow'):
        weights, report = _weightfun_tapered_sliding_window(
            data.shape[0], params, report)
        relation = 'weight'
    elif params['method'] in ('distance', 'spatial distance', 'node distance', 'nodedistance', 'spatialdistance'):
        weights, report = _weightfun_spatial_distance(data, params, report)
        relation = 'weight'
    elif params['method'] in ('mtd', 'multiply temporal derivative', 'multiplytemporalderivative', 'temporal derivative', 'temporalderivative'):
        R, report = _temporal_derivative(data, params, report)
        relation = 'coupling'
    else:
        raise ValueError(
            'Unrecognized method. See derive_with_weighted_pearson documentation for predefined methods or enter your own weight matrix')
else:
    try:
        weights = np.array(params['method'])
        relation = 'weight'
    except Exception:
        raise ValueError(
            'Unrecognized method. See documentation for predefined methods')
    if weights.shape[0] != weights.shape[1]:
        raise ValueError('weight matrix should be square')
    if weights.shape[0] != data.shape[0]:
        raise ValueError('weight matrix must equal number of time points')
if relation == 'weight':
    # Loop over each weight vector and calculate the weighted Pearson correlation.
    # Note: this loop could likely be vectorized for speed in the future.
    R = np.array([DescrStatsW(data, weights[i, :]).corrcoef
                  for i in range(0, weights.shape[0])])
    # Rearrange to (node, node, time)
    R = R.transpose([1, 2, 0])
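    # For reference, a sketch (not the library's code) of what each weighted
    # corrcoef computes: given weights w over time, the weighted mean is
    # m_i = sum_t w_t * x_ti / sum_t w_t, the weighted covariance is
    # C_ij = sum_t w_t * (x_ti - m_i) * (x_tj - m_j) / sum_t w_t,
    # and the correlation is C_ij / sqrt(C_ii * C_jj).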
# Correct jackknife direction
if params['method'] == 'jackknife':
    # Correct inversion
    R = R * -1
    jc_z = 0
    if 'weight-var' in params.keys():
        R = np.transpose(R, [2, 0, 1])
        R = (R - R.mean(axis=0)) / R.std(axis=0)
        jc_z = 1
        R = R * params['weight-var']
        R = R.transpose([1, 2, 0])
    if 'weight-mean' in params.keys():
        R = np.transpose(R, [2, 0, 1])
        if jc_z == 0:
            R = (R - R.mean(axis=0)) / R.std(axis=0)
        R = R + params['weight-mean']
        R = np.transpose(R, [1, 2, 0])
    R = set_diagonal(R, 1)
if params['postpro'] != 'no':
    R, report = postpro_pipeline(R, params['postpro'], report)
    R = set_diagonal(R, 1)
if params['report'] == 'yes' or params['report'] == True:
    gen_report(report, params['report_path'], params['report_filename'])
return R |