code | code_sememe | token_type | code_dependency
---|---|---|---
def asyncPipeSubstr(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously returns a substring. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'from': {'type': 'number', 'value': <starting position>},
'length': {'type': 'number', 'value': <count of characters to return>}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of substrings
"""
conf['start'] = conf.pop('from', dict.get(conf, 'start'))
splits = yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs))
parsed = yield asyncDispatch(splits, *get_async_dispatch_funcs())
_OUTPUT = yield asyncStarMap(partial(maybeDeferred, parse_result), parsed)
returnValue(iter(_OUTPUT)) | def function[asyncPipeSubstr, parameter[context, _INPUT, conf]]:
constant[A string module that asynchronously returns a substring. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'from': {'type': 'number', 'value': <starting position>},
'length': {'type': 'number', 'value': <count of characters to return>}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of substrings
]
call[name[conf]][constant[start]] assign[=] call[name[conf].pop, parameter[constant[from], call[name[dict].get, parameter[name[conf], constant[start]]]]]
variable[splits] assign[=] <ast.Yield object at 0x7da1b05063e0>
variable[parsed] assign[=] <ast.Yield object at 0x7da1b04602e0>
variable[_OUTPUT] assign[=] <ast.Yield object at 0x7da1b0460b80>
call[name[returnValue], parameter[call[name[iter], parameter[name[_OUTPUT]]]]] | keyword[def] identifier[asyncPipeSubstr] ( identifier[context] = keyword[None] , identifier[_INPUT] = keyword[None] , identifier[conf] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[conf] [ literal[string] ]= identifier[conf] . identifier[pop] ( literal[string] , identifier[dict] . identifier[get] ( identifier[conf] , literal[string] ))
identifier[splits] = keyword[yield] identifier[asyncGetSplits] ( identifier[_INPUT] , identifier[conf] ,** identifier[cdicts] ( identifier[opts] , identifier[kwargs] ))
identifier[parsed] = keyword[yield] identifier[asyncDispatch] ( identifier[splits] ,* identifier[get_async_dispatch_funcs] ())
identifier[_OUTPUT] = keyword[yield] identifier[asyncStarMap] ( identifier[partial] ( identifier[maybeDeferred] , identifier[parse_result] ), identifier[parsed] )
identifier[returnValue] ( identifier[iter] ( identifier[_OUTPUT] )) | def asyncPipeSubstr(context=None, _INPUT=None, conf=None, **kwargs):
"""A string module that asynchronously returns a substring. Loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : twisted Deferred iterable of items or strings
conf : {
'from': {'type': 'number', 'value': <starting position>},
'length': {'type': 'number', 'value': <count of characters to return>}
}
Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of substrings
"""
conf['start'] = conf.pop('from', dict.get(conf, 'start'))
splits = (yield asyncGetSplits(_INPUT, conf, **cdicts(opts, kwargs)))
parsed = (yield asyncDispatch(splits, *get_async_dispatch_funcs()))
_OUTPUT = (yield asyncStarMap(partial(maybeDeferred, parse_result), parsed))
returnValue(iter(_OUTPUT)) |
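
The row above relies on Twisted's @inlineCallbacks protocol (yield unwraps a Deferred; returnValue sets the generator's result), though the decorator itself is elided in the snippet. Below is a minimal, self-contained sketch of that pattern with a toy async_substr standing in for the pipe — the names are illustrative, not pipe2py API.

```python
# Minimal sketch of the inlineCallbacks pattern (illustrative names only):
# yield unwraps Deferreds inside the generator; returnValue() sets its result.
from twisted.internet import defer, reactor

@defer.inlineCallbacks
def async_substr(strings, start, length):
    # Pretend the items arrive asynchronously, as asyncGetSplits would deliver them.
    items = yield defer.succeed(strings)
    # iter(...) mirrors the row's returnValue(iter(_OUTPUT))
    defer.returnValue(iter(s[start:start + length] for s in items))

@defer.inlineCallbacks
def main():
    out = yield async_substr(['hello world', 'twisted python'], 0, 5)
    print(list(out))  # ['hello', 'twist']
    reactor.stop()

reactor.callWhenRunning(main)
reactor.run()
```
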
def error(self, *args):
"""Log an error. By default this will also raise an exception."""
if _canShortcutLogging(self.logCategory, ERROR):
return
errorObject(self.logObjectName(), self.logCategory,
*self.logFunction(*args)) | def function[error, parameter[self]]:
constant[Log an error. By default this will also raise an exception.]
if call[name[_canShortcutLogging], parameter[name[self].logCategory, name[ERROR]]] begin[:]
return[None]
call[name[errorObject], parameter[call[name[self].logObjectName, parameter[]], name[self].logCategory, <ast.Starred object at 0x7da1b0ab99f0>]] | keyword[def] identifier[error] ( identifier[self] ,* identifier[args] ):
literal[string]
keyword[if] identifier[_canShortcutLogging] ( identifier[self] . identifier[logCategory] , identifier[ERROR] ):
keyword[return]
identifier[errorObject] ( identifier[self] . identifier[logObjectName] (), identifier[self] . identifier[logCategory] ,
* identifier[self] . identifier[logFunction] (* identifier[args] )) | def error(self, *args):
"""Log an error. By default this will also raise an exception."""
if _canShortcutLogging(self.logCategory, ERROR):
return # depends on [control=['if'], data=[]]
errorObject(self.logObjectName(), self.logCategory, *self.logFunction(*args)) |
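
The error method above illustrates a shortcut-logging pattern: bail out before formatting arguments when the category is filtered. A self-contained sketch of that idea follows; _canShortcutLogging, errorObject, and the Loggable class here are stand-ins built on the standard logging module, not the original library's API, and the "also raise an exception" behavior the docstring mentions is omitted for brevity.

```python
# Sketch: skip (potentially costly) argument formatting when the level is filtered.
import logging

ERROR = logging.ERROR

def _canShortcutLogging(category, level):
    # True when messages at `level` would be discarded for this category
    return logging.getLogger(category).getEffectiveLevel() > level

def errorObject(name, category, fmt, *args):
    logging.getLogger(category).error('%s: ' + fmt, name, *args)

class Loggable:
    logCategory = 'demo'

    def logObjectName(self):
        return type(self).__name__

    def logFunction(self, *args):
        return args  # hook for subclasses to reshape log arguments

    def error(self, *args):
        if _canShortcutLogging(self.logCategory, ERROR):
            return
        errorObject(self.logObjectName(), self.logCategory,
                    *self.logFunction(*args))

logging.basicConfig(level=logging.ERROR)
Loggable().error('connection failed: %s', 'timeout')
```
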
def _validate_unique(self, create=True):
""" Validate the unique constraints for the entity """
# Build the filters from the unique constraints
filters, excludes = {}, {}
for field_name, field_obj in self.meta_.unique_fields:
lookup_value = getattr(self, field_name, None)
# Ignore empty lookup values
if lookup_value in Field.empty_values:
continue
# Ignore identifiers on updates
if not create and field_obj.identifier:
excludes[field_name] = lookup_value
continue
filters[field_name] = lookup_value
# Lookup the objects by the filters and raise error on results
for filter_key, lookup_value in filters.items():
if self.exists(excludes, **{filter_key: lookup_value}):
field_obj = self.meta_.declared_fields[filter_key]
field_obj.fail('unique',
entity_name=self.__class__.__name__,
field_name=filter_key) | def function[_validate_unique, parameter[self, create]]:
constant[ Validate the unique constraints for the entity ]
<ast.Tuple object at 0x7da1b195c790> assign[=] tuple[[<ast.Dict object at 0x7da1b195c610>, <ast.Dict object at 0x7da1b195ea70>]]
for taget[tuple[[<ast.Name object at 0x7da1b195c460>, <ast.Name object at 0x7da1b195eda0>]]] in starred[name[self].meta_.unique_fields] begin[:]
variable[lookup_value] assign[=] call[name[getattr], parameter[name[self], name[field_name], constant[None]]]
if compare[name[lookup_value] in name[Field].empty_values] begin[:]
continue
if <ast.BoolOp object at 0x7da1b195ca60> begin[:]
call[name[excludes]][name[field_name]] assign[=] name[lookup_value]
continue
call[name[filters]][name[field_name]] assign[=] name[lookup_value]
for taget[tuple[[<ast.Name object at 0x7da1b195ff70>, <ast.Name object at 0x7da1b195fc10>]]] in starred[call[name[filters].items, parameter[]]] begin[:]
if call[name[self].exists, parameter[name[excludes]]] begin[:]
variable[field_obj] assign[=] call[name[self].meta_.declared_fields][name[filter_key]]
call[name[field_obj].fail, parameter[constant[unique]]] | keyword[def] identifier[_validate_unique] ( identifier[self] , identifier[create] = keyword[True] ):
literal[string]
identifier[filters] , identifier[excludes] ={},{}
keyword[for] identifier[field_name] , identifier[field_obj] keyword[in] identifier[self] . identifier[meta_] . identifier[unique_fields] :
identifier[lookup_value] = identifier[getattr] ( identifier[self] , identifier[field_name] , keyword[None] )
keyword[if] identifier[lookup_value] keyword[in] identifier[Field] . identifier[empty_values] :
keyword[continue]
keyword[if] keyword[not] identifier[create] keyword[and] identifier[field_obj] . identifier[identifier] :
identifier[excludes] [ identifier[field_name] ]= identifier[lookup_value]
keyword[continue]
identifier[filters] [ identifier[field_name] ]= identifier[lookup_value]
keyword[for] identifier[filter_key] , identifier[lookup_value] keyword[in] identifier[filters] . identifier[items] ():
keyword[if] identifier[self] . identifier[exists] ( identifier[excludes] ,**{ identifier[filter_key] : identifier[lookup_value] }):
identifier[field_obj] = identifier[self] . identifier[meta_] . identifier[declared_fields] [ identifier[filter_key] ]
identifier[field_obj] . identifier[fail] ( literal[string] ,
identifier[entity_name] = identifier[self] . identifier[__class__] . identifier[__name__] ,
identifier[field_name] = identifier[filter_key] ) | def _validate_unique(self, create=True):
""" Validate the unique constraints for the entity """
# Build the filters from the unique constraints
(filters, excludes) = ({}, {})
for (field_name, field_obj) in self.meta_.unique_fields:
lookup_value = getattr(self, field_name, None)
# Ignore empty lookup values
if lookup_value in Field.empty_values:
continue # depends on [control=['if'], data=[]]
# Ignore identifiers on updates
if not create and field_obj.identifier:
excludes[field_name] = lookup_value
continue # depends on [control=['if'], data=[]]
filters[field_name] = lookup_value # depends on [control=['for'], data=[]]
# Lookup the objects by the filters and raise error on results
for (filter_key, lookup_value) in filters.items():
if self.exists(excludes, **{filter_key: lookup_value}):
field_obj = self.meta_.declared_fields[filter_key]
field_obj.fail('unique', entity_name=self.__class__.__name__, field_name=filter_key) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
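
The _validate_unique row builds filter/exclude dicts from the entity's unique fields and probes a repository via exists(). Below is a toy sketch of the same flow against an in-memory store, with plain attributes standing in for the meta_/Field machinery — all names are illustrative.

```python
# Toy sketch of the unique-check flow against an in-memory "repository".
class DuplicateError(Exception):
    pass

class Entity:
    unique_fields = ('email',)
    _store = []  # pretend persistence layer

    def __init__(self, **values):
        self.__dict__.update(values)

    @classmethod
    def exists(cls, excludes, **filters):
        for row in cls._store:
            if any(row.get(k) == v for k, v in excludes.items()):
                continue  # skip records excluded from the check (e.g. on update)
            if all(row.get(k) == v for k, v in filters.items()):
                return True
        return False

    def _validate_unique(self):
        for field in self.unique_fields:
            value = getattr(self, field, None)
            if value is None:
                continue  # ignore empty lookup values, as in the original
            if self.exists({}, **{field: value}):
                raise DuplicateError('%s=%r already exists' % (field, value))

Entity._store.append({'email': 'a@example.com'})
try:
    Entity(email='a@example.com')._validate_unique()
except DuplicateError as exc:
    print('rejected:', exc)
```
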
def url(self):
"""
We will always check if this song file exists in the local library;
if it does, we return the url of the local file.
.. note::
As a netease song url expires after a period of time,
we cannot use a static url here. Currently, we assume that the
expiration time is 20 minutes; after the url expires, it
will be automatically refreshed.
"""
local_path = self._find_in_local()
if local_path:
return local_path
if not self._url:
self._refresh_url()
elif time.time() > self._expired_at:
logger.info('song({}) url is expired, refresh...'.format(self))
self._refresh_url()
return self._url | def function[url, parameter[self]]:
constant[
We will always check if this song file exists in the local library;
if it does, we return the url of the local file.
.. note::
As a netease song url expires after a period of time,
we cannot use a static url here. Currently, we assume that the
expiration time is 20 minutes; after the url expires, it
will be automatically refreshed.
]
variable[local_path] assign[=] call[name[self]._find_in_local, parameter[]]
if name[local_path] begin[:]
return[name[local_path]]
if <ast.UnaryOp object at 0x7da18eb54040> begin[:]
call[name[self]._refresh_url, parameter[]]
return[name[self]._url] | keyword[def] identifier[url] ( identifier[self] ):
literal[string]
identifier[local_path] = identifier[self] . identifier[_find_in_local] ()
keyword[if] identifier[local_path] :
keyword[return] identifier[local_path]
keyword[if] keyword[not] identifier[self] . identifier[_url] :
identifier[self] . identifier[_refresh_url] ()
keyword[elif] identifier[time] . identifier[time] ()> identifier[self] . identifier[_expired_at] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] ))
identifier[self] . identifier[_refresh_url] ()
keyword[return] identifier[self] . identifier[_url] | def url(self):
"""
We will always check if this song file exists in the local library;
if it does, we return the url of the local file.
.. note::
As a netease song url expires after a period of time,
we cannot use a static url here. Currently, we assume that the
expiration time is 20 minutes; after the url expires, it
will be automatically refreshed.
"""
local_path = self._find_in_local()
if local_path:
return local_path # depends on [control=['if'], data=[]]
if not self._url:
self._refresh_url() # depends on [control=['if'], data=[]]
elif time.time() > self._expired_at:
logger.info('song({}) url is expired, refresh...'.format(self))
self._refresh_url() # depends on [control=['if'], data=[]]
return self._url |
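
The url property above is a small expiring cache: prefer a local file, otherwise refresh a short-lived remote URL once it is missing or stale. A self-contained sketch of that structure follows, assuming the 20-minute TTL from the docstring; _find_in_local and _refresh_url are stubbed for illustration.

```python
# Sketch of an expiring-URL cache mirroring the row above.
import time

SONG_URL_TTL = 20 * 60  # assumed expiration window, in seconds

class Song:
    def __init__(self):
        self._url = None
        self._expired_at = 0

    def _find_in_local(self):
        return None  # stand-in: no local copy in this sketch

    def _refresh_url(self):
        # stand-in for the network call that fetches a fresh, short-lived URL
        self._url = 'http://example.com/song.mp3?t=%d' % time.time()
        self._expired_at = time.time() + SONG_URL_TTL

    @property
    def url(self):
        local_path = self._find_in_local()
        if local_path:
            return local_path
        if not self._url or time.time() > self._expired_at:
            self._refresh_url()
        return self._url

s = Song()
print(s.url)  # first access triggers a refresh; later ones reuse the cache
```
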
def get_by_oid(self, *oid):
"""SNMP simple request (list of OID).
One request per OID list.
* oid: oid list
> Return a dict
"""
if self.version == '3':
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
else:
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
*oid
)
return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds) | def function[get_by_oid, parameter[self]]:
constant[SNMP simple request (list of OIDs).
One request per OID list.
* oid: OID list
> Return a dict
]
if compare[name[self].version equal[==] constant[3]] begin[:]
<ast.Tuple object at 0x7da18f09ece0> assign[=] call[name[self].cmdGen.getCmd, parameter[call[name[cmdgen].UsmUserData, parameter[name[self].user, name[self].auth]], call[name[cmdgen].UdpTransportTarget, parameter[tuple[[<ast.Attribute object at 0x7da18f09ce80>, <ast.Attribute object at 0x7da18f09ee60>]]]], <ast.Starred object at 0x7da18f09cc40>]]
return[call[name[self].__get_result__, parameter[name[errorIndication], name[errorStatus], name[errorIndex], name[varBinds]]]] | keyword[def] identifier[get_by_oid] ( identifier[self] ,* identifier[oid] ):
literal[string]
keyword[if] identifier[self] . identifier[version] == literal[string] :
identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBinds] = identifier[self] . identifier[cmdGen] . identifier[getCmd] (
identifier[cmdgen] . identifier[UsmUserData] ( identifier[self] . identifier[user] , identifier[self] . identifier[auth] ),
identifier[cmdgen] . identifier[UdpTransportTarget] (( identifier[self] . identifier[host] , identifier[self] . identifier[port] )),
* identifier[oid]
)
keyword[else] :
identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBinds] = identifier[self] . identifier[cmdGen] . identifier[getCmd] (
identifier[cmdgen] . identifier[CommunityData] ( identifier[self] . identifier[community] ),
identifier[cmdgen] . identifier[UdpTransportTarget] (( identifier[self] . identifier[host] , identifier[self] . identifier[port] )),
* identifier[oid]
)
keyword[return] identifier[self] . identifier[__get_result__] ( identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBinds] ) | def get_by_oid(self, *oid):
"""SNMP simple request (list of OID).
One request per OID list.
* oid: oid list
> Return a dict
"""
if self.version == '3':
(errorIndication, errorStatus, errorIndex, varBinds) = self.cmdGen.getCmd(cmdgen.UsmUserData(self.user, self.auth), cmdgen.UdpTransportTarget((self.host, self.port)), *oid) # depends on [control=['if'], data=[]]
else:
(errorIndication, errorStatus, errorIndex, varBinds) = self.cmdGen.getCmd(cmdgen.CommunityData(self.community), cmdgen.UdpTransportTarget((self.host, self.port)), *oid)
return self.__get_result__(errorIndication, errorStatus, errorIndex, varBinds) |
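
The get_by_oid row targets pysnmp's classic synchronous cmdgen API. Below is a hedged sketch of driving a similar GET request directly with that (pysnmp 4.x) API; the host, community string, and OID are placeholders, and real code would also check errorStatus.

```python
# Hedged sketch of a one-shot SNMP GET with pysnmp 4.x's cmdgen oneliner API.
from pysnmp.entity.rfc3413.oneliner import cmdgen

gen = cmdgen.CommandGenerator()
errorIndication, errorStatus, errorIndex, varBinds = gen.getCmd(
    cmdgen.CommunityData('public'),                      # placeholder community
    cmdgen.UdpTransportTarget(('demo.snmplabs.com', 161)),  # placeholder host
    '1.3.6.1.2.1.1.1.0',                                 # sysDescr.0
)
if errorIndication:
    print('request failed:', errorIndication)
else:
    for name, value in varBinds:
        print('%s = %s' % (name.prettyPrint(), value.prettyPrint()))
```
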
def load_file(self):
"""
Loads SAR format logfile in ASCII format (sarXX).
:return: ``True`` if loading and parsing of file went fine, \
``False`` if it failed (at any point)
"""
# We first split file into pieces
searchunks = self._split_file()
if searchunks:
# And then we parse pieces into meaningful data
usage = self._parse_file(searchunks)
if 'CPU' in usage:
return False
self._sarinfo = usage
del usage
return True
else:
return False | def function[load_file, parameter[self]]:
constant[
Loads SAR format logfile in ASCII format (sarXX).
:return: ``True`` if loading and parsing of file went fine, ``False`` if it failed (at any point)
]
variable[searchunks] assign[=] call[name[self]._split_file, parameter[]]
if name[searchunks] begin[:]
variable[usage] assign[=] call[name[self]._parse_file, parameter[name[searchunks]]]
if compare[constant[CPU] in name[usage]] begin[:]
return[constant[False]]
name[self]._sarinfo assign[=] name[usage]
<ast.Delete object at 0x7da1b02e4220>
return[constant[True]] | keyword[def] identifier[load_file] ( identifier[self] ):
literal[string]
identifier[searchunks] = identifier[self] . identifier[_split_file] ()
keyword[if] identifier[searchunks] :
identifier[usage] = identifier[self] . identifier[_parse_file] ( identifier[searchunks] )
keyword[if] literal[string] keyword[in] identifier[usage] :
keyword[return] keyword[False]
identifier[self] . identifier[_sarinfo] = identifier[usage]
keyword[del] identifier[usage]
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def load_file(self):
"""
Loads SAR format logfile in ASCII format (sarXX).
:return: ``True`` if loading and parsing of file went fine, ``False`` if it failed (at any point)
"""
# We first split file into pieces
searchunks = self._split_file()
if searchunks:
# And then we parse pieces into meaningful data
usage = self._parse_file(searchunks)
if 'CPU' in usage:
return False # depends on [control=['if'], data=[]]
self._sarinfo = usage
del usage
return True # depends on [control=['if'], data=[]]
else:
return False |
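
load_file above is a two-phase loader: split the raw log into chunks, then parse them, with an empty split short-circuiting to failure. A toy, self-contained sketch of that structure follows; the chunk format and helper names are invented for illustration and do not reflect the original SAR layout.

```python
# Toy split-then-parse loader mirroring the two-phase structure above.
def _split_file(text):
    # Cut the raw text into non-empty, blank-line-separated chunks
    return [chunk for chunk in text.split('\n\n') if chunk.strip()]

def _parse_chunk(chunk):
    # First line is the section name, the rest is its payload
    header, _, body = chunk.partition('\n')
    return header.strip(), body.strip()

def load_text(text):
    chunks = _split_file(text)
    if not chunks:
        return False, {}  # nothing to parse: fail early, as load_file does
    info = dict(_parse_chunk(c) for c in chunks)
    return True, info

ok, info = load_text('cpu\n10 20 30\n\nmem\n512 1024')
print(ok, info)  # True {'cpu': '10 20 30', 'mem': '512 1024'}
```
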
def parse(md, model, encoding='utf-8', config=None):
"""
Translate the Versa Markdown syntax into Versa model relationships
md -- markdown source text
model -- Versa model to take the output relationship
encoding -- character encoding (defaults to UTF-8)
Returns: The overall base URI (`@base`) specified in the Markdown file, or None
>>> from versa.driver import memory
>>> from versa.reader.md import from_markdown
>>> m = memory.connection()
>>> from_markdown(open('test/resource/poetry.md').read(), m)
'http://uche.ogbuji.net/poems/'
>>> m.size()
40
>>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
(I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {})
"""
#Set up configuration to interpret the conventions for the Markdown
config = config or {}
#This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources
syntaxtypemap = {}
if config.get('autotype-h1'): syntaxtypemap['h1'] = config.get('autotype-h1')
if config.get('autotype-h2'): syntaxtypemap['h2'] = config.get('autotype-h2')
if config.get('autotype-h3'): syntaxtypemap['h3'] = config.get('autotype-h3')
interp_stanza = config.get('interpretations', {})
interpretations = {}
def setup_interpretations(interp):
#Map the interpretation IRIs to functions to do the data prep
for prop, interp_key in interp.items():
if interp_key.startswith('@'):
interp_key = iri.absolutize(interp_key[1:], VERSA_BASEIRI)
if interp_key in PREP_METHODS:
interpretations[prop] = PREP_METHODS[interp_key]
else:
#just use the identity, i.e. no-op
interpretations[prop] = lambda x, **kwargs: x
setup_interpretations(interp_stanza)
#Prep ID generator, in case needed
idg = idgen(None)
#Parse the Markdown
#Alternately:
#from xml.sax.saxutils import escape, unescape
#h = markdown.markdown(escape(md.decode(encoding)), output_format='html5')
#Note: even using safe_mode this should not be presumed safe from tainted input
#h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5')
comments = mkdcomments.CommentsExtension()
h = markdown.markdown(md, safe_mode='escape', output_format='html5', extensions=[comments])
#doc = html.markup_fragment(inputsource.text(h.encode('utf-8')))
tb = treebuilder()
h = '<html>' + h + '</html>'
root = tb.parse(h)
#Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest
first_h1 = next(select_name(descendants(root), 'h1'))
#top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2'))
#Extract header elements. Notice I use an empty element with an empty parent as the default result
docheader = next(select_value(select_name(descendants(root), 'h1'), '@docheader'), element('empty', parent=root)) # //h1[.="@docheader"]
sections = filter(lambda x: x.xml_value != '@docheader', select_name_pattern(descendants(root), HEADER_PAT)) # //h1[not(.="@docheader")]|h2[not(.="@docheader")]|h3[not(.="@docheader")]
def fields(sect):
'''
Each section represents a resource and contains a list with its properties.
This generator parses the list and yields the key/value pairs representing the properties.
Some properties have attributes, expressed in markdown as a nested list. If present, these
attributes are yielded as well; otherwise None is yielded.
'''
#import logging; logging.debug(repr(sect))
#Pull all the list elements until the next header. This accommodates multiple lists in a section
sect_body_items = itertools.takewhile(lambda x: HEADER_PAT.match(x.xml_name) is None, select_elements(following_siblings(sect)))
#results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3')
#field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ]
field_list = [ li for elem in select_name(sect_body_items, 'ul') for li in select_name(elem, 'li') ]
def parse_li(pair):
'''
Parse each list item into a property pair
'''
if pair.strip():
matched = REL_PAT.match(pair)
if not matched:
raise ValueError(_('Syntax error in relationship expression: {0}'.format(pair)))
#print matched.groups()
if matched.group(3): prop = matched.group(3).strip()
if matched.group(4): prop = matched.group(4).strip()
if matched.group(7):
val = matched.group(7).strip()
typeindic = RES_VAL
elif matched.group(9):
val = matched.group(9).strip()
typeindic = TEXT_VAL
elif matched.group(11):
val = matched.group(11).strip()
typeindic = TEXT_VAL
elif matched.group(12):
val = matched.group(12).strip()
typeindic = UNKNOWN_VAL
else:
val = ''
typeindic = UNKNOWN_VAL
#prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ]
#import logging; logging.debug(repr((prop, val)))
return prop, val, typeindic
return None, None, None
#Go through each list item
for li in field_list:
#Is there a nested list, which expresses attributes on a property
if list(select_name(li, 'ul')):
#main = ''.join([ node.xml_value
# for node in itertools.takewhile(
# lambda x: x.xml_name != 'ul', select_elements(li)
# )
# ])
main = ''.join(itertools.takewhile(
lambda x: isinstance(x, text), li.xml_children
))
#main = li.xml_select('string(ul/preceding-sibling::node())')
prop, val, typeindic = parse_li(main)
subfield_list = [ parse_li(sli.xml_value) for e in select_name(li, 'ul') for sli in (
select_name(e, 'li')
) ]
subfield_list = [ (p, v, t) for (p, v, t) in subfield_list if p is not None ]
#Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader
if val is None: val = ''
yield prop, val, typeindic, subfield_list
#Just a regular, unadorned property
else:
prop, val, typeindic = parse_li(li.xml_value)
if prop: yield prop, val, typeindic, None
iris = {}
#Gather the document-level metadata from the @docheader section
base = propbase = rtbase = document_iri = default_lang = None
for prop, val, typeindic, subfield_list in fields(docheader):
#The @iri section is where key IRI prefixes can be set
if prop == '@iri':
for (k, uri, typeindic) in subfield_list:
if k == '@base':
base = propbase = rtbase = uri
elif k == '@property':
propbase = uri
elif k == '@resource-type':
rtbase = uri
else:
iris[k] = uri
#The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship
elif prop == '@interpretations':
#Iterate over items from the @docheader/@interpretations section to set up for further parsing
interp = {}
for k, v, x in subfield_list:
interp[I(iri.absolutize(k, propbase))] = v
setup_interpretations(interp)
#Setting an IRI for this very document being parsed
elif prop == '@document':
document_iri = val
elif prop == '@language':
default_lang = val
#If we have a resource to which to attach them, just attach all other properties
elif document_iri or base:
rid = document_iri or base
fullprop = I(iri.absolutize(prop, propbase or base))
if fullprop in interpretations:
val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
if val is not None: model.add(rid, fullprop, val)
else:
model.add(rid, fullprop, val)
#Default IRI prefixes if @iri/@base is set
if not propbase: propbase = base
if not rtbase: rtbase = base
if not document_iri: document_iri = base
#Go through the resources expressed in remaining sections
for sect in sections:
#if U(sect) == '@docheader': continue #Not needed because excluded by ss
#The header can take one of 4 forms: "ResourceID" "ResourceID [ResourceType]" "[ResourceType]" or "[]"
#The 3rd form is for an anonymous resource with specified type and the 4th an anonymous resource with unspecified type
matched = RESOURCE_PAT.match(sect.xml_value)
if not matched:
raise ValueError(_('Syntax error in resource header: {0}'.format(sect.xml_value)))
rid = matched.group(1)
rtype = matched.group(3)
if rtype:
rtype = I(iri.absolutize(rtype, base))
if rid:
rid = I(iri.absolutize(rid, base))
if not rid:
rid = next(idg)
#Resource type might be set by syntax config
if not rtype:
rtype = syntaxtypemap.get(sect.xml_name)
if rtype:
model.add(rid, TYPE_REL, rtype)
#Add the property
for prop, val, typeindic, subfield_list in fields(sect):
attrs = {}
for (aprop, aval, atype) in subfield_list or ():
if atype == RES_VAL:
valmatch = URI_ABBR_PAT.match(aval)
if valmatch:
uri = iris[valmatch.group(1)]
attrs[aprop] = URI_ABBR_PAT.sub(uri + '\\2\\3', aval)
else:
attrs[aprop] = I(iri.absolutize(aval, rtbase))
elif atype == TEXT_VAL:
attrs[aprop] = aval
elif atype == UNKNOWN_VAL:
attrs[aprop] = aval
if aprop in interpretations:
aval = interpretations[aprop](aval, rid=rid, fullprop=aprop, base=base, model=model)
if aval is not None: attrs[aprop] = aval
else:
attrs[aprop] = aval
propmatch = URI_ABBR_PAT.match(prop)
if propmatch:
uri = iris[propmatch.group(1)]
fullprop = URI_ABBR_PAT.sub(uri + '\\2\\3', prop)
else:
fullprop = I(iri.absolutize(prop, propbase))
if typeindic == RES_VAL:
valmatch = URI_ABBR_PAT.match(val)  # match the main value, not the last attribute value
if valmatch:
uri = iris[valmatch.group(1)]
val = URI_ABBR_PAT.sub(uri + '\\2\\3', val)
else:
val = I(iri.absolutize(val, rtbase))
model.add(rid, fullprop, val, attrs)
elif typeindic == TEXT_VAL:
if '@lang' not in attrs: attrs['@lang'] = default_lang
model.add(rid, fullprop, val, attrs)
elif typeindic == UNKNOWN_VAL:
if fullprop in interpretations:
val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
if val is not None: model.add(rid, fullprop, val)
else:
model.add(rid, fullprop, val, attrs)
#resinfo = AB_RESOURCE_PAT.match(val)
#if resinfo:
# val = resinfo.group(1)
# valtype = resinfo.group(3)
# if not val: val = model.generate_resource()
# if valtype: attrs[TYPE_REL] = valtype
return document_iri | def function[parse, parameter[md, model, encoding, config]]:
constant[
Translate the Versa Markdown syntax into Versa model relationships
md -- markdown source text
model -- Versa model to take the output relationship
encoding -- character encoding (defaults to UTF-8)
Returns: The overall base URI (`@base`) specified in the Markdown file, or None
>>> from versa.driver import memory
>>> from versa.reader.md import from_markdown
>>> m = memory.connection()
>>> from_markdown(open('test/resource/poetry.md').read(), m)
'http://uche.ogbuji.net/poems/'
>>> m.size()
40
>>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
(I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {})
]
variable[config] assign[=] <ast.BoolOp object at 0x7da1b24572b0>
variable[syntaxtypemap] assign[=] dictionary[[], []]
if call[name[config].get, parameter[constant[autotype-h1]]] begin[:]
call[name[syntaxtypemap]][constant[h1]] assign[=] call[name[config].get, parameter[constant[autotype-h1]]]
if call[name[config].get, parameter[constant[autotype-h2]]] begin[:]
call[name[syntaxtypemap]][constant[h2]] assign[=] call[name[config].get, parameter[constant[autotype-h2]]]
if call[name[config].get, parameter[constant[autotype-h3]]] begin[:]
call[name[syntaxtypemap]][constant[h3]] assign[=] call[name[config].get, parameter[constant[autotype-h3]]]
variable[interp_stanza] assign[=] call[name[config].get, parameter[constant[interpretations], dictionary[[], []]]]
variable[interpretations] assign[=] dictionary[[], []]
def function[setup_interpretations, parameter[interp]]:
for taget[tuple[[<ast.Name object at 0x7da1b2456770>, <ast.Name object at 0x7da1b2456740>]]] in starred[call[name[interp].items, parameter[]]] begin[:]
if call[name[interp_key].startswith, parameter[constant[@]]] begin[:]
variable[interp_key] assign[=] call[name[iri].absolutize, parameter[call[name[interp_key]][<ast.Slice object at 0x7da1b2456440>], name[VERSA_BASEIRI]]]
if compare[name[interp_key] in name[PREP_METHODS]] begin[:]
call[name[interpretations]][name[prop]] assign[=] call[name[PREP_METHODS]][name[interp_key]]
call[name[setup_interpretations], parameter[name[interp_stanza]]]
variable[idg] assign[=] call[name[idgen], parameter[constant[None]]]
variable[comments] assign[=] call[name[mkdcomments].CommentsExtension, parameter[]]
variable[h] assign[=] call[name[markdown].markdown, parameter[name[md]]]
variable[tb] assign[=] call[name[treebuilder], parameter[]]
variable[h] assign[=] binary_operation[binary_operation[constant[<html>] + name[h]] + constant[</html>]]
variable[root] assign[=] call[name[tb].parse, parameter[name[h]]]
variable[first_h1] assign[=] call[name[next], parameter[call[name[select_name], parameter[call[name[descendants], parameter[name[root]]], constant[h1]]]]]
variable[docheader] assign[=] call[name[next], parameter[call[name[select_value], parameter[call[name[select_name], parameter[call[name[descendants], parameter[name[root]]], constant[h1]]], constant[@docheader]]], call[name[element], parameter[constant[empty]]]]]
variable[sections] assign[=] call[name[filter], parameter[<ast.Lambda object at 0x7da1b2455180>, call[name[select_name_pattern], parameter[call[name[descendants], parameter[name[root]]], name[HEADER_PAT]]]]]
def function[fields, parameter[sect]]:
constant[
Each section represents a resource and contains a list with its properties.
This generator parses the list and yields the key/value pairs representing the properties.
Some properties have attributes, expressed in markdown as a nested list. If present, these
attributes are yielded as well; otherwise None is yielded.
]
variable[sect_body_items] assign[=] call[name[itertools].takewhile, parameter[<ast.Lambda object at 0x7da1b2454cd0>, call[name[select_elements], parameter[call[name[following_siblings], parameter[name[sect]]]]]]]
variable[field_list] assign[=] <ast.ListComp object at 0x7da1b24549a0>
def function[parse_li, parameter[pair]]:
constant[
Parse each list item into a property pair
]
if call[name[pair].strip, parameter[]] begin[:]
variable[matched] assign[=] call[name[REL_PAT].match, parameter[name[pair]]]
if <ast.UnaryOp object at 0x7da1b2454340> begin[:]
<ast.Raise object at 0x7da1b24542e0>
if call[name[matched].group, parameter[constant[3]]] begin[:]
variable[prop] assign[=] call[call[name[matched].group, parameter[constant[3]]].strip, parameter[]]
if call[name[matched].group, parameter[constant[4]]] begin[:]
variable[prop] assign[=] call[call[name[matched].group, parameter[constant[4]]].strip, parameter[]]
if call[name[matched].group, parameter[constant[7]]] begin[:]
variable[val] assign[=] call[call[name[matched].group, parameter[constant[7]]].strip, parameter[]]
variable[typeindic] assign[=] name[RES_VAL]
return[tuple[[<ast.Name object at 0x7da1b24680d0>, <ast.Name object at 0x7da1b2469d20>, <ast.Name object at 0x7da1b246bb20>]]]
return[tuple[[<ast.Constant object at 0x7da1b2468580>, <ast.Constant object at 0x7da1b246ab00>, <ast.Constant object at 0x7da1b246a0b0>]]]
for taget[name[li]] in starred[name[field_list]] begin[:]
if call[name[list], parameter[call[name[select_name], parameter[name[li], constant[ul]]]]] begin[:]
variable[main] assign[=] call[constant[].join, parameter[call[name[itertools].takewhile, parameter[<ast.Lambda object at 0x7da1b2469b70>, name[li].xml_children]]]]
<ast.Tuple object at 0x7da1b2468130> assign[=] call[name[parse_li], parameter[name[main]]]
variable[subfield_list] assign[=] <ast.ListComp object at 0x7da1b2469840>
variable[subfield_list] assign[=] <ast.ListComp object at 0x7da1b246baf0>
if compare[name[val] is constant[None]] begin[:]
variable[val] assign[=] constant[]
<ast.Yield object at 0x7da1b246a9b0>
variable[iris] assign[=] dictionary[[], []]
variable[base] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da1b246a800>, <ast.Name object at 0x7da1b2469a80>, <ast.Name object at 0x7da1b2469360>, <ast.Name object at 0x7da1b24689a0>]]] in starred[call[name[fields], parameter[name[docheader]]]] begin[:]
if compare[name[prop] equal[==] constant[@iri]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b246bdf0>, <ast.Name object at 0x7da1b2468190>, <ast.Name object at 0x7da1b246b370>]]] in starred[name[subfield_list]] begin[:]
if compare[name[k] equal[==] constant[@base]] begin[:]
variable[base] assign[=] name[uri]
if <ast.UnaryOp object at 0x7da18dc062f0> begin[:]
variable[propbase] assign[=] name[base]
if <ast.UnaryOp object at 0x7da18dc07670> begin[:]
variable[rtbase] assign[=] name[base]
if <ast.UnaryOp object at 0x7da18dc048b0> begin[:]
variable[document_iri] assign[=] name[base]
for taget[name[sect]] in starred[name[sections]] begin[:]
variable[matched] assign[=] call[name[RESOURCE_PAT].match, parameter[name[sect].xml_value]]
if <ast.UnaryOp object at 0x7da1b24e8940> begin[:]
<ast.Raise object at 0x7da1b24e8520>
variable[rid] assign[=] call[name[matched].group, parameter[constant[1]]]
variable[rtype] assign[=] call[name[matched].group, parameter[constant[3]]]
if name[rtype] begin[:]
variable[rtype] assign[=] call[name[I], parameter[call[name[iri].absolutize, parameter[name[rtype], name[base]]]]]
if name[rid] begin[:]
variable[rid] assign[=] call[name[I], parameter[call[name[iri].absolutize, parameter[name[rid], name[base]]]]]
if <ast.UnaryOp object at 0x7da18ede6f20> begin[:]
variable[rid] assign[=] call[name[next], parameter[name[idg]]]
if <ast.UnaryOp object at 0x7da18f58ebf0> begin[:]
variable[rtype] assign[=] call[name[syntaxtypemap].get, parameter[name[sect].xml_name]]
if name[rtype] begin[:]
call[name[model].add, parameter[name[rid], name[TYPE_REL], name[rtype]]]
for taget[tuple[[<ast.Name object at 0x7da20c6a9930>, <ast.Name object at 0x7da20c6aaef0>, <ast.Name object at 0x7da20c6a9150>, <ast.Name object at 0x7da20c6a8eb0>]]] in starred[call[name[fields], parameter[name[sect]]]] begin[:]
variable[attrs] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6ab250>, <ast.Name object at 0x7da20c6aa320>, <ast.Name object at 0x7da20c6a81c0>]]] in starred[<ast.BoolOp object at 0x7da20c6ab4c0>] begin[:]
if compare[name[atype] equal[==] name[RES_VAL]] begin[:]
variable[valmatch] assign[=] call[name[URI_ABBR_PAT].match, parameter[name[aval]]]
if name[valmatch] begin[:]
variable[uri] assign[=] call[name[iris]][call[name[valmatch].group, parameter[constant[1]]]]
call[name[attrs]][name[aprop]] assign[=] call[name[URI_ABBR_PAT].sub, parameter[binary_operation[name[uri] + constant[\2\3]], name[aval]]]
variable[propmatch] assign[=] call[name[URI_ABBR_PAT].match, parameter[name[prop]]]
if name[propmatch] begin[:]
variable[uri] assign[=] call[name[iris]][call[name[propmatch].group, parameter[constant[1]]]]
variable[fullprop] assign[=] call[name[URI_ABBR_PAT].sub, parameter[binary_operation[name[uri] + constant[\2\3]], name[prop]]]
if compare[name[typeindic] equal[==] name[RES_VAL]] begin[:]
variable[valmatch] assign[=] call[name[URI_ABBR_PAT].match, parameter[name[val]]]
if name[valmatch] begin[:]
variable[uri] assign[=] call[name[iris]][call[name[valmatch].group, parameter[constant[1]]]]
variable[val] assign[=] call[name[URI_ABBR_PAT].sub, parameter[binary_operation[name[uri] + constant[\2\3]], name[val]]]
call[name[model].add, parameter[name[rid], name[fullprop], name[val], name[attrs]]]
return[name[document_iri]] | keyword[def] identifier[parse] ( identifier[md] , identifier[model] , identifier[encoding] = literal[string] , identifier[config] = keyword[None] ):
literal[string]
identifier[config] = identifier[config] keyword[or] {}
identifier[syntaxtypemap] ={}
keyword[if] identifier[config] . identifier[get] ( literal[string] ): identifier[syntaxtypemap] [ literal[string] ]= identifier[config] . identifier[get] ( literal[string] )
keyword[if] identifier[config] . identifier[get] ( literal[string] ): identifier[syntaxtypemap] [ literal[string] ]= identifier[config] . identifier[get] ( literal[string] )
keyword[if] identifier[config] . identifier[get] ( literal[string] ): identifier[syntaxtypemap] [ literal[string] ]= identifier[config] . identifier[get] ( literal[string] )
identifier[interp_stanza] = identifier[config] . identifier[get] ( literal[string] ,{})
identifier[interpretations] ={}
keyword[def] identifier[setup_interpretations] ( identifier[interp] ):
keyword[for] identifier[prop] , identifier[interp_key] keyword[in] identifier[interp] . identifier[items] ():
keyword[if] identifier[interp_key] . identifier[startswith] ( literal[string] ):
identifier[interp_key] = identifier[iri] . identifier[absolutize] ( identifier[interp_key] [ literal[int] :], identifier[VERSA_BASEIRI] )
keyword[if] identifier[interp_key] keyword[in] identifier[PREP_METHODS] :
identifier[interpretations] [ identifier[prop] ]= identifier[PREP_METHODS] [ identifier[interp_key] ]
keyword[else] :
identifier[interpretations] [ identifier[prop] ]= keyword[lambda] identifier[x] ,** identifier[kwargs] : identifier[x]
identifier[setup_interpretations] ( identifier[interp_stanza] )
identifier[idg] = identifier[idgen] ( keyword[None] )
identifier[comments] = identifier[mkdcomments] . identifier[CommentsExtension] ()
identifier[h] = identifier[markdown] . identifier[markdown] ( identifier[md] , identifier[safe_mode] = literal[string] , identifier[output_format] = literal[string] , identifier[extensions] =[ identifier[comments] ])
identifier[tb] = identifier[treebuilder] ()
identifier[h] = literal[string] + identifier[h] + literal[string]
identifier[root] = identifier[tb] . identifier[parse] ( identifier[h] )
identifier[first_h1] = identifier[next] ( identifier[select_name] ( identifier[descendants] ( identifier[root] ), literal[string] ))
identifier[docheader] = identifier[next] ( identifier[select_value] ( identifier[select_name] ( identifier[descendants] ( identifier[root] ), literal[string] ), literal[string] ), identifier[element] ( literal[string] , identifier[parent] = identifier[root] ))
identifier[sections] = identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[xml_value] != literal[string] , identifier[select_name_pattern] ( identifier[descendants] ( identifier[root] ), identifier[HEADER_PAT] ))
keyword[def] identifier[fields] ( identifier[sect] ):
literal[string]
identifier[sect_body_items] = identifier[itertools] . identifier[takewhile] ( keyword[lambda] identifier[x] : identifier[HEADER_PAT] . identifier[match] ( identifier[x] . identifier[xml_name] ) keyword[is] keyword[None] , identifier[select_elements] ( identifier[following_siblings] ( identifier[sect] )))
identifier[field_list] =[ identifier[li] keyword[for] identifier[elem] keyword[in] identifier[select_name] ( identifier[sect_body_items] , literal[string] ) keyword[for] identifier[li] keyword[in] identifier[select_name] ( identifier[elem] , literal[string] )]
keyword[def] identifier[parse_li] ( identifier[pair] ):
literal[string]
keyword[if] identifier[pair] . identifier[strip] ():
identifier[matched] = identifier[REL_PAT] . identifier[match] ( identifier[pair] )
keyword[if] keyword[not] identifier[matched] :
keyword[raise] identifier[ValueError] ( identifier[_] ( literal[string] . identifier[format] ( identifier[pair] )))
keyword[if] identifier[matched] . identifier[group] ( literal[int] ): identifier[prop] = identifier[matched] . identifier[group] ( literal[int] ). identifier[strip] ()
keyword[if] identifier[matched] . identifier[group] ( literal[int] ): identifier[prop] = identifier[matched] . identifier[group] ( literal[int] ). identifier[strip] ()
keyword[if] identifier[matched] . identifier[group] ( literal[int] ):
identifier[val] = identifier[matched] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[typeindic] = identifier[RES_VAL]
keyword[elif] identifier[matched] . identifier[group] ( literal[int] ):
identifier[val] = identifier[matched] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[typeindic] = identifier[TEXT_VAL]
keyword[elif] identifier[matched] . identifier[group] ( literal[int] ):
identifier[val] = identifier[matched] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[typeindic] = identifier[TEXT_VAL]
keyword[elif] identifier[matched] . identifier[group] ( literal[int] ):
identifier[val] = identifier[matched] . identifier[group] ( literal[int] ). identifier[strip] ()
identifier[typeindic] = identifier[UNKNOWN_VAL]
keyword[else] :
identifier[val] = literal[string]
identifier[typeindic] = identifier[UNKNOWN_VAL]
keyword[return] identifier[prop] , identifier[val] , identifier[typeindic]
keyword[return] keyword[None] , keyword[None] , keyword[None]
keyword[for] identifier[li] keyword[in] identifier[field_list] :
keyword[if] identifier[list] ( identifier[select_name] ( identifier[li] , literal[string] )):
identifier[main] = literal[string] . identifier[join] ( identifier[itertools] . identifier[takewhile] (
keyword[lambda] identifier[x] : identifier[isinstance] ( identifier[x] , identifier[text] ), identifier[li] . identifier[xml_children]
))
identifier[prop] , identifier[val] , identifier[typeindic] = identifier[parse_li] ( identifier[main] )
identifier[subfield_list] =[ identifier[parse_li] ( identifier[sli] . identifier[xml_value] ) keyword[for] identifier[e] keyword[in] identifier[select_name] ( identifier[li] , literal[string] ) keyword[for] identifier[sli] keyword[in] (
identifier[select_name] ( identifier[e] , literal[string] )
)]
identifier[subfield_list] =[( identifier[p] , identifier[v] , identifier[t] ) keyword[for] ( identifier[p] , identifier[v] , identifier[t] ) keyword[in] identifier[subfield_list] keyword[if] identifier[p] keyword[is] keyword[not] keyword[None] ]
keyword[if] identifier[val] keyword[is] keyword[None] : identifier[val] = literal[string]
keyword[yield] identifier[prop] , identifier[val] , identifier[typeindic] , identifier[subfield_list]
keyword[else] :
identifier[prop] , identifier[val] , identifier[typeindic] = identifier[parse_li] ( identifier[li] . identifier[xml_value] )
keyword[if] identifier[prop] : keyword[yield] identifier[prop] , identifier[val] , identifier[typeindic] , keyword[None]
identifier[iris] ={}
identifier[base] = identifier[propbase] = identifier[rtbase] = identifier[document_iri] = identifier[default_lang] = keyword[None]
keyword[for] identifier[prop] , identifier[val] , identifier[typeindic] , identifier[subfield_list] keyword[in] identifier[fields] ( identifier[docheader] ):
keyword[if] identifier[prop] == literal[string] :
keyword[for] ( identifier[k] , identifier[uri] , identifier[typeindic] ) keyword[in] identifier[subfield_list] :
keyword[if] identifier[k] == literal[string] :
identifier[base] = identifier[propbase] = identifier[rtbase] = identifier[uri]
keyword[elif] identifier[k] == literal[string] :
identifier[propbase] = identifier[uri]
keyword[elif] identifier[k] == literal[string] :
identifier[rtbase] = identifier[uri]
keyword[else] :
identifier[iris] [ identifier[k] ]= identifier[uri]
keyword[elif] identifier[prop] == literal[string] :
identifier[interp] ={}
keyword[for] identifier[k] , identifier[v] , identifier[x] keyword[in] identifier[subfield_list] :
identifier[interp] [ identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[k] , identifier[propbase] ))]= identifier[v]
identifier[setup_interpretations] ( identifier[interp] )
keyword[elif] identifier[prop] == literal[string] :
identifier[document_iri] = identifier[val]
keyword[elif] identifier[prop] == literal[string] :
identifier[default_lang] = identifier[val]
keyword[elif] identifier[document_iri] keyword[or] identifier[base] :
identifier[rid] = identifier[document_iri] keyword[or] identifier[base]
identifier[fullprop] = identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[prop] , identifier[propbase] keyword[or] identifier[base] ))
keyword[if] identifier[fullprop] keyword[in] identifier[interpretations] :
identifier[val] = identifier[interpretations] [ identifier[fullprop] ]( identifier[val] , identifier[rid] = identifier[rid] , identifier[fullprop] = identifier[fullprop] , identifier[base] = identifier[base] , identifier[model] = identifier[model] )
keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] : identifier[model] . identifier[add] ( identifier[rid] , identifier[fullprop] , identifier[val] )
keyword[else] :
identifier[model] . identifier[add] ( identifier[rid] , identifier[fullprop] , identifier[val] )
keyword[if] keyword[not] identifier[propbase] : identifier[propbase] = identifier[base]
keyword[if] keyword[not] identifier[rtbase] : identifier[rtbase] = identifier[base]
keyword[if] keyword[not] identifier[document_iri] : identifier[document_iri] = identifier[base]
keyword[for] identifier[sect] keyword[in] identifier[sections] :
identifier[matched] = identifier[RESOURCE_PAT] . identifier[match] ( identifier[sect] . identifier[xml_value] )
keyword[if] keyword[not] identifier[matched] :
keyword[raise] identifier[ValueError] ( identifier[_] ( literal[string] . identifier[format] ( identifier[sect] . identifier[xml_value] )))
identifier[rid] = identifier[matched] . identifier[group] ( literal[int] )
identifier[rtype] = identifier[matched] . identifier[group] ( literal[int] )
keyword[if] identifier[rtype] :
identifier[rtype] = identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[rtype] , identifier[base] ))
keyword[if] identifier[rid] :
identifier[rid] = identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[rid] , identifier[base] ))
keyword[if] keyword[not] identifier[rid] :
identifier[rid] = identifier[next] ( identifier[idg] )
keyword[if] keyword[not] identifier[rtype] :
identifier[rtype] = identifier[syntaxtypemap] . identifier[get] ( identifier[sect] . identifier[xml_name] )
keyword[if] identifier[rtype] :
identifier[model] . identifier[add] ( identifier[rid] , identifier[TYPE_REL] , identifier[rtype] )
keyword[for] identifier[prop] , identifier[val] , identifier[typeindic] , identifier[subfield_list] keyword[in] identifier[fields] ( identifier[sect] ):
identifier[attrs] ={}
keyword[for] ( identifier[aprop] , identifier[aval] , identifier[atype] ) keyword[in] identifier[subfield_list] keyword[or] ():
keyword[if] identifier[atype] == identifier[RES_VAL] :
identifier[valmatch] = identifier[URI_ABBR_PAT] . identifier[match] ( identifier[aval] )
keyword[if] identifier[valmatch] :
identifier[uri] = identifier[iris] [ identifier[valmatch] . identifier[group] ( literal[int] )]
identifier[attrs] [ identifier[aprop] ]= identifier[URI_ABBR_PAT] . identifier[sub] ( identifier[uri] + literal[string] , identifier[aval] )
keyword[else] :
identifier[attrs] [ identifier[aprop] ]= identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[aval] , identifier[rtbase] ))
keyword[elif] identifier[atype] == identifier[TEXT_VAL] :
identifier[attrs] [ identifier[aprop] ]= identifier[aval]
keyword[elif] identifier[atype] == identifier[UNKNOWN_VAL] :
identifier[attrs] [ identifier[aprop] ]= identifier[aval]
keyword[if] identifier[aprop] keyword[in] identifier[interpretations] :
identifier[aval] = identifier[interpretations] [ identifier[aprop] ]( identifier[aval] , identifier[rid] = identifier[rid] , identifier[fullprop] = identifier[aprop] , identifier[base] = identifier[base] , identifier[model] = identifier[model] )
keyword[if] identifier[aval] keyword[is] keyword[not] keyword[None] : identifier[attrs] [ identifier[aprop] ]= identifier[aval]
keyword[else] :
identifier[attrs] [ identifier[aprop] ]= identifier[aval]
identifier[propmatch] = identifier[URI_ABBR_PAT] . identifier[match] ( identifier[prop] )
keyword[if] identifier[propmatch] :
identifier[uri] = identifier[iris] [ identifier[propmatch] . identifier[group] ( literal[int] )]
identifier[fullprop] = identifier[URI_ABBR_PAT] . identifier[sub] ( identifier[uri] + literal[string] , identifier[prop] )
keyword[else] :
identifier[fullprop] = identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[prop] , identifier[propbase] ))
keyword[if] identifier[typeindic] == identifier[RES_VAL] :
identifier[valmatch] = identifier[URI_ABBR_PAT] . identifier[match] ( identifier[val] )
keyword[if] identifier[valmatch] :
identifier[uri] = identifier[iris] [ identifier[valmatch] . identifier[group] ( literal[int] )]
identifier[val] = identifier[URI_ABBR_PAT] . identifier[sub] ( identifier[uri] + literal[string] , identifier[val] )
keyword[else] :
identifier[val] = identifier[I] ( identifier[iri] . identifier[absolutize] ( identifier[val] , identifier[rtbase] ))
identifier[model] . identifier[add] ( identifier[rid] , identifier[fullprop] , identifier[val] , identifier[attrs] )
keyword[elif] identifier[typeindic] == identifier[TEXT_VAL] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[attrs] : identifier[attrs] [ literal[string] ]= identifier[default_lang]
identifier[model] . identifier[add] ( identifier[rid] , identifier[fullprop] , identifier[val] , identifier[attrs] )
keyword[elif] identifier[typeindic] == identifier[UNKNOWN_VAL] :
keyword[if] identifier[fullprop] keyword[in] identifier[interpretations] :
identifier[val] = identifier[interpretations] [ identifier[fullprop] ]( identifier[val] , identifier[rid] = identifier[rid] , identifier[fullprop] = identifier[fullprop] , identifier[base] = identifier[base] , identifier[model] = identifier[model] )
keyword[if] identifier[val] keyword[is] keyword[not] keyword[None] : identifier[model] . identifier[add] ( identifier[rid] , identifier[fullprop] , identifier[val] )
keyword[else] :
identifier[model] . identifier[add] ( identifier[rid] , identifier[fullprop] , identifier[val] , identifier[attrs] )
keyword[return] identifier[document_iri] | def parse(md, model, encoding='utf-8', config=None):
"""
Translate the Versa Markdown syntax into Versa model relationships
md -- markdown source text
model -- Versa model to take the output relationship
encoding -- character encoding (defaults to UTF-8)
Returns: The overall base URI (`@base`) specified in the Markdown file, or None
>>> from versa.driver import memory
>>> from versa.reader.md import from_markdown
>>> m = memory.connection()
>>> from_markdown(open('test/resource/poetry.md').read(), m)
'http://uche.ogbuji.net/poems/'
>>> m.size()
40
>>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
(I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {})
"""
#Set up configuration to interpret the conventions for the Markdown
config = config or {}
#This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources
syntaxtypemap = {}
if config.get('autotype-h1'):
syntaxtypemap['h1'] = config.get('autotype-h1') # depends on [control=['if'], data=[]]
if config.get('autotype-h2'):
syntaxtypemap['h2'] = config.get('autotype-h2') # depends on [control=['if'], data=[]]
if config.get('autotype-h3'):
syntaxtypemap['h3'] = config.get('autotype-h3') # depends on [control=['if'], data=[]]
interp_stanza = config.get('interpretations', {})
interpretations = {}
def setup_interpretations(interp):
#Map the interpretation IRIs to functions to do the data prep
for (prop, interp_key) in interp.items():
if interp_key.startswith('@'):
interp_key = iri.absolutize(interp_key[1:], VERSA_BASEIRI) # depends on [control=['if'], data=[]]
if interp_key in PREP_METHODS:
interpretations[prop] = PREP_METHODS[interp_key] # depends on [control=['if'], data=['interp_key', 'PREP_METHODS']]
else:
#just use the identity, i.e. no-op
interpretations[prop] = lambda x, **kwargs: x # depends on [control=['for'], data=[]]
setup_interpretations(interp_stanza)
#Prep ID generator, in case needed
idg = idgen(None)
#Parse the Markdown
#Alternately:
#from xml.sax.saxutils import escape, unescape
#h = markdown.markdown(escape(md.decode(encoding)), output_format='html5')
#Note: even using safe_mode this should not be presumed safe from tainted input
#h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5')
comments = mkdcomments.CommentsExtension()
h = markdown.markdown(md, safe_mode='escape', output_format='html5', extensions=[comments])
#doc = html.markup_fragment(inputsource.text(h.encode('utf-8')))
tb = treebuilder()
h = '<html>' + h + '</html>'
root = tb.parse(h)
#Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest
first_h1 = next(select_name(descendants(root), 'h1'))
#top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2'))
#Extract header elements. Notice I use an empty element with an empty parent as the default result
docheader = next(select_value(select_name(descendants(root), 'h1'), '@docheader'), element('empty', parent=root)) # //h1[.="@docheader"]
sections = filter(lambda x: x.xml_value != '@docheader', select_name_pattern(descendants(root), HEADER_PAT)) # //h1[not(.="@docheader")]|h2[not(.="@docheader")]|h3[not(.="@docheader")]
def fields(sect):
"""
Each section represents a resource and contains a list with its properties.
This generator parses the list and yields the key/value pairs representing the properties.
Some properties have attributes, expressed in markdown as a nested list. If present, these
attributes are yielded as well; otherwise None is yielded.
"""
#import logging; logging.debug(repr(sect))
#Pull all the list elements until the next header. This accommodates multiple lists in a section
sect_body_items = itertools.takewhile(lambda x: HEADER_PAT.match(x.xml_name) is None, select_elements(following_siblings(sect)))
#results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3')
#field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ]
field_list = [li for elem in select_name(sect_body_items, 'ul') for li in select_name(elem, 'li')]
def parse_li(pair):
"""
Parse each list item into a property pair
"""
if pair.strip():
matched = REL_PAT.match(pair)
if not matched:
raise ValueError(_('Syntax error in relationship expression: {0}'.format(pair))) # depends on [control=['if'], data=[]]
#print matched.groups()
if matched.group(3):
prop = matched.group(3).strip() # depends on [control=['if'], data=[]]
if matched.group(4):
prop = matched.group(4).strip() # depends on [control=['if'], data=[]]
if matched.group(7):
val = matched.group(7).strip()
typeindic = RES_VAL # depends on [control=['if'], data=[]]
elif matched.group(9):
val = matched.group(9).strip()
typeindic = TEXT_VAL # depends on [control=['if'], data=[]]
elif matched.group(11):
val = matched.group(11).strip()
typeindic = TEXT_VAL # depends on [control=['if'], data=[]]
elif matched.group(12):
val = matched.group(12).strip()
typeindic = UNKNOWN_VAL # depends on [control=['if'], data=[]]
else:
val = ''
typeindic = UNKNOWN_VAL
#prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ]
#import logging; logging.debug(repr((prop, val)))
return (prop, val, typeindic) # depends on [control=['if'], data=[]]
return (None, None, None)
#Go through each list item
for li in field_list:
#Is there a nested list, which expresses attributes on a property
if list(select_name(li, 'ul')):
#main = ''.join([ node.xml_value
# for node in itertools.takewhile(
# lambda x: x.xml_name != 'ul', select_elements(li)
# )
# ])
main = ''.join(itertools.takewhile(lambda x: isinstance(x, text), li.xml_children))
#main = li.xml_select('string(ul/preceding-sibling::node())')
(prop, val, typeindic) = parse_li(main)
subfield_list = [parse_li(sli.xml_value) for e in select_name(li, 'ul') for sli in select_name(e, 'li')]
subfield_list = [(p, v, t) for (p, v, t) in subfield_list if p is not None]
#Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader
if val is None:
val = '' # depends on [control=['if'], data=['val']]
yield (prop, val, typeindic, subfield_list) # depends on [control=['if'], data=[]]
else:
#Just a regular, unadorned property
(prop, val, typeindic) = parse_li(li.xml_value)
if prop:
yield (prop, val, typeindic, None) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['li']]
iris = {}
#Gather the document-level metadata from the @docheader section
base = propbase = rtbase = document_iri = default_lang = None
for (prop, val, typeindic, subfield_list) in fields(docheader):
#The @iri section is where key IRI prefixes can be set
if prop == '@iri':
for (k, uri, typeindic) in subfield_list:
if k == '@base':
base = propbase = rtbase = uri # depends on [control=['if'], data=[]]
elif k == '@property':
propbase = uri # depends on [control=['if'], data=[]]
elif k == '@resource-type':
rtbase = uri # depends on [control=['if'], data=[]]
else:
iris[k] = uri # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
#The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship
elif prop == '@interpretations':
#Iterate over items from the @docheader/@interpretations section to set up for further parsing
interp = {}
for (k, v, x) in subfield_list:
interp[I(iri.absolutize(k, propbase))] = v # depends on [control=['for'], data=[]]
setup_interpretations(interp) # depends on [control=['if'], data=[]]
#Setting an IRI for this very document being parsed
elif prop == '@document':
document_iri = val # depends on [control=['if'], data=[]]
elif prop == '@language':
default_lang = val # depends on [control=['if'], data=[]]
#If we have a resource to which to attach them, just attach all other properties
elif document_iri or base:
rid = document_iri or base
fullprop = I(iri.absolutize(prop, propbase or base))
if fullprop in interpretations:
val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
if val is not None:
model.add(rid, fullprop, val) # depends on [control=['if'], data=['val']] # depends on [control=['if'], data=['fullprop', 'interpretations']]
else:
model.add(rid, fullprop, val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
#Default IRI prefixes if @iri/@base is set
if not propbase:
propbase = base # depends on [control=['if'], data=[]]
if not rtbase:
rtbase = base # depends on [control=['if'], data=[]]
if not document_iri:
document_iri = base # depends on [control=['if'], data=[]]
#Go through the resources expressed in remaining sections
for sect in sections:
#if U(sect) == '@docheader': continue #Not needed because excluded by ss
#The header can take one of 4 forms: "ResourceID" "ResourceID [ResourceType]" "[ResourceType]" or "[]"
#The 3rd form is for an anonymous resource with specified type and the 4th an anonymous resource with unspecified type
matched = RESOURCE_PAT.match(sect.xml_value)
if not matched:
raise ValueError(_('Syntax error in resource header: {0}'.format(sect.xml_value))) # depends on [control=['if'], data=[]]
rid = matched.group(1)
rtype = matched.group(3)
if rtype:
rtype = I(iri.absolutize(rtype, base)) # depends on [control=['if'], data=[]]
if rid:
rid = I(iri.absolutize(rid, base)) # depends on [control=['if'], data=[]]
if not rid:
rid = next(idg) # depends on [control=['if'], data=[]]
#Resource type might be set by syntax config
if not rtype:
rtype = syntaxtypemap.get(sect.xml_name) # depends on [control=['if'], data=[]]
if rtype:
model.add(rid, TYPE_REL, rtype) # depends on [control=['if'], data=[]]
#Add the property
for (prop, val, typeindic, subfield_list) in fields(sect):
attrs = {}
for (aprop, aval, atype) in subfield_list or ():
if atype == RES_VAL:
valmatch = URI_ABBR_PAT.match(aval)
if valmatch:
uri = iris[valmatch.group(1)]
attrs[aprop] = URI_ABBR_PAT.sub(uri + '\\2\\3', aval) # depends on [control=['if'], data=[]]
else:
attrs[aprop] = I(iri.absolutize(aval, rtbase)) # depends on [control=['if'], data=[]]
elif atype == TEXT_VAL:
attrs[aprop] = aval # depends on [control=['if'], data=[]]
elif atype == UNKNOWN_VAL:
attrs[aprop] = aval
if aprop in interpretations:
aval = interpretations[aprop](aval, rid=rid, fullprop=aprop, base=base, model=model)
if aval is not None:
attrs[aprop] = aval # depends on [control=['if'], data=['aval']] # depends on [control=['if'], data=['aprop', 'interpretations']]
else:
attrs[aprop] = aval # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
propmatch = URI_ABBR_PAT.match(prop)
if propmatch:
uri = iris[propmatch.group(1)]
fullprop = URI_ABBR_PAT.sub(uri + '\\2\\3', prop) # depends on [control=['if'], data=[]]
else:
fullprop = I(iri.absolutize(prop, propbase))
if typeindic == RES_VAL:
                valmatch = URI_ABBR_PAT.match(val)  #Match the property's own value (was aval, a leftover binding from the attrs loop)
if valmatch:
uri = iris[valmatch.group(1)]
val = URI_ABBR_PAT.sub(uri + '\\2\\3', val) # depends on [control=['if'], data=[]]
else:
val = I(iri.absolutize(val, rtbase))
model.add(rid, fullprop, val, attrs) # depends on [control=['if'], data=[]]
elif typeindic == TEXT_VAL:
if '@lang' not in attrs:
attrs['@lang'] = default_lang # depends on [control=['if'], data=['attrs']]
model.add(rid, fullprop, val, attrs) # depends on [control=['if'], data=[]]
elif typeindic == UNKNOWN_VAL:
if fullprop in interpretations:
val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
if val is not None:
model.add(rid, fullprop, val) # depends on [control=['if'], data=['val']] # depends on [control=['if'], data=['fullprop', 'interpretations']]
else:
model.add(rid, fullprop, val, attrs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['sect']]
#resinfo = AB_RESOURCE_PAT.match(val)
#if resinfo:
# val = resinfo.group(1)
# valtype = resinfo.group(3)
# if not val: val = model.generate_resource()
# if valtype: attrs[TYPE_REL] = valtype
return document_iri |
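
The fields generator above pulls each markdown list item apart with REL_PAT into a
(property, value, type-indicator) triple. REL_PAT itself is not shown in this excerpt,
so the simplified pattern, the type markers, and the sample lines below are illustrative
assumptions, not the library's actual definitions.

import re

# Hypothetical stand-in for REL_PAT: "prop: <resource>", 'prop: "text"', or "prop: raw"
SIMPLE_REL_PAT = re.compile(r'\s*([^:]+):\s*(?:<([^>]*)>|"([^"]*)"|(.*))\s*$')

RES_VAL, TEXT_VAL, UNKNOWN_VAL = 'resource', 'text', 'unknown'

def parse_li(pair):
    """Parse one list item into (prop, val, typeindic), mirroring the logic above."""
    if not pair.strip():
        return (None, None, None)
    matched = SIMPLE_REL_PAT.match(pair)
    if not matched:
        raise ValueError('Syntax error in relationship expression: {0}'.format(pair))
    prop = matched.group(1).strip()
    if matched.group(2) is not None:
        return (prop, matched.group(2).strip(), RES_VAL)   # <...> marks a resource
    if matched.group(3) is not None:
        return (prop, matched.group(3).strip(), TEXT_VAL)  # "..." marks literal text
    return (prop, matched.group(4).strip(), UNKNOWN_VAL)   # left to @interpretations

print(parse_li('author: <http://example.org/alice>'))  # ('author', '...', 'resource')
print(parse_li('count: 3'))                             # ('count', '3', 'unknown')
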
def parse(self, xml_data):
""" Parse XML data """
# parse tree
if isinstance(xml_data, string_types):
# Presumably, this is textual xml data.
try:
root = ET.fromstring(xml_data)
except StdlibParseError as e:
raise ParseError(str(e))
else:
# Otherwise, assume it has already been parsed into a tree
root = xml_data
# get type
if 'type' in root.attrib:
self.kind = root.attrib['type']
# parse component
for c1 in root:
# <id>
if c1.tag == 'id':
self.id = c1.text
# <updatecontact>
elif c1.tag == 'updatecontact' or c1.tag == 'update_contact':
self.update_contact = c1.text
# <metadata_license>
elif c1.tag == 'metadata_license':
self.metadata_license = c1.text
# <releases>
elif c1.tag == 'releases':
for c2 in c1:
if c2.tag == 'release':
rel = Release()
rel._parse_tree(c2)
self.add_release(rel)
# <reviews>
elif c1.tag == 'reviews':
for c2 in c1:
if c2.tag == 'review':
rev = Review()
rev._parse_tree(c2)
self.add_review(rev)
# <screenshots>
elif c1.tag == 'screenshots':
for c2 in c1:
if c2.tag == 'screenshot':
ss = Screenshot()
ss._parse_tree(c2)
self.add_screenshot(ss)
# <provides>
elif c1.tag == 'provides':
for c2 in c1:
prov = Provide()
prov._parse_tree(c2)
self.add_provide(prov)
# <requires>
elif c1.tag == 'requires':
for c2 in c1:
req = Require()
req._parse_tree(c2)
self.add_require(req)
# <kudos>
elif c1.tag == 'kudos':
for c2 in c1:
if not c2.tag == 'kudo':
continue
self.kudos.append(c2.text)
# <keywords>
elif c1.tag == 'keywords':
for c2 in c1:
if not c2.tag == 'keyword':
continue
self.keywords.append(c2.text)
# <categories>
elif c1.tag == 'categories':
for c2 in c1:
if not c2.tag == 'category':
continue
self.categories.append(c2.text)
# <custom>
elif c1.tag == 'custom':
for c2 in c1:
if not c2.tag == 'value':
continue
if 'key' not in c2.attrib:
continue
self.custom[c2.attrib['key']] = c2.text
# <project_license>
elif c1.tag == 'project_license' or c1.tag == 'licence':
self.project_license = c1.text
# <developer_name>
elif c1.tag == 'developer_name':
self.developer_name = _join_lines(c1.text)
# <name>
elif c1.tag == 'name' and not self.name:
self.name = _join_lines(c1.text)
# <pkgname>
elif c1.tag == 'pkgname' and not self.pkgname:
self.pkgname = _join_lines(c1.text)
# <summary>
elif c1.tag == 'summary' and not self.summary:
self.summary = _join_lines(c1.text)
# <description>
elif c1.tag == 'description' and not self.description:
self.description = _parse_desc(c1)
# <url>
elif c1.tag == 'url':
key = 'homepage'
if 'type' in c1.attrib:
key = c1.attrib['type']
self.urls[key] = c1.text
elif c1.tag == 'icon':
key = c1.attrib.pop('type', 'unknown')
c1.attrib['value'] = c1.text
self.icons[key] = self.icons.get(key, []) + [c1.attrib] | def function[parse, parameter[self, xml_data]]:
constant[ Parse XML data ]
if call[name[isinstance], parameter[name[xml_data], name[string_types]]] begin[:]
<ast.Try object at 0x7da1b1da2ef0>
if compare[constant[type] in name[root].attrib] begin[:]
name[self].kind assign[=] call[name[root].attrib][constant[type]]
for taget[name[c1]] in starred[name[root]] begin[:]
if compare[name[c1].tag equal[==] constant[id]] begin[:]
name[self].id assign[=] name[c1].text | keyword[def] identifier[parse] ( identifier[self] , identifier[xml_data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[xml_data] , identifier[string_types] ):
keyword[try] :
identifier[root] = identifier[ET] . identifier[fromstring] ( identifier[xml_data] )
keyword[except] identifier[StdlibParseError] keyword[as] identifier[e] :
keyword[raise] identifier[ParseError] ( identifier[str] ( identifier[e] ))
keyword[else] :
identifier[root] = identifier[xml_data]
keyword[if] literal[string] keyword[in] identifier[root] . identifier[attrib] :
identifier[self] . identifier[kind] = identifier[root] . identifier[attrib] [ literal[string] ]
keyword[for] identifier[c1] keyword[in] identifier[root] :
keyword[if] identifier[c1] . identifier[tag] == literal[string] :
identifier[self] . identifier[id] = identifier[c1] . identifier[text]
keyword[elif] identifier[c1] . identifier[tag] == literal[string] keyword[or] identifier[c1] . identifier[tag] == literal[string] :
identifier[self] . identifier[update_contact] = identifier[c1] . identifier[text]
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
identifier[self] . identifier[metadata_license] = identifier[c1] . identifier[text]
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] identifier[c2] . identifier[tag] == literal[string] :
identifier[rel] = identifier[Release] ()
identifier[rel] . identifier[_parse_tree] ( identifier[c2] )
identifier[self] . identifier[add_release] ( identifier[rel] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] identifier[c2] . identifier[tag] == literal[string] :
identifier[rev] = identifier[Review] ()
identifier[rev] . identifier[_parse_tree] ( identifier[c2] )
identifier[self] . identifier[add_review] ( identifier[rev] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] identifier[c2] . identifier[tag] == literal[string] :
identifier[ss] = identifier[Screenshot] ()
identifier[ss] . identifier[_parse_tree] ( identifier[c2] )
identifier[self] . identifier[add_screenshot] ( identifier[ss] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
identifier[prov] = identifier[Provide] ()
identifier[prov] . identifier[_parse_tree] ( identifier[c2] )
identifier[self] . identifier[add_provide] ( identifier[prov] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
identifier[req] = identifier[Require] ()
identifier[req] . identifier[_parse_tree] ( identifier[c2] )
identifier[self] . identifier[add_require] ( identifier[req] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] keyword[not] identifier[c2] . identifier[tag] == literal[string] :
keyword[continue]
identifier[self] . identifier[kudos] . identifier[append] ( identifier[c2] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] keyword[not] identifier[c2] . identifier[tag] == literal[string] :
keyword[continue]
identifier[self] . identifier[keywords] . identifier[append] ( identifier[c2] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] keyword[not] identifier[c2] . identifier[tag] == literal[string] :
keyword[continue]
identifier[self] . identifier[categories] . identifier[append] ( identifier[c2] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
keyword[for] identifier[c2] keyword[in] identifier[c1] :
keyword[if] keyword[not] identifier[c2] . identifier[tag] == literal[string] :
keyword[continue]
keyword[if] literal[string] keyword[not] keyword[in] identifier[c2] . identifier[attrib] :
keyword[continue]
identifier[self] . identifier[custom] [ identifier[c2] . identifier[attrib] [ literal[string] ]]= identifier[c2] . identifier[text]
keyword[elif] identifier[c1] . identifier[tag] == literal[string] keyword[or] identifier[c1] . identifier[tag] == literal[string] :
identifier[self] . identifier[project_license] = identifier[c1] . identifier[text]
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
identifier[self] . identifier[developer_name] = identifier[_join_lines] ( identifier[c1] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[name] :
identifier[self] . identifier[name] = identifier[_join_lines] ( identifier[c1] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[pkgname] :
identifier[self] . identifier[pkgname] = identifier[_join_lines] ( identifier[c1] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[summary] :
identifier[self] . identifier[summary] = identifier[_join_lines] ( identifier[c1] . identifier[text] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] keyword[and] keyword[not] identifier[self] . identifier[description] :
identifier[self] . identifier[description] = identifier[_parse_desc] ( identifier[c1] )
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
identifier[key] = literal[string]
keyword[if] literal[string] keyword[in] identifier[c1] . identifier[attrib] :
identifier[key] = identifier[c1] . identifier[attrib] [ literal[string] ]
identifier[self] . identifier[urls] [ identifier[key] ]= identifier[c1] . identifier[text]
keyword[elif] identifier[c1] . identifier[tag] == literal[string] :
identifier[key] = identifier[c1] . identifier[attrib] . identifier[pop] ( literal[string] , literal[string] )
identifier[c1] . identifier[attrib] [ literal[string] ]= identifier[c1] . identifier[text]
identifier[self] . identifier[icons] [ identifier[key] ]= identifier[self] . identifier[icons] . identifier[get] ( identifier[key] ,[])+[ identifier[c1] . identifier[attrib] ] | def parse(self, xml_data):
""" Parse XML data """
# parse tree
if isinstance(xml_data, string_types):
# Presumably, this is textual xml data.
try:
root = ET.fromstring(xml_data) # depends on [control=['try'], data=[]]
except StdlibParseError as e:
raise ParseError(str(e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
else:
# Otherwise, assume it has already been parsed into a tree
root = xml_data
# get type
if 'type' in root.attrib:
self.kind = root.attrib['type'] # depends on [control=['if'], data=[]]
# parse component
for c1 in root:
# <id>
if c1.tag == 'id':
self.id = c1.text # depends on [control=['if'], data=[]]
# <updatecontact>
elif c1.tag == 'updatecontact' or c1.tag == 'update_contact':
self.update_contact = c1.text # depends on [control=['if'], data=[]]
# <metadata_license>
elif c1.tag == 'metadata_license':
self.metadata_license = c1.text # depends on [control=['if'], data=[]]
# <releases>
elif c1.tag == 'releases':
for c2 in c1:
if c2.tag == 'release':
rel = Release()
rel._parse_tree(c2)
self.add_release(rel) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <reviews>
elif c1.tag == 'reviews':
for c2 in c1:
if c2.tag == 'review':
rev = Review()
rev._parse_tree(c2)
self.add_review(rev) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <screenshots>
elif c1.tag == 'screenshots':
for c2 in c1:
if c2.tag == 'screenshot':
ss = Screenshot()
ss._parse_tree(c2)
self.add_screenshot(ss) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <provides>
elif c1.tag == 'provides':
for c2 in c1:
prov = Provide()
prov._parse_tree(c2)
self.add_provide(prov) # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <requires>
elif c1.tag == 'requires':
for c2 in c1:
req = Require()
req._parse_tree(c2)
self.add_require(req) # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <kudos>
elif c1.tag == 'kudos':
for c2 in c1:
if not c2.tag == 'kudo':
continue # depends on [control=['if'], data=[]]
self.kudos.append(c2.text) # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <keywords>
elif c1.tag == 'keywords':
for c2 in c1:
if not c2.tag == 'keyword':
continue # depends on [control=['if'], data=[]]
self.keywords.append(c2.text) # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <categories>
elif c1.tag == 'categories':
for c2 in c1:
if not c2.tag == 'category':
continue # depends on [control=['if'], data=[]]
self.categories.append(c2.text) # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <custom>
elif c1.tag == 'custom':
for c2 in c1:
if not c2.tag == 'value':
continue # depends on [control=['if'], data=[]]
if 'key' not in c2.attrib:
continue # depends on [control=['if'], data=[]]
self.custom[c2.attrib['key']] = c2.text # depends on [control=['for'], data=['c2']] # depends on [control=['if'], data=[]]
# <project_license>
elif c1.tag == 'project_license' or c1.tag == 'licence':
self.project_license = c1.text # depends on [control=['if'], data=[]]
# <developer_name>
elif c1.tag == 'developer_name':
self.developer_name = _join_lines(c1.text) # depends on [control=['if'], data=[]]
# <name>
elif c1.tag == 'name' and (not self.name):
self.name = _join_lines(c1.text) # depends on [control=['if'], data=[]]
# <pkgname>
elif c1.tag == 'pkgname' and (not self.pkgname):
self.pkgname = _join_lines(c1.text) # depends on [control=['if'], data=[]]
# <summary>
elif c1.tag == 'summary' and (not self.summary):
self.summary = _join_lines(c1.text) # depends on [control=['if'], data=[]]
# <description>
elif c1.tag == 'description' and (not self.description):
self.description = _parse_desc(c1) # depends on [control=['if'], data=[]]
# <url>
elif c1.tag == 'url':
key = 'homepage'
if 'type' in c1.attrib:
key = c1.attrib['type'] # depends on [control=['if'], data=[]]
self.urls[key] = c1.text # depends on [control=['if'], data=[]]
elif c1.tag == 'icon':
key = c1.attrib.pop('type', 'unknown')
c1.attrib['value'] = c1.text
self.icons[key] = self.icons.get(key, []) + [c1.attrib] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c1']] |
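
The parse method above is a single pass over the root's children, dispatching on tag
names and accumulating into typed fields. A self-contained sketch of that ElementTree
pattern follows; the XML snippet and the Component fields are invented for illustration.

import xml.etree.ElementTree as ET

XML = """
<component type="desktop">
  <id>org.example.App</id>
  <keywords><keyword>editor</keyword><keyword>text</keyword></keywords>
  <url type="homepage">https://example.org</url>
</component>
"""

class Component(object):
    def __init__(self):
        self.kind = None
        self.id = None
        self.keywords = []
        self.urls = {}

    def parse(self, xml_data):
        # Accept either raw text or an already-parsed element, as the method above does
        root = ET.fromstring(xml_data) if isinstance(xml_data, str) else xml_data
        self.kind = root.attrib.get('type')
        for c1 in root:
            if c1.tag == 'id':
                self.id = c1.text
            elif c1.tag == 'keywords':
                self.keywords.extend(c2.text for c2 in c1 if c2.tag == 'keyword')
            elif c1.tag == 'url':
                self.urls[c1.attrib.get('type', 'homepage')] = c1.text

c = Component()
c.parse(XML)
print(c.kind, c.id, c.keywords, c.urls)
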
def u(d):
"""
    Recursively convert byte strings (including those nested in lists,
    tuples, and dicts) to unicode text on Python 2. On Python 3, d is
    returned unchanged.
    :param d: a string, a container of strings, or any other value
    :return: the same structure with byte strings decoded as UTF-8
"""
if six.PY2:
if isinstance(d, six.binary_type):
return d.decode("utf8", "ignore")
elif isinstance(d, list):
return [u(x) for x in d]
elif isinstance(d, tuple):
return tuple(u(x) for x in d)
elif isinstance(d, dict):
return dict( (u(k), u(v)) for k, v in six.iteritems(d))
return d | def function[u, parameter[d]]:
constant[
convert string, string container or unicode
:param d:
:return:
]
if name[six].PY2 begin[:]
if call[name[isinstance], parameter[name[d], name[six].binary_type]] begin[:]
return[call[name[d].decode, parameter[constant[utf8], constant[ignore]]]]
return[name[d]] | keyword[def] identifier[u] ( identifier[d] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] :
keyword[if] identifier[isinstance] ( identifier[d] , identifier[six] . identifier[binary_type] ):
keyword[return] identifier[d] . identifier[decode] ( literal[string] , literal[string] )
keyword[elif] identifier[isinstance] ( identifier[d] , identifier[list] ):
keyword[return] [ identifier[u] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[d] ]
keyword[elif] identifier[isinstance] ( identifier[d] , identifier[tuple] ):
keyword[return] identifier[tuple] ( identifier[u] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[d] )
keyword[elif] identifier[isinstance] ( identifier[d] , identifier[dict] ):
keyword[return] identifier[dict] (( identifier[u] ( identifier[k] ), identifier[u] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[d] ))
keyword[return] identifier[d] | def u(d):
"""
convert string, string container or unicode
:param d:
:return:
"""
if six.PY2:
if isinstance(d, six.binary_type):
return d.decode('utf8', 'ignore') # depends on [control=['if'], data=[]]
elif isinstance(d, list):
return [u(x) for x in d] # depends on [control=['if'], data=[]]
elif isinstance(d, tuple):
return tuple((u(x) for x in d)) # depends on [control=['if'], data=[]]
elif isinstance(d, dict):
return dict(((u(k), u(v)) for (k, v) in six.iteritems(d))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return d |
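
The same recursion works on Python 3 with bytes in place of six.binary_type; a minimal
sketch (the function name to_text is mine):

def to_text(d):
    """Recursively decode bytes, and bytes inside lists/tuples/dicts, to str."""
    if isinstance(d, bytes):
        return d.decode('utf8', 'ignore')
    if isinstance(d, list):
        return [to_text(x) for x in d]
    if isinstance(d, tuple):
        return tuple(to_text(x) for x in d)
    if isinstance(d, dict):
        return {to_text(k): to_text(v) for k, v in d.items()}
    return d

print(to_text({b'key': [b'a', (b'b', 3)]}))  # {'key': ['a', ('b', 3)]}
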
def _set_flow_id(self, v, load=False):
"""
Setter method for flow_id, mapped from YANG variable /openflow_state/flow_id (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_flow_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flow_id() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=flow_id.flow_id, is_container='container', presence=False, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-info-flow-id-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flow_id must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=flow_id.flow_id, is_container='container', presence=False, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-info-flow-id-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
})
self.__flow_id = t
if hasattr(self, '_set'):
self._set() | def function[_set_flow_id, parameter[self, v, load]]:
constant[
Setter method for flow_id, mapped from YANG variable /openflow_state/flow_id (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_flow_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flow_id() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da18f810d60>
name[self].__flow_id assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_flow_id] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[flow_id] . identifier[flow_id] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[False] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__flow_id] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_flow_id(self, v, load=False):
"""
Setter method for flow_id, mapped from YANG variable /openflow_state/flow_id (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_flow_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flow_id() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=flow_id.flow_id, is_container='container', presence=False, yang_name='flow-id', rest_name='flow-id', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-flow-info-flow-id-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'flow_id must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=flow_id.flow_id, is_container=\'container\', presence=False, yang_name="flow-id", rest_name="flow-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'openflow-flow-info-flow-id-1\'}}, namespace=\'urn:brocade.com:mgmt:brocade-openflow-operational\', defining_module=\'brocade-openflow-operational\', yang_type=\'container\', is_config=False)'}) # depends on [control=['except'], data=[]]
self.__flow_id = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
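
_set_flow_id is generated pyangbind plumbing: coerce the incoming value into the typed
container class, and surface type mismatches as a structured ValueError. A hand-written
sketch of that validate-then-assign pattern, with an invented container class instead of
YANGDynClass:

class FlowId(object):
    """Stand-in for the generated flow_id container class."""
    def __init__(self, value=None):
        if value is not None and not isinstance(value, dict):
            raise TypeError('expected a mapping')
        self.value = value or {}

class OpenflowState(object):
    def _set_flow_id(self, v):
        try:
            t = FlowId(v)  # coerce/validate, as YANGDynClass does above
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': 'flow_id must be of a type compatible with container',
                'defined-type': 'container',
            })
        self.__flow_id = t  # name-mangled, mirroring self.__flow_id above

s = OpenflowState()
s._set_flow_id({'flow-name': 'f1'})  # a non-dict argument would raise ValueError
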
def _do_create_list_action(act, kw):
"""A factory for list actions. Convert the input list into Actions
and then wrap them in a ListAction."""
acts = []
for a in act:
aa = _do_create_action(a, kw)
if aa is not None: acts.append(aa)
if not acts:
return ListAction([])
elif len(acts) == 1:
return acts[0]
else:
return ListAction(acts) | def function[_do_create_list_action, parameter[act, kw]]:
constant[A factory for list actions. Convert the input list into Actions
and then wrap them in a ListAction.]
variable[acts] assign[=] list[[]]
for taget[name[a]] in starred[name[act]] begin[:]
variable[aa] assign[=] call[name[_do_create_action], parameter[name[a], name[kw]]]
if compare[name[aa] is_not constant[None]] begin[:]
call[name[acts].append, parameter[name[aa]]]
if <ast.UnaryOp object at 0x7da18f00c280> begin[:]
return[call[name[ListAction], parameter[list[[]]]]] | keyword[def] identifier[_do_create_list_action] ( identifier[act] , identifier[kw] ):
literal[string]
identifier[acts] =[]
keyword[for] identifier[a] keyword[in] identifier[act] :
identifier[aa] = identifier[_do_create_action] ( identifier[a] , identifier[kw] )
keyword[if] identifier[aa] keyword[is] keyword[not] keyword[None] : identifier[acts] . identifier[append] ( identifier[aa] )
keyword[if] keyword[not] identifier[acts] :
keyword[return] identifier[ListAction] ([])
keyword[elif] identifier[len] ( identifier[acts] )== literal[int] :
keyword[return] identifier[acts] [ literal[int] ]
keyword[else] :
keyword[return] identifier[ListAction] ( identifier[acts] ) | def _do_create_list_action(act, kw):
"""A factory for list actions. Convert the input list into Actions
and then wrap them in a ListAction."""
acts = []
for a in act:
aa = _do_create_action(a, kw)
if aa is not None:
acts.append(aa) # depends on [control=['if'], data=['aa']] # depends on [control=['for'], data=['a']]
if not acts:
return ListAction([]) # depends on [control=['if'], data=[]]
elif len(acts) == 1:
return acts[0] # depends on [control=['if'], data=[]]
else:
return ListAction(acts) |
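
The factory above normalizes a list of action specs: drop entries that produce None,
return an empty ListAction for nothing, unwrap a single action, and only wrap when there
are several. A runnable sketch with stand-in classes:

class ListAction(object):
    def __init__(self, actions):
        self.actions = actions
    def __repr__(self):
        return 'ListAction(%r)' % (self.actions,)

def _make_action(a):
    """Stand-in for _do_create_action: empty specs yield no action."""
    return a if a else None

def make_list_action(act):
    acts = [aa for aa in (_make_action(a) for a in act) if aa is not None]
    if not acts:
        return ListAction([])
    if len(acts) == 1:
        return acts[0]        # a single action needs no wrapper
    return ListAction(acts)

print(make_list_action([]))              # ListAction([])
print(make_list_action(['echo hi']))     # echo hi
print(make_list_action(['a', '', 'b']))  # ListAction(['a', 'b'])
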
def declare_directory(self, value): # type: (MutableMapping) -> ProvEntity
"""Register any nested files/directories."""
# FIXME: Calculate a hash-like identifier for directory
# so we get same value if it's the same filenames/hashes
# in a different location.
# For now, mint a new UUID to identify this directory, but
# attempt to keep it inside the value dictionary
dir_id = value.setdefault("@id", uuid.uuid4().urn)
# New annotation file to keep the ORE Folder listing
ore_doc_fn = dir_id.replace("urn:uuid:", "directory-") + ".ttl"
dir_bundle = self.document.bundle(self.metadata_ns[ore_doc_fn])
coll = self.document.entity(
dir_id, [(provM.PROV_TYPE, WFPROV["Artifact"]),
(provM.PROV_TYPE, PROV["Collection"]),
(provM.PROV_TYPE, PROV["Dictionary"]),
(provM.PROV_TYPE, RO["Folder"])])
# ORE description of ro:Folder, saved separately
coll_b = dir_bundle.entity(
dir_id, [(provM.PROV_TYPE, RO["Folder"]),
(provM.PROV_TYPE, ORE["Aggregation"])])
self.document.mentionOf(dir_id + "#ore", dir_id, dir_bundle.identifier)
# dir_manifest = dir_bundle.entity(
# dir_bundle.identifier, {PROV["type"]: ORE["ResourceMap"],
# ORE["describes"]: coll_b.identifier})
coll_attribs = [(ORE["isDescribedBy"], dir_bundle.identifier)]
coll_b_attribs = [] # type: List[Tuple[Identifier, ProvEntity]]
# FIXME: .listing might not be populated yet - hopefully
# a later call to this method will sort that
is_empty = True
if "listing" not in value:
fsaccess = StdFsAccess("")
get_listing(fsaccess, value)
for entry in value.get("listing", []):
is_empty = False
# Declare child-artifacts
entity = self.declare_artefact(entry)
self.document.membership(coll, entity)
# Membership relation aka our ORE Proxy
m_id = uuid.uuid4().urn
m_entity = self.document.entity(m_id)
m_b = dir_bundle.entity(m_id)
# PROV-O style Dictionary
# https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition
# ..as prov.py do not currently allow PROV-N extensions
# like hadDictionaryMember(..)
m_entity.add_asserted_type(PROV["KeyEntityPair"])
m_entity.add_attributes({
PROV["pairKey"]: entry["basename"],
PROV["pairEntity"]: entity,
})
# As well as a being a
# http://wf4ever.github.io/ro/2016-01-28/ro/#FolderEntry
m_b.add_asserted_type(RO["FolderEntry"])
m_b.add_asserted_type(ORE["Proxy"])
m_b.add_attributes({
RO["entryName"]: entry["basename"],
ORE["proxyIn"]: coll,
ORE["proxyFor"]: entity,
})
coll_attribs.append((PROV["hadDictionaryMember"], m_entity))
coll_b_attribs.append((ORE["aggregates"], m_b))
coll.add_attributes(coll_attribs)
coll_b.add_attributes(coll_b_attribs)
# Also Save ORE Folder as annotation metadata
ore_doc = ProvDocument()
ore_doc.add_namespace(ORE)
ore_doc.add_namespace(RO)
ore_doc.add_namespace(UUID)
ore_doc.add_bundle(dir_bundle)
ore_doc = ore_doc.flattened()
ore_doc_path = posixpath.join(_posix_path(METADATA), ore_doc_fn)
with self.research_object.write_bag_file(ore_doc_path) as provenance_file:
ore_doc.serialize(provenance_file, format="rdf", rdf_format="turtle")
self.research_object.add_annotation(dir_id, [ore_doc_fn], ORE["isDescribedBy"].uri)
if is_empty:
# Empty directory
coll.add_asserted_type(PROV["EmptyCollection"])
coll.add_asserted_type(PROV["EmptyDictionary"])
self.research_object.add_uri(coll.identifier.uri)
return coll | def function[declare_directory, parameter[self, value]]:
constant[Register any nested files/directories.]
variable[dir_id] assign[=] call[name[value].setdefault, parameter[constant[@id], call[name[uuid].uuid4, parameter[]].urn]]
variable[ore_doc_fn] assign[=] binary_operation[call[name[dir_id].replace, parameter[constant[urn:uuid:], constant[directory-]]] + constant[.ttl]]
variable[dir_bundle] assign[=] call[name[self].document.bundle, parameter[call[name[self].metadata_ns][name[ore_doc_fn]]]]
variable[coll] assign[=] call[name[self].document.entity, parameter[name[dir_id], list[[<ast.Tuple object at 0x7da18dc05ea0>, <ast.Tuple object at 0x7da18dc069b0>, <ast.Tuple object at 0x7da18bc71600>, <ast.Tuple object at 0x7da18bc717e0>]]]]
variable[coll_b] assign[=] call[name[dir_bundle].entity, parameter[name[dir_id], list[[<ast.Tuple object at 0x7da18bc73550>, <ast.Tuple object at 0x7da18bc72bc0>]]]]
call[name[self].document.mentionOf, parameter[binary_operation[name[dir_id] + constant[#ore]], name[dir_id], name[dir_bundle].identifier]]
variable[coll_attribs] assign[=] list[[<ast.Tuple object at 0x7da18bc70580>]]
variable[coll_b_attribs] assign[=] list[[]]
variable[is_empty] assign[=] constant[True]
if compare[constant[listing] <ast.NotIn object at 0x7da2590d7190> name[value]] begin[:]
variable[fsaccess] assign[=] call[name[StdFsAccess], parameter[constant[]]]
call[name[get_listing], parameter[name[fsaccess], name[value]]]
for taget[name[entry]] in starred[call[name[value].get, parameter[constant[listing], list[[]]]]] begin[:]
variable[is_empty] assign[=] constant[False]
variable[entity] assign[=] call[name[self].declare_artefact, parameter[name[entry]]]
call[name[self].document.membership, parameter[name[coll], name[entity]]]
variable[m_id] assign[=] call[name[uuid].uuid4, parameter[]].urn
variable[m_entity] assign[=] call[name[self].document.entity, parameter[name[m_id]]]
variable[m_b] assign[=] call[name[dir_bundle].entity, parameter[name[m_id]]]
call[name[m_entity].add_asserted_type, parameter[call[name[PROV]][constant[KeyEntityPair]]]]
call[name[m_entity].add_attributes, parameter[dictionary[[<ast.Subscript object at 0x7da18bc734f0>, <ast.Subscript object at 0x7da18bc73940>], [<ast.Subscript object at 0x7da18bc70b80>, <ast.Name object at 0x7da18bc73040>]]]]
call[name[m_b].add_asserted_type, parameter[call[name[RO]][constant[FolderEntry]]]]
call[name[m_b].add_asserted_type, parameter[call[name[ORE]][constant[Proxy]]]]
call[name[m_b].add_attributes, parameter[dictionary[[<ast.Subscript object at 0x7da18bc71ab0>, <ast.Subscript object at 0x7da18bc71ea0>, <ast.Subscript object at 0x7da18bc72e60>], [<ast.Subscript object at 0x7da18bc71690>, <ast.Name object at 0x7da18bc72830>, <ast.Name object at 0x7da18bc719c0>]]]]
call[name[coll_attribs].append, parameter[tuple[[<ast.Subscript object at 0x7da18bc73d60>, <ast.Name object at 0x7da18bc73cd0>]]]]
call[name[coll_b_attribs].append, parameter[tuple[[<ast.Subscript object at 0x7da18bc70fd0>, <ast.Name object at 0x7da18bc73520>]]]]
call[name[coll].add_attributes, parameter[name[coll_attribs]]]
call[name[coll_b].add_attributes, parameter[name[coll_b_attribs]]]
variable[ore_doc] assign[=] call[name[ProvDocument], parameter[]]
call[name[ore_doc].add_namespace, parameter[name[ORE]]]
call[name[ore_doc].add_namespace, parameter[name[RO]]]
call[name[ore_doc].add_namespace, parameter[name[UUID]]]
call[name[ore_doc].add_bundle, parameter[name[dir_bundle]]]
variable[ore_doc] assign[=] call[name[ore_doc].flattened, parameter[]]
variable[ore_doc_path] assign[=] call[name[posixpath].join, parameter[call[name[_posix_path], parameter[name[METADATA]]], name[ore_doc_fn]]]
with call[name[self].research_object.write_bag_file, parameter[name[ore_doc_path]]] begin[:]
call[name[ore_doc].serialize, parameter[name[provenance_file]]]
call[name[self].research_object.add_annotation, parameter[name[dir_id], list[[<ast.Name object at 0x7da18bc71f90>]], call[name[ORE]][constant[isDescribedBy]].uri]]
if name[is_empty] begin[:]
call[name[coll].add_asserted_type, parameter[call[name[PROV]][constant[EmptyCollection]]]]
call[name[coll].add_asserted_type, parameter[call[name[PROV]][constant[EmptyDictionary]]]]
call[name[self].research_object.add_uri, parameter[name[coll].identifier.uri]]
return[name[coll]] | keyword[def] identifier[declare_directory] ( identifier[self] , identifier[value] ):
literal[string]
identifier[dir_id] = identifier[value] . identifier[setdefault] ( literal[string] , identifier[uuid] . identifier[uuid4] (). identifier[urn] )
identifier[ore_doc_fn] = identifier[dir_id] . identifier[replace] ( literal[string] , literal[string] )+ literal[string]
identifier[dir_bundle] = identifier[self] . identifier[document] . identifier[bundle] ( identifier[self] . identifier[metadata_ns] [ identifier[ore_doc_fn] ])
identifier[coll] = identifier[self] . identifier[document] . identifier[entity] (
identifier[dir_id] ,[( identifier[provM] . identifier[PROV_TYPE] , identifier[WFPROV] [ literal[string] ]),
( identifier[provM] . identifier[PROV_TYPE] , identifier[PROV] [ literal[string] ]),
( identifier[provM] . identifier[PROV_TYPE] , identifier[PROV] [ literal[string] ]),
( identifier[provM] . identifier[PROV_TYPE] , identifier[RO] [ literal[string] ])])
identifier[coll_b] = identifier[dir_bundle] . identifier[entity] (
identifier[dir_id] ,[( identifier[provM] . identifier[PROV_TYPE] , identifier[RO] [ literal[string] ]),
( identifier[provM] . identifier[PROV_TYPE] , identifier[ORE] [ literal[string] ])])
identifier[self] . identifier[document] . identifier[mentionOf] ( identifier[dir_id] + literal[string] , identifier[dir_id] , identifier[dir_bundle] . identifier[identifier] )
identifier[coll_attribs] =[( identifier[ORE] [ literal[string] ], identifier[dir_bundle] . identifier[identifier] )]
identifier[coll_b_attribs] =[]
identifier[is_empty] = keyword[True]
keyword[if] literal[string] keyword[not] keyword[in] identifier[value] :
identifier[fsaccess] = identifier[StdFsAccess] ( literal[string] )
identifier[get_listing] ( identifier[fsaccess] , identifier[value] )
keyword[for] identifier[entry] keyword[in] identifier[value] . identifier[get] ( literal[string] ,[]):
identifier[is_empty] = keyword[False]
identifier[entity] = identifier[self] . identifier[declare_artefact] ( identifier[entry] )
identifier[self] . identifier[document] . identifier[membership] ( identifier[coll] , identifier[entity] )
identifier[m_id] = identifier[uuid] . identifier[uuid4] (). identifier[urn]
identifier[m_entity] = identifier[self] . identifier[document] . identifier[entity] ( identifier[m_id] )
identifier[m_b] = identifier[dir_bundle] . identifier[entity] ( identifier[m_id] )
identifier[m_entity] . identifier[add_asserted_type] ( identifier[PROV] [ literal[string] ])
identifier[m_entity] . identifier[add_attributes] ({
identifier[PROV] [ literal[string] ]: identifier[entry] [ literal[string] ],
identifier[PROV] [ literal[string] ]: identifier[entity] ,
})
identifier[m_b] . identifier[add_asserted_type] ( identifier[RO] [ literal[string] ])
identifier[m_b] . identifier[add_asserted_type] ( identifier[ORE] [ literal[string] ])
identifier[m_b] . identifier[add_attributes] ({
identifier[RO] [ literal[string] ]: identifier[entry] [ literal[string] ],
identifier[ORE] [ literal[string] ]: identifier[coll] ,
identifier[ORE] [ literal[string] ]: identifier[entity] ,
})
identifier[coll_attribs] . identifier[append] (( identifier[PROV] [ literal[string] ], identifier[m_entity] ))
identifier[coll_b_attribs] . identifier[append] (( identifier[ORE] [ literal[string] ], identifier[m_b] ))
identifier[coll] . identifier[add_attributes] ( identifier[coll_attribs] )
identifier[coll_b] . identifier[add_attributes] ( identifier[coll_b_attribs] )
identifier[ore_doc] = identifier[ProvDocument] ()
identifier[ore_doc] . identifier[add_namespace] ( identifier[ORE] )
identifier[ore_doc] . identifier[add_namespace] ( identifier[RO] )
identifier[ore_doc] . identifier[add_namespace] ( identifier[UUID] )
identifier[ore_doc] . identifier[add_bundle] ( identifier[dir_bundle] )
identifier[ore_doc] = identifier[ore_doc] . identifier[flattened] ()
identifier[ore_doc_path] = identifier[posixpath] . identifier[join] ( identifier[_posix_path] ( identifier[METADATA] ), identifier[ore_doc_fn] )
keyword[with] identifier[self] . identifier[research_object] . identifier[write_bag_file] ( identifier[ore_doc_path] ) keyword[as] identifier[provenance_file] :
identifier[ore_doc] . identifier[serialize] ( identifier[provenance_file] , identifier[format] = literal[string] , identifier[rdf_format] = literal[string] )
identifier[self] . identifier[research_object] . identifier[add_annotation] ( identifier[dir_id] ,[ identifier[ore_doc_fn] ], identifier[ORE] [ literal[string] ]. identifier[uri] )
keyword[if] identifier[is_empty] :
identifier[coll] . identifier[add_asserted_type] ( identifier[PROV] [ literal[string] ])
identifier[coll] . identifier[add_asserted_type] ( identifier[PROV] [ literal[string] ])
identifier[self] . identifier[research_object] . identifier[add_uri] ( identifier[coll] . identifier[identifier] . identifier[uri] )
keyword[return] identifier[coll] | def declare_directory(self, value): # type: (MutableMapping) -> ProvEntity
'Register any nested files/directories.'
# FIXME: Calculate a hash-like identifier for directory
# so we get same value if it's the same filenames/hashes
# in a different location.
# For now, mint a new UUID to identify this directory, but
# attempt to keep it inside the value dictionary
dir_id = value.setdefault('@id', uuid.uuid4().urn)
# New annotation file to keep the ORE Folder listing
ore_doc_fn = dir_id.replace('urn:uuid:', 'directory-') + '.ttl'
dir_bundle = self.document.bundle(self.metadata_ns[ore_doc_fn])
coll = self.document.entity(dir_id, [(provM.PROV_TYPE, WFPROV['Artifact']), (provM.PROV_TYPE, PROV['Collection']), (provM.PROV_TYPE, PROV['Dictionary']), (provM.PROV_TYPE, RO['Folder'])])
# ORE description of ro:Folder, saved separately
coll_b = dir_bundle.entity(dir_id, [(provM.PROV_TYPE, RO['Folder']), (provM.PROV_TYPE, ORE['Aggregation'])])
self.document.mentionOf(dir_id + '#ore', dir_id, dir_bundle.identifier)
# dir_manifest = dir_bundle.entity(
# dir_bundle.identifier, {PROV["type"]: ORE["ResourceMap"],
# ORE["describes"]: coll_b.identifier})
coll_attribs = [(ORE['isDescribedBy'], dir_bundle.identifier)]
coll_b_attribs = [] # type: List[Tuple[Identifier, ProvEntity]]
# FIXME: .listing might not be populated yet - hopefully
# a later call to this method will sort that
is_empty = True
if 'listing' not in value:
fsaccess = StdFsAccess('')
get_listing(fsaccess, value) # depends on [control=['if'], data=['value']]
for entry in value.get('listing', []):
is_empty = False
# Declare child-artifacts
entity = self.declare_artefact(entry)
self.document.membership(coll, entity)
# Membership relation aka our ORE Proxy
m_id = uuid.uuid4().urn
m_entity = self.document.entity(m_id)
m_b = dir_bundle.entity(m_id)
# PROV-O style Dictionary
# https://www.w3.org/TR/prov-dictionary/#dictionary-ontological-definition
# ..as prov.py do not currently allow PROV-N extensions
# like hadDictionaryMember(..)
m_entity.add_asserted_type(PROV['KeyEntityPair'])
m_entity.add_attributes({PROV['pairKey']: entry['basename'], PROV['pairEntity']: entity})
# As well as a being a
# http://wf4ever.github.io/ro/2016-01-28/ro/#FolderEntry
m_b.add_asserted_type(RO['FolderEntry'])
m_b.add_asserted_type(ORE['Proxy'])
m_b.add_attributes({RO['entryName']: entry['basename'], ORE['proxyIn']: coll, ORE['proxyFor']: entity})
coll_attribs.append((PROV['hadDictionaryMember'], m_entity))
coll_b_attribs.append((ORE['aggregates'], m_b)) # depends on [control=['for'], data=['entry']]
coll.add_attributes(coll_attribs)
coll_b.add_attributes(coll_b_attribs)
# Also Save ORE Folder as annotation metadata
ore_doc = ProvDocument()
ore_doc.add_namespace(ORE)
ore_doc.add_namespace(RO)
ore_doc.add_namespace(UUID)
ore_doc.add_bundle(dir_bundle)
ore_doc = ore_doc.flattened()
ore_doc_path = posixpath.join(_posix_path(METADATA), ore_doc_fn)
with self.research_object.write_bag_file(ore_doc_path) as provenance_file:
ore_doc.serialize(provenance_file, format='rdf', rdf_format='turtle') # depends on [control=['with'], data=['provenance_file']]
self.research_object.add_annotation(dir_id, [ore_doc_fn], ORE['isDescribedBy'].uri)
if is_empty:
# Empty directory
coll.add_asserted_type(PROV['EmptyCollection'])
coll.add_asserted_type(PROV['EmptyDictionary']) # depends on [control=['if'], data=[]]
self.research_object.add_uri(coll.identifier.uri)
return coll |
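
Stripped of the PROV and RO bookkeeping, declare_directory mints a URN for the
directory, records one key/entity pair per child of the listing, and flags empty
directories. A plain-dict sketch of that core (the output shape is mine, not the
library's):

import uuid

def declare_directory(value):
    """Summarize a directory value as {'@id': ..., 'members': [...]}."""
    dir_id = value.setdefault('@id', uuid.uuid4().urn)  # keep the id in the value dict
    coll = {'@id': dir_id, 'members': []}
    for entry in value.get('listing', []):
        coll['members'].append({
            'pairKey': entry['basename'],                    # rough PROV KeyEntityPair
            'pairEntity': entry.get('@id', uuid.uuid4().urn),
        })
    if not coll['members']:
        coll['empty'] = True                                 # EmptyCollection analogue
    return coll

d = {'listing': [{'basename': 'a.txt'}, {'basename': 'b.txt'}]}
print(declare_directory(d)['members'][0]['pairKey'])  # a.txt
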
def _set_scroll_v(self, *args):
"""Scroll both categories Canvas and scrolling container"""
self._canvas_categories.yview(*args)
self._canvas_scroll.yview(*args) | def function[_set_scroll_v, parameter[self]]:
constant[Scroll both categories Canvas and scrolling container]
call[name[self]._canvas_categories.yview, parameter[<ast.Starred object at 0x7da1b23c6e90>]]
call[name[self]._canvas_scroll.yview, parameter[<ast.Starred object at 0x7da1b1d4b1c0>]] | keyword[def] identifier[_set_scroll_v] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[self] . identifier[_canvas_categories] . identifier[yview] (* identifier[args] )
identifier[self] . identifier[_canvas_scroll] . identifier[yview] (* identifier[args] ) | def _set_scroll_v(self, *args):
"""Scroll both categories Canvas and scrolling container"""
self._canvas_categories.yview(*args)
self._canvas_scroll.yview(*args) |
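
The method above fans one scrollbar command out to two scrollable widgets so they move
together. The same pattern without Tk, using fake canvases so the sketch runs headless:

class FakeCanvas(object):
    def __init__(self, name):
        self.name = name
    def yview(self, *args):
        print(self.name, 'yview', args)

def set_scroll_v(canvases, *args):
    """Forward one scroll command to every widget in the group."""
    for c in canvases:
        c.yview(*args)

set_scroll_v([FakeCanvas('categories'), FakeCanvas('scroll')], 'moveto', '0.5')
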
def MOVS(cpu, dest, src):
"""
Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
base, size, ty = cpu.get_descriptor(cpu.DS)
src_addr = src.address() + base
dest_addr = dest.address() + base
src_reg = src.mem.base
dest_reg = dest.mem.base
size = dest.size
# Copy the data
dest.write(src.read())
#Advance EDI/ESI pointers
increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8)
cpu.write_register(src_reg, cpu.read_register(src_reg) + increment)
cpu.write_register(dest_reg, cpu.read_register(dest_reg) + increment) | def function[MOVS, parameter[cpu, dest, src]]:
constant[
Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
]
<ast.Tuple object at 0x7da1b26af190> assign[=] call[name[cpu].get_descriptor, parameter[name[cpu].DS]]
variable[src_addr] assign[=] binary_operation[call[name[src].address, parameter[]] + name[base]]
variable[dest_addr] assign[=] binary_operation[call[name[dest].address, parameter[]] + name[base]]
variable[src_reg] assign[=] name[src].mem.base
variable[dest_reg] assign[=] name[dest].mem.base
variable[size] assign[=] name[dest].size
call[name[dest].write, parameter[call[name[src].read, parameter[]]]]
variable[increment] assign[=] call[name[Operators].ITEBV, parameter[name[cpu].address_bit_size, name[cpu].DF, binary_operation[<ast.UnaryOp object at 0x7da20c6e5ea0> <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]], binary_operation[name[size] <ast.FloorDiv object at 0x7da2590d6bc0> constant[8]]]]
call[name[cpu].write_register, parameter[name[src_reg], binary_operation[call[name[cpu].read_register, parameter[name[src_reg]]] + name[increment]]]]
call[name[cpu].write_register, parameter[name[dest_reg], binary_operation[call[name[cpu].read_register, parameter[name[dest_reg]]] + name[increment]]]] | keyword[def] identifier[MOVS] ( identifier[cpu] , identifier[dest] , identifier[src] ):
literal[string]
identifier[base] , identifier[size] , identifier[ty] = identifier[cpu] . identifier[get_descriptor] ( identifier[cpu] . identifier[DS] )
identifier[src_addr] = identifier[src] . identifier[address] ()+ identifier[base]
identifier[dest_addr] = identifier[dest] . identifier[address] ()+ identifier[base]
identifier[src_reg] = identifier[src] . identifier[mem] . identifier[base]
identifier[dest_reg] = identifier[dest] . identifier[mem] . identifier[base]
identifier[size] = identifier[dest] . identifier[size]
identifier[dest] . identifier[write] ( identifier[src] . identifier[read] ())
identifier[increment] = identifier[Operators] . identifier[ITEBV] ( identifier[cpu] . identifier[address_bit_size] , identifier[cpu] . identifier[DF] ,- identifier[size] // literal[int] , identifier[size] // literal[int] )
identifier[cpu] . identifier[write_register] ( identifier[src_reg] , identifier[cpu] . identifier[read_register] ( identifier[src_reg] )+ identifier[increment] )
identifier[cpu] . identifier[write_register] ( identifier[dest_reg] , identifier[cpu] . identifier[read_register] ( identifier[dest_reg] )+ identifier[increment] ) | def MOVS(cpu, dest, src):
"""
Moves data from string to string.
Moves the byte, word, or doubleword specified with the second operand (source operand) to the location specified
with the first operand (destination operand). Both the source and destination operands are located in memory. The
address of the source operand is read from the DS:ESI or the DS:SI registers (depending on the address-size
attribute of the instruction, 32 or 16, respectively). The address of the destination operand is read from the ES:EDI
or the ES:DI registers (again depending on the address-size attribute of the instruction). The DS segment may be
overridden with a segment override prefix, but the ES segment cannot be overridden.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
(base, size, ty) = cpu.get_descriptor(cpu.DS)
src_addr = src.address() + base
dest_addr = dest.address() + base
src_reg = src.mem.base
dest_reg = dest.mem.base
size = dest.size
# Copy the data
dest.write(src.read())
#Advance EDI/ESI pointers
increment = Operators.ITEBV(cpu.address_bit_size, cpu.DF, -size // 8, size // 8)
cpu.write_register(src_reg, cpu.read_register(src_reg) + increment)
cpu.write_register(dest_reg, cpu.read_register(dest_reg) + increment) |
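
The heart of MOVS is the pointer update: copy size/8 bytes, then step both pointers
forward (DF clear) or backward (DF set). A pure-Python sketch over a bytearray:

def movs(mem, src, dst, size_bits, df):
    """Copy one element and step the ESI/EDI analogues by the element size."""
    n = size_bits // 8
    mem[dst:dst + n] = mem[src:src + n]
    step = -n if df else n          # DF set means strings are walked downward
    return src + step, dst + step

mem = bytearray(b'abcdefgh')
src, dst = 0, 4
for _ in range(4):                  # emulate REP MOVSB with DF clear
    src, dst = movs(mem, src, dst, 8, df=False)
print(mem)                          # bytearray(b'abcdabcd')
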
def add_bpmn_files(self, filenames):
"""
Add all filenames in the given list to the parser's set.
"""
for filename in filenames:
f = open(filename, 'r')
try:
self.add_bpmn_xml(ET.parse(f), filename=filename)
finally:
f.close() | def function[add_bpmn_files, parameter[self, filenames]]:
constant[
Add all filenames in the given list to the parser's set.
]
for taget[name[filename]] in starred[name[filenames]] begin[:]
variable[f] assign[=] call[name[open], parameter[name[filename], constant[r]]]
<ast.Try object at 0x7da1b01fda50> | keyword[def] identifier[add_bpmn_files] ( identifier[self] , identifier[filenames] ):
literal[string]
keyword[for] identifier[filename] keyword[in] identifier[filenames] :
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
keyword[try] :
identifier[self] . identifier[add_bpmn_xml] ( identifier[ET] . identifier[parse] ( identifier[f] ), identifier[filename] = identifier[filename] )
keyword[finally] :
identifier[f] . identifier[close] () | def add_bpmn_files(self, filenames):
"""
Add all filenames in the given list to the parser's set.
"""
for filename in filenames:
f = open(filename, 'r')
try:
self.add_bpmn_xml(ET.parse(f), filename=filename) # depends on [control=['try'], data=[]]
finally:
f.close() # depends on [control=['for'], data=['filename']] |
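
The try/finally above predates context managers; the same loop with a with-statement,
exercised on a throwaway file (self.add_bpmn_xml is replaced by a plain list append):

import os
import tempfile
import xml.etree.ElementTree as ET

def add_bpmn_files(filenames, trees):
    for filename in filenames:
        with open(filename, 'r') as f:   # closes f even if parsing raises
            trees.append(ET.parse(f))

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'process.bpmn')
    with open(path, 'w') as f:
        f.write('<definitions/>')
    trees = []
    add_bpmn_files([path], trees)
    print(trees[0].getroot().tag)        # definitions
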
def add_pattern(self, pattern, category=SourceRootCategories.UNKNOWN):
"""Add a pattern to the trie."""
self._do_add_pattern(pattern, tuple(), category) | def function[add_pattern, parameter[self, pattern, category]]:
constant[Add a pattern to the trie.]
call[name[self]._do_add_pattern, parameter[name[pattern], call[name[tuple], parameter[]], name[category]]] | keyword[def] identifier[add_pattern] ( identifier[self] , identifier[pattern] , identifier[category] = identifier[SourceRootCategories] . identifier[UNKNOWN] ):
literal[string]
identifier[self] . identifier[_do_add_pattern] ( identifier[pattern] , identifier[tuple] (), identifier[category] ) | def add_pattern(self, pattern, category=SourceRootCategories.UNKNOWN):
"""Add a pattern to the trie."""
self._do_add_pattern(pattern, tuple(), category) |
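
_do_add_pattern is not shown in this excerpt, but the trie insert it performs can be
sketched with nested dicts (the node layout and the '$category' key are my inventions):

def add_pattern(trie, pattern, category='unknown'):
    """Insert the path components of pattern into a nested-dict trie."""
    node = trie
    for part in pattern.split('/'):
        node = node.setdefault(part, {})
    node['$category'] = category          # mark the terminal node

trie = {}
add_pattern(trie, 'src/main/python', 'source')
add_pattern(trie, 'src/test/python', 'test')
print(trie['src']['main']['python']['$category'])  # source
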
def create_item(self, **kwargs):
"""
        Return a model instance matching kwargs, creating it first if it
        does not already exist.
"""
item, created = self.queryset.model.objects.get_or_create(**kwargs)
return item | def function[create_item, parameter[self]]:
constant[
Return a model instance created from kwargs.
]
<ast.Tuple object at 0x7da18c4cf250> assign[=] call[name[self].queryset.model.objects.get_or_create, parameter[]]
return[name[item]] | keyword[def] identifier[create_item] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[item] , identifier[created] = identifier[self] . identifier[queryset] . identifier[model] . identifier[objects] . identifier[get_or_create] (** identifier[kwargs] )
keyword[return] identifier[item] | def create_item(self, **kwargs):
"""
Return a model instance created from kwargs.
"""
(item, created) = self.queryset.model.objects.get_or_create(**kwargs)
return item |
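
get_or_create returns an (instance, created) pair; create_item keeps the instance and
drops the flag. A dict-backed analogue of that contract:

def get_or_create(store, **kwargs):
    """Return (item, created), reusing an existing entry when the key matches."""
    key = tuple(sorted(kwargs.items()))
    if key in store:
        return store[key], False
    store[key] = dict(kwargs)
    return store[key], True

store = {}
item, created = get_or_create(store, name='widget')
print(created)   # True: first call inserts
item, created = get_or_create(store, name='widget')
print(created)   # False: second call reuses the stored item
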
def _get_role_arn():
"""
    Return the role ARN from the X-Role-ARN header, else look it up
    from the source IP, else fall back to the command-line default.
"""
role_arn = bottle.request.headers.get('X-Role-ARN')
if not role_arn:
role_arn = _lookup_ip_role_arn(bottle.request.environ.get('REMOTE_ADDR'))
if not role_arn:
role_arn = _role_arn
return role_arn | def function[_get_role_arn, parameter[]]:
constant[
Return role arn from X-Role-ARN header,
lookup role arn from source IP,
or fall back to command line default.
]
variable[role_arn] assign[=] call[name[bottle].request.headers.get, parameter[constant[X-Role-ARN]]]
if <ast.UnaryOp object at 0x7da1b2344100> begin[:]
variable[role_arn] assign[=] call[name[_lookup_ip_role_arn], parameter[call[name[bottle].request.environ.get, parameter[constant[REMOTE_ADDR]]]]]
if <ast.UnaryOp object at 0x7da1b2347490> begin[:]
variable[role_arn] assign[=] name[_role_arn]
return[name[role_arn]] | keyword[def] identifier[_get_role_arn] ():
literal[string]
identifier[role_arn] = identifier[bottle] . identifier[request] . identifier[headers] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[role_arn] :
identifier[role_arn] = identifier[_lookup_ip_role_arn] ( identifier[bottle] . identifier[request] . identifier[environ] . identifier[get] ( literal[string] ))
keyword[if] keyword[not] identifier[role_arn] :
identifier[role_arn] = identifier[_role_arn]
keyword[return] identifier[role_arn] | def _get_role_arn():
"""
Return role arn from X-Role-ARN header,
lookup role arn from source IP,
or fall back to command line default.
"""
role_arn = bottle.request.headers.get('X-Role-ARN')
if not role_arn:
role_arn = _lookup_ip_role_arn(bottle.request.environ.get('REMOTE_ADDR')) # depends on [control=['if'], data=[]]
if not role_arn:
role_arn = _role_arn # depends on [control=['if'], data=[]]
return role_arn |
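
The lookup is a three-step fallback chain: explicit header, then source-IP mapping, then
the process-wide default. The same chain with plain dicts (the ARNs are made up):

_role_arn = 'arn:aws:iam::123456789012:role/default'
_ip_map = {'10.0.0.5': 'arn:aws:iam::123456789012:role/ip-mapped'}

def get_role_arn(headers, remote_addr):
    role_arn = headers.get('X-Role-ARN')
    if not role_arn:
        role_arn = _ip_map.get(remote_addr)   # stand-in for _lookup_ip_role_arn
    if not role_arn:
        role_arn = _role_arn                  # command-line default
    return role_arn

print(get_role_arn({}, '10.0.0.5'))  # the ip-mapped role
print(get_role_arn({}, '10.9.9.9'))  # the default role
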
def adet(z, x):
"""d|A|/dA = adj(A).T
See Jacobi's formula: https://en.wikipedia.org/wiki/Jacobi%27s_formula
"""
    # Jacobi's formula: d det(A)/dA = adj(A).T, with adj(A) = det(A) * inv(A);
    # pinv is used here instead of inv for numerical robustness
    adjugate = numpy.linalg.det(x) * numpy.linalg.pinv(x)
d[x] = d[z] * numpy.transpose(adjugate) | def function[adet, parameter[z, x]]:
constant[d|A|/dA = adj(A).T
See Jacobi's formula: https://en.wikipedia.org/wiki/Jacobi%27s_formula
]
variable[adjugate] assign[=] binary_operation[call[name[numpy].linalg.det, parameter[name[x]]] * call[name[numpy].linalg.pinv, parameter[name[x]]]]
call[name[d]][name[x]] assign[=] binary_operation[call[name[d]][name[z]] * call[name[numpy].transpose, parameter[name[adjugate]]]] | keyword[def] identifier[adet] ( identifier[z] , identifier[x] ):
literal[string]
identifier[adjugate] = identifier[numpy] . identifier[linalg] . identifier[det] ( identifier[x] )* identifier[numpy] . identifier[linalg] . identifier[pinv] ( identifier[x] )
identifier[d] [ identifier[x] ]= identifier[d] [ identifier[z] ]* identifier[numpy] . identifier[transpose] ( identifier[adjugate] ) | def adet(z, x):
"""d|A|/dA = adj(A).T
See Jacobi's formula: https://en.wikipedia.org/wiki/Jacobi%27s_formula
"""
adjugate = numpy.linalg.det(x) * numpy.linalg.pinv(x)
d[x] = d[z] * numpy.transpose(adjugate) |
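
A minimal numpy check of the identity the function encodes, d(det A)/dA = det(A)·inv(A).T, against central finite differences; `d` in the snippet above is assumed to be the gradient dictionary of a tangent-style autodiff adjoint, which this sketch does not need.

import numpy

rng = numpy.random.default_rng(0)
A = rng.normal(size=(4, 4))
analytic = numpy.linalg.det(A) * numpy.linalg.inv(A).T   # adj(A).T

eps = 1e-6
numeric = numpy.zeros_like(A)
for i in range(4):
    for j in range(4):
        Ap, Am = A.copy(), A.copy()
        Ap[i, j] += eps
        Am[i, j] -= eps
        numeric[i, j] = (numpy.linalg.det(Ap) - numpy.linalg.det(Am)) / (2 * eps)

assert numpy.allclose(analytic, numeric, atol=1e-5)   # Jacobi's formula holds
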
def set_points(self, pt1, pt2):
"""Reset the rectangle coordinates."""
(x1, y1) = pt1.as_tuple()
(x2, y2) = pt2.as_tuple()
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2) | def function[set_points, parameter[self, pt1, pt2]]:
constant[Reset the rectangle coordinates.]
<ast.Tuple object at 0x7da18fe93340> assign[=] call[name[pt1].as_tuple, parameter[]]
<ast.Tuple object at 0x7da18fe913c0> assign[=] call[name[pt2].as_tuple, parameter[]]
name[self].left assign[=] call[name[min], parameter[name[x1], name[x2]]]
name[self].top assign[=] call[name[min], parameter[name[y1], name[y2]]]
name[self].right assign[=] call[name[max], parameter[name[x1], name[x2]]]
name[self].bottom assign[=] call[name[max], parameter[name[y1], name[y2]]] | keyword[def] identifier[set_points] ( identifier[self] , identifier[pt1] , identifier[pt2] ):
literal[string]
( identifier[x1] , identifier[y1] )= identifier[pt1] . identifier[as_tuple] ()
( identifier[x2] , identifier[y2] )= identifier[pt2] . identifier[as_tuple] ()
identifier[self] . identifier[left] = identifier[min] ( identifier[x1] , identifier[x2] )
identifier[self] . identifier[top] = identifier[min] ( identifier[y1] , identifier[y2] )
identifier[self] . identifier[right] = identifier[max] ( identifier[x1] , identifier[x2] )
identifier[self] . identifier[bottom] = identifier[max] ( identifier[y1] , identifier[y2] ) | def set_points(self, pt1, pt2):
"""Reset the rectangle coordinates."""
(x1, y1) = pt1.as_tuple()
(x2, y2) = pt2.as_tuple()
self.left = min(x1, x2)
self.top = min(y1, y2)
self.right = max(x1, x2)
self.bottom = max(y1, y2) |
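
A small sketch showing that the min/max normalization makes the rectangle independent of point order (screen coordinates, so top is the smaller y); Point and Rect are minimal stand-ins, and set_points from the plain-code column above is called directly as a function.

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def as_tuple(self):
        return (self.x, self.y)

class Rect:
    pass

r = Rect()
set_points(r, Point(5, 2), Point(1, 8))   # corners given "backwards"
assert (r.left, r.top, r.right, r.bottom) == (1, 2, 5, 8)
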
def download(self, source, target, mpi=None, pos=0, chunk=0, part=0):
'''Thread worker for download operation.'''
s3url = S3URL(source)
obj = self.lookup(s3url)
if obj is None:
        raise Failure('The obj "%s" does not exist.' % (s3url.path,))
# Initialization: Set up multithreaded downloads.
if not mpi:
# optional checks
if self.opt.dry_run:
message('%s => %s', source, target)
return
elif self.opt.sync_check and self.sync_check(LocalMD5Cache(target), obj):
message('%s => %s (synced)', source, target)
return
elif not self.opt.force and os.path.exists(target):
raise Failure('File already exists: %s' % target)
fsize = int(obj['ContentLength'])
# Small file optimization.
if fsize < self.opt.max_singlepart_download_size:
# Create a single part to chain back main download operation.
mpi = ThreadUtil.MultipartItem(tempfile_get(target))
mpi.total = 1
pos = 0
chunk = fsize
# Continue as one part download.
else:
# Here we use temp filename as the id of mpi.
for args in self.get_file_splits(tempfile_get(target), source, target, fsize, self.opt.multipart_split_size):
self.pool.download(*args)
return
tempfile = mpi.id
if self.opt.recursive:
self.mkdirs(tempfile)
# Download part of the file, range is inclusive.
response = self.s3.get_object(Bucket=s3url.bucket, Key=s3url.path, Range='bytes=%d-%d' % (pos, pos + chunk - 1))
self.write_file_chunk(tempfile, pos, chunk, response['Body'])
# Finalize
if mpi.complete({'PartNumber': part}):
try:
self.update_privilege(obj, tempfile)
self._verify_file_size(obj, tempfile)
tempfile_set(tempfile, target)
message('%s => %s', source, target)
except Exception as e:
# Note that we don't retry in this case, because
# We are going to remove the temp file, and if we
# retry here with original parameters (wrapped in
# the task item), it would fail anyway
tempfile_set(tempfile, None)
raise Failure('Download Failure: %s, Source: %s.' % (e.message, source)) | def function[download, parameter[self, source, target, mpi, pos, chunk, part]]:
constant[Thread worker for download operation.]
variable[s3url] assign[=] call[name[S3URL], parameter[name[source]]]
variable[obj] assign[=] call[name[self].lookup, parameter[name[s3url]]]
if compare[name[obj] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b02977f0>
if <ast.UnaryOp object at 0x7da1b02976d0> begin[:]
if name[self].opt.dry_run begin[:]
call[name[message], parameter[constant[%s => %s], name[source], name[target]]]
return[None]
variable[fsize] assign[=] call[name[int], parameter[call[name[obj]][constant[ContentLength]]]]
if compare[name[fsize] less[<] name[self].opt.max_singlepart_download_size] begin[:]
variable[mpi] assign[=] call[name[ThreadUtil].MultipartItem, parameter[call[name[tempfile_get], parameter[name[target]]]]]
name[mpi].total assign[=] constant[1]
variable[pos] assign[=] constant[0]
variable[chunk] assign[=] name[fsize]
variable[tempfile] assign[=] name[mpi].id
if name[self].opt.recursive begin[:]
call[name[self].mkdirs, parameter[name[tempfile]]]
variable[response] assign[=] call[name[self].s3.get_object, parameter[]]
call[name[self].write_file_chunk, parameter[name[tempfile], name[pos], name[chunk], call[name[response]][constant[Body]]]]
if call[name[mpi].complete, parameter[dictionary[[<ast.Constant object at 0x7da1b0297760>], [<ast.Name object at 0x7da1b0297880>]]]] begin[:]
<ast.Try object at 0x7da1b0297730> | keyword[def] identifier[download] ( identifier[self] , identifier[source] , identifier[target] , identifier[mpi] = keyword[None] , identifier[pos] = literal[int] , identifier[chunk] = literal[int] , identifier[part] = literal[int] ):
literal[string]
identifier[s3url] = identifier[S3URL] ( identifier[source] )
identifier[obj] = identifier[self] . identifier[lookup] ( identifier[s3url] )
keyword[if] identifier[obj] keyword[is] keyword[None] :
keyword[raise] identifier[Failure] ( literal[string] %( identifier[s3url] . identifier[path] ,))
keyword[if] keyword[not] identifier[mpi] :
keyword[if] identifier[self] . identifier[opt] . identifier[dry_run] :
identifier[message] ( literal[string] , identifier[source] , identifier[target] )
keyword[return]
keyword[elif] identifier[self] . identifier[opt] . identifier[sync_check] keyword[and] identifier[self] . identifier[sync_check] ( identifier[LocalMD5Cache] ( identifier[target] ), identifier[obj] ):
identifier[message] ( literal[string] , identifier[source] , identifier[target] )
keyword[return]
keyword[elif] keyword[not] identifier[self] . identifier[opt] . identifier[force] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[target] ):
keyword[raise] identifier[Failure] ( literal[string] % identifier[target] )
identifier[fsize] = identifier[int] ( identifier[obj] [ literal[string] ])
keyword[if] identifier[fsize] < identifier[self] . identifier[opt] . identifier[max_singlepart_download_size] :
identifier[mpi] = identifier[ThreadUtil] . identifier[MultipartItem] ( identifier[tempfile_get] ( identifier[target] ))
identifier[mpi] . identifier[total] = literal[int]
identifier[pos] = literal[int]
identifier[chunk] = identifier[fsize]
keyword[else] :
keyword[for] identifier[args] keyword[in] identifier[self] . identifier[get_file_splits] ( identifier[tempfile_get] ( identifier[target] ), identifier[source] , identifier[target] , identifier[fsize] , identifier[self] . identifier[opt] . identifier[multipart_split_size] ):
identifier[self] . identifier[pool] . identifier[download] (* identifier[args] )
keyword[return]
identifier[tempfile] = identifier[mpi] . identifier[id]
keyword[if] identifier[self] . identifier[opt] . identifier[recursive] :
identifier[self] . identifier[mkdirs] ( identifier[tempfile] )
identifier[response] = identifier[self] . identifier[s3] . identifier[get_object] ( identifier[Bucket] = identifier[s3url] . identifier[bucket] , identifier[Key] = identifier[s3url] . identifier[path] , identifier[Range] = literal[string] %( identifier[pos] , identifier[pos] + identifier[chunk] - literal[int] ))
identifier[self] . identifier[write_file_chunk] ( identifier[tempfile] , identifier[pos] , identifier[chunk] , identifier[response] [ literal[string] ])
keyword[if] identifier[mpi] . identifier[complete] ({ literal[string] : identifier[part] }):
keyword[try] :
identifier[self] . identifier[update_privilege] ( identifier[obj] , identifier[tempfile] )
identifier[self] . identifier[_verify_file_size] ( identifier[obj] , identifier[tempfile] )
identifier[tempfile_set] ( identifier[tempfile] , identifier[target] )
identifier[message] ( literal[string] , identifier[source] , identifier[target] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[tempfile_set] ( identifier[tempfile] , keyword[None] )
keyword[raise] identifier[Failure] ( literal[string] %( identifier[e] . identifier[message] , identifier[source] )) | def download(self, source, target, mpi=None, pos=0, chunk=0, part=0):
"""Thread worker for download operation."""
s3url = S3URL(source)
obj = self.lookup(s3url)
if obj is None:
        raise Failure('The obj "%s" does not exist.' % (s3url.path,)) # depends on [control=['if'], data=[]]
# Initialization: Set up multithreaded downloads.
if not mpi:
# optional checks
if self.opt.dry_run:
message('%s => %s', source, target)
return # depends on [control=['if'], data=[]]
elif self.opt.sync_check and self.sync_check(LocalMD5Cache(target), obj):
message('%s => %s (synced)', source, target)
return # depends on [control=['if'], data=[]]
elif not self.opt.force and os.path.exists(target):
raise Failure('File already exists: %s' % target) # depends on [control=['if'], data=[]]
fsize = int(obj['ContentLength'])
# Small file optimization.
if fsize < self.opt.max_singlepart_download_size:
# Create a single part to chain back main download operation.
mpi = ThreadUtil.MultipartItem(tempfile_get(target))
mpi.total = 1
pos = 0
chunk = fsize # depends on [control=['if'], data=['fsize']]
else:
# Continue as one part download.
# Here we use temp filename as the id of mpi.
for args in self.get_file_splits(tempfile_get(target), source, target, fsize, self.opt.multipart_split_size):
self.pool.download(*args) # depends on [control=['for'], data=['args']]
return # depends on [control=['if'], data=[]]
tempfile = mpi.id
if self.opt.recursive:
self.mkdirs(tempfile) # depends on [control=['if'], data=[]]
# Download part of the file, range is inclusive.
response = self.s3.get_object(Bucket=s3url.bucket, Key=s3url.path, Range='bytes=%d-%d' % (pos, pos + chunk - 1))
self.write_file_chunk(tempfile, pos, chunk, response['Body'])
# Finalize
if mpi.complete({'PartNumber': part}):
try:
self.update_privilege(obj, tempfile)
self._verify_file_size(obj, tempfile)
tempfile_set(tempfile, target)
message('%s => %s', source, target) # depends on [control=['try'], data=[]]
except Exception as e:
# Note that we don't retry in this case, because
# We are going to remove the temp file, and if we
# retry here with original parameters (wrapped in
# the task item), it would fail anyway
tempfile_set(tempfile, None)
raise Failure('Download Failure: %s, Source: %s.' % (e.message, source)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] |
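
A standalone sketch of the byte-range arithmetic behind the multipart branch; `get_file_splits` itself is not shown in the source, so this helper is an assumed reconstruction that only reproduces the inclusive `Range='bytes=%d-%d'` math used above.

def iter_ranges(fsize, split_size):
    """Yield (part, pos, chunk, range_header) tuples covering fsize bytes."""
    part = 1
    for pos in range(0, fsize, split_size):
        chunk = min(split_size, fsize - pos)
        yield part, pos, chunk, 'bytes=%d-%d' % (pos, pos + chunk - 1)
        part += 1

assert list(iter_ranges(10, 4)) == [
    (1, 0, 4, 'bytes=0-3'),
    (2, 4, 4, 'bytes=4-7'),
    (3, 8, 2, 'bytes=8-9'),   # last part is shorter; ranges are inclusive
]
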
def _extract_mask_distance(image, mask = slice(None), voxelspacing = None):
"""
Internal, single-image version of `mask_distance`.
"""
if isinstance(mask, slice):
mask = numpy.ones(image.shape, numpy.bool)
distance_map = distance_transform_edt(mask, sampling=voxelspacing)
return _extract_intensities(distance_map, mask) | def function[_extract_mask_distance, parameter[image, mask, voxelspacing]]:
constant[
Internal, single-image version of `mask_distance`.
]
if call[name[isinstance], parameter[name[mask], name[slice]]] begin[:]
variable[mask] assign[=] call[name[numpy].ones, parameter[name[image].shape, name[numpy].bool]]
variable[distance_map] assign[=] call[name[distance_transform_edt], parameter[name[mask]]]
return[call[name[_extract_intensities], parameter[name[distance_map], name[mask]]]] | keyword[def] identifier[_extract_mask_distance] ( identifier[image] , identifier[mask] = identifier[slice] ( keyword[None] ), identifier[voxelspacing] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[mask] , identifier[slice] ):
identifier[mask] = identifier[numpy] . identifier[ones] ( identifier[image] . identifier[shape] , identifier[numpy] . identifier[bool] )
identifier[distance_map] = identifier[distance_transform_edt] ( identifier[mask] , identifier[sampling] = identifier[voxelspacing] )
keyword[return] identifier[_extract_intensities] ( identifier[distance_map] , identifier[mask] ) | def _extract_mask_distance(image, mask=slice(None), voxelspacing=None):
"""
Internal, single-image version of `mask_distance`.
"""
if isinstance(mask, slice):
mask = numpy.ones(image.shape, numpy.bool) # depends on [control=['if'], data=[]]
distance_map = distance_transform_edt(mask, sampling=voxelspacing)
return _extract_intensities(distance_map, mask) |
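
A tiny demo of the distance-map step on a handmade mask: each foreground voxel gets its Euclidean distance to the nearest background voxel, and only masked positions are kept, mirroring the _extract_intensities call.

import numpy
from scipy.ndimage import distance_transform_edt

mask = numpy.array([[0, 1, 1],
                    [0, 1, 1],
                    [0, 0, 0]], dtype=bool)
dist = distance_transform_edt(mask, sampling=(1.0, 1.0))
print(dist[mask])   # distances sampled only under the mask
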
def parse(file_path, content=None):
"""
Create a PythonFile object with specified file_path and content.
If content is None then, it is loaded from the file_path method.
Otherwise, file_path is only used for reporting errors.
"""
try:
if content is None:
with open(file_path) as f:
content = f.read()
py_tree = RedBaron(content)
return RedbaronPythonFile(file_path, py_tree)
except Exception as ex:
# Trim parsing error message to only include failure location
msg = str(ex)
marker = "<---- here\n"
marker_pos = msg.find(marker)
if marker_pos > 0:
msg = msg[:marker_pos + len(marker)]
logging.error("Failed to parse {}: {}".format(file_path, msg)) | def function[parse, parameter[file_path, content]]:
constant[
Create a PythonFile object with specified file_path and content.
If content is None then, it is loaded from the file_path method.
Otherwise, file_path is only used for reporting errors.
]
<ast.Try object at 0x7da1b025d540> | keyword[def] identifier[parse] ( identifier[file_path] , identifier[content] = keyword[None] ):
literal[string]
keyword[try] :
keyword[if] identifier[content] keyword[is] keyword[None] :
keyword[with] identifier[open] ( identifier[file_path] ) keyword[as] identifier[f] :
identifier[content] = identifier[f] . identifier[read] ()
identifier[py_tree] = identifier[RedBaron] ( identifier[content] )
keyword[return] identifier[RedbaronPythonFile] ( identifier[file_path] , identifier[py_tree] )
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[msg] = identifier[str] ( identifier[ex] )
identifier[marker] = literal[string]
identifier[marker_pos] = identifier[msg] . identifier[find] ( identifier[marker] )
keyword[if] identifier[marker_pos] > literal[int] :
identifier[msg] = identifier[msg] [: identifier[marker_pos] + identifier[len] ( identifier[marker] )]
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[file_path] , identifier[msg] )) | def parse(file_path, content=None):
"""
Create a PythonFile object with specified file_path and content.
If content is None then, it is loaded from the file_path method.
Otherwise, file_path is only used for reporting errors.
"""
try:
if content is None:
with open(file_path) as f:
content = f.read() # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=['content']]
py_tree = RedBaron(content)
return RedbaronPythonFile(file_path, py_tree) # depends on [control=['try'], data=[]]
except Exception as ex:
# Trim parsing error message to only include failure location
msg = str(ex)
marker = '<---- here\n'
marker_pos = msg.find(marker)
if marker_pos > 0:
msg = msg[:marker_pos + len(marker)] # depends on [control=['if'], data=['marker_pos']]
logging.error('Failed to parse {}: {}'.format(file_path, msg)) # depends on [control=['except'], data=['ex']] |
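
A hedged usage sketch of the contract above: a RedbaronPythonFile on success, None (after logging the trimmed parse error) on failure; the file paths are only labels here, since content is passed explicitly.

good = parse('example.py', content='def f():\n    return 1\n')
bad = parse('broken.py', content='def f(:\n')   # syntax error -> logged, None
assert good is not None and bad is None
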
def accounts_frontiers(self, accounts):
"""
Returns a list of pairs of account and block hash representing the
head block for **accounts** list
:param accounts: Accounts to return frontier blocks for
:type accounts: list of str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.accounts_frontiers(
... accounts=[
... "xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3",
... "xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"
... ]
... )
{
"xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3":
"791AF413173EEE674A6FCF633B5DFC0F3C33F397F0DA08E987D9E0741D40D81A",
"xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7":
"6A32397F4E95AF025DE29D9BF1ACE864D5404362258E06489FABDBA9DCCC046F"
}
"""
accounts = self._process_value(accounts, 'list')
payload = {"accounts": accounts}
resp = self.call('accounts_frontiers', payload)
return resp.get('frontiers') or {} | def function[accounts_frontiers, parameter[self, accounts]]:
constant[
Returns a list of pairs of account and block hash representing the
head block for **accounts** list
:param accounts: Accounts to return frontier blocks for
:type accounts: list of str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.accounts_frontiers(
... accounts=[
... "xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3",
... "xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"
... ]
... )
{
"xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3":
"791AF413173EEE674A6FCF633B5DFC0F3C33F397F0DA08E987D9E0741D40D81A",
"xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7":
"6A32397F4E95AF025DE29D9BF1ACE864D5404362258E06489FABDBA9DCCC046F"
}
]
variable[accounts] assign[=] call[name[self]._process_value, parameter[name[accounts], constant[list]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b2518730>], [<ast.Name object at 0x7da1b251afb0>]]
variable[resp] assign[=] call[name[self].call, parameter[constant[accounts_frontiers], name[payload]]]
return[<ast.BoolOp object at 0x7da1b2518d90>] | keyword[def] identifier[accounts_frontiers] ( identifier[self] , identifier[accounts] ):
literal[string]
identifier[accounts] = identifier[self] . identifier[_process_value] ( identifier[accounts] , literal[string] )
identifier[payload] ={ literal[string] : identifier[accounts] }
identifier[resp] = identifier[self] . identifier[call] ( literal[string] , identifier[payload] )
keyword[return] identifier[resp] . identifier[get] ( literal[string] ) keyword[or] {} | def accounts_frontiers(self, accounts):
"""
Returns a list of pairs of account and block hash representing the
head block for **accounts** list
:param accounts: Accounts to return frontier blocks for
:type accounts: list of str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.accounts_frontiers(
... accounts=[
... "xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3",
... "xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7"
... ]
... )
{
"xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3":
"791AF413173EEE674A6FCF633B5DFC0F3C33F397F0DA08E987D9E0741D40D81A",
"xrb_3i1aq1cchnmbn9x5rsbap8b15akfh7wj7pwskuzi7ahz8oq6cobd99d4r3b7":
"6A32397F4E95AF025DE29D9BF1ACE864D5404362258E06489FABDBA9DCCC046F"
}
"""
accounts = self._process_value(accounts, 'list')
payload = {'accounts': accounts}
resp = self.call('accounts_frontiers', payload)
return resp.get('frontiers') or {} |
def checkIfEmailWasHacked(email=None, sleepSeconds=1):
"""
Method that checks if the given email is stored in the HIBP website.
This function automatically waits a second to avoid problems with the API
rate limit. An example of the json received:
```
[{"Title":"Adobe","Name":"Adobe","Domain":"adobe.com","BreachDate":"2013-10-4","AddedDate":"2013-12-04T00:12Z","PwnCount":152445165,"Description":"The big one. In October 2013, 153 million Adobe accounts were breached with each containing an internal ID, username, email, <em>encrypted</em> password and a password hint in plain text. The password cryptography was poorly done and <a href=\"http://stricture-group.com/files/adobe-top100.txt\" target=\"_blank\">many were quickly resolved back to plain text</a>. The unencrypted hints also <a href=\"http://www.troyhunt.com/2013/11/adobe-credentials-and-serious.html\" target=\"_blank\">disclosed much about the passwords</a> adding further to the risk that hundreds of millions of Adobe customers already faced.","DataClasses":["Email addresses","Password hints","Passwords","Usernames"]}]
```
Args:
-----
email: Email to verify in HIBP.
Returns:
--------
A python structure for the json received. If nothing was found, it will
return an empty list.
"""
# Sleeping just a little bit
time.sleep(sleepSeconds)
print("\t[*] Bypassing Cloudflare Restriction...")
ua = 'osrframework 0.18'
useragent = {'User-Agent': ua}
cookies, user_agent = cfscrape.get_tokens('https://haveibeenpwned.com/api/v2/breachedaccount/[email protected]', user_agent=ua)
leaks = []
apiURL = "https://haveibeenpwned.com/api/v2/breachedaccount/{}".format(email)
# Accessing the HIBP API
time.sleep(sleepSeconds)
# Building API query
data = requests.get(
apiURL,
headers=useragent,
cookies=cookies,
verify=True
).text
# Reading the text data onto python structures
try:
jsonData = json.loads(data)
for e in jsonData:
# Building the i3visio like structure
new = {}
new["value"] = "(HIBP) " + e.get("Name") + " - " + email
new["type"] = "i3visio.profile"
new["attributes"] = [
{
"type": "i3visio.platform_leaked",
"value": e.get("Name"),
"attributes": []
},
{
"type": "@source",
"value": "haveibeenpwned.com",
"attributes": []
},
{
"type": "@source_uri",
"value": apiURL,
"attributes": []
},
{
"type": "@pwn_count",
"value": e.get("PwnCount"),
"attributes": []
},
{
"type": "@added_date",
"value": e.get("AddedDate"),
"attributes": []
},
{
"type": "@breach_date",
"value": e.get("BreachDate"),
"attributes": []
},
{
"type": "@description",
"value": e.get("Description"),
"attributes": []
}
] + general.expandEntitiesFromEmail(email)
leaks.append(new)
except ValueError:
return []
except Exception:
print("ERROR: Something happenned when using HIBP API.")
return []
return leaks | def function[checkIfEmailWasHacked, parameter[email, sleepSeconds]]:
constant[
Method that checks if the given email is stored in the HIBP website.
This function automatically waits a second to avoid problems with the API
rate limit. An example of the json received:
```
[{"Title":"Adobe","Name":"Adobe","Domain":"adobe.com","BreachDate":"2013-10-4","AddedDate":"2013-12-04T00:12Z","PwnCount":152445165,"Description":"The big one. In October 2013, 153 million Adobe accounts were breached with each containing an internal ID, username, email, <em>encrypted</em> password and a password hint in plain text. The password cryptography was poorly done and <a href="http://stricture-group.com/files/adobe-top100.txt" target="_blank">many were quickly resolved back to plain text</a>. The unencrypted hints also <a href="http://www.troyhunt.com/2013/11/adobe-credentials-and-serious.html" target="_blank">disclosed much about the passwords</a> adding further to the risk that hundreds of millions of Adobe customers already faced.","DataClasses":["Email addresses","Password hints","Passwords","Usernames"]}]
```
Args:
-----
email: Email to verify in HIBP.
Returns:
--------
A python structure for the json received. If nothing was found, it will
return an empty list.
]
call[name[time].sleep, parameter[name[sleepSeconds]]]
call[name[print], parameter[constant[ [*] Bypassing Cloudflare Restriction...]]]
variable[ua] assign[=] constant[osrframework 0.18]
variable[useragent] assign[=] dictionary[[<ast.Constant object at 0x7da20e954eb0>], [<ast.Name object at 0x7da20e955fc0>]]
<ast.Tuple object at 0x7da20e9548e0> assign[=] call[name[cfscrape].get_tokens, parameter[constant[https://haveibeenpwned.com/api/v2/breachedaccount/[email protected]]]]
variable[leaks] assign[=] list[[]]
variable[apiURL] assign[=] call[constant[https://haveibeenpwned.com/api/v2/breachedaccount/{}].format, parameter[name[email]]]
call[name[time].sleep, parameter[name[sleepSeconds]]]
variable[data] assign[=] call[name[requests].get, parameter[name[apiURL]]].text
<ast.Try object at 0x7da20e957f10>
return[name[leaks]] | keyword[def] identifier[checkIfEmailWasHacked] ( identifier[email] = keyword[None] , identifier[sleepSeconds] = literal[int] ):
literal[string]
identifier[time] . identifier[sleep] ( identifier[sleepSeconds] )
identifier[print] ( literal[string] )
identifier[ua] = literal[string]
identifier[useragent] ={ literal[string] : identifier[ua] }
identifier[cookies] , identifier[user_agent] = identifier[cfscrape] . identifier[get_tokens] ( literal[string] , identifier[user_agent] = identifier[ua] )
identifier[leaks] =[]
identifier[apiURL] = literal[string] . identifier[format] ( identifier[email] )
identifier[time] . identifier[sleep] ( identifier[sleepSeconds] )
identifier[data] = identifier[requests] . identifier[get] (
identifier[apiURL] ,
identifier[headers] = identifier[useragent] ,
identifier[cookies] = identifier[cookies] ,
identifier[verify] = keyword[True]
). identifier[text]
keyword[try] :
identifier[jsonData] = identifier[json] . identifier[loads] ( identifier[data] )
keyword[for] identifier[e] keyword[in] identifier[jsonData] :
identifier[new] ={}
identifier[new] [ literal[string] ]= literal[string] + identifier[e] . identifier[get] ( literal[string] )+ literal[string] + identifier[email]
identifier[new] [ literal[string] ]= literal[string]
identifier[new] [ literal[string] ]=[
{
literal[string] : literal[string] ,
literal[string] : identifier[e] . identifier[get] ( literal[string] ),
literal[string] :[]
},
{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :[]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[apiURL] ,
literal[string] :[]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[e] . identifier[get] ( literal[string] ),
literal[string] :[]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[e] . identifier[get] ( literal[string] ),
literal[string] :[]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[e] . identifier[get] ( literal[string] ),
literal[string] :[]
},
{
literal[string] : literal[string] ,
literal[string] : identifier[e] . identifier[get] ( literal[string] ),
literal[string] :[]
}
]+ identifier[general] . identifier[expandEntitiesFromEmail] ( identifier[email] )
identifier[leaks] . identifier[append] ( identifier[new] )
keyword[except] identifier[ValueError] :
keyword[return] []
keyword[except] identifier[Exception] :
identifier[print] ( literal[string] )
keyword[return] []
keyword[return] identifier[leaks] | def checkIfEmailWasHacked(email=None, sleepSeconds=1):
"""
Method that checks if the given email is stored in the HIBP website.
This function automatically waits a second to avoid problems with the API
rate limit. An example of the json received:
```
[{"Title":"Adobe","Name":"Adobe","Domain":"adobe.com","BreachDate":"2013-10-4","AddedDate":"2013-12-04T00:12Z","PwnCount":152445165,"Description":"The big one. In October 2013, 153 million Adobe accounts were breached with each containing an internal ID, username, email, <em>encrypted</em> password and a password hint in plain text. The password cryptography was poorly done and <a href="http://stricture-group.com/files/adobe-top100.txt" target="_blank">many were quickly resolved back to plain text</a>. The unencrypted hints also <a href="http://www.troyhunt.com/2013/11/adobe-credentials-and-serious.html" target="_blank">disclosed much about the passwords</a> adding further to the risk that hundreds of millions of Adobe customers already faced.","DataClasses":["Email addresses","Password hints","Passwords","Usernames"]}]
```
Args:
-----
email: Email to verify in HIBP.
Returns:
--------
A python structure for the json received. If nothing was found, it will
return an empty list.
"""
# Sleeping just a little bit
time.sleep(sleepSeconds)
print('\t[*] Bypassing Cloudflare Restriction...')
ua = 'osrframework 0.18'
useragent = {'User-Agent': ua}
(cookies, user_agent) = cfscrape.get_tokens('https://haveibeenpwned.com/api/v2/breachedaccount/[email protected]', user_agent=ua)
leaks = []
apiURL = 'https://haveibeenpwned.com/api/v2/breachedaccount/{}'.format(email)
# Accessing the HIBP API
time.sleep(sleepSeconds)
# Building API query
data = requests.get(apiURL, headers=useragent, cookies=cookies, verify=True).text
# Reading the text data onto python structures
try:
jsonData = json.loads(data)
for e in jsonData:
# Building the i3visio like structure
new = {}
new['value'] = '(HIBP) ' + e.get('Name') + ' - ' + email
new['type'] = 'i3visio.profile'
new['attributes'] = [{'type': 'i3visio.platform_leaked', 'value': e.get('Name'), 'attributes': []}, {'type': '@source', 'value': 'haveibeenpwned.com', 'attributes': []}, {'type': '@source_uri', 'value': apiURL, 'attributes': []}, {'type': '@pwn_count', 'value': e.get('PwnCount'), 'attributes': []}, {'type': '@added_date', 'value': e.get('AddedDate'), 'attributes': []}, {'type': '@breach_date', 'value': e.get('BreachDate'), 'attributes': []}, {'type': '@description', 'value': e.get('Description'), 'attributes': []}] + general.expandEntitiesFromEmail(email)
leaks.append(new) # depends on [control=['for'], data=['e']] # depends on [control=['try'], data=[]]
except ValueError:
return [] # depends on [control=['except'], data=[]]
except Exception:
        print('ERROR: Something happened when using the HIBP API.') # depends on [control=['except'], data=[]]
return [] # depends on [control=['except'], data=[]]
return leaks |
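
An offline sketch of walking the i3visio-style records built above; the record is a handmade stand-in for a live response, since the unauthenticated v2 endpoint used in the code is historical.

leaks = [{
    "value": "(HIBP) Adobe - [email protected]",
    "type": "i3visio.profile",
    "attributes": [
        {"type": "i3visio.platform_leaked", "value": "Adobe", "attributes": []},
        {"type": "@pwn_count", "value": 152445165, "attributes": []},
    ],
}]
for leak in leaks:
    attrs = {a["type"]: a["value"] for a in leak["attributes"]}
    print(leak["value"], "| pwned accounts:", attrs["@pwn_count"])
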
def _extract_axes_for_slice(self, axes):
"""
Return the slice dictionary for these axes.
"""
return {self._AXIS_SLICEMAP[i]: a for i, a in
zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)} | def function[_extract_axes_for_slice, parameter[self, axes]]:
constant[
Return the slice dictionary for these axes.
]
return[<ast.DictComp object at 0x7da1b2346da0>] | keyword[def] identifier[_extract_axes_for_slice] ( identifier[self] , identifier[axes] ):
literal[string]
keyword[return] { identifier[self] . identifier[_AXIS_SLICEMAP] [ identifier[i] ]: identifier[a] keyword[for] identifier[i] , identifier[a] keyword[in]
identifier[zip] ( identifier[self] . identifier[_AXIS_ORDERS] [ identifier[self] . identifier[_AXIS_LEN] - identifier[len] ( identifier[axes] ):], identifier[axes] )} | def _extract_axes_for_slice(self, axes):
"""
Return the slice dictionary for these axes.
"""
return {self._AXIS_SLICEMAP[i]: a for (i, a) in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)} |
def _add_assert(self, **kwargs):
"""
        If screenshot is None, a screenshot is taken only for failed asserts
"""
# convert screenshot to relative path from <None|True|False|PIL.Image>
screenshot = kwargs.get('screenshot')
is_success = kwargs.get('success')
screenshot = (not is_success) if screenshot is None else screenshot
kwargs['screenshot'] = self._take_screenshot(screenshot=screenshot, name_prefix='assert')
action = kwargs.pop('action', 'assert')
self.add_step(action, **kwargs)
if not is_success:
message = kwargs.get('message')
frame, filename, line_number, function_name, lines, index = inspect.stack()[2]
print('Assert [%s: %d] WARN: %s' % (filename, line_number, message))
if not kwargs.get('safe', False):
raise AssertionError(message) | def function[_add_assert, parameter[self]]:
constant[
        If screenshot is None, a screenshot is taken only for failed asserts
]
variable[screenshot] assign[=] call[name[kwargs].get, parameter[constant[screenshot]]]
variable[is_success] assign[=] call[name[kwargs].get, parameter[constant[success]]]
variable[screenshot] assign[=] <ast.IfExp object at 0x7da207f02620>
call[name[kwargs]][constant[screenshot]] assign[=] call[name[self]._take_screenshot, parameter[]]
variable[action] assign[=] call[name[kwargs].pop, parameter[constant[action], constant[assert]]]
call[name[self].add_step, parameter[name[action]]]
if <ast.UnaryOp object at 0x7da20c6c7b50> begin[:]
variable[message] assign[=] call[name[kwargs].get, parameter[constant[message]]]
<ast.Tuple object at 0x7da20c6c5f30> assign[=] call[call[name[inspect].stack, parameter[]]][constant[2]]
call[name[print], parameter[binary_operation[constant[Assert [%s: %d] WARN: %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c6c6f50>, <ast.Name object at 0x7da20c6c6a10>, <ast.Name object at 0x7da20c6c7040>]]]]]
if <ast.UnaryOp object at 0x7da2044c0580> begin[:]
<ast.Raise object at 0x7da2044c0ac0> | keyword[def] identifier[_add_assert] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[screenshot] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[is_success] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[screenshot] =( keyword[not] identifier[is_success] ) keyword[if] identifier[screenshot] keyword[is] keyword[None] keyword[else] identifier[screenshot]
identifier[kwargs] [ literal[string] ]= identifier[self] . identifier[_take_screenshot] ( identifier[screenshot] = identifier[screenshot] , identifier[name_prefix] = literal[string] )
identifier[action] = identifier[kwargs] . identifier[pop] ( literal[string] , literal[string] )
identifier[self] . identifier[add_step] ( identifier[action] ,** identifier[kwargs] )
keyword[if] keyword[not] identifier[is_success] :
identifier[message] = identifier[kwargs] . identifier[get] ( literal[string] )
identifier[frame] , identifier[filename] , identifier[line_number] , identifier[function_name] , identifier[lines] , identifier[index] = identifier[inspect] . identifier[stack] ()[ literal[int] ]
identifier[print] ( literal[string] %( identifier[filename] , identifier[line_number] , identifier[message] ))
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
keyword[raise] identifier[AssertionError] ( identifier[message] ) | def _add_assert(self, **kwargs):
"""
        If screenshot is None, a screenshot is taken only for failed asserts
"""
# convert screenshot to relative path from <None|True|False|PIL.Image>
screenshot = kwargs.get('screenshot')
is_success = kwargs.get('success')
screenshot = not is_success if screenshot is None else screenshot
kwargs['screenshot'] = self._take_screenshot(screenshot=screenshot, name_prefix='assert')
action = kwargs.pop('action', 'assert')
self.add_step(action, **kwargs)
if not is_success:
message = kwargs.get('message')
(frame, filename, line_number, function_name, lines, index) = inspect.stack()[2]
print('Assert [%s: %d] WARN: %s' % (filename, line_number, message))
if not kwargs.get('safe', False):
raise AssertionError(message) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def update(self, portfolio, date, perfs=None):
'''
        Updates the portfolio universe with the algo state
'''
# Make the manager aware of current simulation
self.portfolio = portfolio
self.perfs = perfs
self.date = date | def function[update, parameter[self, portfolio, date, perfs]]:
constant[
        Updates the portfolio universe with the algo state
]
name[self].portfolio assign[=] name[portfolio]
name[self].perfs assign[=] name[perfs]
name[self].date assign[=] name[date] | keyword[def] identifier[update] ( identifier[self] , identifier[portfolio] , identifier[date] , identifier[perfs] = keyword[None] ):
literal[string]
identifier[self] . identifier[portfolio] = identifier[portfolio]
identifier[self] . identifier[perfs] = identifier[perfs]
identifier[self] . identifier[date] = identifier[date] | def update(self, portfolio, date, perfs=None):
"""
        Updates the portfolio universe with the algo state
"""
# Make the manager aware of current simulation
self.portfolio = portfolio
self.perfs = perfs
self.date = date |
def rpc(ctx, call, arguments, api):
""" Construct RPC call directly
\b
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']"
"""
try:
data = list(eval(d) for d in arguments)
except:
data = arguments
ret = getattr(ctx.peerplays.rpc, call)(*data, api=api)
pprint(ret) | def function[rpc, parameter[ctx, call, arguments, api]]:
constant[ Construct RPC call directly
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']"
]
<ast.Try object at 0x7da1b10198d0>
variable[ret] assign[=] call[call[name[getattr], parameter[name[ctx].peerplays.rpc, name[call]]], parameter[<ast.Starred object at 0x7da1b101b010>]]
call[name[pprint], parameter[name[ret]]] | keyword[def] identifier[rpc] ( identifier[ctx] , identifier[call] , identifier[arguments] , identifier[api] ):
literal[string]
keyword[try] :
identifier[data] = identifier[list] ( identifier[eval] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[arguments] )
keyword[except] :
identifier[data] = identifier[arguments]
identifier[ret] = identifier[getattr] ( identifier[ctx] . identifier[peerplays] . identifier[rpc] , identifier[call] )(* identifier[data] , identifier[api] = identifier[api] )
identifier[pprint] ( identifier[ret] ) | def rpc(ctx, call, arguments, api):
""" Construct RPC call directly
\x08
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']"
"""
try:
data = list((eval(d) for d in arguments)) # depends on [control=['try'], data=[]]
except:
data = arguments # depends on [control=['except'], data=[]]
ret = getattr(ctx.peerplays.rpc, call)(*data, api=api)
pprint(ret) |
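
The argument handling above falls back through a bare eval(); a common safer variant, shown here as a suggested alternative rather than what the library does, is ast.literal_eval with a raw-string fallback.

import ast

def parse_cli_args(arguments):
    """Parse each argument as a Python literal, keeping raw strings as-is."""
    parsed = []
    for d in arguments:
        try:
            parsed.append(ast.literal_eval(d))
        except (ValueError, SyntaxError):
            parsed.append(d)   # plain identifiers like 'bookie' stay strings
    return parsed

assert parse_cli_args(["['2.0.0', '2.1.0']", "bookie"]) == \
    [['2.0.0', '2.1.0'], 'bookie']
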
def clear_lock(self, abspath=True):
"""Clean any conda lock in the system."""
cmd_list = ['clean', '--lock', '--json']
return self._call_and_parse(cmd_list, abspath=abspath) | def function[clear_lock, parameter[self, abspath]]:
constant[Clean any conda lock in the system.]
variable[cmd_list] assign[=] list[[<ast.Constant object at 0x7da1b272f760>, <ast.Constant object at 0x7da1b2845660>, <ast.Constant object at 0x7da1b2844880>]]
return[call[name[self]._call_and_parse, parameter[name[cmd_list]]]] | keyword[def] identifier[clear_lock] ( identifier[self] , identifier[abspath] = keyword[True] ):
literal[string]
identifier[cmd_list] =[ literal[string] , literal[string] , literal[string] ]
keyword[return] identifier[self] . identifier[_call_and_parse] ( identifier[cmd_list] , identifier[abspath] = identifier[abspath] ) | def clear_lock(self, abspath=True):
"""Clean any conda lock in the system."""
cmd_list = ['clean', '--lock', '--json']
return self._call_and_parse(cmd_list, abspath=abspath) |
def argmaxMulti(a, groupKeys, assumeSorted=False):
"""
This is like numpy's argmax, but it returns multiple maximums.
It gets the indices of the max values of each group in 'a', grouping the
elements by their corresponding value in 'groupKeys'.
@param a (numpy array)
An array of values that will be compared
@param groupKeys (numpy array)
An array with the same length of 'a'. Each entry identifies the group for
each 'a' value.
@param assumeSorted (bool)
If true, group keys must be organized together (e.g. sorted).
@return (numpy array)
The indices of one maximum value per group
@example
  argmaxMulti([5, 4, 7, 2, 9, 8],
              [0, 0, 0, 1, 1, 1])
returns
[2, 4]
"""
if not assumeSorted:
# Use a stable sort algorithm
sorter = np.argsort(groupKeys, kind="mergesort")
a = a[sorter]
groupKeys = groupKeys[sorter]
_, indices, lengths = np.unique(groupKeys, return_index=True,
return_counts=True)
maxValues = np.maximum.reduceat(a, indices)
allMaxIndices = np.flatnonzero(np.repeat(maxValues, lengths) == a)
  # Break ties by finding the insertion points of the group start indices
# and using the values currently at those points. This approach will choose
# the first occurrence of each max value.
indices = allMaxIndices[np.searchsorted(allMaxIndices, indices)]
if assumeSorted:
return indices
else:
return sorter[indices] | def function[argmaxMulti, parameter[a, groupKeys, assumeSorted]]:
constant[
This is like numpy's argmax, but it returns multiple maximums.
It gets the indices of the max values of each group in 'a', grouping the
elements by their corresponding value in 'groupKeys'.
@param a (numpy array)
An array of values that will be compared
@param groupKeys (numpy array)
An array with the same length of 'a'. Each entry identifies the group for
each 'a' value.
@param assumeSorted (bool)
If true, group keys must be organized together (e.g. sorted).
@return (numpy array)
The indices of one maximum value per group
@example
  argmaxMulti([5, 4, 7, 2, 9, 8],
              [0, 0, 0, 1, 1, 1])
returns
[2, 4]
]
if <ast.UnaryOp object at 0x7da1b08c5000> begin[:]
variable[sorter] assign[=] call[name[np].argsort, parameter[name[groupKeys]]]
variable[a] assign[=] call[name[a]][name[sorter]]
variable[groupKeys] assign[=] call[name[groupKeys]][name[sorter]]
<ast.Tuple object at 0x7da1b0889990> assign[=] call[name[np].unique, parameter[name[groupKeys]]]
variable[maxValues] assign[=] call[name[np].maximum.reduceat, parameter[name[a], name[indices]]]
variable[allMaxIndices] assign[=] call[name[np].flatnonzero, parameter[compare[call[name[np].repeat, parameter[name[maxValues], name[lengths]]] equal[==] name[a]]]]
variable[indices] assign[=] call[name[allMaxIndices]][call[name[np].searchsorted, parameter[name[allMaxIndices], name[indices]]]]
if name[assumeSorted] begin[:]
return[name[indices]] | keyword[def] identifier[argmaxMulti] ( identifier[a] , identifier[groupKeys] , identifier[assumeSorted] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[assumeSorted] :
identifier[sorter] = identifier[np] . identifier[argsort] ( identifier[groupKeys] , identifier[kind] = literal[string] )
identifier[a] = identifier[a] [ identifier[sorter] ]
identifier[groupKeys] = identifier[groupKeys] [ identifier[sorter] ]
identifier[_] , identifier[indices] , identifier[lengths] = identifier[np] . identifier[unique] ( identifier[groupKeys] , identifier[return_index] = keyword[True] ,
identifier[return_counts] = keyword[True] )
identifier[maxValues] = identifier[np] . identifier[maximum] . identifier[reduceat] ( identifier[a] , identifier[indices] )
identifier[allMaxIndices] = identifier[np] . identifier[flatnonzero] ( identifier[np] . identifier[repeat] ( identifier[maxValues] , identifier[lengths] )== identifier[a] )
identifier[indices] = identifier[allMaxIndices] [ identifier[np] . identifier[searchsorted] ( identifier[allMaxIndices] , identifier[indices] )]
keyword[if] identifier[assumeSorted] :
keyword[return] identifier[indices]
keyword[else] :
keyword[return] identifier[sorter] [ identifier[indices] ] | def argmaxMulti(a, groupKeys, assumeSorted=False):
"""
This is like numpy's argmax, but it returns multiple maximums.
It gets the indices of the max values of each group in 'a', grouping the
elements by their corresponding value in 'groupKeys'.
@param a (numpy array)
An array of values that will be compared
@param groupKeys (numpy array)
An array with the same length of 'a'. Each entry identifies the group for
each 'a' value.
@param assumeSorted (bool)
If true, group keys must be organized together (e.g. sorted).
@return (numpy array)
The indices of one maximum value per group
@example
  argmaxMulti([5, 4, 7, 2, 9, 8],
              [0, 0, 0, 1, 1, 1])
returns
[2, 4]
"""
if not assumeSorted:
# Use a stable sort algorithm
sorter = np.argsort(groupKeys, kind='mergesort')
a = a[sorter]
groupKeys = groupKeys[sorter] # depends on [control=['if'], data=[]]
(_, indices, lengths) = np.unique(groupKeys, return_index=True, return_counts=True)
maxValues = np.maximum.reduceat(a, indices)
allMaxIndices = np.flatnonzero(np.repeat(maxValues, lengths) == a)
    # Break ties by finding the insertion points of the group start indices
# and using the values currently at those points. This approach will choose
# the first occurrence of each max value.
indices = allMaxIndices[np.searchsorted(allMaxIndices, indices)]
if assumeSorted:
return indices # depends on [control=['if'], data=[]]
else:
return sorter[indices] |
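
A quick runnable check of the docstring example plus the two subtleties the implementation handles: unsorted group keys and ties resolving to the first occurrence.

import numpy as np

a = np.array([5, 4, 7, 2, 9, 9])
keys = np.array([1, 1, 1, 0, 0, 0])           # groups not pre-sorted
idx = argmaxMulti(a, keys, assumeSorted=False)
assert list(a[idx]) == [9, 7]                 # one max per group (0, then 1)
assert idx[0] == 4                            # tie at 9 -> first occurrence
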
def clamp(value, lower=0, upper=sys.maxsize):
"""Clamp float between given range"""
return max(lower, min(upper, value)) | def function[clamp, parameter[value, lower, upper]]:
constant[Clamp value to the given range.]
return[call[name[max], parameter[name[lower], call[name[min], parameter[name[upper], name[value]]]]]] | keyword[def] identifier[clamp] ( identifier[value] , identifier[lower] = literal[int] , identifier[upper] = identifier[sys] . identifier[maxsize] ):
literal[string]
keyword[return] identifier[max] ( identifier[lower] , identifier[min] ( identifier[upper] , identifier[value] )) | def clamp(value, lower=0, upper=sys.maxsize):
"""Clamp float between given range"""
return max(lower, min(upper, value)) |
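
A one-line sanity check of the defaults and bounds (assuming sys is imported for the maxsize default):

assert (clamp(-5), clamp(5, 1, 3), clamp(2, 1, 3)) == (0, 3, 2)
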
def put(self, storagemodel:object, modeldefinition = None) -> StorageQueueModel:
""" insert queue message into storage """
try:
message = modeldefinition['queueservice'].put_message(storagemodel._queuename, storagemodel.getmessage())
storagemodel.mergemessage(message)
except Exception as e:
storagemodel = None
msg = 'can not save queue message: queue {} with message {} because {!s}'.format(storagemodel._queuename, storagemodel.content, e)
raise AzureStorageWrapException(msg=msg)
finally:
return storagemodel | def function[put, parameter[self, storagemodel, modeldefinition]]:
constant[ insert queue message into storage ]
<ast.Try object at 0x7da20e954550> | keyword[def] identifier[put] ( identifier[self] , identifier[storagemodel] : identifier[object] , identifier[modeldefinition] = keyword[None] )-> identifier[StorageQueueModel] :
literal[string]
keyword[try] :
identifier[message] = identifier[modeldefinition] [ literal[string] ]. identifier[put_message] ( identifier[storagemodel] . identifier[_queuename] , identifier[storagemodel] . identifier[getmessage] ())
identifier[storagemodel] . identifier[mergemessage] ( identifier[message] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[storagemodel] = keyword[None]
identifier[msg] = literal[string] . identifier[format] ( identifier[storagemodel] . identifier[_queuename] , identifier[storagemodel] . identifier[content] , identifier[e] )
keyword[raise] identifier[AzureStorageWrapException] ( identifier[msg] = identifier[msg] )
keyword[finally] :
keyword[return] identifier[storagemodel] | def put(self, storagemodel: object, modeldefinition=None) -> StorageQueueModel:
""" insert queue message into storage """
try:
message = modeldefinition['queueservice'].put_message(storagemodel._queuename, storagemodel.getmessage())
storagemodel.mergemessage(message) # depends on [control=['try'], data=[]]
except Exception as e:
storagemodel = None
msg = 'can not save queue message: queue {} with message {} because {!s}'.format(storagemodel._queuename, storagemodel.content, e)
raise AzureStorageWrapException(msg=msg) # depends on [control=['except'], data=['e']]
finally:
return storagemodel |
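
One caveat worth flagging: `return storagemodel` inside `finally` swallows the `AzureStorageWrapException` raised in the `except` block (and the `storagemodel = None` assignment there also breaks the message formatting that follows it), so callers silently receive None. A minimal corrected sketch under the same assumed model and queue-service interfaces:

def put(self, storagemodel, modeldefinition=None):
    """Insert a queue message into storage; raise on failure."""
    try:
        message = modeldefinition['queueservice'].put_message(
            storagemodel._queuename, storagemodel.getmessage())
        storagemodel.mergemessage(message)
        return storagemodel               # return only on the success path
    except Exception as e:
        msg = 'can not save queue message: queue {} because {!s}'.format(
            storagemodel._queuename, e)
        raise AzureStorageWrapException(msg=msg)   # no finally to swallow it
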
def _clone_properties(self):
"""Internal helper to clone self._properties if necessary."""
cls = self.__class__
if self._properties is cls._properties:
self._properties = dict(cls._properties) | def function[_clone_properties, parameter[self]]:
constant[Internal helper to clone self._properties if necessary.]
variable[cls] assign[=] name[self].__class__
if compare[name[self]._properties is name[cls]._properties] begin[:]
name[self]._properties assign[=] call[name[dict], parameter[name[cls]._properties]] | keyword[def] identifier[_clone_properties] ( identifier[self] ):
literal[string]
identifier[cls] = identifier[self] . identifier[__class__]
keyword[if] identifier[self] . identifier[_properties] keyword[is] identifier[cls] . identifier[_properties] :
identifier[self] . identifier[_properties] = identifier[dict] ( identifier[cls] . identifier[_properties] ) | def _clone_properties(self):
"""Internal helper to clone self._properties if necessary."""
cls = self.__class__
if self._properties is cls._properties:
self._properties = dict(cls._properties) # depends on [control=['if'], data=[]] |
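
A copy-on-write sketch: instances share the class-level dict until the first mutation, at which point the helper gives them a private copy; Model is a minimal stand-in that borrows the function above as a method.

class Model:
    _properties = {'id': None}
    _clone_properties = _clone_properties   # reuse the helper above

m = Model()
assert m._properties is Model._properties    # still the shared class dict
m._clone_properties()
assert m._properties is not Model._properties
assert m._properties == Model._properties    # equal content, private copy
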
def landweber(op, x, rhs, niter, omega=None, projection=None, callback=None):
r"""Optimized implementation of Landweber's method.
Solves the inverse problem::
A(x) = rhs
Parameters
----------
op : `Operator`
Operator in the inverse problem. ``op.derivative(x).adjoint`` must be
well-defined for ``x`` in the operator domain.
x : ``op.domain`` element
Element to which the result is written. Its initial value is
used as starting point of the iteration, and its values are
updated in each iteration step.
rhs : ``op.range`` element
Right-hand side of the equation defining the inverse problem.
niter : int
Number of iterations.
omega : positive float, optional
Relaxation parameter in the iteration.
Default: ``1 / op.norm(estimate=True) ** 2``
projection : callable, optional
Function that can be used to modify the iterates in each iteration,
for example enforcing positivity. The function should take one
argument and modify it in-place.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
Notes
-----
This method calculates an approximate least-squares solution of
the inverse problem of the first kind
.. math::
\mathcal{A} (x) = y,
for a given :math:`y\in \mathcal{Y}`, i.e. an approximate
solution :math:`x^*` to
.. math::
\min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2
for a (Frechet-) differentiable operator
:math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert
spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method
starts from an initial guess :math:`x_0` and uses the
iteration
.. math::
x_{k+1} = x_k -
\omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y),
where :math:`\partial \mathcal{A}(x)` is the Frechet derivative
of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a
relaxation parameter. For linear problems, a choice
    :math:`0 < \omega < 2/\lVert \mathcal{A}\rVert^2` guarantees
convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the
operator norm of :math:`\mathcal{A}`.
Users may also optionally provide a projection to project each
iterate onto some subset. For example enforcing positivity.
This implementation uses a minimum amount of memory copies by
applying re-usable temporaries and in-place evaluation.
The method is also described in a
`Wikipedia article
<https://en.wikipedia.org/wiki/Landweber_iteration>`_.
"""
# TODO: add a book reference
if x not in op.domain:
raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
''.format(x, op.domain))
if omega is None:
omega = 1 / op.norm(estimate=True) ** 2
# Reusable temporaries
tmp_ran = op.range.element()
tmp_dom = op.domain.element()
for _ in range(niter):
op(x, out=tmp_ran)
tmp_ran -= rhs
op.derivative(x).adjoint(tmp_ran, out=tmp_dom)
x.lincomb(1, x, -omega, tmp_dom)
if projection is not None:
projection(x)
if callback is not None:
callback(x) | def function[landweber, parameter[op, x, rhs, niter, omega, projection, callback]]:
constant[Optimized implementation of Landweber's method.
Solves the inverse problem::
A(x) = rhs
Parameters
----------
op : `Operator`
Operator in the inverse problem. ``op.derivative(x).adjoint`` must be
well-defined for ``x`` in the operator domain.
x : ``op.domain`` element
Element to which the result is written. Its initial value is
used as starting point of the iteration, and its values are
updated in each iteration step.
rhs : ``op.range`` element
Right-hand side of the equation defining the inverse problem.
niter : int
Number of iterations.
omega : positive float, optional
Relaxation parameter in the iteration.
Default: ``1 / op.norm(estimate=True) ** 2``
projection : callable, optional
Function that can be used to modify the iterates in each iteration,
for example enforcing positivity. The function should take one
argument and modify it in-place.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
Notes
-----
This method calculates an approximate least-squares solution of
the inverse problem of the first kind
.. math::
\mathcal{A} (x) = y,
for a given :math:`y\in \mathcal{Y}`, i.e. an approximate
solution :math:`x^*` to
.. math::
\min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2
for a (Frechet-) differentiable operator
:math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert
spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method
starts from an initial guess :math:`x_0` and uses the
iteration
.. math::
x_{k+1} = x_k -
\omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y),
where :math:`\partial \mathcal{A}(x)` is the Frechet derivative
of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a
relaxation parameter. For linear problems, a choice
    :math:`0 < \omega < 2/\lVert \mathcal{A}\rVert^2` guarantees
convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the
operator norm of :math:`\mathcal{A}`.
Users may also optionally provide a projection to project each
iterate onto some subset. For example enforcing positivity.
This implementation uses a minimum amount of memory copies by
applying re-usable temporaries and in-place evaluation.
The method is also described in a
`Wikipedia article
<https://en.wikipedia.org/wiki/Landweber_iteration>`_.
]
if compare[name[x] <ast.NotIn object at 0x7da2590d7190> name[op].domain] begin[:]
<ast.Raise object at 0x7da18f58d120>
if compare[name[omega] is constant[None]] begin[:]
variable[omega] assign[=] binary_operation[constant[1] / binary_operation[call[name[op].norm, parameter[]] ** constant[2]]]
variable[tmp_ran] assign[=] call[name[op].range.element, parameter[]]
variable[tmp_dom] assign[=] call[name[op].domain.element, parameter[]]
for taget[name[_]] in starred[call[name[range], parameter[name[niter]]]] begin[:]
call[name[op], parameter[name[x]]]
<ast.AugAssign object at 0x7da18f58fc40>
call[call[name[op].derivative, parameter[name[x]]].adjoint, parameter[name[tmp_ran]]]
call[name[x].lincomb, parameter[constant[1], name[x], <ast.UnaryOp object at 0x7da18f58e800>, name[tmp_dom]]]
if compare[name[projection] is_not constant[None]] begin[:]
call[name[projection], parameter[name[x]]]
if compare[name[callback] is_not constant[None]] begin[:]
call[name[callback], parameter[name[x]]] | keyword[def] identifier[landweber] ( identifier[op] , identifier[x] , identifier[rhs] , identifier[niter] , identifier[omega] = keyword[None] , identifier[projection] = keyword[None] , identifier[callback] = keyword[None] ):
literal[string]
keyword[if] identifier[x] keyword[not] keyword[in] identifier[op] . identifier[domain] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] . identifier[format] ( identifier[x] , identifier[op] . identifier[domain] ))
keyword[if] identifier[omega] keyword[is] keyword[None] :
identifier[omega] = literal[int] / identifier[op] . identifier[norm] ( identifier[estimate] = keyword[True] )** literal[int]
identifier[tmp_ran] = identifier[op] . identifier[range] . identifier[element] ()
identifier[tmp_dom] = identifier[op] . identifier[domain] . identifier[element] ()
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[niter] ):
identifier[op] ( identifier[x] , identifier[out] = identifier[tmp_ran] )
identifier[tmp_ran] -= identifier[rhs]
identifier[op] . identifier[derivative] ( identifier[x] ). identifier[adjoint] ( identifier[tmp_ran] , identifier[out] = identifier[tmp_dom] )
identifier[x] . identifier[lincomb] ( literal[int] , identifier[x] ,- identifier[omega] , identifier[tmp_dom] )
keyword[if] identifier[projection] keyword[is] keyword[not] keyword[None] :
identifier[projection] ( identifier[x] )
keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] :
identifier[callback] ( identifier[x] ) | def landweber(op, x, rhs, niter, omega=None, projection=None, callback=None):
"""Optimized implementation of Landweber's method.
Solves the inverse problem::
A(x) = rhs
Parameters
----------
op : `Operator`
Operator in the inverse problem. ``op.derivative(x).adjoint`` must be
well-defined for ``x`` in the operator domain.
x : ``op.domain`` element
Element to which the result is written. Its initial value is
used as starting point of the iteration, and its values are
updated in each iteration step.
rhs : ``op.range`` element
Right-hand side of the equation defining the inverse problem.
niter : int
Number of iterations.
omega : positive float, optional
Relaxation parameter in the iteration.
Default: ``1 / op.norm(estimate=True) ** 2``
projection : callable, optional
Function that can be used to modify the iterates in each iteration,
for example enforcing positivity. The function should take one
argument and modify it in-place.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
Notes
-----
This method calculates an approximate least-squares solution of
the inverse problem of the first kind
.. math::
\\mathcal{A} (x) = y,
for a given :math:`y\\in \\mathcal{Y}`, i.e. an approximate
solution :math:`x^*` to
.. math::
\\min_{x\\in \\mathcal{X}} \\| \\mathcal{A}(x) - y \\|_{\\mathcal{Y}}^2
for a (Frechet-) differentiable operator
:math:`\\mathcal{A}: \\mathcal{X} \\to \\mathcal{Y}` between Hilbert
spaces :math:`\\mathcal{X}` and :math:`\\mathcal{Y}`. The method
starts from an initial guess :math:`x_0` and uses the
iteration
.. math::
x_{k+1} = x_k -
\\omega \\ \\partial \\mathcal{A}(x)^* (\\mathcal{A}(x_k) - y),
where :math:`\\partial \\mathcal{A}(x)` is the Frechet derivative
of :math:`\\mathcal{A}` at :math:`x` and :math:`\\omega` is a
relaxation parameter. For linear problems, a choice
:math:`0 < \\omega < 2/\\lVert \\mathcal{A}\\rVert^2` guarantees
convergence, where :math:`\\lVert\\mathcal{A}\\rVert` stands for the
operator norm of :math:`\\mathcal{A}`.
Users may optionally provide a projection that maps each iterate
onto some subset, for example enforcing positivity.
This implementation minimizes memory copies by using re-usable
temporaries and in-place evaluation.
The method is also described in a
`Wikipedia article
<https://en.wikipedia.org/wiki/Landweber_iteration>`_.
"""
# TODO: add a book reference
if x not in op.domain:
raise TypeError('`x` {!r} is not in the domain of `op` {!r}'.format(x, op.domain)) # depends on [control=['if'], data=['x']]
if omega is None:
omega = 1 / op.norm(estimate=True) ** 2 # depends on [control=['if'], data=['omega']]
# Reusable temporaries
tmp_ran = op.range.element()
tmp_dom = op.domain.element()
for _ in range(niter):
op(x, out=tmp_ran)
tmp_ran -= rhs
op.derivative(x).adjoint(tmp_ran, out=tmp_dom)
x.lincomb(1, x, -omega, tmp_dom)
if projection is not None:
projection(x) # depends on [control=['if'], data=['projection']]
if callback is not None:
callback(x) # depends on [control=['if'], data=['callback']] # depends on [control=['for'], data=[]] |
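A minimal NumPy sketch of the same update rule, x_{k+1} = x_k - omega * A^T (A x_k - rhs), for a plain matrix operator; it is illustrative only and bypasses the odl-style operator interface assumed above:
import numpy as np

def landweber_np(A, x, rhs, niter, omega=None):
    # linear special case of the iteration above; omega defaults to 1/||A||^2
    if omega is None:
        omega = 1.0 / np.linalg.norm(A, 2) ** 2
    for _ in range(niter):
        x = x - omega * A.T @ (A @ x - rhs)
    return x

A = np.array([[2.0, 0.0], [0.0, 1.0]])
print(landweber_np(A, np.zeros(2), np.array([4.0, 3.0]), niter=200))
# converges toward the least-squares solution [2., 3.]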
def _validate_message_type(self):
"""Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException
"""
if self._unsupported_message_type():
self.logger.warning(
'Received unsupported message type: %s', self.message_type)
if self._drop_invalid:
if self._drop_exchange:
self._republish_dropped_message('invalid type')
raise DropMessage
raise MessageException | def function[_validate_message_type, parameter[self]]:
constant[Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException
]
if call[name[self]._unsupported_message_type, parameter[]] begin[:]
call[name[self].logger.warning, parameter[constant[Received unsupported message type: %s], name[self].message_type]]
if name[self]._drop_invalid begin[:]
if name[self]._drop_exchange begin[:]
call[name[self]._republish_dropped_message, parameter[constant[invalid type]]]
<ast.Raise object at 0x7da20e9b01c0>
<ast.Raise object at 0x7da20e9b2b00> | keyword[def] identifier[_validate_message_type] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_unsupported_message_type] ():
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] , identifier[self] . identifier[message_type] )
keyword[if] identifier[self] . identifier[_drop_invalid] :
keyword[if] identifier[self] . identifier[_drop_exchange] :
identifier[self] . identifier[_republish_dropped_message] ( literal[string] )
keyword[raise] identifier[DropMessage]
keyword[raise] identifier[MessageException] | def _validate_message_type(self):
"""Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException
"""
if self._unsupported_message_type():
self.logger.warning('Received unsupported message type: %s', self.message_type)
if self._drop_invalid:
if self._drop_exchange:
self._republish_dropped_message('invalid type') # depends on [control=['if'], data=[]]
raise DropMessage # depends on [control=['if'], data=[]]
raise MessageException # depends on [control=['if'], data=[]] |
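For reference, a minimal standalone sketch of the drop/raise branching above; the exception classes and flag handling are stand-ins for the consumer framework's own, not its actual API:
class DropMessage(Exception): pass        # stand-in
class MessageException(Exception): pass   # stand-in

def validate(unsupported, drop_invalid, drop_exchange):
    if unsupported:
        if drop_invalid:
            if drop_exchange:
                print('republish: invalid type')
            raise DropMessage
        raise MessageException

try:
    validate(unsupported=True, drop_invalid=True, drop_exchange=True)
except DropMessage:
    print('message dropped')   # printed after 'republish: invalid type'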
def ismount(self, path):
"""Return true if the given path is a mount point.
Args:
path: Path to filesystem object to be checked
Returns:
`True` if path is a mount point added to the fake file system.
Under Windows also returns True for drive and UNC roots
(independent of their existence).
"""
path = make_string_path(path)
if not path:
return False
normed_path = self.filesystem.absnormpath(path)
sep = self.filesystem._path_separator(path)
if self.filesystem.is_windows_fs:
if self.filesystem.alternative_path_separator is not None:
path_seps = (
sep, self.filesystem._alternative_path_separator(path)
)
else:
path_seps = (sep, )
drive, rest = self.filesystem.splitdrive(normed_path)
if drive and drive[:1] in path_seps:
return (not rest) or (rest in path_seps)
if rest in path_seps:
return True
for mount_point in self.filesystem.mount_points:
if normed_path.rstrip(sep) == mount_point.rstrip(sep):
return True
return False | def function[ismount, parameter[self, path]]:
constant[Return true if the given path is a mount point.
Args:
path: Path to filesystem object to be checked
Returns:
`True` if path is a mount point added to the fake file system.
Under Windows also returns True for drive and UNC roots
(independent of their existence).
]
variable[path] assign[=] call[name[make_string_path], parameter[name[path]]]
if <ast.UnaryOp object at 0x7da20c6e5bd0> begin[:]
return[constant[False]]
variable[normed_path] assign[=] call[name[self].filesystem.absnormpath, parameter[name[path]]]
variable[sep] assign[=] call[name[self].filesystem._path_separator, parameter[name[path]]]
if name[self].filesystem.is_windows_fs begin[:]
if compare[name[self].filesystem.alternative_path_separator is_not constant[None]] begin[:]
variable[path_seps] assign[=] tuple[[<ast.Name object at 0x7da20c6e52a0>, <ast.Call object at 0x7da20c6e7ee0>]]
<ast.Tuple object at 0x7da20c6e7d90> assign[=] call[name[self].filesystem.splitdrive, parameter[name[normed_path]]]
if <ast.BoolOp object at 0x7da20c6e5c30> begin[:]
return[<ast.BoolOp object at 0x7da20c7965f0>]
if compare[name[rest] in name[path_seps]] begin[:]
return[constant[True]]
for taget[name[mount_point]] in starred[name[self].filesystem.mount_points] begin[:]
if compare[call[name[normed_path].rstrip, parameter[name[sep]]] equal[==] call[name[mount_point].rstrip, parameter[name[sep]]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[ismount] ( identifier[self] , identifier[path] ):
literal[string]
identifier[path] = identifier[make_string_path] ( identifier[path] )
keyword[if] keyword[not] identifier[path] :
keyword[return] keyword[False]
identifier[normed_path] = identifier[self] . identifier[filesystem] . identifier[absnormpath] ( identifier[path] )
identifier[sep] = identifier[self] . identifier[filesystem] . identifier[_path_separator] ( identifier[path] )
keyword[if] identifier[self] . identifier[filesystem] . identifier[is_windows_fs] :
keyword[if] identifier[self] . identifier[filesystem] . identifier[alternative_path_separator] keyword[is] keyword[not] keyword[None] :
identifier[path_seps] =(
identifier[sep] , identifier[self] . identifier[filesystem] . identifier[_alternative_path_separator] ( identifier[path] )
)
keyword[else] :
identifier[path_seps] =( identifier[sep] ,)
identifier[drive] , identifier[rest] = identifier[self] . identifier[filesystem] . identifier[splitdrive] ( identifier[normed_path] )
keyword[if] identifier[drive] keyword[and] identifier[drive] [: literal[int] ] keyword[in] identifier[path_seps] :
keyword[return] ( keyword[not] identifier[rest] ) keyword[or] ( identifier[rest] keyword[in] identifier[path_seps] )
keyword[if] identifier[rest] keyword[in] identifier[path_seps] :
keyword[return] keyword[True]
keyword[for] identifier[mount_point] keyword[in] identifier[self] . identifier[filesystem] . identifier[mount_points] :
keyword[if] identifier[normed_path] . identifier[rstrip] ( identifier[sep] )== identifier[mount_point] . identifier[rstrip] ( identifier[sep] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def ismount(self, path):
"""Return true if the given path is a mount point.
Args:
path: Path to filesystem object to be checked
Returns:
`True` if path is a mount point added to the fake file system.
Under Windows also returns True for drive and UNC roots
(independent of their existence).
"""
path = make_string_path(path)
if not path:
return False # depends on [control=['if'], data=[]]
normed_path = self.filesystem.absnormpath(path)
sep = self.filesystem._path_separator(path)
if self.filesystem.is_windows_fs:
if self.filesystem.alternative_path_separator is not None:
path_seps = (sep, self.filesystem._alternative_path_separator(path)) # depends on [control=['if'], data=[]]
else:
path_seps = (sep,)
(drive, rest) = self.filesystem.splitdrive(normed_path)
if drive and drive[:1] in path_seps:
return not rest or rest in path_seps # depends on [control=['if'], data=[]]
if rest in path_seps:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
for mount_point in self.filesystem.mount_points:
if normed_path.rstrip(sep) == mount_point.rstrip(sep):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['mount_point']]
return False |
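A hedged usage sketch of the method against the fake filesystem it belongs to; FakeFilesystem, FakeOsModule and add_mount_point are assumed from the pyfakefs public API:
from pyfakefs import fake_filesystem

fs = fake_filesystem.FakeFilesystem()
fake_os = fake_filesystem.FakeOsModule(fs)
fs.add_mount_point('/mnt/data')           # register an extra mount point
print(fake_os.path.ismount('/'))          # True: root is always a mount point
print(fake_os.path.ismount('/mnt/data'))  # True: added above
print(fake_os.path.ismount('/mnt'))       # False: ordinary directory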
def __cancel(self, subscription_id, **kwargs):
"""Call documentation: `/subscription/cancel
<https://www.wepay.com/developer/reference/subscription#cancel>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {
'subscription_id': subscription_id
}
return self.make_call(self.__cancel, params, kwargs) | def function[__cancel, parameter[self, subscription_id]]:
constant[Call documentation: `/subscription/cancel
<https://www.wepay.com/developer/reference/subscription#cancel>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1501cc0>], [<ast.Name object at 0x7da1b1460cd0>]]
return[call[name[self].make_call, parameter[name[self].__cancel, name[params], name[kwargs]]]] | keyword[def] identifier[__cancel] ( identifier[self] , identifier[subscription_id] ,** identifier[kwargs] ):
literal[string]
identifier[params] ={
literal[string] : identifier[subscription_id]
}
keyword[return] identifier[self] . identifier[make_call] ( identifier[self] . identifier[__cancel] , identifier[params] , identifier[kwargs] ) | def __cancel(self, subscription_id, **kwargs):
"""Call documentation: `/subscription/cancel
<https://www.wepay.com/developer/reference/subscription#cancel>`_, plus
extra keyword parameters:
:keyword str access_token: will be used instead of instance's
``access_token``, with ``batch_mode=True`` will set `authorization`
param to it's value.
:keyword bool batch_mode: turn on/off the batch_mode, see
:class:`wepay.api.WePay`
:keyword str batch_reference_id: `reference_id` param for batch call,
see :class:`wepay.api.WePay`
:keyword str api_version: WePay API version, see
:class:`wepay.api.WePay`
"""
params = {'subscription_id': subscription_id}
return self.make_call(self.__cancel, params, kwargs) |
def get_called_sequence(self, section=None, fastq=False):
""" Return either the called sequence data, if present.
:param section: ['template', 'complement' or '2D']
:param fastq: If True, return a single, multiline fastq string. If
False, return a tuple of (name, sequence, qstring).
:return: Either the fastq string or the (name, sequence, qstring) tuple.
"""
if section != "2D":
warnings.warn("Basecall2DTools.get_called_sequence() should specify section='2D'", DeprecationWarning)
# Backwards compatibility to 0.3.3: if no "2D" section, bump args by 1 and pass to super
if section == None:
# We assume that a named arg or no-arg was given
return super(Basecall2DTools, self).get_called_sequence("2D", fastq)
# We assume that a single unnamed arg was given for fastq
return super(Basecall2DTools, self).get_called_sequence("2D", section)
return super(Basecall2DTools, self).get_called_sequence(section, fastq) | def function[get_called_sequence, parameter[self, section, fastq]]:
constant[ Return either the called sequence data, if present.
:param section: ['template', 'complement' or '2D']
:param fastq: If True, return a single, multiline fastq string. If
False, return a tuple of (name, sequence, qstring).
:return: Either the fastq string or the (name, sequence, qstring) tuple.
]
if compare[name[section] not_equal[!=] constant[2D]] begin[:]
call[name[warnings].warn, parameter[constant[Basecall2DTools.get_called_sequence() should specify section='2D'], name[DeprecationWarning]]]
if compare[name[section] equal[==] constant[None]] begin[:]
return[call[call[name[super], parameter[name[Basecall2DTools], name[self]]].get_called_sequence, parameter[constant[2D], name[fastq]]]]
return[call[call[name[super], parameter[name[Basecall2DTools], name[self]]].get_called_sequence, parameter[constant[2D], name[section]]]]
return[call[call[name[super], parameter[name[Basecall2DTools], name[self]]].get_called_sequence, parameter[name[section], name[fastq]]]] | keyword[def] identifier[get_called_sequence] ( identifier[self] , identifier[section] = keyword[None] , identifier[fastq] = keyword[False] ):
literal[string]
keyword[if] identifier[section] != literal[string] :
identifier[warnings] . identifier[warn] ( literal[string] , identifier[DeprecationWarning] )
keyword[if] identifier[section] == keyword[None] :
keyword[return] identifier[super] ( identifier[Basecall2DTools] , identifier[self] ). identifier[get_called_sequence] ( literal[string] , identifier[fastq] )
keyword[return] identifier[super] ( identifier[Basecall2DTools] , identifier[self] ). identifier[get_called_sequence] ( literal[string] , identifier[section] )
keyword[return] identifier[super] ( identifier[Basecall2DTools] , identifier[self] ). identifier[get_called_sequence] ( identifier[section] , identifier[fastq] ) | def get_called_sequence(self, section=None, fastq=False):
""" Return either the called sequence data, if present.
:param section: ['template', 'complement' or '2D']
:param fastq: If True, return a single, multiline fastq string. If
False, return a tuple of (name, sequence, qstring).
:return: Either the fastq string or the (name, sequence, qstring) tuple.
"""
if section != '2D':
warnings.warn("Basecall2DTools.get_called_sequence() should specify section='2D'", DeprecationWarning)
# Backwards compatibility to 0.3.3: if no "2D" section, bump args by 1 and pass to super
if section == None:
# We assume that a named arg or no-arg was given
return super(Basecall2DTools, self).get_called_sequence('2D', fastq) # depends on [control=['if'], data=[]]
# We assume that a single unnamed arg was given for fastq
return super(Basecall2DTools, self).get_called_sequence('2D', section) # depends on [control=['if'], data=['section']]
return super(Basecall2DTools, self).get_called_sequence(section, fastq) |
def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param xml_declaration: if ``True`` (default) prints a leading XML
declaration line
:type xml_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node,
OZONE_XMLNS_PREFIX,
OZONE_XMLNS_URL)
return xmlutils.DOM_node_to_XML(root_node, xml_declaration) | def function[to_XML, parameter[self, xml_declaration, xmlns]]:
constant[
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param XML_declaration: if ``True`` (default) prints a leading XML
declaration line
:type XML_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
]
variable[root_node] assign[=] call[name[self]._to_DOM, parameter[]]
if name[xmlns] begin[:]
call[name[xmlutils].annotate_with_XMLNS, parameter[name[root_node], name[OZONE_XMLNS_PREFIX], name[OZONE_XMLNS_URL]]]
return[call[name[xmlutils].DOM_node_to_XML, parameter[name[root_node], name[xml_declaration]]]] | keyword[def] identifier[to_XML] ( identifier[self] , identifier[xml_declaration] = keyword[True] , identifier[xmlns] = keyword[True] ):
literal[string]
identifier[root_node] = identifier[self] . identifier[_to_DOM] ()
keyword[if] identifier[xmlns] :
identifier[xmlutils] . identifier[annotate_with_XMLNS] ( identifier[root_node] ,
identifier[OZONE_XMLNS_PREFIX] ,
identifier[OZONE_XMLNS_URL] )
keyword[return] identifier[xmlutils] . identifier[DOM_node_to_XML] ( identifier[root_node] , identifier[xml_declaration] ) | def to_XML(self, xml_declaration=True, xmlns=True):
"""
Dumps object fields to an XML-formatted string. The 'xml_declaration'
switch enables printing of a leading standard XML line containing XML
version and encoding. The 'xmlns' switch enables printing of qualified
XMLNS prefixes.
:param xml_declaration: if ``True`` (default) prints a leading XML
declaration line
:type xml_declaration: bool
:param xmlns: if ``True`` (default) prints full XMLNS prefixes
:type xmlns: bool
:returns: an XML-formatted string
"""
root_node = self._to_DOM()
if xmlns:
xmlutils.annotate_with_XMLNS(root_node, OZONE_XMLNS_PREFIX, OZONE_XMLNS_URL) # depends on [control=['if'], data=[]]
return xmlutils.DOM_node_to_XML(root_node, xml_declaration) |
def get_weather(self):
"""
Returns an instance of the Weather Service.
"""
import predix.data.weather
weather = predix.data.weather.WeatherForecast()
return weather | def function[get_weather, parameter[self]]:
constant[
Returns an instance of the Weather Service.
]
import module[predix.data.weather]
variable[weather] assign[=] call[name[predix].data.weather.WeatherForecast, parameter[]]
return[name[weather]] | keyword[def] identifier[get_weather] ( identifier[self] ):
literal[string]
keyword[import] identifier[predix] . identifier[data] . identifier[weather]
identifier[weather] = identifier[predix] . identifier[data] . identifier[weather] . identifier[WeatherForecast] ()
keyword[return] identifier[weather] | def get_weather(self):
"""
Returns an instance of the Weather Service.
"""
import predix.data.weather
weather = predix.data.weather.WeatherForecast()
return weather |
def createPopulationFile(inputFiles, labels, outputFileName):
"""Creates a population file.
:param inputFiles: the list of input files.
:param labels: the list of labels (corresponding to the input files).
:param outputFileName: the name of the output file.
:type inputFiles: list
:type labels: list
:type outputFileName: str
The ``inputFiles`` is in reality a list of ``tfam`` files composed of
samples. For each of those ``tfam`` files, there is a label associated with
it (representing the name of the population).
The output file consists of one row per sample, with the following three
columns: the family ID, the individual ID and the population of each
sample.
"""
outputFile = None
try:
outputFile = open(outputFileName, 'w')
except IOError:
msg = "%(outputFileName)s: can't write file"
raise ProgramError(msg)
for i in xrange(len(inputFiles)):
# For each file
fileName = inputFiles[i]
label = labels[i]
try:
with open(fileName, 'r') as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split(" ")
# Getting the informations
famID = row[0]
indID = row[1]
# Printing to file
print >>outputFile, "\t".join([famID, indID, label])
except IOError:
msg = "%(fileName)s: no such file" % locals()
raise ProgramError(msg)
# Closing the output file
outputFile.close() | def function[createPopulationFile, parameter[inputFiles, labels, outputFileName]]:
constant[Creates a population file.
:param inputFiles: the list of input files.
:param labels: the list of labels (corresponding to the input files).
:param outputFileName: the name of the output file.
:type inputFiles: list
:type labels: list
:type outputFileName: str
The ``inputFiles`` is in reality a list of ``tfam`` files composed of
samples. For each of those ``tfam`` files, there is a label associated with
it (representing the name of the population).
The output file consists of one row per sample, with the following three
columns: the family ID, the individual ID and the population of each
sample.
]
variable[outputFile] assign[=] constant[None]
<ast.Try object at 0x7da1b094b280>
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[inputFiles]]]]]] begin[:]
variable[fileName] assign[=] call[name[inputFiles]][name[i]]
variable[label] assign[=] call[name[labels]][name[i]]
<ast.Try object at 0x7da1b094a350>
call[name[outputFile].close, parameter[]] | keyword[def] identifier[createPopulationFile] ( identifier[inputFiles] , identifier[labels] , identifier[outputFileName] ):
literal[string]
identifier[outputFile] = keyword[None]
keyword[try] :
identifier[outputFile] = identifier[open] ( identifier[outputFileName] , literal[string] )
keyword[except] identifier[IOError] :
identifier[msg] = literal[string] % identifier[locals] ()
keyword[raise] identifier[ProgramError] ( identifier[msg] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[inputFiles] )):
identifier[fileName] = identifier[inputFiles] [ identifier[i] ]
identifier[label] = identifier[labels] [ identifier[i] ]
keyword[try] :
keyword[with] identifier[open] ( identifier[fileName] , literal[string] ) keyword[as] identifier[inputFile] :
keyword[for] identifier[line] keyword[in] identifier[inputFile] :
identifier[row] = identifier[line] . identifier[rstrip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[famID] = identifier[row] [ literal[int] ]
identifier[indID] = identifier[row] [ literal[int] ]
identifier[print] >> identifier[outputFile] , literal[string] . identifier[join] ([ identifier[famID] , identifier[indID] , identifier[label] ])
keyword[except] identifier[IOError] :
identifier[msg] = literal[string] % identifier[locals] ()
keyword[raise] identifier[ProgramError] ( identifier[msg] )
identifier[outputFile] . identifier[close] () | def createPopulationFile(inputFiles, labels, outputFileName):
"""Creates a population file.
:param inputFiles: the list of input files.
:param labels: the list of labels (corresponding to the input files).
:param outputFileName: the name of the output file.
:type inputFiles: list
:type labels: list
:type outputFileName: str
The ``inputFiles`` is in reality a list of ``tfam`` files composed of
samples. For each of those ``tfam`` files, there is a label associated with
it (representing the name of the population).
The output file consists of one row per sample, with the following three
columns: the family ID, the individual ID and the population of each
sample.
"""
outputFile = None
try:
outputFile = open(outputFileName, 'w') # depends on [control=['try'], data=[]]
except IOError:
msg = "%(outputFileName)s: can't write file"
raise ProgramError(msg) # depends on [control=['except'], data=[]]
for i in xrange(len(inputFiles)):
# For each file
fileName = inputFiles[i]
label = labels[i]
try:
with open(fileName, 'r') as inputFile:
for line in inputFile:
row = line.rstrip('\r\n').split(' ')
# Getting the informations
famID = row[0]
indID = row[1]
# Printing to file
(print >> outputFile, '\t'.join([famID, indID, label])) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['inputFile']] # depends on [control=['try'], data=[]]
except IOError:
msg = '%(fileName)s: no such file' % locals()
raise ProgramError(msg) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']]
# Closing the output file
outputFile.close() |
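The function above is Python 2 (note xrange and the print >> statement); a Python 3 re-telling of the same logic, kept deliberately close to the original, might look like this:
def create_population_file_py3(input_files, labels, output_file_name):
    with open(output_file_name, 'w') as output_file:
        for file_name, label in zip(input_files, labels):
            with open(file_name) as input_file:
                for line in input_file:
                    fam_id, ind_id = line.rstrip('\r\n').split(' ')[:2]
                    print('\t'.join([fam_id, ind_id, label]), file=output_file)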
def transaction_stop(self):
"""stop/commit a transaction if ready"""
if self.transaction_count > 0:
logger.debug("{}. Stop transaction".format(self.transaction_count))
if self.transaction_count == 1:
self._transaction_stop()
self.transaction_count -= 1
return self.transaction_count | def function[transaction_stop, parameter[self]]:
constant[stop/commit a transaction if ready]
if compare[name[self].transaction_count greater[>] constant[0]] begin[:]
call[name[logger].debug, parameter[call[constant[{}. Stop transaction].format, parameter[name[self].transaction_count]]]]
if compare[name[self].transaction_count equal[==] constant[1]] begin[:]
call[name[self]._transaction_stop, parameter[]]
<ast.AugAssign object at 0x7da18f09e350>
return[name[self].transaction_count] | keyword[def] identifier[transaction_stop] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[transaction_count] > literal[int] :
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[transaction_count] ))
keyword[if] identifier[self] . identifier[transaction_count] == literal[int] :
identifier[self] . identifier[_transaction_stop] ()
identifier[self] . identifier[transaction_count] -= literal[int]
keyword[return] identifier[self] . identifier[transaction_count] | def transaction_stop(self):
"""stop/commit a transaction if ready"""
if self.transaction_count > 0:
logger.debug('{}. Stop transaction'.format(self.transaction_count))
if self.transaction_count == 1:
self._transaction_stop() # depends on [control=['if'], data=[]]
self.transaction_count -= 1 # depends on [control=['if'], data=[]]
return self.transaction_count |
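Sketch of the intended nesting semantics: only the outermost stop commits. The Db class and its transaction_start counterpart are hypothetical stand-ins for the surrounding interface:
class Db:
    def __init__(self):
        self.transaction_count = 0
    def transaction_start(self):          # assumed counterpart method
        self.transaction_count += 1
    def _transaction_stop(self):
        print('commit')
    def transaction_stop(self):           # same logic as above, sans logging
        if self.transaction_count > 0:
            if self.transaction_count == 1:
                self._transaction_stop()
            self.transaction_count -= 1
        return self.transaction_count

db = Db()
db.transaction_start()
db.transaction_start()
db.transaction_stop()   # nested: no commit, returns 1
db.transaction_stop()   # outermost: prints 'commit', returns 0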
def _get_consent_id(self, requester, user_id, filtered_attr):
"""
Get a hashed id based on requester, user id and filtered attributes
:type requester: str
:type user_id: str
:type filtered_attr: dict[str, str]
:param requester: The calling requester
:param user_id: The authorized user id
:param filtered_attr: a list containing all attributes to be sent
:return: an id
"""
filtered_attr_key_list = sorted(filtered_attr.keys())
hash_str = ""
for key in filtered_attr_key_list:
_hash_value = "".join(sorted(filtered_attr[key]))
hash_str += key + _hash_value
id_string = "%s%s%s" % (requester, user_id, hash_str)
return urlsafe_b64encode(hashlib.sha512(id_string.encode("utf-8")).hexdigest().encode("utf-8")).decode("utf-8") | def function[_get_consent_id, parameter[self, requester, user_id, filtered_attr]]:
constant[
Get a hashed id based on requester, user id and filtered attributes
:type requester: str
:type user_id: str
:type filtered_attr: dict[str, str]
:param requester: The calling requester
:param user_id: The authorized user id
:param filtered_attr: a list containing all attributes to be sent
:return: an id
]
variable[filtered_attr_key_list] assign[=] call[name[sorted], parameter[call[name[filtered_attr].keys, parameter[]]]]
variable[hash_str] assign[=] constant[]
for taget[name[key]] in starred[name[filtered_attr_key_list]] begin[:]
variable[_hash_value] assign[=] call[constant[].join, parameter[call[name[sorted], parameter[call[name[filtered_attr]][name[key]]]]]]
<ast.AugAssign object at 0x7da1b153dab0>
variable[id_string] assign[=] binary_operation[constant[%s%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b153f190>, <ast.Name object at 0x7da1b153c460>, <ast.Name object at 0x7da1b153fd60>]]]
return[call[call[name[urlsafe_b64encode], parameter[call[call[call[name[hashlib].sha512, parameter[call[name[id_string].encode, parameter[constant[utf-8]]]]].hexdigest, parameter[]].encode, parameter[constant[utf-8]]]]].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[_get_consent_id] ( identifier[self] , identifier[requester] , identifier[user_id] , identifier[filtered_attr] ):
literal[string]
identifier[filtered_attr_key_list] = identifier[sorted] ( identifier[filtered_attr] . identifier[keys] ())
identifier[hash_str] = literal[string]
keyword[for] identifier[key] keyword[in] identifier[filtered_attr_key_list] :
identifier[_hash_value] = literal[string] . identifier[join] ( identifier[sorted] ( identifier[filtered_attr] [ identifier[key] ]))
identifier[hash_str] += identifier[key] + identifier[_hash_value]
identifier[id_string] = literal[string] %( identifier[requester] , identifier[user_id] , identifier[hash_str] )
keyword[return] identifier[urlsafe_b64encode] ( identifier[hashlib] . identifier[sha512] ( identifier[id_string] . identifier[encode] ( literal[string] )). identifier[hexdigest] (). identifier[encode] ( literal[string] )). identifier[decode] ( literal[string] ) | def _get_consent_id(self, requester, user_id, filtered_attr):
"""
Get a hashed id based on requester, user id and filtered attributes
:type requester: str
:type user_id: str
:type filtered_attr: dict[str, str]
:param requester: The calling requester
:param user_id: The authorized user id
:param filtered_attr: a list containing all attributes to be sent
:return: an id
"""
filtered_attr_key_list = sorted(filtered_attr.keys())
hash_str = ''
for key in filtered_attr_key_list:
_hash_value = ''.join(sorted(filtered_attr[key]))
hash_str += key + _hash_value # depends on [control=['for'], data=['key']]
id_string = '%s%s%s' % (requester, user_id, hash_str)
return urlsafe_b64encode(hashlib.sha512(id_string.encode('utf-8')).hexdigest().encode('utf-8')).decode('utf-8') |
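The same ID recipe as a standalone, runnable function (consent_id is a hypothetical name; the hashing steps mirror the method above):
import hashlib
from base64 import urlsafe_b64encode

def consent_id(requester, user_id, filtered_attr):
    # concatenate each key with its sorted attribute values, in key order
    hash_str = ''.join(k + ''.join(sorted(filtered_attr[k]))
                       for k in sorted(filtered_attr))
    id_string = '%s%s%s' % (requester, user_id, hash_str)
    digest = hashlib.sha512(id_string.encode('utf-8')).hexdigest()
    return urlsafe_b64encode(digest.encode('utf-8')).decode('utf-8')

print(consent_id('sp1', 'user42', {'mail': ['a@x.org'], 'name': ['Ann']}))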
def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == 'datetime64':
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == 'timedelta64':
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret | def function[read_array, parameter[self, key, start, stop]]:
constant[ read an array for the specified node (off of group) ]
import module[tables]
variable[node] assign[=] call[name[getattr], parameter[name[self].group, name[key]]]
variable[attrs] assign[=] name[node]._v_attrs
variable[transposed] assign[=] call[name[getattr], parameter[name[attrs], constant[transposed], constant[False]]]
if call[name[isinstance], parameter[name[node], name[tables].VLArray]] begin[:]
variable[ret] assign[=] call[call[name[node]][constant[0]]][<ast.Slice object at 0x7da204345ea0>]
if name[transposed] begin[:]
return[name[ret].T] | keyword[def] identifier[read_array] ( identifier[self] , identifier[key] , identifier[start] = keyword[None] , identifier[stop] = keyword[None] ):
literal[string]
keyword[import] identifier[tables]
identifier[node] = identifier[getattr] ( identifier[self] . identifier[group] , identifier[key] )
identifier[attrs] = identifier[node] . identifier[_v_attrs]
identifier[transposed] = identifier[getattr] ( identifier[attrs] , literal[string] , keyword[False] )
keyword[if] identifier[isinstance] ( identifier[node] , identifier[tables] . identifier[VLArray] ):
identifier[ret] = identifier[node] [ literal[int] ][ identifier[start] : identifier[stop] ]
keyword[else] :
identifier[dtype] = identifier[getattr] ( identifier[attrs] , literal[string] , keyword[None] )
identifier[shape] = identifier[getattr] ( identifier[attrs] , literal[string] , keyword[None] )
keyword[if] identifier[shape] keyword[is] keyword[not] keyword[None] :
identifier[ret] = identifier[np] . identifier[empty] ( identifier[shape] , identifier[dtype] = identifier[dtype] )
keyword[else] :
identifier[ret] = identifier[node] [ identifier[start] : identifier[stop] ]
keyword[if] identifier[dtype] == literal[string] :
identifier[ret] = identifier[_set_tz] ( identifier[ret] , identifier[getattr] ( identifier[attrs] , literal[string] , keyword[None] ), identifier[coerce] = keyword[True] )
keyword[elif] identifier[dtype] == literal[string] :
identifier[ret] = identifier[np] . identifier[asarray] ( identifier[ret] , identifier[dtype] = literal[string] )
keyword[if] identifier[transposed] :
keyword[return] identifier[ret] . identifier[T]
keyword[else] :
keyword[return] identifier[ret] | def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop] # depends on [control=['if'], data=[]]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype) # depends on [control=['if'], data=['shape']]
else:
ret = node[start:stop]
if dtype == 'datetime64':
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True) # depends on [control=['if'], data=[]]
elif dtype == 'timedelta64':
ret = np.asarray(ret, dtype='m8[ns]') # depends on [control=['if'], data=[]]
if transposed:
return ret.T # depends on [control=['if'], data=[]]
else:
return ret |
def _folder_item_method(self, analysis_brain, item):
"""Fills the analysis' method to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
is_editable = self.is_analysis_edition_allowed(analysis_brain)
method_title = analysis_brain.getMethodTitle
item['Method'] = method_title or ''
if is_editable:
method_vocabulary = self.get_methods_vocabulary(analysis_brain)
if method_vocabulary:
item['Method'] = analysis_brain.getMethodUID
item['choices']['Method'] = method_vocabulary
item['allow_edit'].append('Method')
self.show_methodinstr_columns = True
elif method_title:
item['replace']['Method'] = get_link(analysis_brain.getMethodURL,
method_title)
self.show_methodinstr_columns = True | def function[_folder_item_method, parameter[self, analysis_brain, item]]:
constant[Fills the analysis' method to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
]
variable[is_editable] assign[=] call[name[self].is_analysis_edition_allowed, parameter[name[analysis_brain]]]
variable[method_title] assign[=] name[analysis_brain].getMethodTitle
call[name[item]][constant[Method]] assign[=] <ast.BoolOp object at 0x7da2054a4d00>
if name[is_editable] begin[:]
variable[method_vocabulary] assign[=] call[name[self].get_methods_vocabulary, parameter[name[analysis_brain]]]
if name[method_vocabulary] begin[:]
call[name[item]][constant[Method]] assign[=] name[analysis_brain].getMethodUID
call[call[name[item]][constant[choices]]][constant[Method]] assign[=] name[method_vocabulary]
call[call[name[item]][constant[allow_edit]].append, parameter[constant[Method]]]
name[self].show_methodinstr_columns assign[=] constant[True] | keyword[def] identifier[_folder_item_method] ( identifier[self] , identifier[analysis_brain] , identifier[item] ):
literal[string]
identifier[is_editable] = identifier[self] . identifier[is_analysis_edition_allowed] ( identifier[analysis_brain] )
identifier[method_title] = identifier[analysis_brain] . identifier[getMethodTitle]
identifier[item] [ literal[string] ]= identifier[method_title] keyword[or] literal[string]
keyword[if] identifier[is_editable] :
identifier[method_vocabulary] = identifier[self] . identifier[get_methods_vocabulary] ( identifier[analysis_brain] )
keyword[if] identifier[method_vocabulary] :
identifier[item] [ literal[string] ]= identifier[analysis_brain] . identifier[getMethodUID]
identifier[item] [ literal[string] ][ literal[string] ]= identifier[method_vocabulary]
identifier[item] [ literal[string] ]. identifier[append] ( literal[string] )
identifier[self] . identifier[show_methodinstr_columns] = keyword[True]
keyword[elif] identifier[method_title] :
identifier[item] [ literal[string] ][ literal[string] ]= identifier[get_link] ( identifier[analysis_brain] . identifier[getMethodURL] ,
identifier[method_title] )
identifier[self] . identifier[show_methodinstr_columns] = keyword[True] | def _folder_item_method(self, analysis_brain, item):
"""Fills the analysis' method to the item passed in.
:param analysis_brain: Brain that represents an analysis
:param item: analysis' dictionary counterpart that represents a row
"""
is_editable = self.is_analysis_edition_allowed(analysis_brain)
method_title = analysis_brain.getMethodTitle
item['Method'] = method_title or ''
if is_editable:
method_vocabulary = self.get_methods_vocabulary(analysis_brain)
if method_vocabulary:
item['Method'] = analysis_brain.getMethodUID
item['choices']['Method'] = method_vocabulary
item['allow_edit'].append('Method')
self.show_methodinstr_columns = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif method_title:
item['replace']['Method'] = get_link(analysis_brain.getMethodURL, method_title)
self.show_methodinstr_columns = True # depends on [control=['if'], data=[]] |
def get_dsl_by_hash(self, node_hash: str) -> Optional[BaseEntity]:
"""Look up a node by the hash and returns the corresponding PyBEL node tuple."""
node = self.get_node_by_hash(node_hash)
if node is not None:
return node.as_bel() | def function[get_dsl_by_hash, parameter[self, node_hash]]:
constant[Look up a node by the hash and returns the corresponding PyBEL node tuple.]
variable[node] assign[=] call[name[self].get_node_by_hash, parameter[name[node_hash]]]
if compare[name[node] is_not constant[None]] begin[:]
return[call[name[node].as_bel, parameter[]]] | keyword[def] identifier[get_dsl_by_hash] ( identifier[self] , identifier[node_hash] : identifier[str] )-> identifier[Optional] [ identifier[BaseEntity] ]:
literal[string]
identifier[node] = identifier[self] . identifier[get_node_by_hash] ( identifier[node_hash] )
keyword[if] identifier[node] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[node] . identifier[as_bel] () | def get_dsl_by_hash(self, node_hash: str) -> Optional[BaseEntity]:
"""Look up a node by the hash and returns the corresponding PyBEL node tuple."""
node = self.get_node_by_hash(node_hash)
if node is not None:
return node.as_bel() # depends on [control=['if'], data=['node']] |
def last_seen_utc(self) -> Optional[datetime]:
"""Timestamp when the story has last been watched or None (UTC)."""
if self._node['seen']:
return datetime.utcfromtimestamp(self._node['seen']) | def function[last_seen_utc, parameter[self]]:
constant[Timestamp when the story has last been watched or None (UTC).]
if call[name[self]._node][constant[seen]] begin[:]
return[call[name[datetime].utcfromtimestamp, parameter[call[name[self]._node][constant[seen]]]]] | keyword[def] identifier[last_seen_utc] ( identifier[self] )-> identifier[Optional] [ identifier[datetime] ]:
literal[string]
keyword[if] identifier[self] . identifier[_node] [ literal[string] ]:
keyword[return] identifier[datetime] . identifier[utcfromtimestamp] ( identifier[self] . identifier[_node] [ literal[string] ]) | def last_seen_utc(self) -> Optional[datetime]:
"""Timestamp when the story has last been watched or None (UTC)."""
if self._node['seen']:
return datetime.utcfromtimestamp(self._node['seen']) # depends on [control=['if'], data=[]] |
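The timestamp conversion used above, in isolation:
from datetime import datetime
print(datetime.utcfromtimestamp(1600000000))  # 2020-09-13 12:26:40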
def try_wrapper(fxn):
"""Wraps a function, returning (True, fxn(val)) if successful, (False, val) if not."""
def wrapper(val):
"""Try to call fxn with the given value. If successful, return (True, fxn(val)), otherwise
returns (False, val).
"""
try:
return (True, fxn(val))
except Exception:
return (False, val)
return wrapper | def function[try_wrapper, parameter[fxn]]:
constant[Wraps a function, returning (True, fxn(val)) if successful, (False, val) if not.]
def function[wrapper, parameter[val]]:
constant[Try to call fxn with the given value. If successful, return (True, fxn(val)), otherwise
returns (False, val).
]
<ast.Try object at 0x7da1b14c4af0>
return[name[wrapper]] | keyword[def] identifier[try_wrapper] ( identifier[fxn] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[val] ):
literal[string]
keyword[try] :
keyword[return] ( keyword[True] , identifier[fxn] ( identifier[val] ))
keyword[except] identifier[Exception] :
keyword[return] ( keyword[False] , identifier[val] )
keyword[return] identifier[wrapper] | def try_wrapper(fxn):
"""Wraps a function, returning (True, fxn(val)) if successful, (False, val) if not."""
def wrapper(val):
"""Try to call fxn with the given value. If successful, return (True, fxn(val)), otherwise
returns (False, val).
"""
try:
return (True, fxn(val)) # depends on [control=['try'], data=[]]
except Exception:
return (False, val) # depends on [control=['except'], data=[]]
return wrapper |
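Usage example: building a non-raising int parser from the wrapper above:
safe_int = try_wrapper(int)
print(safe_int('42'))    # (True, 42)
print(safe_int('oops'))  # (False, 'oops')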
def legacy_notes_view(request):
"""
View to see legacy notes.
"""
notes = TeacherNote.objects.all()
note_count = notes.count()
paginator = Paginator(notes, 100)
page = request.GET.get('page')
try:
notes = paginator.page(page)
except PageNotAnInteger:
notes = paginator.page(1)
except EmptyPage:
notes = paginator.page(paginator.num_pages)
return render_to_response(
'teacher_notes.html',
{'page_name': "Legacy Notes",
'notes': notes,
'note_count': note_count,},
context_instance=RequestContext(request)
) | def function[legacy_notes_view, parameter[request]]:
constant[
View to see legacy notes.
]
variable[notes] assign[=] call[name[TeacherNote].objects.all, parameter[]]
variable[note_count] assign[=] call[name[notes].count, parameter[]]
variable[paginator] assign[=] call[name[Paginator], parameter[name[notes], constant[100]]]
variable[page] assign[=] call[name[request].GET.get, parameter[constant[page]]]
<ast.Try object at 0x7da1b2347e80>
return[call[name[render_to_response], parameter[constant[teacher_notes.html], dictionary[[<ast.Constant object at 0x7da1b2345d50>, <ast.Constant object at 0x7da1b23474c0>, <ast.Constant object at 0x7da1b2344220>], [<ast.Constant object at 0x7da1b2344640>, <ast.Name object at 0x7da2054a5390>, <ast.Name object at 0x7da2054a7bb0>]]]]] | keyword[def] identifier[legacy_notes_view] ( identifier[request] ):
literal[string]
identifier[notes] = identifier[TeacherNote] . identifier[objects] . identifier[all] ()
identifier[note_count] = identifier[notes] . identifier[count] ()
identifier[paginator] = identifier[Paginator] ( identifier[notes] , literal[int] )
identifier[page] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] )
keyword[try] :
identifier[notes] = identifier[paginator] . identifier[page] ( identifier[page] )
keyword[except] identifier[PageNotAnInteger] :
identifier[notes] = identifier[paginator] . identifier[page] ( literal[int] )
keyword[except] identifier[EmptyPage] :
identifier[notes] = identifier[paginator] . identifier[page] ( identifier[paginator] . identifier[num_pages] )
keyword[return] identifier[render_to_response] (
literal[string] ,
{ literal[string] : literal[string] ,
literal[string] : identifier[notes] ,
literal[string] : identifier[note_count] ,},
identifier[context_instance] = identifier[RequestContext] ( identifier[request] )
) | def legacy_notes_view(request):
"""
View to see legacy notes.
"""
notes = TeacherNote.objects.all()
note_count = notes.count()
paginator = Paginator(notes, 100)
page = request.GET.get('page')
try:
notes = paginator.page(page) # depends on [control=['try'], data=[]]
except PageNotAnInteger:
notes = paginator.page(1) # depends on [control=['except'], data=[]]
except EmptyPage:
notes = paginator.page(paginator.num_pages) # depends on [control=['except'], data=[]]
return render_to_response('teacher_notes.html', {'page_name': 'Legacy Notes', 'notes': notes, 'note_count': note_count}, context_instance=RequestContext(request)) |
def match(column, term, match_type=None, options=None):
"""Generates match predicate for fulltext search
:param column: A reference to a column or an index, or a subcolumn, or a
dictionary of subcolumns with boost values.
:param term: The term to match against. This string is analyzed and the
resulting tokens are compared to the index.
:param match_type (optional): The match type. Determines how the term is
applied and the score calculated.
:param options (optional): The match options. Specify match type behaviour.
(Not possible without a specified match type.) Match options must be
supplied as a dictionary.
"""
return Match(column, term, match_type, options) | def function[match, parameter[column, term, match_type, options]]:
constant[Generates match predicate for fulltext search
:param column: A reference to a column or an index, or a subcolumn, or a
dictionary of subcolumns with boost values.
:param term: The term to match against. This string is analyzed and the
resulting tokens are compared to the index.
:param match_type (optional): The match type. Determine how the term is
applied and the score calculated.
:param options (optional): The match options. Specify match type behaviour.
(Not possible without a specified match type.) Match options must be
supplied as a dictionary.
]
return[call[name[Match], parameter[name[column], name[term], name[match_type], name[options]]]] | keyword[def] identifier[match] ( identifier[column] , identifier[term] , identifier[match_type] = keyword[None] , identifier[options] = keyword[None] ):
literal[string]
keyword[return] identifier[Match] ( identifier[column] , identifier[term] , identifier[match_type] , identifier[options] ) | def match(column, term, match_type=None, options=None):
"""Generates match predicate for fulltext search
:param column: A reference to a column or an index, or a subcolumn, or a
dictionary of subcolumns with boost values.
:param term: The term to match against. This string is analyzed and the
resulting tokens are compared to the index.
:param match_type (optional): The match type. Determines how the term is
applied and the score calculated.
:param options (optional): The match options. Specify match type behaviour.
(Not possible without a specified match type.) Match options must be
supplied as a dictionary.
"""
return Match(column, term, match_type, options) |
def decode(encoded_histogram, b64_wrap=True):
'''Decode a wire histogram encoding into a read-only Hdr Payload instance
Args:
encoded_histogram a string containing the wire encoding of a histogram
such as one returned from encode()
Returns:
an hdr_payload instance with all the decoded/uncompressed fields
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
zlib.error:
in case of zlib decompression error
'''
if b64_wrap:
b64decode = base64.b64decode(encoded_histogram)
# this string has 2 parts in it: the header (raw) and the payload (compressed)
b64dec_len = len(b64decode)
if b64dec_len < ext_header_size:
raise HdrLengthException('Base64 decoded message too short')
header = ExternalHeader.from_buffer_copy(b64decode)
if get_cookie_base(header.cookie) != V2_COMPRESSION_COOKIE_BASE:
raise HdrCookieException()
if header.length != b64dec_len - ext_header_size:
raise HdrLengthException('Decoded length=%d buffer length=%d' %
(header.length, b64dec_len - ext_header_size))
# this will result in a copy of the compressed payload part
# could not find a way to do otherwise since zlib.decompress()
# expects a string (and does not like a buffer or a memoryview object)
cpayload = b64decode[ext_header_size:]
else:
cpayload = encoded_histogram
hdr_payload = HdrPayload(8, compressed_payload=cpayload)
return hdr_payload | def function[decode, parameter[encoded_histogram, b64_wrap]]:
constant[Decode a wire histogram encoding into a read-only Hdr Payload instance
Args:
encoded_histogram a string containing the wire encoding of a histogram
such as one returned from encode()
Returns:
an hdr_payload instance with all the decoded/uncompressed fields
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
zlib.error:
in case of zlib decompression error
]
if name[b64_wrap] begin[:]
variable[b64decode] assign[=] call[name[base64].b64decode, parameter[name[encoded_histogram]]]
variable[b64dec_len] assign[=] call[name[len], parameter[name[b64decode]]]
if compare[name[b64dec_len] less[<] name[ext_header_size]] begin[:]
<ast.Raise object at 0x7da1b0f9d120>
variable[header] assign[=] call[name[ExternalHeader].from_buffer_copy, parameter[name[b64decode]]]
if compare[call[name[get_cookie_base], parameter[name[header].cookie]] not_equal[!=] name[V2_COMPRESSION_COOKIE_BASE]] begin[:]
<ast.Raise object at 0x7da1b0f9ffd0>
if compare[name[header].length not_equal[!=] binary_operation[name[b64dec_len] - name[ext_header_size]]] begin[:]
<ast.Raise object at 0x7da1b0f9c970>
variable[cpayload] assign[=] call[name[b64decode]][<ast.Slice object at 0x7da1b0f9db40>]
variable[hdr_payload] assign[=] call[name[HdrPayload], parameter[constant[8]]]
return[name[hdr_payload]] | keyword[def] identifier[decode] ( identifier[encoded_histogram] , identifier[b64_wrap] = keyword[True] ):
literal[string]
keyword[if] identifier[b64_wrap] :
identifier[b64decode] = identifier[base64] . identifier[b64decode] ( identifier[encoded_histogram] )
identifier[b64dec_len] = identifier[len] ( identifier[b64decode] )
keyword[if] identifier[b64dec_len] < identifier[ext_header_size] :
keyword[raise] identifier[HdrLengthException] ( literal[string] )
identifier[header] = identifier[ExternalHeader] . identifier[from_buffer_copy] ( identifier[b64decode] )
keyword[if] identifier[get_cookie_base] ( identifier[header] . identifier[cookie] )!= identifier[V2_COMPRESSION_COOKIE_BASE] :
keyword[raise] identifier[HdrCookieException] ()
keyword[if] identifier[header] . identifier[length] != identifier[b64dec_len] - identifier[ext_header_size] :
keyword[raise] identifier[HdrLengthException] ( literal[string] %
( identifier[header] . identifier[length] , identifier[b64dec_len] - identifier[ext_header_size] ))
identifier[cpayload] = identifier[b64decode] [ identifier[ext_header_size] :]
keyword[else] :
identifier[cpayload] = identifier[encoded_histogram]
identifier[hdr_payload] = identifier[HdrPayload] ( literal[int] , identifier[compressed_payload] = identifier[cpayload] )
keyword[return] identifier[hdr_payload] | def decode(encoded_histogram, b64_wrap=True):
"""Decode a wire histogram encoding into a read-only Hdr Payload instance
Args:
encoded_histogram a string containing the wire encoding of a histogram
such as one returned from encode()
Returns:
an hdr_payload instance with all the decoded/uncompressed fields
Exception:
TypeError in case of base64 decode error
HdrCookieException:
the main header has an invalid cookie
the compressed payload header has an invalid cookie
HdrLengthException:
the decompressed size is too small for the HdrPayload structure
or is not aligned or is too large for the passed payload class
HdrHistogramSettingsException:
mismatch in the significant figures, lowest and highest
trackable value
zlib.error:
in case of zlib decompression error
"""
if b64_wrap:
b64decode = base64.b64decode(encoded_histogram)
# this string has 2 parts in it: the header (raw) and the payload (compressed)
b64dec_len = len(b64decode)
if b64dec_len < ext_header_size:
raise HdrLengthException('Base64 decoded message too short') # depends on [control=['if'], data=[]]
header = ExternalHeader.from_buffer_copy(b64decode)
if get_cookie_base(header.cookie) != V2_COMPRESSION_COOKIE_BASE:
raise HdrCookieException() # depends on [control=['if'], data=[]]
if header.length != b64dec_len - ext_header_size:
raise HdrLengthException('Decoded length=%d buffer length=%d' % (header.length, b64dec_len - ext_header_size)) # depends on [control=['if'], data=[]]
# this will result in a copy of the compressed payload part
# could not find a way to do otherwise since zlib.decompress()
# expects a string (and does not like a buffer or a memoryview object)
cpayload = b64decode[ext_header_size:] # depends on [control=['if'], data=[]]
else:
cpayload = encoded_histogram
hdr_payload = HdrPayload(8, compressed_payload=cpayload)
return hdr_payload |
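A round-trip sketch, assuming the hdrh package this codec belongs to (encode/decode names as in its README; treat them as assumptions rather than guaranteed API):
from hdrh.histogram import HdrHistogram

h = HdrHistogram(1, 60 * 60 * 1000, 2)   # 1 usec .. 1 hour, 2 significant figures
h.record_value(1234)
encoded = h.encode()                      # base64 wire form (the b64_wrap=True path)
decoded = HdrHistogram.decode(encoded)    # drives the payload decoder above
print(decoded.get_total_count())          # 1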
def download_file(self, remote_filename, local_filename=None):
"""Download file from github.
Args:
remote_filename (str): The name of the file as defined in the git repository.
local_filename (str, optional): Defaults to None. The name of the file as it
should be written to the local filesystem.
"""
status = 'Failed'
if local_filename is None:
local_filename = remote_filename
if not self.args.force and os.access(local_filename, os.F_OK):
if not self._confirm_overwrite(local_filename):
self._print_results(local_filename, 'Skipped')
return
url = '{}{}'.format(self.base_url, remote_filename)
r = requests.get(url, allow_redirects=True)
if r.ok:
open(local_filename, 'wb').write(r.content)
status = 'Success'
else:
self.handle_error('Error requesting: {}'.format(url), False)
# print download status
self._print_results(local_filename, status) | def function[download_file, parameter[self, remote_filename, local_filename]]:
constant[Download file from github.
Args:
remote_filename (str): The name of the file as defined in git repository.
local_filename (str, optional): Defaults to None. The name of the file as it should be
be written to local filesystem.
]
variable[status] assign[=] constant[Failed]
if compare[name[local_filename] is constant[None]] begin[:]
variable[local_filename] assign[=] name[remote_filename]
if <ast.BoolOp object at 0x7da20c6c4850> begin[:]
if <ast.UnaryOp object at 0x7da20c6c4fa0> begin[:]
call[name[self]._print_results, parameter[name[local_filename], constant[Skipped]]]
return[None]
variable[url] assign[=] call[constant[{}{}].format, parameter[name[self].base_url, name[remote_filename]]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
if name[r].ok begin[:]
call[call[name[open], parameter[name[local_filename], constant[wb]]].write, parameter[name[r].content]]
variable[status] assign[=] constant[Success]
call[name[self]._print_results, parameter[name[local_filename], name[status]]] | keyword[def] identifier[download_file] ( identifier[self] , identifier[remote_filename] , identifier[local_filename] = keyword[None] ):
literal[string]
identifier[status] = literal[string]
keyword[if] identifier[local_filename] keyword[is] keyword[None] :
identifier[local_filename] = identifier[remote_filename]
keyword[if] keyword[not] identifier[self] . identifier[args] . identifier[force] keyword[and] identifier[os] . identifier[access] ( identifier[local_filename] , identifier[os] . identifier[F_OK] ):
keyword[if] keyword[not] identifier[self] . identifier[_confirm_overwrite] ( identifier[local_filename] ):
identifier[self] . identifier[_print_results] ( identifier[local_filename] , literal[string] )
keyword[return]
identifier[url] = literal[string] . identifier[format] ( identifier[self] . identifier[base_url] , identifier[remote_filename] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] , identifier[allow_redirects] = keyword[True] )
keyword[if] identifier[r] . identifier[ok] :
identifier[open] ( identifier[local_filename] , literal[string] ). identifier[write] ( identifier[r] . identifier[content] )
identifier[status] = literal[string]
keyword[else] :
identifier[self] . identifier[handle_error] ( literal[string] . identifier[format] ( identifier[url] ), keyword[False] )
identifier[self] . identifier[_print_results] ( identifier[local_filename] , identifier[status] ) | def download_file(self, remote_filename, local_filename=None):
"""Download file from github.
Args:
remote_filename (str): The name of the file as defined in git repository.
local_filename (str, optional): Defaults to None. The name of the file as it should be
written to the local filesystem.
"""
status = 'Failed'
if local_filename is None:
local_filename = remote_filename # depends on [control=['if'], data=['local_filename']]
if not self.args.force and os.access(local_filename, os.F_OK):
if not self._confirm_overwrite(local_filename):
self._print_results(local_filename, 'Skipped')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
url = '{}{}'.format(self.base_url, remote_filename)
r = requests.get(url, allow_redirects=True)
if r.ok:
open(local_filename, 'wb').write(r.content)
status = 'Success' # depends on [control=['if'], data=[]]
else:
self.handle_error('Error requesting: {}'.format(url), False)
# print download status
self._print_results(local_filename, status) |
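A hedged usage sketch of the download-with-overwrite-guard flow above; the base URL and file names are hypothetical stand-ins, and the interactive confirmation prompt is reduced to a force flag:

import os
import requests

def fetch_file(base_url, remote_filename, local_filename=None, force=False):
    local_filename = local_filename or remote_filename
    if not force and os.access(local_filename, os.F_OK):
        return 'Skipped'  # refuse to overwrite an existing file
    r = requests.get(base_url + remote_filename, allow_redirects=True)
    if not r.ok:
        return 'Failed'
    with open(local_filename, 'wb') as fh:  # context manager closes the handle
        fh.write(r.content)
    return 'Success'

# e.g. fetch_file('https://raw.githubusercontent.com/org/repo/master/', 'README.md')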
def is_image(f, types=('png', 'jpeg', 'gif'), set_content_type=True):
"""
Return True if file f is an image of one of the given types and set its correct content_type and filename extension.
Example:
if is_image(request.FILES['file']):
print 'File is image'
if is_image(open('/tmp/image.jpeg', 'rb')):
print 'File is image'
"""
assert isinstance(types, (list, tuple))
t = image_get_format(f)
if t not in [t.lower() for t in types]:
return False
if set_content_type:
set_uploaded_file_content_type_and_file_ext(f, t)
return True | def function[is_image, parameter[f, types, set_content_type]]:
constant[
Return True if file f is an image of one of the given types and set its correct content_type and filename extension.
Example:
if is_image(request.FILES['file']):
print 'File is image'
if is_image(open('/tmp/image.jpeg', 'rb')):
print 'File is image'
]
assert[call[name[isinstance], parameter[name[types], tuple[[<ast.Name object at 0x7da2054a46d0>, <ast.Name object at 0x7da2054a6ec0>]]]]]
variable[t] assign[=] call[name[image_get_format], parameter[name[f]]]
if compare[name[t] <ast.NotIn object at 0x7da2590d7190> <ast.ListComp object at 0x7da2054a4f40>] begin[:]
return[constant[False]]
if name[set_content_type] begin[:]
call[name[set_uploaded_file_content_type_and_file_ext], parameter[name[f], name[t]]]
return[constant[True]] | keyword[def] identifier[is_image] ( identifier[f] , identifier[types] =( literal[string] , literal[string] , literal[string] ), identifier[set_content_type] = keyword[True] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[types] ,( identifier[list] , identifier[tuple] ))
identifier[t] = identifier[image_get_format] ( identifier[f] )
keyword[if] identifier[t] keyword[not] keyword[in] [ identifier[t] . identifier[lower] () keyword[for] identifier[t] keyword[in] identifier[types] ]:
keyword[return] keyword[False]
keyword[if] identifier[set_content_type] :
identifier[set_uploaded_file_content_type_and_file_ext] ( identifier[f] , identifier[t] )
keyword[return] keyword[True] | def is_image(f, types=('png', 'jpeg', 'gif'), set_content_type=True):
"""
Return True if file f is an image of one of the given types and set its correct content_type and filename extension.
Example:
if is_image(request.FILES['file']):
print 'File is image'
if is_image(open('/tmp/image.jpeg', 'rb')):
print 'File is image'
"""
assert isinstance(types, (list, tuple))
t = image_get_format(f)
if t not in [t.lower() for t in types]:
return False # depends on [control=['if'], data=[]]
if set_content_type:
set_uploaded_file_content_type_and_file_ext(f, t) # depends on [control=['if'], data=[]]
return True |
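image_get_format is not shown in this row; a minimal sketch of the same magic-byte sniffing using the stdlib imghdr module (removed in Python 3.13) as a stand-in:

import imghdr

def looks_like_image(fileobj, types=('png', 'jpeg', 'gif')):
    # imghdr inspects the leading magic bytes, as image_get_format presumably does
    fmt = imghdr.what(None, h=fileobj.read(32))
    fileobj.seek(0)  # rewind so the caller can still read the whole file
    return fmt in [t.lower() for t in types]

# e.g. looks_like_image(open('/tmp/image.jpeg', 'rb'))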
def quit_all(editor, force=False):
"""
Quit all.
"""
quit(editor, all_=True, force=force) | def function[quit_all, parameter[editor, force]]:
constant[
Quit all.
]
call[name[quit], parameter[name[editor]]] | keyword[def] identifier[quit_all] ( identifier[editor] , identifier[force] = keyword[False] ):
literal[string]
identifier[quit] ( identifier[editor] , identifier[all_] = keyword[True] , identifier[force] = identifier[force] ) | def quit_all(editor, force=False):
"""
Quit all.
"""
quit(editor, all_=True, force=force) |
def call_cmd(cmdlist, stdin=None):
"""
get a shell command's output, error message and return value, and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shell command to call, already split into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
stdin = stdin.encode(termenc)
try:
logging.debug("Calling %s" % cmdlist)
proc = subprocess.Popen(
cmdlist,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin is not None else None)
except OSError as e:
out = b''
err = e.strerror
ret = e.errno
else:
out, err = proc.communicate(stdin)
ret = proc.returncode
out = string_decode(out, termenc)
err = string_decode(err, termenc)
return out, err, ret | def function[call_cmd, parameter[cmdlist, stdin]]:
constant[
get a shell command's output, error message and return value, and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shell command to call, already split into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
]
variable[termenc] assign[=] name[urwid].util.detected_encoding
if call[name[isinstance], parameter[name[stdin], name[str]]] begin[:]
variable[stdin] assign[=] call[name[stdin].encode, parameter[name[termenc]]]
<ast.Try object at 0x7da1b0847ca0>
variable[out] assign[=] call[name[string_decode], parameter[name[out], name[termenc]]]
variable[err] assign[=] call[name[string_decode], parameter[name[err], name[termenc]]]
return[tuple[[<ast.Name object at 0x7da1b07f7d00>, <ast.Name object at 0x7da1b07f50c0>, <ast.Name object at 0x7da1b07f5ab0>]]] | keyword[def] identifier[call_cmd] ( identifier[cmdlist] , identifier[stdin] = keyword[None] ):
literal[string]
identifier[termenc] = identifier[urwid] . identifier[util] . identifier[detected_encoding]
keyword[if] identifier[isinstance] ( identifier[stdin] , identifier[str] ):
identifier[stdin] = identifier[stdin] . identifier[encode] ( identifier[termenc] )
keyword[try] :
identifier[logging] . identifier[debug] ( literal[string] % identifier[cmdlist] )
identifier[proc] = identifier[subprocess] . identifier[Popen] (
identifier[cmdlist] ,
identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] ,
identifier[stdin] = identifier[subprocess] . identifier[PIPE] keyword[if] identifier[stdin] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] )
keyword[except] identifier[OSError] keyword[as] identifier[e] :
identifier[out] = literal[string]
identifier[err] = identifier[e] . identifier[strerror]
identifier[ret] = identifier[e] . identifier[errno]
keyword[else] :
identifier[out] , identifier[err] = identifier[proc] . identifier[communicate] ( identifier[stdin] )
identifier[ret] = identifier[proc] . identifier[returncode]
identifier[out] = identifier[string_decode] ( identifier[out] , identifier[termenc] )
identifier[err] = identifier[string_decode] ( identifier[err] , identifier[termenc] )
keyword[return] identifier[out] , identifier[err] , identifier[ret] | def call_cmd(cmdlist, stdin=None):
"""
get a shell command's output, error message and return value, and immediately
return.
.. warning::
This returns with the first screen content for interactive commands.
:param cmdlist: shell command to call, already split into a list accepted
by :meth:`subprocess.Popen`
:type cmdlist: list of str
:param stdin: string to pipe to the process
:type stdin: str, bytes, or None
:return: triple of stdout, stderr, return value of the shell command
:rtype: str, str, int
"""
termenc = urwid.util.detected_encoding
if isinstance(stdin, str):
stdin = stdin.encode(termenc) # depends on [control=['if'], data=[]]
try:
logging.debug('Calling %s' % cmdlist)
proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE if stdin is not None else None) # depends on [control=['try'], data=[]]
except OSError as e:
out = b''
err = e.strerror
ret = e.errno # depends on [control=['except'], data=['e']]
else:
(out, err) = proc.communicate(stdin)
ret = proc.returncode
out = string_decode(out, termenc)
err = string_decode(err, termenc)
return (out, err, ret) |
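The same Popen/communicate pattern, condensed and without the urwid encoding handling; a sketch rather than a drop-in replacement:

import subprocess

def run(cmdlist, stdin=None):
    try:
        proc = subprocess.Popen(
            cmdlist,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE if stdin is not None else None)
    except OSError as e:
        return b'', e.strerror, e.errno  # e.g. command not found
    out, err = proc.communicate(stdin)
    return out, err, proc.returncode

# e.g. run(['echo', 'hello']) -> (b'hello\n', b'', 0)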
def check(self, solution):
"""Check that a solution satisfies the constraint.
Args:
solution (container):
An assignment for the variables in the constraint.
Returns:
bool: True if the solution satisfies the constraint; otherwise False.
Examples:
This example creates a constraint that :math:`a \\ne b` on binary variables
and tests it for two candidate solutions, with additional unconstrained
variable c.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> solution = {'a': 1, 'b': 1, 'c': 0}
>>> const.check(solution)
False
>>> solution = {'a': 1, 'b': 0, 'c': 0}
>>> const.check(solution)
True
"""
return self.func(*(solution[v] for v in self.variables)) | def function[check, parameter[self, solution]]:
constant[Check that a solution satisfies the constraint.
Args:
solution (container):
An assignment for the variables in the constraint.
Returns:
bool: True if the solution satisfies the constraint; otherwise False.
Examples:
This example creates a constraint that :math:`a \ne b` on binary variables
and tests it for two candidate solutions, with additional unconstrained
variable c.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> solution = {'a': 1, 'b': 1, 'c': 0}
>>> const.check(solution)
False
>>> solution = {'a': 1, 'b': 0, 'c': 0}
>>> const.check(solution)
True
]
return[call[name[self].func, parameter[<ast.Starred object at 0x7da1b00aeb00>]]] | keyword[def] identifier[check] ( identifier[self] , identifier[solution] ):
literal[string]
keyword[return] identifier[self] . identifier[func] (*( identifier[solution] [ identifier[v] ] keyword[for] identifier[v] keyword[in] identifier[self] . identifier[variables] )) | def check(self, solution):
"""Check that a solution satisfies the constraint.
Args:
solution (container):
An assignment for the variables in the constraint.
Returns:
bool: True if the solution satisfies the constraint; otherwise False.
Examples:
This example creates a constraint that :math:`a \\ne b` on binary variables
and tests it for two candidate solutions, with additional unconstrained
variable c.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_configurations([(0, 1), (1, 0)],
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> solution = {'a': 1, 'b': 1, 'c': 0}
>>> const.check(solution)
False
>>> solution = {'a': 1, 'b': 0, 'c': 0}
>>> const.check(solution)
True
"""
return self.func(*(solution[v] for v in self.variables)) |
async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse page
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode("utf-8", "ignore"), parser)
for page_struct_version, result_selector in enumerate(__class__.RESULTS_SELECTORS):
result_nodes = result_selector(html)
if result_nodes:
break
for rank, result_node in enumerate(result_nodes, 1):
try:
img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0]
except IndexError:
# no image for that product
continue
# get thumbnail & full image url
thumbnail_url = img_node.get("src")
url_parts = thumbnail_url.rsplit(".", 2)
img_url = ".".join((url_parts[0], url_parts[2]))
# assume size is fixed
size = (500, 500)
check_metadata = CoverImageMetadata.SIZE
# try to get higher res image...
if ((self.target_size > size[0]) and # ...only if needed
(rank <= 3)): # and only for first 3 results because this is time
# consuming (1 more GET request per result)
product_url = __class__.PRODUCT_LINK_SELECTORS[page_struct_version](result_node)[0].get("href")
product_url_split = urllib.parse.urlsplit(product_url)
if not product_url_split.scheme:
# relative redirect url
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# needed if page_struct_version == 1
product_url = product_url_query["url"]
except KeyError:
# page_struct_version == 0, make url absolute
product_url = urllib.parse.urljoin(self.base_url, product_url)
product_url_split = urllib.parse.urlsplit(product_url)
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# remove timestamp from url to improve future cache hit rate
del product_url_query["qid"]
except KeyError:
pass
product_url_query = urllib.parse.urlencode(product_url_query)
product_url_no_ts = urllib.parse.urlunsplit(product_url_split[:3] + (product_url_query,) + product_url_split[4:])
store_in_cache_callback, product_page_data = await self.fetchResults(product_url_no_ts)
product_page_html = lxml.etree.XML(product_page_data.decode("latin-1"), parser)
try:
img_node = __class__.PRODUCT_PAGE_IMG_SELECTOR(product_page_html)[0]
except IndexError:
# unable to get better image
pass
else:
better_img_url = img_node.get("data-old-hires")
# img_node.get("data-a-dynamic-image") contains json with image urls too, but they are not larger than
# previous 500px image and are often covered by autorip badges (can be removed by cleaning url though)
if better_img_url:
img_url = better_img_url
size_url_hint = img_url.rsplit(".", 2)[1].strip("_")
assert(size_url_hint.startswith("SL"))
size_url_hint = int(size_url_hint[2:])
size = (size_url_hint, size_url_hint)
check_metadata = CoverImageMetadata.NONE
await store_in_cache_callback()
# assume format is always jpg
format = CoverImageFormat.JPEG
# add result
results.append(AmazonCdCoverSourceResult(img_url,
size,
format,
thumbnail_url=thumbnail_url,
source=self,
rank=rank,
check_metadata=check_metadata))
return results | <ast.AsyncFunctionDef object at 0x7da1b0666d10> | keyword[async] keyword[def] identifier[parseResults] ( identifier[self] , identifier[api_data] ):
literal[string]
identifier[results] =[]
identifier[parser] = identifier[lxml] . identifier[etree] . identifier[HTMLParser] ()
identifier[html] = identifier[lxml] . identifier[etree] . identifier[XML] ( identifier[api_data] . identifier[decode] ( literal[string] , literal[string] ), identifier[parser] )
keyword[for] identifier[page_struct_version] , identifier[result_selector] keyword[in] identifier[enumerate] ( identifier[__class__] . identifier[RESULTS_SELECTORS] ):
identifier[result_nodes] = identifier[result_selector] ( identifier[html] )
keyword[if] identifier[result_nodes] :
keyword[break]
keyword[for] identifier[rank] , identifier[result_node] keyword[in] identifier[enumerate] ( identifier[result_nodes] , literal[int] ):
keyword[try] :
identifier[img_node] = identifier[__class__] . identifier[IMG_SELECTORS] [ identifier[page_struct_version] ]( identifier[result_node] )[ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[continue]
identifier[thumbnail_url] = identifier[img_node] . identifier[get] ( literal[string] )
identifier[url_parts] = identifier[thumbnail_url] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[img_url] = literal[string] . identifier[join] (( identifier[url_parts] [ literal[int] ], identifier[url_parts] [ literal[int] ]))
identifier[size] =( literal[int] , literal[int] )
identifier[check_metadata] = identifier[CoverImageMetadata] . identifier[SIZE]
keyword[if] (( identifier[self] . identifier[target_size] > identifier[size] [ literal[int] ]) keyword[and]
( identifier[rank] <= literal[int] )):
identifier[product_url] = identifier[__class__] . identifier[PRODUCT_LINK_SELECTORS] [ identifier[page_struct_version] ]( identifier[result_node] )[ literal[int] ]. identifier[get] ( literal[string] )
identifier[product_url_split] = identifier[urllib] . identifier[parse] . identifier[urlsplit] ( identifier[product_url] )
keyword[if] keyword[not] identifier[product_url_split] . identifier[scheme] :
identifier[product_url_query] = identifier[urllib] . identifier[parse] . identifier[parse_qsl] ( identifier[product_url_split] . identifier[query] )
identifier[product_url_query] = identifier[collections] . identifier[OrderedDict] ( identifier[product_url_query] )
keyword[try] :
identifier[product_url] = identifier[product_url_query] [ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[product_url] = identifier[urllib] . identifier[parse] . identifier[urljoin] ( identifier[self] . identifier[base_url] , identifier[product_url] )
identifier[product_url_split] = identifier[urllib] . identifier[parse] . identifier[urlsplit] ( identifier[product_url] )
identifier[product_url_query] = identifier[urllib] . identifier[parse] . identifier[parse_qsl] ( identifier[product_url_split] . identifier[query] )
identifier[product_url_query] = identifier[collections] . identifier[OrderedDict] ( identifier[product_url_query] )
keyword[try] :
keyword[del] identifier[product_url_query] [ literal[string] ]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[product_url_query] = identifier[urllib] . identifier[parse] . identifier[urlencode] ( identifier[product_url_query] )
identifier[product_url_no_ts] = identifier[urllib] . identifier[parse] . identifier[urlunsplit] ( identifier[product_url_split] [: literal[int] ]+( identifier[product_url_query] ,)+ identifier[product_url_split] [ literal[int] :])
identifier[store_in_cache_callback] , identifier[product_page_data] = keyword[await] identifier[self] . identifier[fetchResults] ( identifier[product_url_no_ts] )
identifier[product_page_html] = identifier[lxml] . identifier[etree] . identifier[XML] ( identifier[product_page_data] . identifier[decode] ( literal[string] ), identifier[parser] )
keyword[try] :
identifier[img_node] = identifier[__class__] . identifier[PRODUCT_PAGE_IMG_SELECTOR] ( identifier[product_page_html] )[ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[else] :
identifier[better_img_url] = identifier[img_node] . identifier[get] ( literal[string] )
keyword[if] identifier[better_img_url] :
identifier[img_url] = identifier[better_img_url]
identifier[size_url_hint] = identifier[img_url] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]. identifier[strip] ( literal[string] )
keyword[assert] ( identifier[size_url_hint] . identifier[startswith] ( literal[string] ))
identifier[size_url_hint] = identifier[int] ( identifier[size_url_hint] [ literal[int] :])
identifier[size] =( identifier[size_url_hint] , identifier[size_url_hint] )
identifier[check_metadata] = identifier[CoverImageMetadata] . identifier[NONE]
keyword[await] identifier[store_in_cache_callback] ()
identifier[format] = identifier[CoverImageFormat] . identifier[JPEG]
identifier[results] . identifier[append] ( identifier[AmazonCdCoverSourceResult] ( identifier[img_url] ,
identifier[size] ,
identifier[format] ,
identifier[thumbnail_url] = identifier[thumbnail_url] ,
identifier[source] = identifier[self] ,
identifier[rank] = identifier[rank] ,
identifier[check_metadata] = identifier[check_metadata] ))
keyword[return] identifier[results] | async def parseResults(self, api_data):
""" See CoverSource.parseResults. """
results = []
# parse page
parser = lxml.etree.HTMLParser()
html = lxml.etree.XML(api_data.decode('utf-8', 'ignore'), parser)
for (page_struct_version, result_selector) in enumerate(__class__.RESULTS_SELECTORS):
result_nodes = result_selector(html)
if result_nodes:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for (rank, result_node) in enumerate(result_nodes, 1):
try:
img_node = __class__.IMG_SELECTORS[page_struct_version](result_node)[0] # depends on [control=['try'], data=[]]
except IndexError:
# no image for that product
continue # depends on [control=['except'], data=[]]
# get thumbnail & full image url
thumbnail_url = img_node.get('src')
url_parts = thumbnail_url.rsplit('.', 2)
img_url = '.'.join((url_parts[0], url_parts[2]))
# assume size is fixed
size = (500, 500)
check_metadata = CoverImageMetadata.SIZE
# try to get higher res image...
if self.target_size > size[0] and rank <= 3: # ...only if needed
# and only for first 3 results because this is time
# consuming (1 more GET request per result)
product_url = __class__.PRODUCT_LINK_SELECTORS[page_struct_version](result_node)[0].get('href')
product_url_split = urllib.parse.urlsplit(product_url)
if not product_url_split.scheme:
# relative redirect url
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# needed if page_struct_version == 1
product_url = product_url_query['url'] # depends on [control=['try'], data=[]]
except KeyError:
# page_struct_version == 0, make url absolute
product_url = urllib.parse.urljoin(self.base_url, product_url) # depends on [control=['except'], data=[]]
product_url_split = urllib.parse.urlsplit(product_url) # depends on [control=['if'], data=[]]
product_url_query = urllib.parse.parse_qsl(product_url_split.query)
product_url_query = collections.OrderedDict(product_url_query)
try:
# remove timestamp from url to improve future cache hit rate
del product_url_query['qid'] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
product_url_query = urllib.parse.urlencode(product_url_query)
product_url_no_ts = urllib.parse.urlunsplit(product_url_split[:3] + (product_url_query,) + product_url_split[4:])
(store_in_cache_callback, product_page_data) = await self.fetchResults(product_url_no_ts)
product_page_html = lxml.etree.XML(product_page_data.decode('latin-1'), parser)
try:
img_node = __class__.PRODUCT_PAGE_IMG_SELECTOR(product_page_html)[0] # depends on [control=['try'], data=[]]
except IndexError:
# unable to get better image
pass # depends on [control=['except'], data=[]]
else:
better_img_url = img_node.get('data-old-hires')
# img_node.get("data-a-dynamic-image") contains json with image urls too, but they are not larger than
# previous 500px image and are often covered by autorip badges (can be removed by cleaning url though)
if better_img_url:
img_url = better_img_url
size_url_hint = img_url.rsplit('.', 2)[1].strip('_')
assert size_url_hint.startswith('SL')
size_url_hint = int(size_url_hint[2:])
size = (size_url_hint, size_url_hint)
check_metadata = CoverImageMetadata.NONE # depends on [control=['if'], data=[]]
await store_in_cache_callback() # depends on [control=['if'], data=[]]
# assume format is always jpg
format = CoverImageFormat.JPEG
# add result
results.append(AmazonCdCoverSourceResult(img_url, size, format, thumbnail_url=thumbnail_url, source=self, rank=rank, check_metadata=check_metadata)) # depends on [control=['for'], data=[]]
return results |
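The cache-friendliness trick buried in the row above (dropping Amazon's volatile qid query key before fetching, so future requests for the same product hit the cache) generalises; a sketch assuming any volatile key can be treated the same way:

import collections
import urllib.parse

def strip_volatile_params(url, volatile=('qid',)):
    parts = urllib.parse.urlsplit(url)
    query = collections.OrderedDict(urllib.parse.parse_qsl(parts.query))
    for key in volatile:
        query.pop(key, None)  # absent keys are fine
    new_query = urllib.parse.urlencode(query)
    return urllib.parse.urlunsplit(parts[:3] + (new_query,) + parts[4:])

# strip_volatile_params('https://example.com/s?k=cd&qid=1716') -> 'https://example.com/s?k=cd'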
def get_slot_offsets(self):
"""
col_offset
- from bottom left corner of 1 to bottom left corner of 2
row_offset
- from bottom left corner of 1 to bottom left corner of 4
TODO: figure out actual X and Y offsets (from origin)
"""
SLOT_OFFSETS = {
'slots': {
'col_offset': 132.50,
'row_offset': 90.5
}
}
slot_settings = SLOT_OFFSETS.get(self.get_deck_slot_types())
row_offset = slot_settings.get('row_offset')
col_offset = slot_settings.get('col_offset')
return (row_offset, col_offset) | def function[get_slot_offsets, parameter[self]]:
constant[
col_offset
- from bottom left corner of 1 to bottom left corner of 2
row_offset
- from bottom left corner of 1 to bottom left corner of 4
TODO: figure out actual X and Y offsets (from origin)
]
variable[SLOT_OFFSETS] assign[=] dictionary[[<ast.Constant object at 0x7da2044c1b70>], [<ast.Dict object at 0x7da2044c3340>]]
variable[slot_settings] assign[=] call[name[SLOT_OFFSETS].get, parameter[call[name[self].get_deck_slot_types, parameter[]]]]
variable[row_offset] assign[=] call[name[slot_settings].get, parameter[constant[row_offset]]]
variable[col_offset] assign[=] call[name[slot_settings].get, parameter[constant[col_offset]]]
return[tuple[[<ast.Name object at 0x7da2044c3b80>, <ast.Name object at 0x7da2044c2b00>]]] | keyword[def] identifier[get_slot_offsets] ( identifier[self] ):
literal[string]
identifier[SLOT_OFFSETS] ={
literal[string] :{
literal[string] : literal[int] ,
literal[string] : literal[int]
}
}
identifier[slot_settings] = identifier[SLOT_OFFSETS] . identifier[get] ( identifier[self] . identifier[get_deck_slot_types] ())
identifier[row_offset] = identifier[slot_settings] . identifier[get] ( literal[string] )
identifier[col_offset] = identifier[slot_settings] . identifier[get] ( literal[string] )
keyword[return] ( identifier[row_offset] , identifier[col_offset] ) | def get_slot_offsets(self):
"""
col_offset
- from bottom left corner of 1 to bottom left corner of 2
row_offset
- from bottom left corner of 1 to bottom left corner of 4
TODO: figure out actual X and Y offsets (from origin)
"""
SLOT_OFFSETS = {'slots': {'col_offset': 132.5, 'row_offset': 90.5}}
slot_settings = SLOT_OFFSETS.get(self.get_deck_slot_types())
row_offset = slot_settings.get('row_offset')
col_offset = slot_settings.get('col_offset')
return (row_offset, col_offset) |
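Illustrative arithmetic only: how the row/column offsets above would place a slot's bottom-left corner, under the assumption of a 3-wide grid numbered left-to-right, bottom-to-top:

ROW_OFFSET = 90.5    # units as in SLOT_OFFSETS above
COL_OFFSET = 132.50

def slot_origin(slot_number, columns=3):
    index = slot_number - 1
    col, row = index % columns, index // columns
    return (col * COL_OFFSET, row * ROW_OFFSET)

# slot_origin(5) -> (132.5, 90.5): one column right and one row up from slot 1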
def get_c_extension(support_legacy=False, system_zstd=False, name='zstd',
warnings_as_errors=False, root=None):
"""Obtain a distutils.extension.Extension for the C extension.
``support_legacy`` controls whether to compile in legacy zstd format support.
``system_zstd`` controls whether to compile against the system zstd library.
For this to work, the system zstd library and headers must match what
python-zstandard is coded against exactly.
``name`` is the module name of the C extension to produce.
``warnings_as_errors`` controls whether compiler warnings are turned into
compiler errors.
``root`` defines a root path that source should be computed as relative
to. This should be the directory with the main ``setup.py`` that is
being invoked. If not defined, paths will be relative to this file.
"""
actual_root = os.path.abspath(os.path.dirname(__file__))
root = root or actual_root
sources = set([os.path.join(actual_root, p) for p in ext_sources])
if not system_zstd:
sources.update([os.path.join(actual_root, p) for p in zstd_sources])
if support_legacy:
sources.update([os.path.join(actual_root, p)
for p in zstd_sources_legacy])
sources = list(sources)
include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
if not system_zstd:
include_dirs.update([os.path.join(actual_root, d)
for d in zstd_includes])
if support_legacy:
include_dirs.update([os.path.join(actual_root, d)
for d in zstd_includes_legacy])
include_dirs = list(include_dirs)
depends = [os.path.join(actual_root, p) for p in zstd_depends]
compiler = distutils.ccompiler.new_compiler()
# Needed for MSVC.
if hasattr(compiler, 'initialize'):
compiler.initialize()
if compiler.compiler_type == 'unix':
compiler_type = 'unix'
elif compiler.compiler_type == 'msvc':
compiler_type = 'msvc'
elif compiler.compiler_type == 'mingw32':
compiler_type = 'mingw32'
else:
raise Exception('unhandled compiler type: %s' %
compiler.compiler_type)
extra_args = ['-DZSTD_MULTITHREAD']
if not system_zstd:
extra_args.append('-DZSTDLIB_VISIBILITY=')
extra_args.append('-DZDICTLIB_VISIBILITY=')
extra_args.append('-DZSTDERRORLIB_VISIBILITY=')
if compiler_type == 'unix':
extra_args.append('-fvisibility=hidden')
if not system_zstd and support_legacy:
extra_args.append('-DZSTD_LEGACY_SUPPORT=1')
if warnings_as_errors:
if compiler_type in ('unix', 'mingw32'):
extra_args.append('-Werror')
elif compiler_type == 'msvc':
extra_args.append('/WX')
else:
assert False
libraries = ['zstd'] if system_zstd else []
# Python 3.7 doesn't like absolute paths. So normalize to relative.
sources = [os.path.relpath(p, root) for p in sources]
include_dirs = [os.path.relpath(p, root) for p in include_dirs]
depends = [os.path.relpath(p, root) for p in depends]
# TODO compile with optimizations.
return Extension(name, sources,
include_dirs=include_dirs,
depends=depends,
extra_compile_args=extra_args,
libraries=libraries) | def function[get_c_extension, parameter[support_legacy, system_zstd, name, warnings_as_errors, root]]:
constant[Obtain a distutils.extension.Extension for the C extension.
``support_legacy`` controls whether to compile in legacy zstd format support.
``system_zstd`` controls whether to compile against the system zstd library.
For this to work, the system zstd library and headers must match what
python-zstandard is coded against exactly.
``name`` is the module name of the C extension to produce.
``warnings_as_errors`` controls whether compiler warnings are turned into
compiler errors.
``root`` defines a root path that source should be computed as relative
to. This should be the directory with the main ``setup.py`` that is
being invoked. If not defined, paths will be relative to this file.
]
variable[actual_root] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.dirname, parameter[name[__file__]]]]]
variable[root] assign[=] <ast.BoolOp object at 0x7da204347730>
variable[sources] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da204347790>]]
if <ast.UnaryOp object at 0x7da204347640> begin[:]
call[name[sources].update, parameter[<ast.ListComp object at 0x7da204345ff0>]]
if name[support_legacy] begin[:]
call[name[sources].update, parameter[<ast.ListComp object at 0x7da204346b90>]]
variable[sources] assign[=] call[name[list], parameter[name[sources]]]
variable[include_dirs] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da2043474f0>]]
if <ast.UnaryOp object at 0x7da204344310> begin[:]
call[name[include_dirs].update, parameter[<ast.ListComp object at 0x7da204346410>]]
if name[support_legacy] begin[:]
call[name[include_dirs].update, parameter[<ast.ListComp object at 0x7da204345a80>]]
variable[include_dirs] assign[=] call[name[list], parameter[name[include_dirs]]]
variable[depends] assign[=] <ast.ListComp object at 0x7da1b23454b0>
variable[compiler] assign[=] call[name[distutils].ccompiler.new_compiler, parameter[]]
if call[name[hasattr], parameter[name[compiler], constant[initialize]]] begin[:]
call[name[compiler].initialize, parameter[]]
if compare[name[compiler].compiler_type equal[==] constant[unix]] begin[:]
variable[compiler_type] assign[=] constant[unix]
variable[extra_args] assign[=] list[[<ast.Constant object at 0x7da20c6e76d0>]]
if <ast.UnaryOp object at 0x7da20c6e72e0> begin[:]
call[name[extra_args].append, parameter[constant[-DZSTDLIB_VISIBILITY=]]]
call[name[extra_args].append, parameter[constant[-DZDICTLIB_VISIBILITY=]]]
call[name[extra_args].append, parameter[constant[-DZSTDERRORLIB_VISIBILITY=]]]
if compare[name[compiler_type] equal[==] constant[unix]] begin[:]
call[name[extra_args].append, parameter[constant[-fvisibility=hidden]]]
if <ast.BoolOp object at 0x7da204345e70> begin[:]
call[name[extra_args].append, parameter[constant[-DZSTD_LEGACY_SUPPORT=1]]]
if name[warnings_as_errors] begin[:]
if compare[name[compiler_type] in tuple[[<ast.Constant object at 0x7da2043447c0>, <ast.Constant object at 0x7da204347010>]]] begin[:]
call[name[extra_args].append, parameter[constant[-Werror]]]
variable[libraries] assign[=] <ast.IfExp object at 0x7da204344b20>
variable[sources] assign[=] <ast.ListComp object at 0x7da204346d10>
variable[include_dirs] assign[=] <ast.ListComp object at 0x7da204344100>
variable[depends] assign[=] <ast.ListComp object at 0x7da204346200>
return[call[name[Extension], parameter[name[name], name[sources]]]] | keyword[def] identifier[get_c_extension] ( identifier[support_legacy] = keyword[False] , identifier[system_zstd] = keyword[False] , identifier[name] = literal[string] ,
identifier[warnings_as_errors] = keyword[False] , identifier[root] = keyword[None] ):
literal[string]
identifier[actual_root] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[__file__] ))
identifier[root] = identifier[root] keyword[or] identifier[actual_root]
identifier[sources] = identifier[set] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[ext_sources] ])
keyword[if] keyword[not] identifier[system_zstd] :
identifier[sources] . identifier[update] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[zstd_sources] ])
keyword[if] identifier[support_legacy] :
identifier[sources] . identifier[update] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[p] )
keyword[for] identifier[p] keyword[in] identifier[zstd_sources_legacy] ])
identifier[sources] = identifier[list] ( identifier[sources] )
identifier[include_dirs] = identifier[set] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[ext_includes] ])
keyword[if] keyword[not] identifier[system_zstd] :
identifier[include_dirs] . identifier[update] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[d] )
keyword[for] identifier[d] keyword[in] identifier[zstd_includes] ])
keyword[if] identifier[support_legacy] :
identifier[include_dirs] . identifier[update] ([ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[d] )
keyword[for] identifier[d] keyword[in] identifier[zstd_includes_legacy] ])
identifier[include_dirs] = identifier[list] ( identifier[include_dirs] )
identifier[depends] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[actual_root] , identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[zstd_depends] ]
identifier[compiler] = identifier[distutils] . identifier[ccompiler] . identifier[new_compiler] ()
keyword[if] identifier[hasattr] ( identifier[compiler] , literal[string] ):
identifier[compiler] . identifier[initialize] ()
keyword[if] identifier[compiler] . identifier[compiler_type] == literal[string] :
identifier[compiler_type] = literal[string]
keyword[elif] identifier[compiler] . identifier[compiler_type] == literal[string] :
identifier[compiler_type] = literal[string]
keyword[elif] identifier[compiler] . identifier[compiler_type] == literal[string] :
identifier[compiler_type] = literal[string]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] %
identifier[compiler] . identifier[compiler_type] )
identifier[extra_args] =[ literal[string] ]
keyword[if] keyword[not] identifier[system_zstd] :
identifier[extra_args] . identifier[append] ( literal[string] )
identifier[extra_args] . identifier[append] ( literal[string] )
identifier[extra_args] . identifier[append] ( literal[string] )
keyword[if] identifier[compiler_type] == literal[string] :
identifier[extra_args] . identifier[append] ( literal[string] )
keyword[if] keyword[not] identifier[system_zstd] keyword[and] identifier[support_legacy] :
identifier[extra_args] . identifier[append] ( literal[string] )
keyword[if] identifier[warnings_as_errors] :
keyword[if] identifier[compiler_type] keyword[in] ( literal[string] , literal[string] ):
identifier[extra_args] . identifier[append] ( literal[string] )
keyword[elif] identifier[compiler_type] == literal[string] :
identifier[extra_args] . identifier[append] ( literal[string] )
keyword[else] :
keyword[assert] keyword[False]
identifier[libraries] =[ literal[string] ] keyword[if] identifier[system_zstd] keyword[else] []
identifier[sources] =[ identifier[os] . identifier[path] . identifier[relpath] ( identifier[p] , identifier[root] ) keyword[for] identifier[p] keyword[in] identifier[sources] ]
identifier[include_dirs] =[ identifier[os] . identifier[path] . identifier[relpath] ( identifier[p] , identifier[root] ) keyword[for] identifier[p] keyword[in] identifier[include_dirs] ]
identifier[depends] =[ identifier[os] . identifier[path] . identifier[relpath] ( identifier[p] , identifier[root] ) keyword[for] identifier[p] keyword[in] identifier[depends] ]
keyword[return] identifier[Extension] ( identifier[name] , identifier[sources] ,
identifier[include_dirs] = identifier[include_dirs] ,
identifier[depends] = identifier[depends] ,
identifier[extra_compile_args] = identifier[extra_args] ,
identifier[libraries] = identifier[libraries] ) | def get_c_extension(support_legacy=False, system_zstd=False, name='zstd', warnings_as_errors=False, root=None):
"""Obtain a distutils.extension.Extension for the C extension.
``support_legacy`` controls whether to compile in legacy zstd format support.
``system_zstd`` controls whether to compile against the system zstd library.
For this to work, the system zstd library and headers must match what
python-zstandard is coded against exactly.
``name`` is the module name of the C extension to produce.
``warnings_as_errors`` controls whether compiler warnings are turned into
compiler errors.
``root`` defines a root path that source should be computed as relative
to. This should be the directory with the main ``setup.py`` that is
being invoked. If not defined, paths will be relative to this file.
"""
actual_root = os.path.abspath(os.path.dirname(__file__))
root = root or actual_root
sources = set([os.path.join(actual_root, p) for p in ext_sources])
if not system_zstd:
sources.update([os.path.join(actual_root, p) for p in zstd_sources])
if support_legacy:
sources.update([os.path.join(actual_root, p) for p in zstd_sources_legacy]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
sources = list(sources)
include_dirs = set([os.path.join(actual_root, d) for d in ext_includes])
if not system_zstd:
include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes])
if support_legacy:
include_dirs.update([os.path.join(actual_root, d) for d in zstd_includes_legacy]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
include_dirs = list(include_dirs)
depends = [os.path.join(actual_root, p) for p in zstd_depends]
compiler = distutils.ccompiler.new_compiler()
# Needed for MSVC.
if hasattr(compiler, 'initialize'):
compiler.initialize() # depends on [control=['if'], data=[]]
if compiler.compiler_type == 'unix':
compiler_type = 'unix' # depends on [control=['if'], data=[]]
elif compiler.compiler_type == 'msvc':
compiler_type = 'msvc' # depends on [control=['if'], data=[]]
elif compiler.compiler_type == 'mingw32':
compiler_type = 'mingw32' # depends on [control=['if'], data=[]]
else:
raise Exception('unhandled compiler type: %s' % compiler.compiler_type)
extra_args = ['-DZSTD_MULTITHREAD']
if not system_zstd:
extra_args.append('-DZSTDLIB_VISIBILITY=')
extra_args.append('-DZDICTLIB_VISIBILITY=')
extra_args.append('-DZSTDERRORLIB_VISIBILITY=')
if compiler_type == 'unix':
extra_args.append('-fvisibility=hidden') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not system_zstd and support_legacy:
extra_args.append('-DZSTD_LEGACY_SUPPORT=1') # depends on [control=['if'], data=[]]
if warnings_as_errors:
if compiler_type in ('unix', 'mingw32'):
extra_args.append('-Werror') # depends on [control=['if'], data=[]]
elif compiler_type == 'msvc':
extra_args.append('/WX') # depends on [control=['if'], data=[]]
else:
assert False # depends on [control=['if'], data=[]]
libraries = ['zstd'] if system_zstd else []
# Python 3.7 doesn't like absolute paths. So normalize to relative.
sources = [os.path.relpath(p, root) for p in sources]
include_dirs = [os.path.relpath(p, root) for p in include_dirs]
depends = [os.path.relpath(p, root) for p in depends]
# TODO compile with optimizations.
return Extension(name, sources, include_dirs=include_dirs, depends=depends, extra_compile_args=extra_args, libraries=libraries) |
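Hypothetical setup.py wiring for the extension builder above; the import path, package name, and layout are placeholders:

import os
from setuptools import setup

from setup_zstd import get_c_extension  # hypothetical import path

ext = get_c_extension(support_legacy=True,
                      system_zstd=False,
                      name='zstd',
                      root=os.path.dirname(os.path.abspath(__file__)))

setup(name='zstandard-example', ext_modules=[ext])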
def population_analysis_summary_report(feature, parent):
"""Retrieve an HTML population analysis table report from a multi exposure
analysis.
"""
_ = feature, parent # NOQA
analysis_dir = get_analysis_dir(exposure_population['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None | def function[population_analysis_summary_report, parameter[feature, parent]]:
constant[Retrieve an HTML population analysis table report from a multi exposure
analysis.
]
variable[_] assign[=] tuple[[<ast.Name object at 0x7da1b0c516f0>, <ast.Name object at 0x7da1b0c520e0>]]
variable[analysis_dir] assign[=] call[name[get_analysis_dir], parameter[call[name[exposure_population]][constant[key]]]]
if name[analysis_dir] begin[:]
return[call[name[get_impact_report_as_string], parameter[name[analysis_dir]]]]
return[constant[None]] | keyword[def] identifier[population_analysis_summary_report] ( identifier[feature] , identifier[parent] ):
literal[string]
identifier[_] = identifier[feature] , identifier[parent]
identifier[analysis_dir] = identifier[get_analysis_dir] ( identifier[exposure_population] [ literal[string] ])
keyword[if] identifier[analysis_dir] :
keyword[return] identifier[get_impact_report_as_string] ( identifier[analysis_dir] )
keyword[return] keyword[None] | def population_analysis_summary_report(feature, parent):
"""Retrieve an HTML population analysis table report from a multi exposure
analysis.
"""
_ = (feature, parent) # NOQA
analysis_dir = get_analysis_dir(exposure_population['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir) # depends on [control=['if'], data=[]]
return None |
def link_or_copy(src, dst, verbosity=0):
"""Try to make a hard link from src to dst and if that fails
copy the file. Hard links save some disk space and linking
should fail fast since no copying is involved.
"""
if verbosity > 0:
log_info("Copying %s -> %s" % (src, dst))
try:
os.link(src, dst)
except (AttributeError, OSError):
try:
shutil.copy(src, dst)
except OSError as msg:
raise PatoolError(msg) | def function[link_or_copy, parameter[src, dst, verbosity]]:
constant[Try to make a hard link from src to dst and if that fails
copy the file. Hard links save some disk space and linking
should fail fast since no copying is involved.
]
if compare[name[verbosity] greater[>] constant[0]] begin[:]
call[name[log_info], parameter[binary_operation[constant[Copying %s -> %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0760f40>, <ast.Name object at 0x7da1b0761570>]]]]]
<ast.Try object at 0x7da1b0763880> | keyword[def] identifier[link_or_copy] ( identifier[src] , identifier[dst] , identifier[verbosity] = literal[int] ):
literal[string]
keyword[if] identifier[verbosity] > literal[int] :
identifier[log_info] ( literal[string] %( identifier[src] , identifier[dst] ))
keyword[try] :
identifier[os] . identifier[link] ( identifier[src] , identifier[dst] )
keyword[except] ( identifier[AttributeError] , identifier[OSError] ):
keyword[try] :
identifier[shutil] . identifier[copy] ( identifier[src] , identifier[dst] )
keyword[except] identifier[OSError] keyword[as] identifier[msg] :
keyword[raise] identifier[PatoolError] ( identifier[msg] ) | def link_or_copy(src, dst, verbosity=0):
"""Try to make a hard link from src to dst and if that fails
copy the file. Hard links save some disk space and linking
should fail fast since no copying is involved.
"""
if verbosity > 0:
log_info('Copying %s -> %s' % (src, dst)) # depends on [control=['if'], data=[]]
try:
os.link(src, dst) # depends on [control=['try'], data=[]]
except (AttributeError, OSError):
try:
shutil.copy(src, dst) # depends on [control=['try'], data=[]]
except OSError as msg:
raise PatoolError(msg) # depends on [control=['except'], data=['msg']] # depends on [control=['except'], data=[]] |
def from_defaults(clz, defaults):
""" Given a dictionary of defaults, ie {attribute: value},
this classmethod constructs a new instance of the class and
merges the defaults"""
if isinstance(defaults, (str, unicode)):
defaults = json.loads(defaults)
c = clz()
for attribute in defaults.keys():
if attribute in c:
value = defaults[attribute]
c[attribute].merge(value)
# in case any values were not specified, attempt to merge them with
# the settings provided by clz.random()
cr = clz.random()
for attribute, value in cr:
try:
c[attribute].merge(value)
except Contradiction:
pass
return c | def function[from_defaults, parameter[clz, defaults]]:
constant[ Given a dictionary of defaults, i.e. {attribute: value},
this classmethod constructs a new instance of the class and
merges the defaults]
if call[name[isinstance], parameter[name[defaults], tuple[[<ast.Name object at 0x7da1b13bb820>, <ast.Name object at 0x7da1b13ba530>]]]] begin[:]
variable[defaults] assign[=] call[name[json].loads, parameter[name[defaults]]]
variable[c] assign[=] call[name[clz], parameter[]]
for taget[name[attribute]] in starred[call[name[defaults].keys, parameter[]]] begin[:]
if compare[name[attribute] in name[c]] begin[:]
variable[value] assign[=] call[name[defaults]][name[attribute]]
call[call[name[c]][name[attribute]].merge, parameter[name[value]]]
variable[cr] assign[=] call[name[clz].random, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b13ba650>, <ast.Name object at 0x7da1b13b9de0>]]] in starred[name[cr]] begin[:]
<ast.Try object at 0x7da1b13b8a60>
return[name[c]] | keyword[def] identifier[from_defaults] ( identifier[clz] , identifier[defaults] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[defaults] ,( identifier[str] , identifier[unicode] )):
identifier[defaults] = identifier[json] . identifier[loads] ( identifier[defaults] )
identifier[c] = identifier[clz] ()
keyword[for] identifier[attribute] keyword[in] identifier[defaults] . identifier[keys] ():
keyword[if] identifier[attribute] keyword[in] identifier[c] :
identifier[value] = identifier[defaults] [ identifier[attribute] ]
identifier[c] [ identifier[attribute] ]. identifier[merge] ( identifier[value] )
identifier[cr] = identifier[clz] . identifier[random] ()
keyword[for] identifier[attribute] , identifier[value] keyword[in] identifier[cr] :
keyword[try] :
identifier[c] [ identifier[attribute] ]. identifier[merge] ( identifier[value] )
keyword[except] identifier[Contradiction] :
keyword[pass]
keyword[return] identifier[c] | def from_defaults(clz, defaults):
""" Given a dictionary of defaults, ie {attribute: value},
this classmethod constructs a new instance of the class and
merges the defaults"""
if isinstance(defaults, (str, unicode)):
defaults = json.loads(defaults) # depends on [control=['if'], data=[]]
c = clz()
for attribute in defaults.keys():
if attribute in c:
value = defaults[attribute]
c[attribute].merge(value) # depends on [control=['if'], data=['attribute', 'c']] # depends on [control=['for'], data=['attribute']] # in case any values were not specified, attempt to merge them with
# the settings provided by clz.random()
cr = clz.random()
for (attribute, value) in cr:
try:
c[attribute].merge(value) # depends on [control=['try'], data=[]]
except Contradiction:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return c |
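The attribute objects this method manipulates are not shown in the row; a toy model of the merge()/Contradiction protocol it relies on, with hypothetical names:

class Contradiction(Exception):
    pass

class Slot(object):
    """Toy stand-in for an attribute value that supports merging."""
    def __init__(self):
        self.value = None
    def merge(self, value):
        if self.value is not None and self.value != value:
            raise Contradiction(value)
        self.value = value

s = Slot()
s.merge('red')       # fills the empty slot
try:
    s.merge('blue')  # conflicts with the explicit default
except Contradiction:
    pass             # from_defaults swallows this for random() fill-ins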
def a2s_rules(server_addr, timeout=2, challenge=0):
"""Get rules from server
:param server_addr: (ip, port) for the server
:type server_addr: tuple
:param timeout: (optional) timeout in seconds
:type timeout: float
:param challenge: (optional) challenge number
:type challenge: int
:raises: :class:`RuntimeError`, :class:`socket.timeout`
:returns: a dict mapping rule names to values
:rtype: :class:`dict`
"""
ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ss.connect(server_addr)
ss.settimeout(timeout)
# request challenge number
if challenge in (-1, 0):
ss.send(_pack('<lci', -1, b'V', challenge))
try:
_, header, challenge = _unpack_from('<lcl', ss.recv(512))
except:
ss.close()
raise
if header != b'A':
raise RuntimeError("Unexpected challenge response")
# request rules
ss.send(_pack('<lci', -1, b'V', challenge))
try:
data = StructReader(_handle_a2s_response(ss))
finally:
ss.close()
header, num_rules = data.unpack('<4xcH')
if header != b'E':
raise RuntimeError("Invalid reponse header - %s" % repr(header))
rules = {}
while len(rules) != num_rules:
name = data.read_cstring()
value = data.read_cstring()
if _re_match(r'^\-?[0-9]+$', value):
value = int(value)
elif _re_match(r'^\-?[0-9]+\.[0-9]+$', value):
value = float(value)
rules[name] = value
return rules | def function[a2s_rules, parameter[server_addr, timeout, challenge]]:
constant[Get rules from server
:param server_addr: (ip, port) for the server
:type server_addr: tuple
:param timeout: (optional) timeout in seconds
:type timeout: float
:param challenge: (optional) challenge number
:type challenge: int
:raises: :class:`RuntimeError`, :class:`socket.timeout`
:returns: a dict mapping rule names to values
:rtype: :class:`dict`
]
variable[ss] assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_DGRAM]]
call[name[ss].connect, parameter[name[server_addr]]]
call[name[ss].settimeout, parameter[name[timeout]]]
if compare[name[challenge] in tuple[[<ast.UnaryOp object at 0x7da1b231f1c0>, <ast.Constant object at 0x7da1b231e140>]]] begin[:]
call[name[ss].send, parameter[call[name[_pack], parameter[constant[<lci], <ast.UnaryOp object at 0x7da1b231d690>, constant[b'V'], name[challenge]]]]]
<ast.Try object at 0x7da1b231d3f0>
if compare[name[header] not_equal[!=] constant[b'A']] begin[:]
<ast.Raise object at 0x7da1b231e9b0>
call[name[ss].send, parameter[call[name[_pack], parameter[constant[<lci], <ast.UnaryOp object at 0x7da1b231e4a0>, constant[b'V'], name[challenge]]]]]
<ast.Try object at 0x7da1b231e0e0>
<ast.Tuple object at 0x7da1b231d2d0> assign[=] call[name[data].unpack, parameter[constant[<4xcH]]]
if compare[name[header] not_equal[!=] constant[b'E']] begin[:]
<ast.Raise object at 0x7da1b231d000>
variable[rules] assign[=] dictionary[[], []]
while compare[call[name[len], parameter[name[rules]]] not_equal[!=] name[num_rules]] begin[:]
variable[name] assign[=] call[name[data].read_cstring, parameter[]]
variable[value] assign[=] call[name[data].read_cstring, parameter[]]
if call[name[_re_match], parameter[constant[^\-?[0-9]+$], name[value]]] begin[:]
variable[value] assign[=] call[name[int], parameter[name[value]]]
call[name[rules]][name[name]] assign[=] name[value]
return[name[rules]] | keyword[def] identifier[a2s_rules] ( identifier[server_addr] , identifier[timeout] = literal[int] , identifier[challenge] = literal[int] ):
literal[string]
identifier[ss] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_DGRAM] )
identifier[ss] . identifier[connect] ( identifier[server_addr] )
identifier[ss] . identifier[settimeout] ( identifier[timeout] )
keyword[if] identifier[challenge] keyword[in] (- literal[int] , literal[int] ):
identifier[ss] . identifier[send] ( identifier[_pack] ( literal[string] ,- literal[int] , literal[string] , identifier[challenge] ))
keyword[try] :
identifier[_] , identifier[header] , identifier[challenge] = identifier[_unpack_from] ( literal[string] , identifier[ss] . identifier[recv] ( literal[int] ))
keyword[except] :
identifier[ss] . identifier[close] ()
keyword[raise]
keyword[if] identifier[header] != literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[ss] . identifier[send] ( identifier[_pack] ( literal[string] ,- literal[int] , literal[string] , identifier[challenge] ))
keyword[try] :
identifier[data] = identifier[StructReader] ( identifier[_handle_a2s_response] ( identifier[ss] ))
keyword[finally] :
identifier[ss] . identifier[close] ()
identifier[header] , identifier[num_rules] = identifier[data] . identifier[unpack] ( literal[string] )
keyword[if] identifier[header] != literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[repr] ( identifier[header] ))
identifier[rules] ={}
keyword[while] identifier[len] ( identifier[rules] )!= identifier[num_rules] :
identifier[name] = identifier[data] . identifier[read_cstring] ()
identifier[value] = identifier[data] . identifier[read_cstring] ()
keyword[if] identifier[_re_match] ( literal[string] , identifier[value] ):
identifier[value] = identifier[int] ( identifier[value] )
keyword[elif] identifier[_re_match] ( literal[string] , identifier[value] ):
identifier[value] = identifier[float] ( identifier[value] )
identifier[rules] [ identifier[name] ]= identifier[value]
keyword[return] identifier[rules] | def a2s_rules(server_addr, timeout=2, challenge=0):
"""Get rules from server
:param server_addr: (ip, port) for the server
:type server_addr: tuple
:param timeout: (optional) timeout in seconds
:type timeout: float
:param challenge: (optional) challenge number
:type challenge: int
:raises: :class:`RuntimeError`, :class:`socket.timeout`
:returns: a dict mapping rule names to values
:rtype: :class:`dict`
"""
ss = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ss.connect(server_addr)
ss.settimeout(timeout)
# request challenge number
if challenge in (-1, 0):
ss.send(_pack('<lci', -1, b'V', challenge))
try:
(_, header, challenge) = _unpack_from('<lcl', ss.recv(512)) # depends on [control=['try'], data=[]]
except:
ss.close()
raise # depends on [control=['except'], data=[]]
if header != b'A':
raise RuntimeError('Unexpected challenge response') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['challenge']]
# request rules
ss.send(_pack('<lci', -1, b'V', challenge))
try:
data = StructReader(_handle_a2s_response(ss)) # depends on [control=['try'], data=[]]
finally:
ss.close()
(header, num_rules) = data.unpack('<4xcH')
if header != b'E':
raise RuntimeError('Invalid response header - %s' % repr(header)) # depends on [control=['if'], data=['header']]
rules = {}
while len(rules) != num_rules:
name = data.read_cstring()
value = data.read_cstring()
if _re_match('^\\-?[0-9]+$', value):
value = int(value) # depends on [control=['if'], data=[]]
elif _re_match('^\\-?[0-9]+\\.[0-9]+$', value):
value = float(value) # depends on [control=['if'], data=[]]
rules[name] = value # depends on [control=['while'], data=[]]
return rules |
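Hypothetical call against a Source server; the address is a documentation placeholder (TEST-NET) and the UDP query may time out:

import socket

try:
    rules = a2s_rules(('203.0.113.10', 27015), timeout=2)
except socket.timeout:
    rules = {}
for name, value in sorted(rules.items()):
    print('%s = %r' % (name, value))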
def get_google_playlist_songs(self, playlist, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Create song list from a user-generated Google Music playlist.
Parameters:
playlist (str): Name or ID of Google Music playlist. Names are case-sensitive.
Google allows multiple playlists with the same name.
If multiple playlists have the same name, the first one encountered is used.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts in the playlist matching criteria and
a list of Google Music song dicts in the playlist filtered out using filter criteria.
"""
logger.info("Loading Google Music playlist songs...")
google_playlist = self.get_google_playlist(playlist)
if not google_playlist:
return [], []
playlist_song_ids = [track['trackId'] for track in google_playlist['tracks']]
playlist_songs = [song for song in self.api.get_all_songs() if song['id'] in playlist_song_ids]
matched_songs, filtered_songs = filter_google_songs(
playlist_songs, include_filters=include_filters, exclude_filters=exclude_filters,
all_includes=all_includes, all_excludes=all_excludes
)
logger.info("Filtered {0} Google playlist songs".format(len(filtered_songs)))
logger.info("Loaded {0} Google playlist songs".format(len(matched_songs)))
return matched_songs, filtered_songs | def function[get_google_playlist_songs, parameter[self, playlist, include_filters, exclude_filters, all_includes, all_excludes]]:
constant[Create song list from a user-generated Google Music playlist.
Parameters:
playlist (str): Name or ID of Google Music playlist. Names are case-sensitive.
Google allows multiple playlists with the same name.
If multiple playlists have the same name, the first one encountered is used.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts in the playlist matching criteria and
a list of Google Music song dicts in the playlist filtered out using filter criteria.
]
call[name[logger].info, parameter[constant[Loading Google Music playlist songs...]]]
variable[google_playlist] assign[=] call[name[self].get_google_playlist, parameter[name[playlist]]]
if <ast.UnaryOp object at 0x7da1aff75a80> begin[:]
return[tuple[[<ast.List object at 0x7da1aff74370>, <ast.List object at 0x7da1aff745e0>]]]
variable[playlist_song_ids] assign[=] <ast.ListComp object at 0x7da1aff74850>
variable[playlist_songs] assign[=] <ast.ListComp object at 0x7da1aff76800>
<ast.Tuple object at 0x7da1aff76770> assign[=] call[name[filter_google_songs], parameter[name[playlist_songs]]]
call[name[logger].info, parameter[call[constant[Filtered {0} Google playlist songs].format, parameter[call[name[len], parameter[name[filtered_songs]]]]]]]
call[name[logger].info, parameter[call[constant[Loaded {0} Google playlist songs].format, parameter[call[name[len], parameter[name[matched_songs]]]]]]]
return[tuple[[<ast.Name object at 0x7da1aff76230>, <ast.Name object at 0x7da1aff759c0>]]] | keyword[def] identifier[get_google_playlist_songs] ( identifier[self] , identifier[playlist] , identifier[include_filters] = keyword[None] , identifier[exclude_filters] = keyword[None] , identifier[all_includes] = keyword[False] , identifier[all_excludes] = keyword[False] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[google_playlist] = identifier[self] . identifier[get_google_playlist] ( identifier[playlist] )
keyword[if] keyword[not] identifier[google_playlist] :
keyword[return] [],[]
identifier[playlist_song_ids] =[ identifier[track] [ literal[string] ] keyword[for] identifier[track] keyword[in] identifier[google_playlist] [ literal[string] ]]
identifier[playlist_songs] =[ identifier[song] keyword[for] identifier[song] keyword[in] identifier[self] . identifier[api] . identifier[get_all_songs] () keyword[if] identifier[song] [ literal[string] ] keyword[in] identifier[playlist_song_ids] ]
identifier[matched_songs] , identifier[filtered_songs] = identifier[filter_google_songs] (
identifier[playlist_songs] , identifier[include_filters] = identifier[include_filters] , identifier[exclude_filters] = identifier[exclude_filters] ,
identifier[all_includes] = identifier[all_includes] , identifier[all_excludes] = identifier[all_excludes]
)
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[filtered_songs] )))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[len] ( identifier[matched_songs] )))
keyword[return] identifier[matched_songs] , identifier[filtered_songs] | def get_google_playlist_songs(self, playlist, include_filters=None, exclude_filters=None, all_includes=False, all_excludes=False):
"""Create song list from a user-generated Google Music playlist.
Parameters:
playlist (str): Name or ID of Google Music playlist. Names are case-sensitive.
Google allows multiple playlists with the same name.
If multiple playlists have the same name, the first one encountered is used.
include_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values don't match any of the given patterns.
exclude_filters (list): A list of ``(field, pattern)`` tuples.
Fields are any valid Google Music metadata field available to the Musicmanager client.
Patterns are Python regex patterns.
Google Music songs are filtered out if the given metadata field values match any of the given patterns.
all_includes (bool): If ``True``, all include_filters criteria must match to include a song.
all_excludes (bool): If ``True``, all exclude_filters criteria must match to exclude a song.
Returns:
A list of Google Music song dicts in the playlist matching criteria and
a list of Google Music song dicts in the playlist filtered out using filter criteria.
"""
logger.info('Loading Google Music playlist songs...')
google_playlist = self.get_google_playlist(playlist)
if not google_playlist:
return ([], []) # depends on [control=['if'], data=[]]
playlist_song_ids = [track['trackId'] for track in google_playlist['tracks']]
playlist_songs = [song for song in self.api.get_all_songs() if song['id'] in playlist_song_ids]
(matched_songs, filtered_songs) = filter_google_songs(playlist_songs, include_filters=include_filters, exclude_filters=exclude_filters, all_includes=all_includes, all_excludes=all_excludes)
logger.info('Filtered {0} Google playlist songs'.format(len(filtered_songs)))
logger.info('Loaded {0} Google playlist songs'.format(len(matched_songs)))
return (matched_songs, filtered_songs) |
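# Usage sketch (assumptions: `wrapper` is an instance of the surrounding class
# with an authenticated Mobileclient at `wrapper.api`; the playlist name and
# regex filters below are made-up examples).
matched, filtered = wrapper.get_google_playlist_songs(
    'Road Trip',
    include_filters=[('artist', r'(?i)daft punk')],
    exclude_filters=[('title', r'(?i)remix')],
)
print('kept {0}, dropped {1}'.format(len(matched), len(filtered)))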
def set_url(self, url):
"""Sets the url.
    arg: url (string): the new url
raise: InvalidArgument - ``url`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.set_url_template
if self.get_url_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_string(
url,
self.get_url_metadata()):
raise errors.InvalidArgument()
self._my_map['url'] = url | def function[set_url, parameter[self, url]]:
constant[Sets the url.
    arg: url (string): the new url
raise: InvalidArgument - ``url`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
]
if call[call[name[self].get_url_metadata, parameter[]].is_read_only, parameter[]] begin[:]
<ast.Raise object at 0x7da18f58d2a0>
if <ast.UnaryOp object at 0x7da18c4cc7c0> begin[:]
<ast.Raise object at 0x7da18c4ce0b0>
call[name[self]._my_map][constant[url]] assign[=] name[url] | keyword[def] identifier[set_url] ( identifier[self] , identifier[url] ):
literal[string]
keyword[if] identifier[self] . identifier[get_url_metadata] (). identifier[is_read_only] ():
keyword[raise] identifier[errors] . identifier[NoAccess] ()
keyword[if] keyword[not] identifier[self] . identifier[_is_valid_string] (
identifier[url] ,
identifier[self] . identifier[get_url_metadata] ()):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ()
identifier[self] . identifier[_my_map] [ literal[string] ]= identifier[url] | def set_url(self, url):
"""Sets the url.
    arg: url (string): the new url
raise: InvalidArgument - ``url`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``url`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.repository.AssetContentForm.set_url_template
if self.get_url_metadata().is_read_only():
raise errors.NoAccess() # depends on [control=['if'], data=[]]
if not self._is_valid_string(url, self.get_url_metadata()):
raise errors.InvalidArgument() # depends on [control=['if'], data=[]]
self._my_map['url'] = url |
async def save(self, fp, *, seek_begin=True, use_cached=False):
"""|coro|
Saves this attachment into a file-like object.
Parameters
-----------
fp: Union[BinaryIO, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
            on some types of attachments.
Raises
--------
HTTPException
Saving the attachment failed.
NotFound
The attachment was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
url = self.proxy_url if use_cached else self.url
data = await self._http.get_from_cdn(url)
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data) | <ast.AsyncFunctionDef object at 0x7da1b1ea3fd0> | keyword[async] keyword[def] identifier[save] ( identifier[self] , identifier[fp] ,*, identifier[seek_begin] = keyword[True] , identifier[use_cached] = keyword[False] ):
literal[string]
identifier[url] = identifier[self] . identifier[proxy_url] keyword[if] identifier[use_cached] keyword[else] identifier[self] . identifier[url]
identifier[data] = keyword[await] identifier[self] . identifier[_http] . identifier[get_from_cdn] ( identifier[url] )
keyword[if] identifier[isinstance] ( identifier[fp] , identifier[io] . identifier[IOBase] ) keyword[and] identifier[fp] . identifier[writable] ():
identifier[written] = identifier[fp] . identifier[write] ( identifier[data] )
keyword[if] identifier[seek_begin] :
identifier[fp] . identifier[seek] ( literal[int] )
keyword[return] identifier[written]
keyword[else] :
keyword[with] identifier[open] ( identifier[fp] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[f] . identifier[write] ( identifier[data] ) | async def save(self, fp, *, seek_begin=True, use_cached=False):
"""|coro|
Saves this attachment into a file-like object.
Parameters
-----------
fp: Union[BinaryIO, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
            on some types of attachments.
Raises
--------
HTTPException
Saving the attachment failed.
NotFound
The attachment was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
url = self.proxy_url if use_cached else self.url
data = await self._http.get_from_cdn(url)
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0) # depends on [control=['if'], data=[]]
return written # depends on [control=['if'], data=[]]
else:
with open(fp, 'wb') as f:
return f.write(data) # depends on [control=['with'], data=['f']] |
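# Usage sketch (assumption: `attachment` is an instance of the surrounding
# class, e.g. taken from message.attachments in a discord.py-style bot).
import io

async def archive(attachment):
    written = await attachment.save('attachment.bin')   # save by filename
    buf = io.BytesIO()
    await attachment.save(buf, use_cached=True)         # or into a buffer
    return written, buf.read()                          # buffer is rewound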
def check_sub_path_create(sub_path):
"""
    Check whether a given sub-path exists under the current path; create it if it does not.
    :param:
        * sub_path: (string) name of the sub-path one level below the current path
    :return:
        * return type (tuple), holding two values
        * True: path exists, False: no need to create
        * False: path does not exist, True: created successfully
    Example::
        print('--- check_sub_path_create demo ---')
        # define the sub-path name
        sub_path = 'demo_sub_dir'
        # check whether the sub-path exists under the current path; create it if not
        print('check sub path:', sub_path)
        result = check_sub_path_create(sub_path)
        print(result)
        print('---')
    Output::
        --- check_sub_path_create demo ---
        check sub path: demo_sub_dir
        (True, False)
        ---
    """
    # get the current path
    temp_path = pathlib.Path()
    cur_path = temp_path.resolve()
    # build the path that contains sub_path
    path = cur_path / pathlib.Path(sub_path)
    # check whether a path with sub_path exists
    if path.exists():
        # return True: path exists, False: no need to create
        return True, False
    else:
        path.mkdir(parents=True)
        # return False: path did not exist, True: path has been created
return False, True | def function[check_sub_path_create, parameter[sub_path]]:
constant[
Check whether a given sub-path exists under the current path; create it if it does not.
    :param:
        * sub_path: (string) name of the sub-path one level below the current path
    :return:
        * return type (tuple), holding two values
        * True: path exists, False: no need to create
        * False: path does not exist, True: created successfully
    Example::
        print('--- check_sub_path_create demo ---')
        # define the sub-path name
        sub_path = 'demo_sub_dir'
        # check whether the sub-path exists under the current path; create it if not
        print('check sub path:', sub_path)
        result = check_sub_path_create(sub_path)
        print(result)
        print('---')
    Output::
        --- check_sub_path_create demo ---
        check sub path: demo_sub_dir
        (True, False)
        ---
]
variable[temp_path] assign[=] call[name[pathlib].Path, parameter[]]
variable[cur_path] assign[=] call[name[temp_path].resolve, parameter[]]
variable[path] assign[=] binary_operation[name[cur_path] / call[name[pathlib].Path, parameter[name[sub_path]]]]
if call[name[path].exists, parameter[]] begin[:]
return[tuple[[<ast.Constant object at 0x7da1b08e7070>, <ast.Constant object at 0x7da1b08e64d0>]]] | keyword[def] identifier[check_sub_path_create] ( identifier[sub_path] ):
literal[string]
identifier[temp_path] = identifier[pathlib] . identifier[Path] ()
identifier[cur_path] = identifier[temp_path] . identifier[resolve] ()
identifier[path] = identifier[cur_path] / identifier[pathlib] . identifier[Path] ( identifier[sub_path] )
keyword[if] identifier[path] . identifier[exists] ():
keyword[return] keyword[True] , keyword[False]
keyword[else] :
identifier[path] . identifier[mkdir] ( identifier[parents] = keyword[True] )
keyword[return] keyword[False] , keyword[True] | def check_sub_path_create(sub_path):
"""
    Check whether a given sub-path exists under the current path; create it if it does not.
    :param:
        * sub_path: (string) name of the sub-path one level below the current path
    :return:
        * return type (tuple), holding two values
        * True: path exists, False: no need to create
        * False: path does not exist, True: created successfully
    Example::
        print('--- check_sub_path_create demo ---')
        # define the sub-path name
        sub_path = 'demo_sub_dir'
        # check whether the sub-path exists under the current path; create it if not
        print('check sub path:', sub_path)
        result = check_sub_path_create(sub_path)
        print(result)
        print('---')
    Output::
        --- check_sub_path_create demo ---
        check sub path: demo_sub_dir
        (True, False)
        ---
"""
    # get the current path
temp_path = pathlib.Path()
cur_path = temp_path.resolve()
    # build the path that contains sub_path
path = cur_path / pathlib.Path(sub_path)
    # check whether a path with sub_path exists
if path.exists():
    # return True: path exists, False: no need to create
return (True, False) # depends on [control=['if'], data=[]]
else:
path.mkdir(parents=True)
    # return False: path did not exist, True: path has been created
return (False, True) |
def project_search(auth=None, **kwargs):
'''
Search projects
CLI Example:
.. code-block:: bash
salt '*' keystoneng.project_search
salt '*' keystoneng.project_search name=project1
salt '*' keystoneng.project_search domain_id=b62e76fbeeff4e8fb77073f591cf211e
'''
cloud = get_openstack_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_projects(**kwargs) | def function[project_search, parameter[auth]]:
constant[
Search projects
CLI Example:
.. code-block:: bash
salt '*' keystoneng.project_search
salt '*' keystoneng.project_search name=project1
salt '*' keystoneng.project_search domain_id=b62e76fbeeff4e8fb77073f591cf211e
]
variable[cloud] assign[=] call[name[get_openstack_cloud], parameter[name[auth]]]
variable[kwargs] assign[=] call[name[_clean_kwargs], parameter[]]
return[call[name[cloud].search_projects, parameter[]]] | keyword[def] identifier[project_search] ( identifier[auth] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[cloud] = identifier[get_openstack_cloud] ( identifier[auth] )
identifier[kwargs] = identifier[_clean_kwargs] (** identifier[kwargs] )
keyword[return] identifier[cloud] . identifier[search_projects] (** identifier[kwargs] ) | def project_search(auth=None, **kwargs):
"""
Search projects
CLI Example:
.. code-block:: bash
salt '*' keystoneng.project_search
salt '*' keystoneng.project_search name=project1
salt '*' keystoneng.project_search domain_id=b62e76fbeeff4e8fb77073f591cf211e
"""
cloud = get_openstack_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_projects(**kwargs) |
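# Usage sketch (assumption: running in a Salt execution-module context where
# the openstack/keystoneng auth profile is already configured).
for project in project_search(name='project1'):
    print(project['id'], project['name'])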
def rolling_percentileofscore(series, window, min_periods=None):
"""Computue the score percentile for the specified window."""
import scipy.stats as stats
def _percentile(arr):
score = arr[-1]
vals = arr[:-1]
return stats.percentileofscore(vals, score)
notnull = series.dropna()
min_periods = min_periods or window
if notnull.empty:
return pd.Series(np.nan, index=series.index)
else:
return pd.rolling_apply(notnull, window, _percentile, min_periods=min_periods).reindex(series.index) | def function[rolling_percentileofscore, parameter[series, window, min_periods]]:
constant[Compute the score percentile for the specified window.]
import module[scipy.stats] as alias[stats]
def function[_percentile, parameter[arr]]:
variable[score] assign[=] call[name[arr]][<ast.UnaryOp object at 0x7da1b1e58700>]
variable[vals] assign[=] call[name[arr]][<ast.Slice object at 0x7da1b1e586a0>]
return[call[name[stats].percentileofscore, parameter[name[vals], name[score]]]]
variable[notnull] assign[=] call[name[series].dropna, parameter[]]
variable[min_periods] assign[=] <ast.BoolOp object at 0x7da1b1e5b340>
if name[notnull].empty begin[:]
return[call[name[pd].Series, parameter[name[np].nan]]] | keyword[def] identifier[rolling_percentileofscore] ( identifier[series] , identifier[window] , identifier[min_periods] = keyword[None] ):
literal[string]
keyword[import] identifier[scipy] . identifier[stats] keyword[as] identifier[stats]
keyword[def] identifier[_percentile] ( identifier[arr] ):
identifier[score] = identifier[arr] [- literal[int] ]
identifier[vals] = identifier[arr] [:- literal[int] ]
keyword[return] identifier[stats] . identifier[percentileofscore] ( identifier[vals] , identifier[score] )
identifier[notnull] = identifier[series] . identifier[dropna] ()
identifier[min_periods] = identifier[min_periods] keyword[or] identifier[window]
keyword[if] identifier[notnull] . identifier[empty] :
keyword[return] identifier[pd] . identifier[Series] ( identifier[np] . identifier[nan] , identifier[index] = identifier[series] . identifier[index] )
keyword[else] :
keyword[return] identifier[pd] . identifier[rolling_apply] ( identifier[notnull] , identifier[window] , identifier[_percentile] , identifier[min_periods] = identifier[min_periods] ). identifier[reindex] ( identifier[series] . identifier[index] ) | def rolling_percentileofscore(series, window, min_periods=None):
"""Computue the score percentile for the specified window."""
import scipy.stats as stats
def _percentile(arr):
score = arr[-1]
vals = arr[:-1]
return stats.percentileofscore(vals, score)
notnull = series.dropna()
min_periods = min_periods or window
if notnull.empty:
return pd.Series(np.nan, index=series.index) # depends on [control=['if'], data=[]]
else:
return pd.rolling_apply(notnull, window, _percentile, min_periods=min_periods).reindex(series.index) |
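# Usage sketch (note: pd.rolling_apply was deprecated and later removed in
# pandas 0.23; on modern pandas the equivalent call is
# series.rolling(window, min_periods=...).apply(...), so this example assumes
# the legacy pandas this module targets).
import numpy as np
import pandas as pd

s = pd.Series(np.random.randn(250))
pct = rolling_percentileofscore(s, window=20)
print(pct.dropna().head())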
def extract_objects(self, fname, type_filter=None):
'''Extract objects from a source file
Args:
fname(str): Name of file to read from
type_filter (class, optional): Object class to filter results
Returns:
List of objects extracted from the file.
'''
objects = []
if fname in self.object_cache:
objects = self.object_cache[fname]
else:
with io.open(fname, 'rt', encoding='utf-8') as fh:
text = fh.read()
objects = parse_verilog(text)
self.object_cache[fname] = objects
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)]
return objects | def function[extract_objects, parameter[self, fname, type_filter]]:
constant[Extract objects from a source file
Args:
fname(str): Name of file to read from
type_filter (class, optional): Object class to filter results
Returns:
List of objects extracted from the file.
]
variable[objects] assign[=] list[[]]
if compare[name[fname] in name[self].object_cache] begin[:]
variable[objects] assign[=] call[name[self].object_cache][name[fname]]
if name[type_filter] begin[:]
variable[objects] assign[=] <ast.ListComp object at 0x7da1b11da140>
return[name[objects]] | keyword[def] identifier[extract_objects] ( identifier[self] , identifier[fname] , identifier[type_filter] = keyword[None] ):
literal[string]
identifier[objects] =[]
keyword[if] identifier[fname] keyword[in] identifier[self] . identifier[object_cache] :
identifier[objects] = identifier[self] . identifier[object_cache] [ identifier[fname] ]
keyword[else] :
keyword[with] identifier[io] . identifier[open] ( identifier[fname] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[fh] :
identifier[text] = identifier[fh] . identifier[read] ()
identifier[objects] = identifier[parse_verilog] ( identifier[text] )
identifier[self] . identifier[object_cache] [ identifier[fname] ]= identifier[objects]
keyword[if] identifier[type_filter] :
identifier[objects] =[ identifier[o] keyword[for] identifier[o] keyword[in] identifier[objects] keyword[if] identifier[isinstance] ( identifier[o] , identifier[type_filter] )]
keyword[return] identifier[objects] | def extract_objects(self, fname, type_filter=None):
"""Extract objects from a source file
Args:
fname(str): Name of file to read from
type_filter (class, optional): Object class to filter results
Returns:
List of objects extracted from the file.
"""
objects = []
if fname in self.object_cache:
objects = self.object_cache[fname] # depends on [control=['if'], data=['fname']]
else:
with io.open(fname, 'rt', encoding='utf-8') as fh:
text = fh.read()
objects = parse_verilog(text)
self.object_cache[fname] = objects # depends on [control=['with'], data=['fh']]
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)] # depends on [control=['if'], data=[]]
return objects |
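# Usage sketch (assumptions: `parser` is an instance of the surrounding class
# and VerilogModule is the object type this package's parse_verilog produces).
modules = parser.extract_objects('alu.v', type_filter=VerilogModule)
for mod in modules:
    print(mod.name)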
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0] | def function[src_new, parameter[converter_type, channels]]:
constant[Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
]
variable[error] assign[=] call[name[ffi].new, parameter[constant[int*]]]
variable[state] assign[=] call[name[_lib].src_new, parameter[name[converter_type], name[channels], name[error]]]
return[tuple[[<ast.Name object at 0x7da1b0f3d6f0>, <ast.Subscript object at 0x7da1b0f3e4a0>]]] | keyword[def] identifier[src_new] ( identifier[converter_type] , identifier[channels] ):
literal[string]
identifier[error] = identifier[ffi] . identifier[new] ( literal[string] )
identifier[state] = identifier[_lib] . identifier[src_new] ( identifier[converter_type] , identifier[channels] , identifier[error] )
keyword[return] identifier[state] , identifier[error] [ literal[int] ] | def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return (state, error[0]) |
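# Usage sketch (assumptions: this module's ffi/_lib wrap libsamplerate and
# converter type 0 maps to SRC_SINC_BEST_QUALITY as in the C API).
state, err = src_new(0, channels=2)
if err:
    raise RuntimeError('src_new failed with error code {0}'.format(err))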
def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others)) | def function[difference, parameter[self]]:
constant[Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
]
return[call[name[self].copy, parameter[call[call[name[super], parameter[name[NGram], name[self]]].difference, parameter[<ast.Starred object at 0x7da1b0f11fc0>]]]]] | keyword[def] identifier[difference] ( identifier[self] ,* identifier[others] ):
literal[string]
keyword[return] identifier[self] . identifier[copy] ( identifier[super] ( identifier[NGram] , identifier[self] ). identifier[difference] (* identifier[others] )) | def difference(self, *others):
"""Return the difference of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.difference(b))
['eggs']
"""
return self.copy(super(NGram, self).difference(*others)) |
def restore_definition(self, project, definition_id, deleted):
"""RestoreDefinition.
Restores a deleted definition
:param str project: Project ID or project name
:param int definition_id: The identifier of the definition to restore.
:param bool deleted: When false, restores a deleted definition.
:rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
response = self._send(http_method='PATCH',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response) | def function[restore_definition, parameter[self, project, definition_id, deleted]]:
constant[RestoreDefinition.
Restores a deleted definition
:param str project: Project ID or project name
:param int definition_id: The identifier of the definition to restore.
:param bool deleted: When false, restores a deleted definition.
:rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
if compare[name[definition_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[definitionId]] assign[=] call[name[self]._serialize.url, parameter[constant[definition_id], name[definition_id], constant[int]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[deleted] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[deleted]] assign[=] call[name[self]._serialize.query, parameter[constant[deleted], name[deleted], constant[bool]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[BuildDefinition], name[response]]]] | keyword[def] identifier[restore_definition] ( identifier[self] , identifier[project] , identifier[definition_id] , identifier[deleted] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
keyword[if] identifier[definition_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[definition_id] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[deleted] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[deleted] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def restore_definition(self, project, definition_id, deleted):
"""RestoreDefinition.
Restores a deleted definition
:param str project: Project ID or project name
:param int definition_id: The identifier of the definition to restore.
:param bool deleted: When false, restores a deleted definition.
:rtype: :class:`<BuildDefinition> <azure.devops.v5_0.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int') # depends on [control=['if'], data=['definition_id']]
query_parameters = {}
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool') # depends on [control=['if'], data=['deleted']]
response = self._send(http_method='PATCH', location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6', version='5.0', route_values=route_values, query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response) |
def create_serving_logger() -> Logger:
"""Create a logger for serving.
This creates a logger named quart.serving.
"""
logger = getLogger('quart.serving')
if logger.level == NOTSET:
logger.setLevel(INFO)
logger.addHandler(serving_handler)
return logger | def function[create_serving_logger, parameter[]]:
constant[Create a logger for serving.
This creates a logger named quart.serving.
]
variable[logger] assign[=] call[name[getLogger], parameter[constant[quart.serving]]]
if compare[name[logger].level equal[==] name[NOTSET]] begin[:]
call[name[logger].setLevel, parameter[name[INFO]]]
call[name[logger].addHandler, parameter[name[serving_handler]]]
return[name[logger]] | keyword[def] identifier[create_serving_logger] ()-> identifier[Logger] :
literal[string]
identifier[logger] = identifier[getLogger] ( literal[string] )
keyword[if] identifier[logger] . identifier[level] == identifier[NOTSET] :
identifier[logger] . identifier[setLevel] ( identifier[INFO] )
identifier[logger] . identifier[addHandler] ( identifier[serving_handler] )
keyword[return] identifier[logger] | def create_serving_logger() -> Logger:
"""Create a logger for serving.
This creates a logger named quart.serving.
"""
logger = getLogger('quart.serving')
if logger.level == NOTSET:
logger.setLevel(INFO) # depends on [control=['if'], data=[]]
logger.addHandler(serving_handler)
return logger |
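# Usage sketch (no assumptions beyond this module's imports). Note each call
# appends serving_handler again, so the logger is meant to be created once.
access_logger = create_serving_logger()
access_logger.info('serving on http://127.0.0.1:5000')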
def unpack_data(data):
"""Unpack data returned by the net's iterator into a 2-tuple.
If the wrong number of items is returned, raise a helpful error
message.
"""
    # Note: This function cannot detect the case where a user returns a
    # single item that happens to have length 2 (e.g. because the batch
    # size is 2). In that case, the item is erroneously split into X and y.
try:
X, y = data
return X, y
except ValueError:
# if a 1-tuple/list or something else like a torch tensor
if not isinstance(data, (tuple, list)) or len(data) < 2:
raise ValueError(ERROR_MSG_1_ITEM)
raise ValueError(ERROR_MSG_MORE_THAN_2_ITEMS.format(len(data))) | def function[unpack_data, parameter[data]]:
constant[Unpack data returned by the net's iterator into a 2-tuple.
If the wrong number of items is returned, raise a helpful error
message.
]
<ast.Try object at 0x7da18dc07ac0> | keyword[def] identifier[unpack_data] ( identifier[data] ):
literal[string]
keyword[try] :
identifier[X] , identifier[y] = identifier[data]
keyword[return] identifier[X] , identifier[y]
keyword[except] identifier[ValueError] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] ,( identifier[tuple] , identifier[list] )) keyword[or] identifier[len] ( identifier[data] )< literal[int] :
keyword[raise] identifier[ValueError] ( identifier[ERROR_MSG_1_ITEM] )
keyword[raise] identifier[ValueError] ( identifier[ERROR_MSG_MORE_THAN_2_ITEMS] . identifier[format] ( identifier[len] ( identifier[data] ))) | def unpack_data(data):
"""Unpack data returned by the net's iterator into a 2-tuple.
If the wrong number of items is returned, raise a helpful error
message.
"""
    # Note: This function cannot detect the case where a user returns a
    # single item that happens to have length 2 (e.g. because the batch
    # size is 2). In that case, the item is erroneously split into X and y.
try:
(X, y) = data
return (X, y) # depends on [control=['try'], data=[]]
except ValueError:
# if a 1-tuple/list or something else like a torch tensor
if not isinstance(data, (tuple, list)) or len(data) < 2:
raise ValueError(ERROR_MSG_1_ITEM) # depends on [control=['if'], data=[]]
raise ValueError(ERROR_MSG_MORE_THAN_2_ITEMS.format(len(data))) # depends on [control=['except'], data=[]] |
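# Usage sketch (no external assumptions): a two-item batch unpacks cleanly,
# while a one-item batch raises with the module's ERROR_MSG_1_ITEM text.
X, y = unpack_data(([0.1, 0.2], [0, 1]))
try:
    unpack_data(([0.1, 0.2],))
except ValueError as exc:
    print(exc)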
def from_external(external=H2OFrame):
"""
    Creates a new H2OWord2vecEstimator based on an external model.
:param external: H2OFrame with an external model
:return: H2OWord2vecEstimator instance representing the external model
"""
w2v_model = H2OWord2vecEstimator(pre_trained=external)
w2v_model.train()
return w2v_model | def function[from_external, parameter[external]]:
constant[
Creates a new H2OWord2vecEstimator based on an external model.
:param external: H2OFrame with an external model
:return: H2OWord2vecEstimator instance representing the external model
]
variable[w2v_model] assign[=] call[name[H2OWord2vecEstimator], parameter[]]
call[name[w2v_model].train, parameter[]]
return[name[w2v_model]] | keyword[def] identifier[from_external] ( identifier[external] = identifier[H2OFrame] ):
literal[string]
identifier[w2v_model] = identifier[H2OWord2vecEstimator] ( identifier[pre_trained] = identifier[external] )
identifier[w2v_model] . identifier[train] ()
keyword[return] identifier[w2v_model] | def from_external(external=H2OFrame):
"""
Creates new H2OWord2vecEstimator based on an external model.
:param external: H2OFrame with an external model
:return: H2OWord2vecEstimator instance representing the external model
"""
w2v_model = H2OWord2vecEstimator(pre_trained=external)
w2v_model.train()
return w2v_model |
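# Usage sketch (assumptions: an h2o cluster session is up and `embeddings` is
# an H2OFrame of pre-trained word vectors; the method is assumed to be exposed
# as a factory on H2OWord2vecEstimator).
w2v = H2OWord2vecEstimator.from_external(external=embeddings)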
def skip_whitespace(self, newlines=0):
"""Moves the position forwards to the next non newline space character.
If newlines >= 1 include newlines as spaces.
"""
if newlines:
while not self.eos:
if self.get_char().isspace():
self.eat_length(1)
else:
break
else:
char = ''
while not self.eos:
char = self.get_char()
if char.isspace() and char != '\n':
self.eat_length(1)
else:
break | def function[skip_whitespace, parameter[self, newlines]]:
constant[Moves the position forward to the next non-whitespace character.
    If newlines >= 1, newlines are also treated as skippable whitespace.
]
if name[newlines] begin[:]
while <ast.UnaryOp object at 0x7da1b27e0850> begin[:]
if call[call[name[self].get_char, parameter[]].isspace, parameter[]] begin[:]
call[name[self].eat_length, parameter[constant[1]]] | keyword[def] identifier[skip_whitespace] ( identifier[self] , identifier[newlines] = literal[int] ):
literal[string]
keyword[if] identifier[newlines] :
keyword[while] keyword[not] identifier[self] . identifier[eos] :
keyword[if] identifier[self] . identifier[get_char] (). identifier[isspace] ():
identifier[self] . identifier[eat_length] ( literal[int] )
keyword[else] :
keyword[break]
keyword[else] :
identifier[char] = literal[string]
keyword[while] keyword[not] identifier[self] . identifier[eos] :
identifier[char] = identifier[self] . identifier[get_char] ()
keyword[if] identifier[char] . identifier[isspace] () keyword[and] identifier[char] != literal[string] :
identifier[self] . identifier[eat_length] ( literal[int] )
keyword[else] :
keyword[break] | def skip_whitespace(self, newlines=0):
"""Moves the position forwards to the next non newline space character.
If newlines >= 1 include newlines as spaces.
"""
if newlines:
while not self.eos:
if self.get_char().isspace():
self.eat_length(1) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]] # depends on [control=['if'], data=[]]
else:
char = ''
while not self.eos:
char = self.get_char()
if char.isspace() and char != '\n':
self.eat_length(1) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]] |
def _inject_function_into_js(context, name, func):
"""
Inject a Python function into the global scope of a dukpy JavaScript interpreter context.
:type context: dukpy.JSInterpreter
:param name: Name to give the function in JavaScript.
:param func: Python function.
"""
context.export_function(name, func)
context.evaljs(""";
{name} = function() {{
var args = Array.prototype.slice.call(arguments);
args.unshift('{name}');
return call_python.apply(null, args);
}};
""".format(name=name)) | def function[_inject_function_into_js, parameter[context, name, func]]:
constant[
Inject a Python function into the global scope of a dukpy JavaScript interpreter context.
:type context: dukpy.JSInterpreter
:param name: Name to give the function in JavaScript.
:param func: Python function.
]
call[name[context].export_function, parameter[name[name], name[func]]]
call[name[context].evaljs, parameter[call[constant[;
{name} = function() {{
var args = Array.prototype.slice.call(arguments);
args.unshift('{name}');
return call_python.apply(null, args);
}};
].format, parameter[]]]] | keyword[def] identifier[_inject_function_into_js] ( identifier[context] , identifier[name] , identifier[func] ):
literal[string]
identifier[context] . identifier[export_function] ( identifier[name] , identifier[func] )
identifier[context] . identifier[evaljs] ( literal[string] . identifier[format] ( identifier[name] = identifier[name] )) | def _inject_function_into_js(context, name, func):
"""
Inject a Python function into the global scope of a dukpy JavaScript interpreter context.
:type context: dukpy.JSInterpreter
:param name: Name to give the function in JavaScript.
:param func: Python function.
"""
context.export_function(name, func)
context.evaljs(";\n {name} = function() {{\n var args = Array.prototype.slice.call(arguments);\n args.unshift('{name}');\n return call_python.apply(null, args);\n }};\n ".format(name=name)) |
def raw_broadcast(self, destination, message, **kwargs):
"""Broadcast a raw (unmangled) message.
This may cause errors if the receiver expects a mangled message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
"""
self._broadcast(destination, message, **kwargs) | def function[raw_broadcast, parameter[self, destination, message]]:
constant[Broadcast a raw (unmangled) message.
This may cause errors if the receiver expects a mangled message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
]
call[name[self]._broadcast, parameter[name[destination], name[message]]] | keyword[def] identifier[raw_broadcast] ( identifier[self] , identifier[destination] , identifier[message] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_broadcast] ( identifier[destination] , identifier[message] ,** identifier[kwargs] ) | def raw_broadcast(self, destination, message, **kwargs):
"""Broadcast a raw (unmangled) message.
This may cause errors if the receiver expects a mangled message.
:param destination: Topic name to send to
:param message: Either a string or a serializable object to be sent
:param **kwargs: Further parameters for the transport layer. For example
delay: Delay transport of message by this many seconds
headers: Optional dictionary of header entries
expiration: Optional expiration time, relative to sending time
transaction: Transaction ID if message should be part of a
transaction
"""
self._broadcast(destination, message, **kwargs) |
def fetch_file(self, in_path, out_path):
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        ''' fetch a file from local to local -- for compatibility '''
self.put_file(in_path, out_path) | def function[fetch_file, parameter[self, in_path, out_path]]:
call[name[vvv], parameter[binary_operation[constant[FETCH %s TO %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b15b1ea0>, <ast.Name object at 0x7da1b15b23e0>]]]]]
    constant[ fetch a file from local to local -- for compatibility ]
call[name[self].put_file, parameter[name[in_path], name[out_path]]] | keyword[def] identifier[fetch_file] ( identifier[self] , identifier[in_path] , identifier[out_path] ):
identifier[vvv] ( literal[string] %( identifier[in_path] , identifier[out_path] ), identifier[host] = identifier[self] . identifier[host] )
literal[string]
identifier[self] . identifier[put_file] ( identifier[in_path] , identifier[out_path] ) | def fetch_file(self, in_path, out_path):
vvv('FETCH %s TO %s' % (in_path, out_path), host=self.host)
    ' fetch a file from local to local -- for compatibility '
self.put_file(in_path, out_path) |
def plot_somas(somas):
    '''Plot a set of somas as spheres on the same figure, each with a different color'''
_, ax = common.get_figure(new_fig=True, subplot=111,
params={'projection': '3d', 'aspect': 'equal'})
for s in somas:
common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1)
plt.show() | def function[plot_somas, parameter[somas]]:
    constant[Plot a set of somas as spheres on the same figure, each with a different color]
<ast.Tuple object at 0x7da2047e9990> assign[=] call[name[common].get_figure, parameter[]]
for taget[name[s]] in starred[name[somas]] begin[:]
call[name[common].plot_sphere, parameter[name[ax], name[s].center, name[s].radius]]
call[name[plt].show, parameter[]] | keyword[def] identifier[plot_somas] ( identifier[somas] ):
literal[string]
identifier[_] , identifier[ax] = identifier[common] . identifier[get_figure] ( identifier[new_fig] = keyword[True] , identifier[subplot] = literal[int] ,
identifier[params] ={ literal[string] : literal[string] , literal[string] : literal[string] })
keyword[for] identifier[s] keyword[in] identifier[somas] :
identifier[common] . identifier[plot_sphere] ( identifier[ax] , identifier[s] . identifier[center] , identifier[s] . identifier[radius] , identifier[color] = identifier[random_color] (), identifier[alpha] = literal[int] )
identifier[plt] . identifier[show] () | def plot_somas(somas):
"""Plot set of somas on same figure as spheres, each with different color"""
(_, ax) = common.get_figure(new_fig=True, subplot=111, params={'projection': '3d', 'aspect': 'equal'})
for s in somas:
common.plot_sphere(ax, s.center, s.radius, color=random_color(), alpha=1) # depends on [control=['for'], data=['s']]
plt.show() |
def propagate(self, token, channel):
"""
Kick off the propagate function on the remote server.
Arguments:
token (str): The token to propagate
channel (str): The channel to propagate
Returns:
boolean: Success
"""
if self.get_propagate_status(token, channel) != u'0':
return
url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))
req = self.remote_utils.get_url(url)
        if req.status_code != 200:
raise RemoteDataUploadError('Propagate fail: {}'.format(req.text))
return True | def function[propagate, parameter[self, token, channel]]:
constant[
Kick off the propagate function on the remote server.
Arguments:
token (str): The token to propagate
channel (str): The channel to propagate
Returns:
boolean: Success
]
if compare[call[name[self].get_propagate_status, parameter[name[token], name[channel]]] not_equal[!=] constant[0]] begin[:]
return[None]
variable[url] assign[=] call[name[self].url, parameter[call[constant[sd/{}/{}/setPropagate/1/].format, parameter[name[token], name[channel]]]]]
variable[req] assign[=] call[name[self].remote_utils.get_url, parameter[name[url]]]
    if compare[name[req].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da1b02915d0>
return[constant[True]] | keyword[def] identifier[propagate] ( identifier[self] , identifier[token] , identifier[channel] ):
literal[string]
keyword[if] identifier[self] . identifier[get_propagate_status] ( identifier[token] , identifier[channel] )!= literal[string] :
keyword[return]
identifier[url] = identifier[self] . identifier[url] ( literal[string] . identifier[format] ( identifier[token] , identifier[channel] ))
identifier[req] = identifier[self] . identifier[remote_utils] . identifier[get_url] ( identifier[url] )
    keyword[if] identifier[req] . identifier[status_code] != literal[int] :
keyword[raise] identifier[RemoteDataUploadError] ( literal[string] . identifier[format] ( identifier[req] . identifier[text] ))
keyword[return] keyword[True] | def propagate(self, token, channel):
"""
Kick off the propagate function on the remote server.
Arguments:
token (str): The token to propagate
channel (str): The channel to propagate
Returns:
boolean: Success
"""
if self.get_propagate_status(token, channel) != u'0':
return # depends on [control=['if'], data=[]]
url = self.url('sd/{}/{}/setPropagate/1/'.format(token, channel))
req = self.remote_utils.get_url(url)
    if req.status_code != 200:
raise RemoteDataUploadError('Propagate fail: {}'.format(req.text)) # depends on [control=['if'], data=[]]
return True |
def _countEXT(self,extname="SCI"):
""" Count the number of extensions in the file with the given name
(``EXTNAME``).
"""
count=0 #simple fits image
if (self._image['PRIMARY'].header["EXTEND"]):
for i,hdu in enumerate(self._image):
if i > 0:
hduExtname = False
if 'EXTNAME' in hdu.header:
self._image[i].extnum=i
self._image[i].extname=hdu.header["EXTNAME"]
hduExtname = True
if 'EXTVER' in hdu.header:
self._image[i].extver=hdu.header["EXTVER"]
else:
self._image[i].extver = 1
if ((extname is not None) and \
(hduExtname and (hdu.header["EXTNAME"] == extname))) \
or extname is None:
count=count+1
return count | def function[_countEXT, parameter[self, extname]]:
constant[ Count the number of extensions in the file with the given name
(``EXTNAME``).
]
variable[count] assign[=] constant[0]
if call[call[name[self]._image][constant[PRIMARY]].header][constant[EXTEND]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1be5660>, <ast.Name object at 0x7da1b1be7a90>]]] in starred[call[name[enumerate], parameter[name[self]._image]]] begin[:]
if compare[name[i] greater[>] constant[0]] begin[:]
variable[hduExtname] assign[=] constant[False]
if compare[constant[EXTNAME] in name[hdu].header] begin[:]
call[name[self]._image][name[i]].extnum assign[=] name[i]
call[name[self]._image][name[i]].extname assign[=] call[name[hdu].header][constant[EXTNAME]]
variable[hduExtname] assign[=] constant[True]
if compare[constant[EXTVER] in name[hdu].header] begin[:]
call[name[self]._image][name[i]].extver assign[=] call[name[hdu].header][constant[EXTVER]]
if <ast.BoolOp object at 0x7da1b1be7250> begin[:]
variable[count] assign[=] binary_operation[name[count] + constant[1]]
return[name[count]] | keyword[def] identifier[_countEXT] ( identifier[self] , identifier[extname] = literal[string] ):
literal[string]
identifier[count] = literal[int]
keyword[if] ( identifier[self] . identifier[_image] [ literal[string] ]. identifier[header] [ literal[string] ]):
keyword[for] identifier[i] , identifier[hdu] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_image] ):
keyword[if] identifier[i] > literal[int] :
identifier[hduExtname] = keyword[False]
keyword[if] literal[string] keyword[in] identifier[hdu] . identifier[header] :
identifier[self] . identifier[_image] [ identifier[i] ]. identifier[extnum] = identifier[i]
identifier[self] . identifier[_image] [ identifier[i] ]. identifier[extname] = identifier[hdu] . identifier[header] [ literal[string] ]
identifier[hduExtname] = keyword[True]
keyword[if] literal[string] keyword[in] identifier[hdu] . identifier[header] :
identifier[self] . identifier[_image] [ identifier[i] ]. identifier[extver] = identifier[hdu] . identifier[header] [ literal[string] ]
keyword[else] :
identifier[self] . identifier[_image] [ identifier[i] ]. identifier[extver] = literal[int]
keyword[if] (( identifier[extname] keyword[is] keyword[not] keyword[None] ) keyword[and] ( identifier[hduExtname] keyword[and] ( identifier[hdu] . identifier[header] [ literal[string] ]== identifier[extname] ))) keyword[or] identifier[extname] keyword[is] keyword[None] :
identifier[count] = identifier[count] + literal[int]
keyword[return] identifier[count] | def _countEXT(self, extname='SCI'):
""" Count the number of extensions in the file with the given name
(``EXTNAME``).
"""
count = 0 #simple fits image
if self._image['PRIMARY'].header['EXTEND']:
for (i, hdu) in enumerate(self._image):
if i > 0:
hduExtname = False
if 'EXTNAME' in hdu.header:
self._image[i].extnum = i
self._image[i].extname = hdu.header['EXTNAME']
hduExtname = True # depends on [control=['if'], data=[]]
if 'EXTVER' in hdu.header:
self._image[i].extver = hdu.header['EXTVER'] # depends on [control=['if'], data=[]]
else:
self._image[i].extver = 1
if extname is not None and (hduExtname and hdu.header['EXTNAME'] == extname) or extname is None:
count = count + 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['i']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return count |
def close_fds(keep_fds): # pragma: no cover
"""Close all the file descriptors except those in keep_fds."""
# Make sure to keep stdout and stderr open for logging purpose
keep_fds = set(keep_fds).union([1, 2])
# We try to retrieve all the open fds
try:
open_fds = set(int(fd) for fd in os.listdir('/proc/self/fd'))
except FileNotFoundError:
import resource
max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
open_fds = set(fd for fd in range(3, max_nfds))
open_fds.add(0)
for i in open_fds - keep_fds:
try:
os.close(i)
except OSError:
pass | def function[close_fds, parameter[keep_fds]]:
constant[Close all the file descriptors except those in keep_fds.]
variable[keep_fds] assign[=] call[call[name[set], parameter[name[keep_fds]]].union, parameter[list[[<ast.Constant object at 0x7da1b05bfeb0>, <ast.Constant object at 0x7da1b05bfaf0>]]]]
<ast.Try object at 0x7da1b05bfd00>
for taget[name[i]] in starred[binary_operation[name[open_fds] - name[keep_fds]]] begin[:]
<ast.Try object at 0x7da1b0531d20> | keyword[def] identifier[close_fds] ( identifier[keep_fds] ):
literal[string]
identifier[keep_fds] = identifier[set] ( identifier[keep_fds] ). identifier[union] ([ literal[int] , literal[int] ])
keyword[try] :
identifier[open_fds] = identifier[set] ( identifier[int] ( identifier[fd] ) keyword[for] identifier[fd] keyword[in] identifier[os] . identifier[listdir] ( literal[string] ))
keyword[except] identifier[FileNotFoundError] :
keyword[import] identifier[resource]
identifier[max_nfds] = identifier[resource] . identifier[getrlimit] ( identifier[resource] . identifier[RLIMIT_NOFILE] )[ literal[int] ]
identifier[open_fds] = identifier[set] ( identifier[fd] keyword[for] identifier[fd] keyword[in] identifier[range] ( literal[int] , identifier[max_nfds] ))
identifier[open_fds] . identifier[add] ( literal[int] )
keyword[for] identifier[i] keyword[in] identifier[open_fds] - identifier[keep_fds] :
keyword[try] :
identifier[os] . identifier[close] ( identifier[i] )
keyword[except] identifier[OSError] :
keyword[pass] | def close_fds(keep_fds): # pragma: no cover
'Close all the file descriptors except those in keep_fds.'
# Make sure to keep stdout and stderr open for logging purpose
keep_fds = set(keep_fds).union([1, 2])
# We try to retrieve all the open fds
try:
open_fds = set((int(fd) for fd in os.listdir('/proc/self/fd'))) # depends on [control=['try'], data=[]]
except FileNotFoundError:
import resource
max_nfds = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
open_fds = set((fd for fd in range(3, max_nfds)))
open_fds.add(0) # depends on [control=['except'], data=[]]
for i in open_fds - keep_fds:
try:
os.close(i) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['i']] |
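# Usage sketch (assumption: called from a freshly forked child during
# daemonization, keeping only the log file's descriptor alive).
import os

log_fd = os.open('daemon.log', os.O_WRONLY | os.O_CREAT | os.O_APPEND)
close_fds([log_fd])  # stdout/stderr (fds 1 and 2) are kept implicitly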
def get_default_wrapper(cls):
"""Returns the default (first) driver wrapper
:returns: default driver wrapper
:rtype: toolium.driver_wrapper.DriverWrapper
"""
if cls.is_empty():
# Create a new driver wrapper if the pool is empty
from toolium.driver_wrapper import DriverWrapper
DriverWrapper()
return cls.driver_wrappers[0] | def function[get_default_wrapper, parameter[cls]]:
constant[Returns the default (first) driver wrapper
:returns: default driver wrapper
:rtype: toolium.driver_wrapper.DriverWrapper
]
if call[name[cls].is_empty, parameter[]] begin[:]
from relative_module[toolium.driver_wrapper] import module[DriverWrapper]
call[name[DriverWrapper], parameter[]]
return[call[name[cls].driver_wrappers][constant[0]]] | keyword[def] identifier[get_default_wrapper] ( identifier[cls] ):
literal[string]
keyword[if] identifier[cls] . identifier[is_empty] ():
keyword[from] identifier[toolium] . identifier[driver_wrapper] keyword[import] identifier[DriverWrapper]
identifier[DriverWrapper] ()
keyword[return] identifier[cls] . identifier[driver_wrappers] [ literal[int] ] | def get_default_wrapper(cls):
"""Returns the default (first) driver wrapper
:returns: default driver wrapper
:rtype: toolium.driver_wrapper.DriverWrapper
"""
if cls.is_empty():
# Create a new driver wrapper if the pool is empty
from toolium.driver_wrapper import DriverWrapper
DriverWrapper() # depends on [control=['if'], data=[]]
return cls.driver_wrappers[0] |
def get_all_plus_and_delete(self):
"""
        Get all self.plus items: each entry is copied, removed from the original, and returned in a new dict.
        :return: dict of self.plus entries
        :rtype: dict
"""
res = {}
props = list(self.plus.keys()) # we delete entries, so no for ... in ...
for prop in props:
res[prop] = self.get_plus_and_delete(prop)
return res | def function[get_all_plus_and_delete, parameter[self]]:
constant[
        Get all self.plus items: each entry is copied, removed from the original, and returned in a new dict.
        :return: dict of self.plus entries
        :rtype: dict
]
variable[res] assign[=] dictionary[[], []]
variable[props] assign[=] call[name[list], parameter[call[name[self].plus.keys, parameter[]]]]
for taget[name[prop]] in starred[name[props]] begin[:]
call[name[res]][name[prop]] assign[=] call[name[self].get_plus_and_delete, parameter[name[prop]]]
return[name[res]] | keyword[def] identifier[get_all_plus_and_delete] ( identifier[self] ):
literal[string]
identifier[res] ={}
identifier[props] = identifier[list] ( identifier[self] . identifier[plus] . identifier[keys] ())
keyword[for] identifier[prop] keyword[in] identifier[props] :
identifier[res] [ identifier[prop] ]= identifier[self] . identifier[get_plus_and_delete] ( identifier[prop] )
keyword[return] identifier[res] | def get_all_plus_and_delete(self):
"""
        Get all self.plus items: each entry is copied, removed from the original, and returned in a new dict.
        :return: dict of self.plus entries
        :rtype: dict
"""
res = {}
props = list(self.plus.keys()) # we delete entries, so no for ... in ...
for prop in props:
res[prop] = self.get_plus_and_delete(prop) # depends on [control=['for'], data=['prop']]
return res |
def start(self):
"""
Start the patch
"""
self._patcher = mock.patch(target=self.target)
MockClient = self._patcher.start()
instance = MockClient.return_value
instance.model.side_effect = mock.Mock(
side_effect=self.model
) | def function[start, parameter[self]]:
constant[
Start the patch
]
name[self]._patcher assign[=] call[name[mock].patch, parameter[]]
variable[MockClient] assign[=] call[name[self]._patcher.start, parameter[]]
variable[instance] assign[=] name[MockClient].return_value
name[instance].model.side_effect assign[=] call[name[mock].Mock, parameter[]] | keyword[def] identifier[start] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_patcher] = identifier[mock] . identifier[patch] ( identifier[target] = identifier[self] . identifier[target] )
identifier[MockClient] = identifier[self] . identifier[_patcher] . identifier[start] ()
identifier[instance] = identifier[MockClient] . identifier[return_value]
identifier[instance] . identifier[model] . identifier[side_effect] = identifier[mock] . identifier[Mock] (
identifier[side_effect] = identifier[self] . identifier[model]
) | def start(self):
"""
Start the patch
"""
self._patcher = mock.patch(target=self.target)
MockClient = self._patcher.start()
instance = MockClient.return_value
instance.model.side_effect = mock.Mock(side_effect=self.model) |
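# Hedged sketch of how the patch helper above is driven; the host class,
# target path, and explicit stop() call are assumptions, not part of the source.
from unittest import mock

class ClientPatch:
    def __init__(self, target, model):
        self.target = target  # dotted path of the class to replace
        self.model = model    # callable wired in as the mocked model()

    def start(self):  # same body as the method above
        self._patcher = mock.patch(target=self.target)
        MockClient = self._patcher.start()
        instance = MockClient.return_value
        instance.model.side_effect = mock.Mock(side_effect=self.model)

patcher = ClientPatch('json.JSONDecoder', model=lambda s: {'raw': s})
patcher.start()          # json.JSONDecoder is now a MagicMock
patcher._patcher.stop()  # restore the real class when done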
def _transform_result(typ, result):
"""Convert the result back into the input type.
"""
if issubclass(typ, bytes):
return tostring(result, encoding='utf-8')
elif issubclass(typ, unicode):
return tostring(result, encoding='unicode')
else:
return result | def function[_transform_result, parameter[typ, result]]:
constant[Convert the result back into the input type.
]
if call[name[issubclass], parameter[name[typ], name[bytes]]] begin[:]
return[call[name[tostring], parameter[name[result]]]] | keyword[def] identifier[_transform_result] ( identifier[typ] , identifier[result] ):
literal[string]
keyword[if] identifier[issubclass] ( identifier[typ] , identifier[bytes] ):
keyword[return] identifier[tostring] ( identifier[result] , identifier[encoding] = literal[string] )
keyword[elif] identifier[issubclass] ( identifier[typ] , identifier[unicode] ):
keyword[return] identifier[tostring] ( identifier[result] , identifier[encoding] = literal[string] )
keyword[else] :
keyword[return] identifier[result] | def _transform_result(typ, result):
"""Convert the result back into the input type.
"""
if issubclass(typ, bytes):
return tostring(result, encoding='utf-8') # depends on [control=['if'], data=[]]
elif issubclass(typ, unicode):
return tostring(result, encoding='unicode') # depends on [control=['if'], data=[]]
else:
return result |
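# Illustrative round-trip for the helper above using lxml's real tostring
# semantics; note `unicode` implies a Python 2 context (use str on Python 3).
from lxml.etree import fromstring, tostring

tree = fromstring(b'<root><a/></root>')
tostring(tree, encoding='utf-8')    # bytes branch  -> b'<root><a/></root>'
tostring(tree, encoding='unicode')  # text branch   -> '<root><a/></root>'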
def validate(self, form_cls, obj=None):
'''Validate a form from the request and handle errors'''
        # default of '' avoids a TypeError when the header is missing entirely
        if 'application/json' not in request.headers.get('Content-Type', ''):
errors = {'Content-Type': 'expecting application/json'}
self.abort(400, errors=errors)
form = form_cls.from_json(request.json, obj=obj, instance=obj,
csrf_enabled=False)
if not form.validate():
self.abort(400, errors=form.errors)
return form | def function[validate, parameter[self, form_cls, obj]]:
constant[Validate a form from the request and handle errors]
    if compare[constant[application/json] <ast.NotIn object at 0x7da2590d7190> call[name[request].headers.get, parameter[constant[Content-Type], constant[]]]] begin[:]
variable[errors] assign[=] dictionary[[<ast.Constant object at 0x7da204566740>], [<ast.Constant object at 0x7da204564850>]]
call[name[self].abort, parameter[constant[400]]]
variable[form] assign[=] call[name[form_cls].from_json, parameter[name[request].json]]
if <ast.UnaryOp object at 0x7da204567970> begin[:]
call[name[self].abort, parameter[constant[400]]]
return[name[form]] | keyword[def] identifier[validate] ( identifier[self] , identifier[form_cls] , identifier[obj] = keyword[None] ):
literal[string]
        keyword[if] literal[string] keyword[not] keyword[in] identifier[request] . identifier[headers] . identifier[get] ( literal[string] , literal[string] ):
identifier[errors] ={ literal[string] : literal[string] }
identifier[self] . identifier[abort] ( literal[int] , identifier[errors] = identifier[errors] )
identifier[form] = identifier[form_cls] . identifier[from_json] ( identifier[request] . identifier[json] , identifier[obj] = identifier[obj] , identifier[instance] = identifier[obj] ,
identifier[csrf_enabled] = keyword[False] )
keyword[if] keyword[not] identifier[form] . identifier[validate] ():
identifier[self] . identifier[abort] ( literal[int] , identifier[errors] = identifier[form] . identifier[errors] )
keyword[return] identifier[form] | def validate(self, form_cls, obj=None):
"""Validate a form from the request and handle errors"""
    if 'application/json' not in request.headers.get('Content-Type', ''):
errors = {'Content-Type': 'expecting application/json'}
self.abort(400, errors=errors) # depends on [control=['if'], data=[]]
form = form_cls.from_json(request.json, obj=obj, instance=obj, csrf_enabled=False)
if not form.validate():
self.abort(400, errors=form.errors) # depends on [control=['if'], data=[]]
return form |
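# Hypothetical client-side request that satisfies the Content-Type guard
# above; the URL and payload are invented for illustration only.
import requests

resp = requests.post(
    'http://localhost:5000/api/items/',  # hypothetical endpoint using this mixin
    json={'title': 'hello'},             # requests sets Content-Type: application/json
)
# A 400 response would carry {'errors': {...}} produced by self.abort(400, ...)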
def get_sequence(self):
"""Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int
"""
if not self.address:
raise StellarAddressInvalidError('No address provided.')
address = self.horizon.account(self.address)
return int(address.get('sequence')) | def function[get_sequence, parameter[self]]:
constant[Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int
]
if <ast.UnaryOp object at 0x7da1b16c20b0> begin[:]
<ast.Raise object at 0x7da1b16c3340>
variable[address] assign[=] call[name[self].horizon.account, parameter[name[self].address]]
return[call[name[int], parameter[call[name[address].get, parameter[constant[sequence]]]]]] | keyword[def] identifier[get_sequence] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[address] :
keyword[raise] identifier[StellarAddressInvalidError] ( literal[string] )
identifier[address] = identifier[self] . identifier[horizon] . identifier[account] ( identifier[self] . identifier[address] )
keyword[return] identifier[int] ( identifier[address] . identifier[get] ( literal[string] )) | def get_sequence(self):
"""Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int
"""
if not self.address:
raise StellarAddressInvalidError('No address provided.') # depends on [control=['if'], data=[]]
address = self.horizon.account(self.address)
return int(address.get('sequence')) |
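# Offline sketch of the Horizon interaction assumed by get_sequence();
# FakeHorizon and Account are stand-ins so the example runs without a network.
class FakeHorizon:
    def account(self, address):
        return {'sequence': '4123'}  # Horizon serialises the sequence as a string

class Account:
    def __init__(self, address, horizon):
        self.address, self.horizon = address, horizon
    get_sequence = get_sequence  # reuse the function above as a method

assert Account('GDW...PLACEHOLDER', FakeHorizon()).get_sequence() == 4123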
def rm_dup_args_safe(self, tag: str = None) -> None:
"""Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
    Warning: Although this is considered safe and no meaningful data is
    removed from the wikitext, the rendered output may still change if the
    second argument is empty and removed while the first one had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
"""
        name_to_lastarg_vals = {}  # type: Dict[str, Tuple[Argument, List[str]]]
# Removing positional args affects their name. By reversing the list
# we avoid encountering those kind of args.
for arg in reversed(self.arguments):
name = arg.name.strip(WS)
if arg.positional:
# Value of keyword arguments is automatically stripped by MW.
val = arg.value
else:
# But it's not OK to strip whitespace in positional arguments.
val = arg.value.strip(WS)
if name in name_to_lastarg_vals:
# This is a duplicate argument.
if not val:
# This duplicate argument is empty. It's safe to remove it.
del arg[0:len(arg.string)]
else:
# Try to remove any of the detected duplicates of this
# that are empty or their value equals to this one.
lastarg, dup_vals = name_to_lastarg_vals[name]
if val in dup_vals:
del arg[0:len(arg.string)]
elif '' in dup_vals:
# This happens only if the last occurrence of name has
# been an empty string; other empty values will
# be removed as they are seen.
# In other words index of the empty argument in
# dup_vals is always 0.
del lastarg[0:len(lastarg.string)]
dup_vals.pop(0)
else:
# It was not possible to remove any of the duplicates.
dup_vals.append(val)
if tag:
arg.value += tag
else:
name_to_lastarg_vals[name] = (arg, [val]) | def function[rm_dup_args_safe, parameter[self, tag]]:
constant[Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
    Warning: Although this is considered safe and no meaningful data is
    removed from the wikitext, the rendered output may still change if the
    second argument is empty and removed while the first one had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
]
variable[name_to_lastarg_vals] assign[=] dictionary[[], []]
for taget[name[arg]] in starred[call[name[reversed], parameter[name[self].arguments]]] begin[:]
variable[name] assign[=] call[name[arg].name.strip, parameter[name[WS]]]
if name[arg].positional begin[:]
variable[val] assign[=] name[arg].value
if compare[name[name] in name[name_to_lastarg_vals]] begin[:]
if <ast.UnaryOp object at 0x7da204960820> begin[:]
<ast.Delete object at 0x7da204962380> | keyword[def] identifier[rm_dup_args_safe] ( identifier[self] , identifier[tag] : identifier[str] = keyword[None] )-> keyword[None] :
literal[string]
identifier[name_to_lastarg_vals] ={}
keyword[for] identifier[arg] keyword[in] identifier[reversed] ( identifier[self] . identifier[arguments] ):
identifier[name] = identifier[arg] . identifier[name] . identifier[strip] ( identifier[WS] )
keyword[if] identifier[arg] . identifier[positional] :
identifier[val] = identifier[arg] . identifier[value]
keyword[else] :
identifier[val] = identifier[arg] . identifier[value] . identifier[strip] ( identifier[WS] )
keyword[if] identifier[name] keyword[in] identifier[name_to_lastarg_vals] :
keyword[if] keyword[not] identifier[val] :
keyword[del] identifier[arg] [ literal[int] : identifier[len] ( identifier[arg] . identifier[string] )]
keyword[else] :
identifier[lastarg] , identifier[dup_vals] = identifier[name_to_lastarg_vals] [ identifier[name] ]
keyword[if] identifier[val] keyword[in] identifier[dup_vals] :
keyword[del] identifier[arg] [ literal[int] : identifier[len] ( identifier[arg] . identifier[string] )]
keyword[elif] literal[string] keyword[in] identifier[dup_vals] :
keyword[del] identifier[lastarg] [ literal[int] : identifier[len] ( identifier[lastarg] . identifier[string] )]
identifier[dup_vals] . identifier[pop] ( literal[int] )
keyword[else] :
identifier[dup_vals] . identifier[append] ( identifier[val] )
keyword[if] identifier[tag] :
identifier[arg] . identifier[value] += identifier[tag]
keyword[else] :
identifier[name_to_lastarg_vals] [ identifier[name] ]=( identifier[arg] ,[ identifier[val] ]) | def rm_dup_args_safe(self, tag: str=None) -> None:
"""Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
    Warning: Although this is considered safe and no meaningful data is
    removed from the wikitext, the rendered output may still change if the
    second argument is empty and removed while the first one had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
"""
    name_to_lastarg_vals = {}  # type: Dict[str, Tuple[Argument, List[str]]]
# Removing positional args affects their name. By reversing the list
# we avoid encountering those kind of args.
for arg in reversed(self.arguments):
name = arg.name.strip(WS)
if arg.positional:
# Value of keyword arguments is automatically stripped by MW.
val = arg.value # depends on [control=['if'], data=[]]
else:
# But it's not OK to strip whitespace in positional arguments.
val = arg.value.strip(WS)
if name in name_to_lastarg_vals:
# This is a duplicate argument.
if not val:
# This duplicate argument is empty. It's safe to remove it.
del arg[0:len(arg.string)] # depends on [control=['if'], data=[]]
else:
# Try to remove any of the detected duplicates of this
# that are empty or their value equals to this one.
(lastarg, dup_vals) = name_to_lastarg_vals[name]
if val in dup_vals:
del arg[0:len(arg.string)] # depends on [control=['if'], data=[]]
elif '' in dup_vals:
# This happens only if the last occurrence of name has
# been an empty string; other empty values will
# be removed as they are seen.
# In other words index of the empty argument in
# dup_vals is always 0.
del lastarg[0:len(lastarg.string)]
dup_vals.pop(0) # depends on [control=['if'], data=['dup_vals']]
else:
# It was not possible to remove any of the duplicates.
dup_vals.append(val)
if tag:
arg.value += tag # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['name', 'name_to_lastarg_vals']]
else:
name_to_lastarg_vals[name] = (arg, [val]) # depends on [control=['for'], data=['arg']] |
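# Hedged example against wikitextparser's public API, where this method lives
# on Template objects; the exact cleaned-up output shown is an assumption.
import wikitextparser as wtp

template = wtp.parse('{{cite|url=x|url=|url=x}}').templates[0]
template.rm_dup_args_safe()
print(template.string)  # empty/identical duplicates dropped, e.g. '{{cite|url=x}}'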
def takewhile(self, func=None):
"""
        Return a new Collection truncated at the first item for which func is falsy.
        Parameters:
        func : function(Node) -> bool
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
"""
func = _make_callable(func)
return Collection(takewhile(func, self._items)) | def function[takewhile, parameter[self, func]]:
constant[
        Return a new Collection truncated at the first item for which func is falsy.
        Parameters:
        func : function(Node) -> bool
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
]
variable[func] assign[=] call[name[_make_callable], parameter[name[func]]]
return[call[name[Collection], parameter[call[name[takewhile], parameter[name[func], name[self]._items]]]]] | keyword[def] identifier[takewhile] ( identifier[self] , identifier[func] = keyword[None] ):
literal[string]
identifier[func] = identifier[_make_callable] ( identifier[func] )
keyword[return] identifier[Collection] ( identifier[takewhile] ( identifier[func] , identifier[self] . identifier[_items] )) | def takewhile(self, func=None):
"""
        Return a new Collection truncated at the first item for which func is falsy.
        Parameters:
        func : function(Node) -> bool
Returns:
A new Collection, discarding all items
at and after the first item where bool(func(item)) == False
Examples:
node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
"""
func = _make_callable(func)
return Collection(takewhile(func, self._items)) |
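# The semantics mirror itertools.takewhile; a tiny illustration of the
# cut-off behaviour described in the docstring above.
from itertools import takewhile

rows = [5, 4, 1, 7]
assert list(takewhile(lambda n: n > 3, rows)) == [5, 4]  # stops at 1; 7 is discarded too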
def summary_plot(shap_values, features=None, feature_names=None, max_display=None, plot_type="dot",
color=None, axis_color="#333333", title=None, alpha=1, show=True, sort=True,
color_bar=True, auto_size_plot=True, layered_violin_max_num_bins=20, class_names=None):
"""Create a SHAP summary plot, colored by feature values when they are provided.
Parameters
----------
    shap_values : numpy.array
        Matrix of SHAP values (# samples x # features), or a list of such
        matrices for multi-class model outputs
features : numpy.array or pandas.DataFrame or list
Matrix of feature values (# samples x # features) or a feature_names list as shorthand
feature_names : list
Names of the features (length # features)
max_display : int
How many top features to include in the plot (default is 20, or 7 for interaction plots)
    plot_type : "dot" (default), "violin", "layered_violin", or "bar"
        What type of summary plot to produce ("bar" is currently the only
        type supported for multi-class inputs)
"""
multi_class = False
if isinstance(shap_values, list):
multi_class = True
plot_type = "bar" # only type supported for now
else:
assert len(shap_values.shape) != 1, "Summary plots need a matrix of shap_values, not a vector."
# default color:
if color is None:
if plot_type == 'layered_violin':
color = "coolwarm"
elif multi_class:
color = lambda i: colors.red_blue_circle(i/len(shap_values))
else:
color = colors.blue_rgb
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = features.columns
features = features.values
elif isinstance(features, list):
if feature_names is None:
feature_names = features
features = None
elif (features is not None) and len(features.shape) == 1 and feature_names is None:
feature_names = features
features = None
num_features = (shap_values[0].shape[1] if multi_class else shap_values.shape[1])
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)])
# plotting SHAP interaction values
if not multi_class and len(shap_values.shape) == 3:
if max_display is None:
max_display = 7
else:
max_display = min(len(feature_names), max_display)
sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))
# get plotting limits
delta = 1.0 / (shap_values.shape[1] ** 2)
slow = np.nanpercentile(shap_values, delta)
shigh = np.nanpercentile(shap_values, 100 - delta)
v = max(abs(slow), abs(shigh))
slow = -v
shigh = v
pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))
pl.subplot(1, max_display, 1)
proj_shap_values = shap_values[:, sort_inds[0], sort_inds]
proj_shap_values[:, 1:] *= 2 # because off diag effects are split in half
summary_plot(
proj_shap_values, features[:, sort_inds] if features is not None else None,
feature_names=feature_names[sort_inds],
sort=False, show=False, color_bar=False,
auto_size_plot=False,
max_display=max_display
)
pl.xlim((slow, shigh))
pl.xlabel("")
title_length_limit = 11
pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))
for i in range(1, min(len(sort_inds), max_display)):
ind = sort_inds[i]
pl.subplot(1, max_display, i + 1)
proj_shap_values = shap_values[:, ind, sort_inds]
proj_shap_values *= 2
proj_shap_values[:, i] /= 2 # because only off diag effects are split in half
summary_plot(
proj_shap_values, features[:, sort_inds] if features is not None else None,
sort=False,
feature_names=["" for i in range(len(feature_names))],
show=False,
color_bar=False,
auto_size_plot=False,
max_display=max_display
)
pl.xlim((slow, shigh))
pl.xlabel("")
if i == min(len(sort_inds), max_display) // 2:
pl.xlabel(labels['INTERACTION_VALUE'])
pl.title(shorten_text(feature_names[ind], title_length_limit))
pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)
pl.subplots_adjust(hspace=0, wspace=0.1)
if show:
pl.show()
return
if max_display is None:
max_display = 20
if sort:
# order features by the sum of their effect magnitudes
if multi_class:
feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=0), axis=0))
else:
feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))
feature_order = feature_order[-min(max_display, len(feature_order)):]
else:
feature_order = np.flip(np.arange(min(max_display, num_features)), 0)
row_height = 0.4
if auto_size_plot:
pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5)
pl.axvline(x=0, color="#999999", zorder=-1)
if plot_type == "dot":
for pos, i in enumerate(feature_order):
pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
shaps = shap_values[:, i]
values = None if features is None else features[:, i]
inds = np.arange(len(shaps))
np.random.shuffle(inds)
if values is not None:
values = values[inds]
shaps = shaps[inds]
colored_feature = True
try:
values = np.array(values, dtype=np.float64) # make sure this can be numeric
except:
colored_feature = False
N = len(shaps)
# hspacing = (np.max(shaps) - np.min(shaps)) / 200
# curr_bin = []
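            # Beeswarm layout: quantise the SHAP values into `nbins` bins, then
            # stack points sharing a bin alternately above/below the row centre.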
nbins = 100
quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-8))
inds = np.argsort(quant + np.random.randn(N) * 1e-6)
layer = 0
last_bin = -1
ys = np.zeros(N)
for ind in inds:
if quant[ind] != last_bin:
layer = 0
ys[ind] = np.ceil(layer / 2) * ((layer % 2) * 2 - 1)
layer += 1
last_bin = quant[ind]
ys *= 0.9 * (row_height / np.max(ys + 1))
if features is not None and colored_feature:
# trim the color range, but prevent the color range from collapsing
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax:
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax:
vmin = np.min(values)
vmax = np.max(values)
assert features.shape[0] == len(shaps), "Feature and SHAP matrices must have the same number of rows!"
# plot the nan values in the interaction feature as grey
nan_mask = np.isnan(values)
pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color="#777777", vmin=vmin,
vmax=vmax, s=16, alpha=alpha, linewidth=0,
zorder=3, rasterized=len(shaps) > 500)
# plot the non-nan values colored by the trimmed feature value
cvals = values[np.invert(nan_mask)].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
cvals[cvals_imp > vmax] = vmax
cvals[cvals_imp < vmin] = vmin
pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)],
cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=16,
c=cvals, alpha=alpha, linewidth=0,
zorder=3, rasterized=len(shaps) > 500)
else:
pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3,
color=color if colored_feature else "#777777", rasterized=len(shaps) > 500)
elif plot_type == "violin":
for pos, i in enumerate(feature_order):
pl.axhline(y=pos, color="#cccccc", lw=0.5, dashes=(1, 5), zorder=-1)
if features is not None:
global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)
global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)
for pos, i in enumerate(feature_order):
shaps = shap_values[:, i]
shap_min, shap_max = np.min(shaps), np.max(shaps)
rng = shap_max - shap_min
xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)
if np.std(shaps) < (global_high - global_low) / 100:
ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs)
else:
ds = gaussian_kde(shaps)(xs)
ds /= np.max(ds) * 3
values = features[:, i]
window_size = max(10, len(values) // 20)
smooth_values = np.zeros(len(xs) - 1)
sort_inds = np.argsort(shaps)
trailing_pos = 0
leading_pos = 0
running_sum = 0
back_fill = 0
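            # Sliding-window mean of the feature values along the sorted SHAP
            # axis; at most 20 trailing samples remain in running_sum at once
            # (note: window_size above is computed but the loop hard-codes 20).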
for j in range(len(xs) - 1):
while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:
running_sum += values[sort_inds[leading_pos]]
leading_pos += 1
if leading_pos - trailing_pos > 20:
running_sum -= values[sort_inds[trailing_pos]]
trailing_pos += 1
if leading_pos - trailing_pos > 0:
smooth_values[j] = running_sum / (leading_pos - trailing_pos)
for k in range(back_fill):
smooth_values[j - k - 1] = smooth_values[j]
else:
back_fill += 1
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax:
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax:
vmin = np.min(values)
vmax = np.max(values)
pl.scatter(shaps, np.ones(shap_values.shape[0]) * pos, s=9, cmap=colors.red_blue, vmin=vmin, vmax=vmax,
c=values, alpha=alpha, linewidth=0, zorder=1)
# smooth_values -= nxp.nanpercentile(smooth_values, 5)
# smooth_values /= np.nanpercentile(smooth_values, 95)
smooth_values -= vmin
if vmax - vmin > 0:
smooth_values /= vmax - vmin
for i in range(len(xs) - 1):
if ds[i] > 0.05 or ds[i + 1] > 0.05:
pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]],
[pos - ds[i], pos - ds[i + 1]], color=colors.red_blue(smooth_values[i]),
zorder=2)
else:
parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False,
widths=0.7,
showmeans=False, showextrema=False, showmedians=False)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor('none')
pc.set_alpha(alpha)
elif plot_type == "layered_violin": # courtesy of @kodonnell
num_x_points = 200
bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype(
'int') # the indices of the feature data corresponding to each bin
shap_min, shap_max = np.min(shap_values), np.max(shap_values)
x_points = np.linspace(shap_min, shap_max, num_x_points)
# loop through each feature and plot:
for pos, ind in enumerate(feature_order):
# decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
# to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
feature = features[:, ind]
unique, counts = np.unique(feature, return_counts=True)
if unique.shape[0] <= layered_violin_max_num_bins:
order = np.argsort(unique)
thesebins = np.cumsum(counts[order])
thesebins = np.insert(thesebins, 0, 0)
else:
thesebins = bins
nbins = thesebins.shape[0] - 1
# order the feature data so we can apply percentiling
order = np.argsort(feature)
# x axis is located at y0 = pos, with pos being there for offset
y0 = np.ones(num_x_points) * pos
# calculate kdes:
ys = np.zeros((nbins, num_x_points))
for i in range(nbins):
# get shap values in this bin:
shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
                # if there's only one element, a KDE can't be fitted, so skip this bin
if shaps.shape[0] == 1:
warnings.warn(
"not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot."
% (i, feature_names[ind]))
                    # to ignore it, just set it to the previous y-values (so the
                    # area between them will be zero); note ys is already 0, so
                    # there's nothing to do if i == 0
if i > 0:
ys[i, :] = ys[i - 1, :]
continue
# save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
# scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
                # do nothing, but when we've gone with the unique option, this will matter - e.g. if 99% are male and 1%
# female, we want the 1% to appear a lot smaller.
size = thesebins[i + 1] - thesebins[i]
bin_size_if_even = features.shape[0] / nbins
relative_bin_size = size / bin_size_if_even
ys[i, :] *= relative_bin_size
# now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
# instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no
# whitespace
ys = np.cumsum(ys, axis=0)
width = 0.8
scale = ys.max() * 2 / width # 2 is here as we plot both sides of x axis
for i in range(nbins - 1, -1, -1):
y = ys[i, :] / scale
            # if color is a cmap name, use it; otherwise treat it as a plain colour
            c = pl.get_cmap(color)(i / (nbins - 1)) if color in pl.cm.datad else color
pl.fill_between(x_points, pos - y, pos + y, facecolor=c)
pl.xlim(shap_min, shap_max)
elif not multi_class and plot_type == "bar":
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
global_shap_values = np.abs(shap_values).mean(0)
pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color)
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
elif multi_class and plot_type == "bar":
if class_names is None:
class_names = ["Class "+str(i) for i in range(len(shap_values))]
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
left_pos = np.zeros(len(feature_inds))
class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
for i,ind in enumerate(class_inds):
global_shap_values = np.abs(shap_values[ind]).mean(0)
pl.barh(
y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center',
color=color(i), label=class_names[ind]
)
left_pos += global_shap_values[feature_inds]
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
pl.legend(frameon=False, fontsize=12)
# draw the color bar
if color_bar and features is not None and plot_type != "bar" and \
(plot_type != "layered_violin" or color in pl.cm.datad):
import matplotlib.cm as cm
m = cm.ScalarMappable(cmap=colors.red_blue if plot_type != "layered_violin" else pl.get_cmap(color))
m.set_array([0, 1])
cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)
cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])
cb.set_label(labels['FEATURE_VALUE'], size=12, labelpad=0)
cb.ax.tick_params(labelsize=11, length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.9) * 20)
# cb.draw_all()
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().spines['left'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color)
pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13)
if plot_type != "bar":
pl.gca().tick_params('y', length=20, width=0.5, which='major')
pl.gca().tick_params('x', labelsize=11)
pl.ylim(-1, len(feature_order))
if plot_type == "bar":
pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13)
else:
pl.xlabel(labels['VALUE'], fontsize=13)
if show:
pl.show() | def function[summary_plot, parameter[shap_values, features, feature_names, max_display, plot_type, color, axis_color, title, alpha, show, sort, color_bar, auto_size_plot, layered_violin_max_num_bins, class_names]]:
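# Hedged smoke test for the function above; the SHAP matrix is random
# stand-in data, not real attributions, and rendering is suppressed.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(300, 5)         # feature matrix (# samples x # features)
sv = rng.randn(300, 5) * 0.1  # fabricated stand-in for SHAP values
summary_plot(sv, X, feature_names=['f%d' % i for i in range(5)], show=False)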
constant[Create a SHAP summary plot, colored by feature values when they are provided.
Parameters
----------
    shap_values : numpy.array
        Matrix of SHAP values (# samples x # features), or a list of such
        matrices for multi-class model outputs
features : numpy.array or pandas.DataFrame or list
Matrix of feature values (# samples x # features) or a feature_names list as shorthand
feature_names : list
Names of the features (length # features)
max_display : int
How many top features to include in the plot (default is 20, or 7 for interaction plots)
    plot_type : "dot" (default), "violin", "layered_violin", or "bar"
        What type of summary plot to produce ("bar" is currently the only
        type supported for multi-class inputs)
]
variable[multi_class] assign[=] constant[False]
if call[name[isinstance], parameter[name[shap_values], name[list]]] begin[:]
variable[multi_class] assign[=] constant[True]
variable[plot_type] assign[=] constant[bar]
if compare[name[color] is constant[None]] begin[:]
if compare[name[plot_type] equal[==] constant[layered_violin]] begin[:]
variable[color] assign[=] constant[coolwarm]
if compare[call[name[str], parameter[call[name[type], parameter[name[features]]]]] equal[==] constant[<class 'pandas.core.frame.DataFrame'>]] begin[:]
if compare[name[feature_names] is constant[None]] begin[:]
variable[feature_names] assign[=] name[features].columns
variable[features] assign[=] name[features].values
variable[num_features] assign[=] <ast.IfExp object at 0x7da1b1f07f10>
if compare[name[feature_names] is constant[None]] begin[:]
variable[feature_names] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b1f06200>]]
if <ast.BoolOp object at 0x7da20e793370> begin[:]
if compare[name[max_display] is constant[None]] begin[:]
variable[max_display] assign[=] constant[7]
variable[sort_inds] assign[=] call[name[np].argsort, parameter[<ast.UnaryOp object at 0x7da18f7205e0>]]
variable[delta] assign[=] binary_operation[constant[1.0] / binary_operation[call[name[shap_values].shape][constant[1]] ** constant[2]]]
variable[slow] assign[=] call[name[np].nanpercentile, parameter[name[shap_values], name[delta]]]
variable[shigh] assign[=] call[name[np].nanpercentile, parameter[name[shap_values], binary_operation[constant[100] - name[delta]]]]
variable[v] assign[=] call[name[max], parameter[call[name[abs], parameter[name[slow]]], call[name[abs], parameter[name[shigh]]]]]
variable[slow] assign[=] <ast.UnaryOp object at 0x7da18f721b70>
variable[shigh] assign[=] name[v]
call[name[pl].figure, parameter[]]
call[name[pl].subplot, parameter[constant[1], name[max_display], constant[1]]]
variable[proj_shap_values] assign[=] call[name[shap_values]][tuple[[<ast.Slice object at 0x7da18f722ad0>, <ast.Subscript object at 0x7da18f723730>, <ast.Name object at 0x7da18f721f00>]]]
<ast.AugAssign object at 0x7da18f721180>
call[name[summary_plot], parameter[name[proj_shap_values], <ast.IfExp object at 0x7da18f723430>]]
call[name[pl].xlim, parameter[tuple[[<ast.Name object at 0x7da18f722680>, <ast.Name object at 0x7da18f7209d0>]]]]
call[name[pl].xlabel, parameter[constant[]]]
variable[title_length_limit] assign[=] constant[11]
call[name[pl].title, parameter[call[name[shorten_text], parameter[call[name[feature_names]][call[name[sort_inds]][constant[0]]], name[title_length_limit]]]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[min], parameter[call[name[len], parameter[name[sort_inds]]], name[max_display]]]]]] begin[:]
variable[ind] assign[=] call[name[sort_inds]][name[i]]
call[name[pl].subplot, parameter[constant[1], name[max_display], binary_operation[name[i] + constant[1]]]]
variable[proj_shap_values] assign[=] call[name[shap_values]][tuple[[<ast.Slice object at 0x7da18f723f10>, <ast.Name object at 0x7da18f720850>, <ast.Name object at 0x7da18f722350>]]]
<ast.AugAssign object at 0x7da18f7227a0>
<ast.AugAssign object at 0x7da18f7232e0>
call[name[summary_plot], parameter[name[proj_shap_values], <ast.IfExp object at 0x7da18f723280>]]
call[name[pl].xlim, parameter[tuple[[<ast.Name object at 0x7da18f722a10>, <ast.Name object at 0x7da18f720ee0>]]]]
call[name[pl].xlabel, parameter[constant[]]]
if compare[name[i] equal[==] binary_operation[call[name[min], parameter[call[name[len], parameter[name[sort_inds]]], name[max_display]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]] begin[:]
call[name[pl].xlabel, parameter[call[name[labels]][constant[INTERACTION_VALUE]]]]
call[name[pl].title, parameter[call[name[shorten_text], parameter[call[name[feature_names]][name[ind]], name[title_length_limit]]]]]
call[name[pl].tight_layout, parameter[]]
call[name[pl].subplots_adjust, parameter[]]
if name[show] begin[:]
call[name[pl].show, parameter[]]
return[None]
if compare[name[max_display] is constant[None]] begin[:]
variable[max_display] assign[=] constant[20]
if name[sort] begin[:]
if name[multi_class] begin[:]
variable[feature_order] assign[=] call[name[np].argsort, parameter[call[name[np].sum, parameter[call[name[np].mean, parameter[call[name[np].abs, parameter[name[shap_values]]]]]]]]]
variable[feature_order] assign[=] call[name[feature_order]][<ast.Slice object at 0x7da1b1f07730>]
variable[row_height] assign[=] constant[0.4]
if name[auto_size_plot] begin[:]
call[call[name[pl].gcf, parameter[]].set_size_inches, parameter[constant[8], binary_operation[binary_operation[call[name[len], parameter[name[feature_order]]] * name[row_height]] + constant[1.5]]]]
call[name[pl].axvline, parameter[]]
if compare[name[plot_type] equal[==] constant[dot]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b1f05b40>, <ast.Name object at 0x7da1b1f06da0>]]] in starred[call[name[enumerate], parameter[name[feature_order]]]] begin[:]
call[name[pl].axhline, parameter[]]
variable[shaps] assign[=] call[name[shap_values]][tuple[[<ast.Slice object at 0x7da1b1f05f00>, <ast.Name object at 0x7da1b1f05960>]]]
variable[values] assign[=] <ast.IfExp object at 0x7da1b1f05f30>
variable[inds] assign[=] call[name[np].arange, parameter[call[name[len], parameter[name[shaps]]]]]
call[name[np].random.shuffle, parameter[name[inds]]]
if compare[name[values] is_not constant[None]] begin[:]
variable[values] assign[=] call[name[values]][name[inds]]
variable[shaps] assign[=] call[name[shaps]][name[inds]]
variable[colored_feature] assign[=] constant[True]
<ast.Try object at 0x7da20c6a8490>
variable[N] assign[=] call[name[len], parameter[name[shaps]]]
variable[nbins] assign[=] constant[100]
variable[quant] assign[=] call[name[np].round, parameter[binary_operation[binary_operation[name[nbins] * binary_operation[name[shaps] - call[name[np].min, parameter[name[shaps]]]]] / binary_operation[binary_operation[call[name[np].max, parameter[name[shaps]]] - call[name[np].min, parameter[name[shaps]]]] + constant[1e-08]]]]]
variable[inds] assign[=] call[name[np].argsort, parameter[binary_operation[name[quant] + binary_operation[call[name[np].random.randn, parameter[name[N]]] * constant[1e-06]]]]]
variable[layer] assign[=] constant[0]
variable[last_bin] assign[=] <ast.UnaryOp object at 0x7da20c6a8eb0>
variable[ys] assign[=] call[name[np].zeros, parameter[name[N]]]
for taget[name[ind]] in starred[name[inds]] begin[:]
if compare[call[name[quant]][name[ind]] not_equal[!=] name[last_bin]] begin[:]
variable[layer] assign[=] constant[0]
call[name[ys]][name[ind]] assign[=] binary_operation[call[name[np].ceil, parameter[binary_operation[name[layer] / constant[2]]]] * binary_operation[binary_operation[binary_operation[name[layer] <ast.Mod object at 0x7da2590d6920> constant[2]] * constant[2]] - constant[1]]]
<ast.AugAssign object at 0x7da20c6a86a0>
variable[last_bin] assign[=] call[name[quant]][name[ind]]
<ast.AugAssign object at 0x7da20c6a8370>
if <ast.BoolOp object at 0x7da20c6a9ae0> begin[:]
variable[vmin] assign[=] call[name[np].nanpercentile, parameter[name[values], constant[5]]]
variable[vmax] assign[=] call[name[np].nanpercentile, parameter[name[values], constant[95]]]
if compare[name[vmin] equal[==] name[vmax]] begin[:]
variable[vmin] assign[=] call[name[np].nanpercentile, parameter[name[values], constant[1]]]
variable[vmax] assign[=] call[name[np].nanpercentile, parameter[name[values], constant[99]]]
if compare[name[vmin] equal[==] name[vmax]] begin[:]
variable[vmin] assign[=] call[name[np].min, parameter[name[values]]]
variable[vmax] assign[=] call[name[np].max, parameter[name[values]]]
assert[compare[call[name[features].shape][constant[0]] equal[==] call[name[len], parameter[name[shaps]]]]]
variable[nan_mask] assign[=] call[name[np].isnan, parameter[name[values]]]
call[name[pl].scatter, parameter[call[name[shaps]][name[nan_mask]], binary_operation[name[pos] + call[name[ys]][name[nan_mask]]]]]
variable[cvals] assign[=] call[call[name[values]][call[name[np].invert, parameter[name[nan_mask]]]].astype, parameter[name[np].float64]]
variable[cvals_imp] assign[=] call[name[cvals].copy, parameter[]]
call[name[cvals_imp]][call[name[np].isnan, parameter[name[cvals]]]] assign[=] binary_operation[binary_operation[name[vmin] + name[vmax]] / constant[2.0]]
call[name[cvals]][compare[name[cvals_imp] greater[>] name[vmax]]] assign[=] name[vmax]
call[name[cvals]][compare[name[cvals_imp] less[<] name[vmin]]] assign[=] name[vmin]
call[name[pl].scatter, parameter[call[name[shaps]][call[name[np].invert, parameter[name[nan_mask]]]], binary_operation[name[pos] + call[name[ys]][call[name[np].invert, parameter[name[nan_mask]]]]]]]
if <ast.BoolOp object at 0x7da20c6e70a0> begin[:]
import module[matplotlib.cm] as alias[cm]
variable[m] assign[=] call[name[cm].ScalarMappable, parameter[]]
call[name[m].set_array, parameter[list[[<ast.Constant object at 0x7da20c6e5210>, <ast.Constant object at 0x7da20c6e7670>]]]]
variable[cb] assign[=] call[name[pl].colorbar, parameter[name[m]]]
call[name[cb].set_ticklabels, parameter[list[[<ast.Subscript object at 0x7da20c6e5480>, <ast.Subscript object at 0x7da20c6e62f0>]]]]
call[name[cb].set_label, parameter[call[name[labels]][constant[FEATURE_VALUE]]]]
call[name[cb].ax.tick_params, parameter[]]
call[name[cb].set_alpha, parameter[constant[1]]]
call[name[cb].outline.set_visible, parameter[constant[False]]]
variable[bbox] assign[=] call[call[name[cb].ax.get_window_extent, parameter[]].transformed, parameter[call[call[name[pl].gcf, parameter[]].dpi_scale_trans.inverted, parameter[]]]]
call[name[cb].ax.set_aspect, parameter[binary_operation[binary_operation[name[bbox].height - constant[0.9]] * constant[20]]]]
call[call[name[pl].gca, parameter[]].xaxis.set_ticks_position, parameter[constant[bottom]]]
call[call[name[pl].gca, parameter[]].yaxis.set_ticks_position, parameter[constant[none]]]
call[call[call[name[pl].gca, parameter[]].spines][constant[right]].set_visible, parameter[constant[False]]]
call[call[call[name[pl].gca, parameter[]].spines][constant[top]].set_visible, parameter[constant[False]]]
call[call[call[name[pl].gca, parameter[]].spines][constant[left]].set_visible, parameter[constant[False]]]
call[call[name[pl].gca, parameter[]].tick_params, parameter[]]
call[name[pl].yticks, parameter[call[name[range], parameter[call[name[len], parameter[name[feature_order]]]]], <ast.ListComp object at 0x7da20c6e7970>]]
if compare[name[plot_type] not_equal[!=] constant[bar]] begin[:]
call[call[name[pl].gca, parameter[]].tick_params, parameter[constant[y]]]
call[call[name[pl].gca, parameter[]].tick_params, parameter[constant[x]]]
call[name[pl].ylim, parameter[<ast.UnaryOp object at 0x7da18f00c280>, call[name[len], parameter[name[feature_order]]]]]
if compare[name[plot_type] equal[==] constant[bar]] begin[:]
call[name[pl].xlabel, parameter[call[name[labels]][constant[GLOBAL_VALUE]]]]
if name[show] begin[:]
call[name[pl].show, parameter[]] | keyword[def] identifier[summary_plot] ( identifier[shap_values] , identifier[features] = keyword[None] , identifier[feature_names] = keyword[None] , identifier[max_display] = keyword[None] , identifier[plot_type] = literal[string] ,
identifier[color] = keyword[None] , identifier[axis_color] = literal[string] , identifier[title] = keyword[None] , identifier[alpha] = literal[int] , identifier[show] = keyword[True] , identifier[sort] = keyword[True] ,
identifier[color_bar] = keyword[True] , identifier[auto_size_plot] = keyword[True] , identifier[layered_violin_max_num_bins] = literal[int] , identifier[class_names] = keyword[None] ):
literal[string]
identifier[multi_class] = keyword[False]
keyword[if] identifier[isinstance] ( identifier[shap_values] , identifier[list] ):
identifier[multi_class] = keyword[True]
identifier[plot_type] = literal[string]
keyword[else] :
keyword[assert] identifier[len] ( identifier[shap_values] . identifier[shape] )!= literal[int] , literal[string]
keyword[if] identifier[color] keyword[is] keyword[None] :
keyword[if] identifier[plot_type] == literal[string] :
identifier[color] = literal[string]
keyword[elif] identifier[multi_class] :
identifier[color] = keyword[lambda] identifier[i] : identifier[colors] . identifier[red_blue_circle] ( identifier[i] / identifier[len] ( identifier[shap_values] ))
keyword[else] :
identifier[color] = identifier[colors] . identifier[blue_rgb]
keyword[if] identifier[str] ( identifier[type] ( identifier[features] ))== literal[string] :
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[features] . identifier[columns]
identifier[features] = identifier[features] . identifier[values]
keyword[elif] identifier[isinstance] ( identifier[features] , identifier[list] ):
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[features]
identifier[features] = keyword[None]
keyword[elif] ( identifier[features] keyword[is] keyword[not] keyword[None] ) keyword[and] identifier[len] ( identifier[features] . identifier[shape] )== literal[int] keyword[and] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[features]
identifier[features] = keyword[None]
identifier[num_features] =( identifier[shap_values] [ literal[int] ]. identifier[shape] [ literal[int] ] keyword[if] identifier[multi_class] keyword[else] identifier[shap_values] . identifier[shape] [ literal[int] ])
keyword[if] identifier[feature_names] keyword[is] keyword[None] :
identifier[feature_names] = identifier[np] . identifier[array] ([ identifier[labels] [ literal[string] ]% identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_features] )])
keyword[if] keyword[not] identifier[multi_class] keyword[and] identifier[len] ( identifier[shap_values] . identifier[shape] )== literal[int] :
keyword[if] identifier[max_display] keyword[is] keyword[None] :
identifier[max_display] = literal[int]
keyword[else] :
identifier[max_display] = identifier[min] ( identifier[len] ( identifier[feature_names] ), identifier[max_display] )
identifier[sort_inds] = identifier[np] . identifier[argsort] (- identifier[np] . identifier[abs] ( identifier[shap_values] . identifier[sum] ( literal[int] )). identifier[sum] ( literal[int] ))
identifier[delta] = literal[int] /( identifier[shap_values] . identifier[shape] [ literal[int] ]** literal[int] )
identifier[slow] = identifier[np] . identifier[nanpercentile] ( identifier[shap_values] , identifier[delta] )
identifier[shigh] = identifier[np] . identifier[nanpercentile] ( identifier[shap_values] , literal[int] - identifier[delta] )
identifier[v] = identifier[max] ( identifier[abs] ( identifier[slow] ), identifier[abs] ( identifier[shigh] ))
identifier[slow] =- identifier[v]
identifier[shigh] = identifier[v]
identifier[pl] . identifier[figure] ( identifier[figsize] =( literal[int] * identifier[max_display] + literal[int] , literal[int] * identifier[max_display] + literal[int] ))
identifier[pl] . identifier[subplot] ( literal[int] , identifier[max_display] , literal[int] )
identifier[proj_shap_values] = identifier[shap_values] [:, identifier[sort_inds] [ literal[int] ], identifier[sort_inds] ]
identifier[proj_shap_values] [:, literal[int] :]*= literal[int]
identifier[summary_plot] (
identifier[proj_shap_values] , identifier[features] [:, identifier[sort_inds] ] keyword[if] identifier[features] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] ,
identifier[feature_names] = identifier[feature_names] [ identifier[sort_inds] ],
identifier[sort] = keyword[False] , identifier[show] = keyword[False] , identifier[color_bar] = keyword[False] ,
identifier[auto_size_plot] = keyword[False] ,
identifier[max_display] = identifier[max_display]
)
identifier[pl] . identifier[xlim] (( identifier[slow] , identifier[shigh] ))
identifier[pl] . identifier[xlabel] ( literal[string] )
identifier[title_length_limit] = literal[int]
identifier[pl] . identifier[title] ( identifier[shorten_text] ( identifier[feature_names] [ identifier[sort_inds] [ literal[int] ]], identifier[title_length_limit] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[min] ( identifier[len] ( identifier[sort_inds] ), identifier[max_display] )):
identifier[ind] = identifier[sort_inds] [ identifier[i] ]
identifier[pl] . identifier[subplot] ( literal[int] , identifier[max_display] , identifier[i] + literal[int] )
identifier[proj_shap_values] = identifier[shap_values] [:, identifier[ind] , identifier[sort_inds] ]
identifier[proj_shap_values] *= literal[int]
identifier[proj_shap_values] [:, identifier[i] ]/= literal[int]
identifier[summary_plot] (
identifier[proj_shap_values] , identifier[features] [:, identifier[sort_inds] ] keyword[if] identifier[features] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None] ,
identifier[sort] = keyword[False] ,
identifier[feature_names] =[ literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[feature_names] ))],
identifier[show] = keyword[False] ,
identifier[color_bar] = keyword[False] ,
identifier[auto_size_plot] = keyword[False] ,
identifier[max_display] = identifier[max_display]
)
identifier[pl] . identifier[xlim] (( identifier[slow] , identifier[shigh] ))
identifier[pl] . identifier[xlabel] ( literal[string] )
keyword[if] identifier[i] == identifier[min] ( identifier[len] ( identifier[sort_inds] ), identifier[max_display] )// literal[int] :
identifier[pl] . identifier[xlabel] ( identifier[labels] [ literal[string] ])
identifier[pl] . identifier[title] ( identifier[shorten_text] ( identifier[feature_names] [ identifier[ind] ], identifier[title_length_limit] ))
identifier[pl] . identifier[tight_layout] ( identifier[pad] = literal[int] , identifier[w_pad] = literal[int] , identifier[h_pad] = literal[int] )
identifier[pl] . identifier[subplots_adjust] ( identifier[hspace] = literal[int] , identifier[wspace] = literal[int] )
keyword[if] identifier[show] :
identifier[pl] . identifier[show] ()
keyword[return]
keyword[if] identifier[max_display] keyword[is] keyword[None] :
identifier[max_display] = literal[int]
keyword[if] identifier[sort] :
keyword[if] identifier[multi_class] :
identifier[feature_order] = identifier[np] . identifier[argsort] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[mean] ( identifier[np] . identifier[abs] ( identifier[shap_values] ), identifier[axis] = literal[int] ), identifier[axis] = literal[int] ))
keyword[else] :
identifier[feature_order] = identifier[np] . identifier[argsort] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[abs] ( identifier[shap_values] ), identifier[axis] = literal[int] ))
identifier[feature_order] = identifier[feature_order] [- identifier[min] ( identifier[max_display] , identifier[len] ( identifier[feature_order] )):]
keyword[else] :
identifier[feature_order] = identifier[np] . identifier[flip] ( identifier[np] . identifier[arange] ( identifier[min] ( identifier[max_display] , identifier[num_features] )), literal[int] )
identifier[row_height] = literal[int]
keyword[if] identifier[auto_size_plot] :
identifier[pl] . identifier[gcf] (). identifier[set_size_inches] ( literal[int] , identifier[len] ( identifier[feature_order] )* identifier[row_height] + literal[int] )
identifier[pl] . identifier[axvline] ( identifier[x] = literal[int] , identifier[color] = literal[string] , identifier[zorder] =- literal[int] )
keyword[if] identifier[plot_type] == literal[string] :
keyword[for] identifier[pos] , identifier[i] keyword[in] identifier[enumerate] ( identifier[feature_order] ):
identifier[pl] . identifier[axhline] ( identifier[y] = identifier[pos] , identifier[color] = literal[string] , identifier[lw] = literal[int] , identifier[dashes] =( literal[int] , literal[int] ), identifier[zorder] =- literal[int] )
identifier[shaps] = identifier[shap_values] [:, identifier[i] ]
identifier[values] = keyword[None] keyword[if] identifier[features] keyword[is] keyword[None] keyword[else] identifier[features] [:, identifier[i] ]
identifier[inds] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[shaps] ))
identifier[np] . identifier[random] . identifier[shuffle] ( identifier[inds] )
keyword[if] identifier[values] keyword[is] keyword[not] keyword[None] :
identifier[values] = identifier[values] [ identifier[inds] ]
identifier[shaps] = identifier[shaps] [ identifier[inds] ]
identifier[colored_feature] = keyword[True]
keyword[try] :
identifier[values] = identifier[np] . identifier[array] ( identifier[values] , identifier[dtype] = identifier[np] . identifier[float64] )
keyword[except] :
identifier[colored_feature] = keyword[False]
identifier[N] = identifier[len] ( identifier[shaps] )
identifier[nbins] = literal[int]
identifier[quant] = identifier[np] . identifier[round] ( identifier[nbins] *( identifier[shaps] - identifier[np] . identifier[min] ( identifier[shaps] ))/( identifier[np] . identifier[max] ( identifier[shaps] )- identifier[np] . identifier[min] ( identifier[shaps] )+ literal[int] ))
identifier[inds] = identifier[np] . identifier[argsort] ( identifier[quant] + identifier[np] . identifier[random] . identifier[randn] ( identifier[N] )* literal[int] )
identifier[layer] = literal[int]
identifier[last_bin] =- literal[int]
identifier[ys] = identifier[np] . identifier[zeros] ( identifier[N] )
keyword[for] identifier[ind] keyword[in] identifier[inds] :
keyword[if] identifier[quant] [ identifier[ind] ]!= identifier[last_bin] :
identifier[layer] = literal[int]
identifier[ys] [ identifier[ind] ]= identifier[np] . identifier[ceil] ( identifier[layer] / literal[int] )*(( identifier[layer] % literal[int] )* literal[int] - literal[int] )
identifier[layer] += literal[int]
identifier[last_bin] = identifier[quant] [ identifier[ind] ]
identifier[ys] *= literal[int] *( identifier[row_height] / identifier[np] . identifier[max] ( identifier[ys] + literal[int] ))
keyword[if] identifier[features] keyword[is] keyword[not] keyword[None] keyword[and] identifier[colored_feature] :
identifier[vmin] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
identifier[vmax] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
keyword[if] identifier[vmin] == identifier[vmax] :
identifier[vmin] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
identifier[vmax] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
keyword[if] identifier[vmin] == identifier[vmax] :
identifier[vmin] = identifier[np] . identifier[min] ( identifier[values] )
identifier[vmax] = identifier[np] . identifier[max] ( identifier[values] )
keyword[assert] identifier[features] . identifier[shape] [ literal[int] ]== identifier[len] ( identifier[shaps] ), literal[string]
identifier[nan_mask] = identifier[np] . identifier[isnan] ( identifier[values] )
identifier[pl] . identifier[scatter] ( identifier[shaps] [ identifier[nan_mask] ], identifier[pos] + identifier[ys] [ identifier[nan_mask] ], identifier[color] = literal[string] , identifier[vmin] = identifier[vmin] ,
identifier[vmax] = identifier[vmax] , identifier[s] = literal[int] , identifier[alpha] = identifier[alpha] , identifier[linewidth] = literal[int] ,
identifier[zorder] = literal[int] , identifier[rasterized] = identifier[len] ( identifier[shaps] )> literal[int] )
identifier[cvals] = identifier[values] [ identifier[np] . identifier[invert] ( identifier[nan_mask] )]. identifier[astype] ( identifier[np] . identifier[float64] )
identifier[cvals_imp] = identifier[cvals] . identifier[copy] ()
identifier[cvals_imp] [ identifier[np] . identifier[isnan] ( identifier[cvals] )]=( identifier[vmin] + identifier[vmax] )/ literal[int]
identifier[cvals] [ identifier[cvals_imp] > identifier[vmax] ]= identifier[vmax]
identifier[cvals] [ identifier[cvals_imp] < identifier[vmin] ]= identifier[vmin]
identifier[pl] . identifier[scatter] ( identifier[shaps] [ identifier[np] . identifier[invert] ( identifier[nan_mask] )], identifier[pos] + identifier[ys] [ identifier[np] . identifier[invert] ( identifier[nan_mask] )],
identifier[cmap] = identifier[colors] . identifier[red_blue] , identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] , identifier[s] = literal[int] ,
identifier[c] = identifier[cvals] , identifier[alpha] = identifier[alpha] , identifier[linewidth] = literal[int] ,
identifier[zorder] = literal[int] , identifier[rasterized] = identifier[len] ( identifier[shaps] )> literal[int] )
keyword[else] :
identifier[pl] . identifier[scatter] ( identifier[shaps] , identifier[pos] + identifier[ys] , identifier[s] = literal[int] , identifier[alpha] = identifier[alpha] , identifier[linewidth] = literal[int] , identifier[zorder] = literal[int] ,
identifier[color] = identifier[color] keyword[if] identifier[colored_feature] keyword[else] literal[string] , identifier[rasterized] = identifier[len] ( identifier[shaps] )> literal[int] )
keyword[elif] identifier[plot_type] == literal[string] :
keyword[for] identifier[pos] , identifier[i] keyword[in] identifier[enumerate] ( identifier[feature_order] ):
identifier[pl] . identifier[axhline] ( identifier[y] = identifier[pos] , identifier[color] = literal[string] , identifier[lw] = literal[int] , identifier[dashes] =( literal[int] , literal[int] ), identifier[zorder] =- literal[int] )
keyword[if] identifier[features] keyword[is] keyword[not] keyword[None] :
identifier[global_low] = identifier[np] . identifier[nanpercentile] ( identifier[shap_values] [:,: identifier[len] ( identifier[feature_names] )]. identifier[flatten] (), literal[int] )
identifier[global_high] = identifier[np] . identifier[nanpercentile] ( identifier[shap_values] [:,: identifier[len] ( identifier[feature_names] )]. identifier[flatten] (), literal[int] )
keyword[for] identifier[pos] , identifier[i] keyword[in] identifier[enumerate] ( identifier[feature_order] ):
identifier[shaps] = identifier[shap_values] [:, identifier[i] ]
identifier[shap_min] , identifier[shap_max] = identifier[np] . identifier[min] ( identifier[shaps] ), identifier[np] . identifier[max] ( identifier[shaps] )
identifier[rng] = identifier[shap_max] - identifier[shap_min]
identifier[xs] = identifier[np] . identifier[linspace] ( identifier[np] . identifier[min] ( identifier[shaps] )- identifier[rng] * literal[int] , identifier[np] . identifier[max] ( identifier[shaps] )+ identifier[rng] * literal[int] , literal[int] )
keyword[if] identifier[np] . identifier[std] ( identifier[shaps] )<( identifier[global_high] - identifier[global_low] )/ literal[int] :
identifier[ds] = identifier[gaussian_kde] ( identifier[shaps] + identifier[np] . identifier[random] . identifier[randn] ( identifier[len] ( identifier[shaps] ))*( identifier[global_high] - identifier[global_low] )/ literal[int] )( identifier[xs] )
keyword[else] :
identifier[ds] = identifier[gaussian_kde] ( identifier[shaps] )( identifier[xs] )
identifier[ds] /= identifier[np] . identifier[max] ( identifier[ds] )* literal[int]
identifier[values] = identifier[features] [:, identifier[i] ]
identifier[window_size] = identifier[max] ( literal[int] , identifier[len] ( identifier[values] )// literal[int] )
identifier[smooth_values] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[xs] )- literal[int] )
identifier[sort_inds] = identifier[np] . identifier[argsort] ( identifier[shaps] )
identifier[trailing_pos] = literal[int]
identifier[leading_pos] = literal[int]
identifier[running_sum] = literal[int]
identifier[back_fill] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[xs] )- literal[int] ):
keyword[while] identifier[leading_pos] < identifier[len] ( identifier[shaps] ) keyword[and] identifier[xs] [ identifier[j] ]>= identifier[shaps] [ identifier[sort_inds] [ identifier[leading_pos] ]]:
identifier[running_sum] += identifier[values] [ identifier[sort_inds] [ identifier[leading_pos] ]]
identifier[leading_pos] += literal[int]
keyword[if] identifier[leading_pos] - identifier[trailing_pos] > literal[int] :
identifier[running_sum] -= identifier[values] [ identifier[sort_inds] [ identifier[trailing_pos] ]]
identifier[trailing_pos] += literal[int]
keyword[if] identifier[leading_pos] - identifier[trailing_pos] > literal[int] :
identifier[smooth_values] [ identifier[j] ]= identifier[running_sum] /( identifier[leading_pos] - identifier[trailing_pos] )
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[back_fill] ):
identifier[smooth_values] [ identifier[j] - identifier[k] - literal[int] ]= identifier[smooth_values] [ identifier[j] ]
keyword[else] :
identifier[back_fill] += literal[int]
identifier[vmin] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
identifier[vmax] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
keyword[if] identifier[vmin] == identifier[vmax] :
identifier[vmin] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
identifier[vmax] = identifier[np] . identifier[nanpercentile] ( identifier[values] , literal[int] )
keyword[if] identifier[vmin] == identifier[vmax] :
identifier[vmin] = identifier[np] . identifier[min] ( identifier[values] )
identifier[vmax] = identifier[np] . identifier[max] ( identifier[values] )
identifier[pl] . identifier[scatter] ( identifier[shaps] , identifier[np] . identifier[ones] ( identifier[shap_values] . identifier[shape] [ literal[int] ])* identifier[pos] , identifier[s] = literal[int] , identifier[cmap] = identifier[colors] . identifier[red_blue] , identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] ,
identifier[c] = identifier[values] , identifier[alpha] = identifier[alpha] , identifier[linewidth] = literal[int] , identifier[zorder] = literal[int] )
identifier[smooth_values] -= identifier[vmin]
keyword[if] identifier[vmax] - identifier[vmin] > literal[int] :
identifier[smooth_values] /= identifier[vmax] - identifier[vmin]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[xs] )- literal[int] ):
keyword[if] identifier[ds] [ identifier[i] ]> literal[int] keyword[or] identifier[ds] [ identifier[i] + literal[int] ]> literal[int] :
identifier[pl] . identifier[fill_between] ([ identifier[xs] [ identifier[i] ], identifier[xs] [ identifier[i] + literal[int] ]],[ identifier[pos] + identifier[ds] [ identifier[i] ], identifier[pos] + identifier[ds] [ identifier[i] + literal[int] ]],
[ identifier[pos] - identifier[ds] [ identifier[i] ], identifier[pos] - identifier[ds] [ identifier[i] + literal[int] ]], identifier[color] = identifier[colors] . identifier[red_blue] ( identifier[smooth_values] [ identifier[i] ]),
identifier[zorder] = literal[int] )
keyword[else] :
identifier[parts] = identifier[pl] . identifier[violinplot] ( identifier[shap_values] [:, identifier[feature_order] ], identifier[range] ( identifier[len] ( identifier[feature_order] )), identifier[points] = literal[int] , identifier[vert] = keyword[False] ,
identifier[widths] = literal[int] ,
identifier[showmeans] = keyword[False] , identifier[showextrema] = keyword[False] , identifier[showmedians] = keyword[False] )
keyword[for] identifier[pc] keyword[in] identifier[parts] [ literal[string] ]:
identifier[pc] . identifier[set_facecolor] ( identifier[color] )
identifier[pc] . identifier[set_edgecolor] ( literal[string] )
identifier[pc] . identifier[set_alpha] ( identifier[alpha] )
keyword[elif] identifier[plot_type] == literal[string] :
identifier[num_x_points] = literal[int]
identifier[bins] = identifier[np] . identifier[linspace] ( literal[int] , identifier[features] . identifier[shape] [ literal[int] ], identifier[layered_violin_max_num_bins] + literal[int] ). identifier[round] ( literal[int] ). identifier[astype] (
literal[string] )
identifier[shap_min] , identifier[shap_max] = identifier[np] . identifier[min] ( identifier[shap_values] ), identifier[np] . identifier[max] ( identifier[shap_values] )
identifier[x_points] = identifier[np] . identifier[linspace] ( identifier[shap_min] , identifier[shap_max] , identifier[num_x_points] )
keyword[for] identifier[pos] , identifier[ind] keyword[in] identifier[enumerate] ( identifier[feature_order] ):
identifier[feature] = identifier[features] [:, identifier[ind] ]
identifier[unique] , identifier[counts] = identifier[np] . identifier[unique] ( identifier[feature] , identifier[return_counts] = keyword[True] )
keyword[if] identifier[unique] . identifier[shape] [ literal[int] ]<= identifier[layered_violin_max_num_bins] :
identifier[order] = identifier[np] . identifier[argsort] ( identifier[unique] )
identifier[thesebins] = identifier[np] . identifier[cumsum] ( identifier[counts] [ identifier[order] ])
identifier[thesebins] = identifier[np] . identifier[insert] ( identifier[thesebins] , literal[int] , literal[int] )
keyword[else] :
identifier[thesebins] = identifier[bins]
identifier[nbins] = identifier[thesebins] . identifier[shape] [ literal[int] ]- literal[int]
identifier[order] = identifier[np] . identifier[argsort] ( identifier[feature] )
identifier[y0] = identifier[np] . identifier[ones] ( identifier[num_x_points] )* identifier[pos]
identifier[ys] = identifier[np] . identifier[zeros] (( identifier[nbins] , identifier[num_x_points] ))
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nbins] ):
identifier[shaps] = identifier[shap_values] [ identifier[order] [ identifier[thesebins] [ identifier[i] ]: identifier[thesebins] [ identifier[i] + literal[int] ]], identifier[ind] ]
keyword[if] identifier[shaps] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[warnings] . identifier[warn] (
literal[string]
%( identifier[i] , identifier[feature_names] [ identifier[ind] ]))
keyword[if] identifier[i] > literal[int] :
identifier[ys] [ identifier[i] ,:]= identifier[ys] [ identifier[i] - literal[int] ,:]
keyword[continue]
identifier[ys] [ identifier[i] ,:]= identifier[gaussian_kde] ( identifier[shaps] + identifier[np] . identifier[random] . identifier[normal] ( identifier[loc] = literal[int] , identifier[scale] = literal[int] , identifier[size] = identifier[shaps] . identifier[shape] [ literal[int] ]))( identifier[x_points] )
identifier[size] = identifier[thesebins] [ identifier[i] + literal[int] ]- identifier[thesebins] [ identifier[i] ]
identifier[bin_size_if_even] = identifier[features] . identifier[shape] [ literal[int] ]/ identifier[nbins]
identifier[relative_bin_size] = identifier[size] / identifier[bin_size_if_even]
identifier[ys] [ identifier[i] ,:]*= identifier[relative_bin_size]
identifier[ys] = identifier[np] . identifier[cumsum] ( identifier[ys] , identifier[axis] = literal[int] )
identifier[width] = literal[int]
identifier[scale] = identifier[ys] . identifier[max] ()* literal[int] / identifier[width]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nbins] - literal[int] ,- literal[int] ,- literal[int] ):
identifier[y] = identifier[ys] [ identifier[i] ,:]/ identifier[scale]
identifier[c] = identifier[pl] . identifier[get_cmap] ( identifier[color] )( identifier[i] /(
identifier[nbins] - literal[int] )) keyword[if] identifier[color] keyword[in] identifier[pl] . identifier[cm] . identifier[datad] keyword[else] identifier[color]
identifier[pl] . identifier[fill_between] ( identifier[x_points] , identifier[pos] - identifier[y] , identifier[pos] + identifier[y] , identifier[facecolor] = identifier[c] )
identifier[pl] . identifier[xlim] ( identifier[shap_min] , identifier[shap_max] )
keyword[elif] keyword[not] identifier[multi_class] keyword[and] identifier[plot_type] == literal[string] :
identifier[feature_inds] = identifier[feature_order] [: identifier[max_display] ]
identifier[y_pos] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[feature_inds] ))
identifier[global_shap_values] = identifier[np] . identifier[abs] ( identifier[shap_values] ). identifier[mean] ( literal[int] )
identifier[pl] . identifier[barh] ( identifier[y_pos] , identifier[global_shap_values] [ identifier[feature_inds] ], literal[int] , identifier[align] = literal[string] , identifier[color] = identifier[color] )
identifier[pl] . identifier[yticks] ( identifier[y_pos] , identifier[fontsize] = literal[int] )
identifier[pl] . identifier[gca] (). identifier[set_yticklabels] ([ identifier[feature_names] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[feature_inds] ])
keyword[elif] identifier[multi_class] keyword[and] identifier[plot_type] == literal[string] :
keyword[if] identifier[class_names] keyword[is] keyword[None] :
identifier[class_names] =[ literal[string] + identifier[str] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[shap_values] ))]
identifier[feature_inds] = identifier[feature_order] [: identifier[max_display] ]
identifier[y_pos] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[feature_inds] ))
identifier[left_pos] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[feature_inds] ))
identifier[class_inds] = identifier[np] . identifier[argsort] ([- identifier[np] . identifier[abs] ( identifier[shap_values] [ identifier[i] ]). identifier[mean] () keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[shap_values] ))])
keyword[for] identifier[i] , identifier[ind] keyword[in] identifier[enumerate] ( identifier[class_inds] ):
identifier[global_shap_values] = identifier[np] . identifier[abs] ( identifier[shap_values] [ identifier[ind] ]). identifier[mean] ( literal[int] )
identifier[pl] . identifier[barh] (
identifier[y_pos] , identifier[global_shap_values] [ identifier[feature_inds] ], literal[int] , identifier[left] = identifier[left_pos] , identifier[align] = literal[string] ,
identifier[color] = identifier[color] ( identifier[i] ), identifier[label] = identifier[class_names] [ identifier[ind] ]
)
identifier[left_pos] += identifier[global_shap_values] [ identifier[feature_inds] ]
identifier[pl] . identifier[yticks] ( identifier[y_pos] , identifier[fontsize] = literal[int] )
identifier[pl] . identifier[gca] (). identifier[set_yticklabels] ([ identifier[feature_names] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[feature_inds] ])
identifier[pl] . identifier[legend] ( identifier[frameon] = keyword[False] , identifier[fontsize] = literal[int] )
keyword[if] identifier[color_bar] keyword[and] identifier[features] keyword[is] keyword[not] keyword[None] keyword[and] identifier[plot_type] != literal[string] keyword[and] ( identifier[plot_type] != literal[string] keyword[or] identifier[color] keyword[in] identifier[pl] . identifier[cm] . identifier[datad] ):
keyword[import] identifier[matplotlib] . identifier[cm] keyword[as] identifier[cm]
identifier[m] = identifier[cm] . identifier[ScalarMappable] ( identifier[cmap] = identifier[colors] . identifier[red_blue] keyword[if] identifier[plot_type] != literal[string] keyword[else] identifier[pl] . identifier[get_cmap] ( identifier[color] ))
identifier[m] . identifier[set_array] ([ literal[int] , literal[int] ])
identifier[cb] = identifier[pl] . identifier[colorbar] ( identifier[m] , identifier[ticks] =[ literal[int] , literal[int] ], identifier[aspect] = literal[int] )
identifier[cb] . identifier[set_ticklabels] ([ identifier[labels] [ literal[string] ], identifier[labels] [ literal[string] ]])
identifier[cb] . identifier[set_label] ( identifier[labels] [ literal[string] ], identifier[size] = literal[int] , identifier[labelpad] = literal[int] )
identifier[cb] . identifier[ax] . identifier[tick_params] ( identifier[labelsize] = literal[int] , identifier[length] = literal[int] )
identifier[cb] . identifier[set_alpha] ( literal[int] )
identifier[cb] . identifier[outline] . identifier[set_visible] ( keyword[False] )
identifier[bbox] = identifier[cb] . identifier[ax] . identifier[get_window_extent] (). identifier[transformed] ( identifier[pl] . identifier[gcf] (). identifier[dpi_scale_trans] . identifier[inverted] ())
identifier[cb] . identifier[ax] . identifier[set_aspect] (( identifier[bbox] . identifier[height] - literal[int] )* literal[int] )
identifier[pl] . identifier[gca] (). identifier[xaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[pl] . identifier[gca] (). identifier[yaxis] . identifier[set_ticks_position] ( literal[string] )
identifier[pl] . identifier[gca] (). identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
identifier[pl] . identifier[gca] (). identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
identifier[pl] . identifier[gca] (). identifier[spines] [ literal[string] ]. identifier[set_visible] ( keyword[False] )
identifier[pl] . identifier[gca] (). identifier[tick_params] ( identifier[color] = identifier[axis_color] , identifier[labelcolor] = identifier[axis_color] )
identifier[pl] . identifier[yticks] ( identifier[range] ( identifier[len] ( identifier[feature_order] )),[ identifier[feature_names] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[feature_order] ], identifier[fontsize] = literal[int] )
keyword[if] identifier[plot_type] != literal[string] :
identifier[pl] . identifier[gca] (). identifier[tick_params] ( literal[string] , identifier[length] = literal[int] , identifier[width] = literal[int] , identifier[which] = literal[string] )
identifier[pl] . identifier[gca] (). identifier[tick_params] ( literal[string] , identifier[labelsize] = literal[int] )
identifier[pl] . identifier[ylim] (- literal[int] , identifier[len] ( identifier[feature_order] ))
keyword[if] identifier[plot_type] == literal[string] :
identifier[pl] . identifier[xlabel] ( identifier[labels] [ literal[string] ], identifier[fontsize] = literal[int] )
keyword[else] :
identifier[pl] . identifier[xlabel] ( identifier[labels] [ literal[string] ], identifier[fontsize] = literal[int] )
keyword[if] identifier[show] :
identifier[pl] . identifier[show] () | def summary_plot(shap_values, features=None, feature_names=None, max_display=None, plot_type='dot', color=None, axis_color='#333333', title=None, alpha=1, show=True, sort=True, color_bar=True, auto_size_plot=True, layered_violin_max_num_bins=20, class_names=None):
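The token stream above renders the tail of a SHAP-style summary_plot: the beeswarm scatter with a percentile-trimmed color range, a violin variant whose body is colored by a running-window mean of the feature values, a layered-violin variant built from stacked per-bin KDEs, single- and multi-class bar charts, and the shared colorbar and axis styling. The sketch below is a minimal, self-contained reconstruction of two of those building blocks, not the library's API: trimmed_color_range mirrors the nested 5/95 -> 1/99 -> min/max percentile fallback applied before each scatter call, and windowed_feature_means mirrors the two-pointer smoothing used to color the violin bodies. The window size, the synthetic data, and the back_fill reset are illustrative choices of this sketch; the tokenized stream elides its literal constants, so none of them should be read as the original values.

import numpy as np

def trimmed_color_range(values):
    # Trim the color range to the 5th-95th percentiles, falling back to
    # 1st-99th and finally to min/max so the range never collapses --
    # the same cascade the token stream applies before coloring points.
    vmin, vmax = np.nanpercentile(values, 5), np.nanpercentile(values, 95)
    if vmin == vmax:
        vmin, vmax = np.nanpercentile(values, 1), np.nanpercentile(values, 99)
        if vmin == vmax:
            vmin, vmax = np.min(values), np.max(values)
    return vmin, vmax

def windowed_feature_means(shaps, values, xs, window=20):
    # Two-pointer scan over the SHAP values in sorted order: at each grid
    # point xs[j], average the feature values of (at most `window`) samples
    # whose SHAP value falls just below xs[j]. Empty grid cells are counted
    # in back_fill and copied from the next non-empty cell; unlike the
    # stream above, this sketch resets back_fill after each fill so a later
    # gap cannot overwrite cells that were already filled.
    sort_inds = np.argsort(shaps)
    smooth = np.zeros(len(xs) - 1)
    trailing = leading = back_fill = 0
    running_sum = 0.0
    for j in range(len(xs) - 1):
        while leading < len(shaps) and xs[j] >= shaps[sort_inds[leading]]:
            running_sum += values[sort_inds[leading]]
            leading += 1
            if leading - trailing > window:
                running_sum -= values[sort_inds[trailing]]
                trailing += 1
        if leading > trailing:
            smooth[j] = running_sum / (leading - trailing)
            for k in range(back_fill):
                smooth[j - k - 1] = smooth[j]
            back_fill = 0
        else:
            back_fill += 1
    return smooth

rng = np.random.default_rng(0)
shaps = rng.normal(size=400)                            # stand-in SHAP values
values = 2.0 * shaps + rng.normal(scale=0.5, size=400)  # correlated feature values
xs = np.linspace(shaps.min(), shaps.max(), 100)
vmin, vmax = trimmed_color_range(values)
means = windowed_feature_means(shaps, values, xs)
colors01 = np.clip((means - vmin) / (vmax - vmin), 0.0, 1.0)  # 0..1 inputs for a colormap
print(vmin, vmax, colors01[:5])

The two-pointer scan keeps the smoothing O(n) after the initial sort: each sample enters and leaves the window exactly once, which is why the stream tracks a running sum instead of re-averaging a slice of the sorted array at every grid point.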
"""Create a SHAP summary plot, colored by feature values when they are provided.
Parameters
----------
shap_values : numpy.array
Matrix of SHAP values (# samples x # features)
features : numpy.array or pandas.DataFrame or list
Matrix of feature values (# samples x # features) or a feature_names list as shorthand
feature_names : list
Names of the features (length # features)
max_display : int
How many top features to include in the plot (default is 20, or 7 for interaction plots)
plot_type : "dot" (default) or "violin"
What type of summary plot to produce
"""
multi_class = False
if isinstance(shap_values, list):
multi_class = True
plot_type = 'bar' # only type supported for now # depends on [control=['if'], data=[]]
else:
assert len(shap_values.shape) != 1, 'Summary plots need a matrix of shap_values, not a vector.'
# default color:
if color is None:
if plot_type == 'layered_violin':
color = 'coolwarm' # depends on [control=['if'], data=[]]
elif multi_class:
color = lambda i: colors.red_blue_circle(i / len(shap_values)) # depends on [control=['if'], data=[]]
else:
color = colors.blue_rgb # depends on [control=['if'], data=['color']]
# convert from a DataFrame or other types
if str(type(features)) == "<class 'pandas.core.frame.DataFrame'>":
if feature_names is None:
feature_names = features.columns # depends on [control=['if'], data=['feature_names']]
features = features.values # depends on [control=['if'], data=[]]
elif isinstance(features, list):
if feature_names is None:
feature_names = features # depends on [control=['if'], data=['feature_names']]
features = None # depends on [control=['if'], data=[]]
elif features is not None and len(features.shape) == 1 and (feature_names is None):
feature_names = features
features = None # depends on [control=['if'], data=[]]
num_features = shap_values[0].shape[1] if multi_class else shap_values.shape[1]
if feature_names is None:
feature_names = np.array([labels['FEATURE'] % str(i) for i in range(num_features)]) # depends on [control=['if'], data=['feature_names']]
# plotting SHAP interaction values
if not multi_class and len(shap_values.shape) == 3:
if max_display is None:
max_display = 7 # depends on [control=['if'], data=['max_display']]
else:
max_display = min(len(feature_names), max_display)
sort_inds = np.argsort(-np.abs(shap_values.sum(1)).sum(0))
# get plotting limits
delta = 1.0 / shap_values.shape[1] ** 2
slow = np.nanpercentile(shap_values, delta)
shigh = np.nanpercentile(shap_values, 100 - delta)
v = max(abs(slow), abs(shigh))
slow = -v
shigh = v
pl.figure(figsize=(1.5 * max_display + 1, 0.8 * max_display + 1))
pl.subplot(1, max_display, 1)
proj_shap_values = shap_values[:, sort_inds[0], sort_inds]
proj_shap_values[:, 1:] *= 2 # because off diag effects are split in half
summary_plot(proj_shap_values, features[:, sort_inds] if features is not None else None, feature_names=feature_names[sort_inds], sort=False, show=False, color_bar=False, auto_size_plot=False, max_display=max_display)
pl.xlim((slow, shigh))
pl.xlabel('')
title_length_limit = 11
pl.title(shorten_text(feature_names[sort_inds[0]], title_length_limit))
for i in range(1, min(len(sort_inds), max_display)):
ind = sort_inds[i]
pl.subplot(1, max_display, i + 1)
proj_shap_values = shap_values[:, ind, sort_inds]
proj_shap_values *= 2
proj_shap_values[:, i] /= 2 # because only off diag effects are split in half
summary_plot(proj_shap_values, features[:, sort_inds] if features is not None else None, sort=False, feature_names=['' for i in range(len(feature_names))], show=False, color_bar=False, auto_size_plot=False, max_display=max_display)
pl.xlim((slow, shigh))
pl.xlabel('')
if i == min(len(sort_inds), max_display) // 2:
pl.xlabel(labels['INTERACTION_VALUE']) # depends on [control=['if'], data=[]]
pl.title(shorten_text(feature_names[ind], title_length_limit)) # depends on [control=['for'], data=['i']]
pl.tight_layout(pad=0, w_pad=0, h_pad=0.0)
pl.subplots_adjust(hspace=0, wspace=0.1)
if show:
pl.show() # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]]
if max_display is None:
max_display = 20 # depends on [control=['if'], data=['max_display']]
if sort:
# order features by the sum of their effect magnitudes
if multi_class:
feature_order = np.argsort(np.sum(np.mean(np.abs(shap_values), axis=0), axis=0)) # depends on [control=['if'], data=[]]
else:
feature_order = np.argsort(np.sum(np.abs(shap_values), axis=0))
feature_order = feature_order[-min(max_display, len(feature_order)):] # depends on [control=['if'], data=[]]
else:
feature_order = np.flip(np.arange(min(max_display, num_features)), 0)
row_height = 0.4
if auto_size_plot:
pl.gcf().set_size_inches(8, len(feature_order) * row_height + 1.5) # depends on [control=['if'], data=[]]
pl.axvline(x=0, color='#999999', zorder=-1)
if plot_type == 'dot':
for (pos, i) in enumerate(feature_order):
pl.axhline(y=pos, color='#cccccc', lw=0.5, dashes=(1, 5), zorder=-1)
shaps = shap_values[:, i]
values = None if features is None else features[:, i]
inds = np.arange(len(shaps))
np.random.shuffle(inds)
if values is not None:
values = values[inds] # depends on [control=['if'], data=['values']]
shaps = shaps[inds]
colored_feature = True
try:
values = np.array(values, dtype=np.float64) # make sure this can be numeric # depends on [control=['try'], data=[]]
except:
colored_feature = False # depends on [control=['except'], data=[]]
N = len(shaps)
# hspacing = (np.max(shaps) - np.min(shaps)) / 200
# curr_bin = []
nbins = 100
quant = np.round(nbins * (shaps - np.min(shaps)) / (np.max(shaps) - np.min(shaps) + 1e-08))
inds = np.argsort(quant + np.random.randn(N) * 1e-06)
layer = 0
last_bin = -1
ys = np.zeros(N)
for ind in inds:
if quant[ind] != last_bin:
layer = 0 # depends on [control=['if'], data=[]]
ys[ind] = np.ceil(layer / 2) * (layer % 2 * 2 - 1)
layer += 1
last_bin = quant[ind] # depends on [control=['for'], data=['ind']]
ys *= 0.9 * (row_height / np.max(ys + 1))
if features is not None and colored_feature:
# trim the color range, but prevent the color range from collapsing
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax:
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax:
vmin = np.min(values)
vmax = np.max(values) # depends on [control=['if'], data=['vmin', 'vmax']] # depends on [control=['if'], data=['vmin', 'vmax']]
assert features.shape[0] == len(shaps), 'Feature and SHAP matrices must have the same number of rows!'
# plot the nan values in the interaction feature as grey
nan_mask = np.isnan(values)
pl.scatter(shaps[nan_mask], pos + ys[nan_mask], color='#777777', vmin=vmin, vmax=vmax, s=16, alpha=alpha, linewidth=0, zorder=3, rasterized=len(shaps) > 500)
# plot the non-nan values colored by the trimmed feature value
cvals = values[np.invert(nan_mask)].astype(np.float64)
cvals_imp = cvals.copy()
cvals_imp[np.isnan(cvals)] = (vmin + vmax) / 2.0
cvals[cvals_imp > vmax] = vmax
cvals[cvals_imp < vmin] = vmin
pl.scatter(shaps[np.invert(nan_mask)], pos + ys[np.invert(nan_mask)], cmap=colors.red_blue, vmin=vmin, vmax=vmax, s=16, c=cvals, alpha=alpha, linewidth=0, zorder=3, rasterized=len(shaps) > 500) # depends on [control=['if'], data=[]]
else:
pl.scatter(shaps, pos + ys, s=16, alpha=alpha, linewidth=0, zorder=3, color=color if colored_feature else '#777777', rasterized=len(shaps) > 500) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
elif plot_type == 'violin':
for (pos, i) in enumerate(feature_order):
pl.axhline(y=pos, color='#cccccc', lw=0.5, dashes=(1, 5), zorder=-1) # depends on [control=['for'], data=[]]
if features is not None:
global_low = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 1)
global_high = np.nanpercentile(shap_values[:, :len(feature_names)].flatten(), 99)
for (pos, i) in enumerate(feature_order):
shaps = shap_values[:, i]
(shap_min, shap_max) = (np.min(shaps), np.max(shaps))
rng = shap_max - shap_min
xs = np.linspace(np.min(shaps) - rng * 0.2, np.max(shaps) + rng * 0.2, 100)
if np.std(shaps) < (global_high - global_low) / 100:
ds = gaussian_kde(shaps + np.random.randn(len(shaps)) * (global_high - global_low) / 100)(xs) # depends on [control=['if'], data=[]]
else:
ds = gaussian_kde(shaps)(xs)
ds /= np.max(ds) * 3
values = features[:, i]
window_size = max(10, len(values) // 20)
smooth_values = np.zeros(len(xs) - 1)
sort_inds = np.argsort(shaps)
trailing_pos = 0
leading_pos = 0
running_sum = 0
back_fill = 0
for j in range(len(xs) - 1):
while leading_pos < len(shaps) and xs[j] >= shaps[sort_inds[leading_pos]]:
running_sum += values[sort_inds[leading_pos]]
leading_pos += 1
if leading_pos - trailing_pos > 20:
running_sum -= values[sort_inds[trailing_pos]]
trailing_pos += 1 # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if leading_pos - trailing_pos > 0:
smooth_values[j] = running_sum / (leading_pos - trailing_pos)
for k in range(back_fill):
smooth_values[j - k - 1] = smooth_values[j] # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
else:
back_fill += 1 # depends on [control=['for'], data=['j']]
vmin = np.nanpercentile(values, 5)
vmax = np.nanpercentile(values, 95)
if vmin == vmax:
vmin = np.nanpercentile(values, 1)
vmax = np.nanpercentile(values, 99)
if vmin == vmax:
vmin = np.min(values)
vmax = np.max(values) # depends on [control=['if'], data=['vmin', 'vmax']] # depends on [control=['if'], data=['vmin', 'vmax']]
pl.scatter(shaps, np.ones(shap_values.shape[0]) * pos, s=9, cmap=colors.red_blue, vmin=vmin, vmax=vmax, c=values, alpha=alpha, linewidth=0, zorder=1)
# smooth_values -= nxp.nanpercentile(smooth_values, 5)
# smooth_values /= np.nanpercentile(smooth_values, 95)
smooth_values -= vmin
if vmax - vmin > 0:
smooth_values /= vmax - vmin # depends on [control=['if'], data=[]]
for i in range(len(xs) - 1):
if ds[i] > 0.05 or ds[i + 1] > 0.05:
pl.fill_between([xs[i], xs[i + 1]], [pos + ds[i], pos + ds[i + 1]], [pos - ds[i], pos - ds[i + 1]], color=colors.red_blue(smooth_values[i]), zorder=2) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['features']]
else:
parts = pl.violinplot(shap_values[:, feature_order], range(len(feature_order)), points=200, vert=False, widths=0.7, showmeans=False, showextrema=False, showmedians=False)
for pc in parts['bodies']:
pc.set_facecolor(color)
pc.set_edgecolor('none')
pc.set_alpha(alpha) # depends on [control=['for'], data=['pc']] # depends on [control=['if'], data=[]]
elif plot_type == 'layered_violin': # courtesy of @kodonnell
num_x_points = 200
bins = np.linspace(0, features.shape[0], layered_violin_max_num_bins + 1).round(0).astype('int') # the indices of the feature data corresponding to each bin
(shap_min, shap_max) = (np.min(shap_values), np.max(shap_values))
x_points = np.linspace(shap_min, shap_max, num_x_points)
# loop through each feature and plot:
for (pos, ind) in enumerate(feature_order):
# decide how to handle: if #unique < layered_violin_max_num_bins then split by unique value, otherwise use bins/percentiles.
# to keep simpler code, in the case of uniques, we just adjust the bins to align with the unique counts.
feature = features[:, ind]
(unique, counts) = np.unique(feature, return_counts=True)
if unique.shape[0] <= layered_violin_max_num_bins:
order = np.argsort(unique)
thesebins = np.cumsum(counts[order])
thesebins = np.insert(thesebins, 0, 0) # depends on [control=['if'], data=[]]
else:
thesebins = bins
nbins = thesebins.shape[0] - 1
# order the feature data so we can apply percentiling
order = np.argsort(feature)
# x axis is located at y0 = pos, with pos being there for offset
y0 = np.ones(num_x_points) * pos
# calculate kdes:
ys = np.zeros((nbins, num_x_points))
for i in range(nbins):
# get shap values in this bin:
shaps = shap_values[order[thesebins[i]:thesebins[i + 1]], ind]
# if there's only one element, then we can't
if shaps.shape[0] == 1:
warnings.warn("not enough data in bin #%d for feature %s, so it'll be ignored. Try increasing the number of records to plot." % (i, feature_names[ind]))
# to ignore it, just set it to the previous y-values (so the area between them will be zero). Not ys is already 0, so there's
# nothing to do if i == 0
if i > 0:
ys[i, :] = ys[i - 1, :] # depends on [control=['if'], data=['i']]
continue # depends on [control=['if'], data=[]]
# save kde of them: note that we add a tiny bit of gaussian noise to avoid singular matrix errors
ys[i, :] = gaussian_kde(shaps + np.random.normal(loc=0, scale=0.001, size=shaps.shape[0]))(x_points)
# scale it up so that the 'size' of each y represents the size of the bin. For continuous data this will
# do nothing, but when we've gone with the unqique option, this will matter - e.g. if 99% are male and 1%
# female, we want the 1% to appear a lot smaller.
size = thesebins[i + 1] - thesebins[i]
bin_size_if_even = features.shape[0] / nbins
relative_bin_size = size / bin_size_if_even
ys[i, :] *= relative_bin_size # depends on [control=['for'], data=['i']]
# now plot 'em. We don't plot the individual strips, as this can leave whitespace between them.
# instead, we plot the full kde, then remove outer strip and plot over it, etc., to ensure no
# whitespace
ys = np.cumsum(ys, axis=0)
width = 0.8
scale = ys.max() * 2 / width # 2 is here as we plot both sides of x axis
for i in range(nbins - 1, -1, -1):
y = ys[i, :] / scale
c = pl.get_cmap(color)(i / (nbins - 1)) if color in pl.cm.datad else color # if color is a cmap, use it, otherwise use a color
pl.fill_between(x_points, pos - y, pos + y, facecolor=c) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]]
pl.xlim(shap_min, shap_max) # depends on [control=['if'], data=[]]
elif not multi_class and plot_type == 'bar':
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
global_shap_values = np.abs(shap_values).mean(0)
pl.barh(y_pos, global_shap_values[feature_inds], 0.7, align='center', color=color)
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds]) # depends on [control=['if'], data=[]]
elif multi_class and plot_type == 'bar':
if class_names is None:
class_names = ['Class ' + str(i) for i in range(len(shap_values))] # depends on [control=['if'], data=['class_names']]
feature_inds = feature_order[:max_display]
y_pos = np.arange(len(feature_inds))
left_pos = np.zeros(len(feature_inds))
class_inds = np.argsort([-np.abs(shap_values[i]).mean() for i in range(len(shap_values))])
for (i, ind) in enumerate(class_inds):
global_shap_values = np.abs(shap_values[ind]).mean(0)
pl.barh(y_pos, global_shap_values[feature_inds], 0.7, left=left_pos, align='center', color=color(i), label=class_names[ind])
left_pos += global_shap_values[feature_inds] # depends on [control=['for'], data=[]]
pl.yticks(y_pos, fontsize=13)
pl.gca().set_yticklabels([feature_names[i] for i in feature_inds])
pl.legend(frameon=False, fontsize=12) # depends on [control=['if'], data=[]]
# draw the color bar
if color_bar and features is not None and (plot_type != 'bar') and (plot_type != 'layered_violin' or color in pl.cm.datad):
import matplotlib.cm as cm
m = cm.ScalarMappable(cmap=colors.red_blue if plot_type != 'layered_violin' else pl.get_cmap(color))
m.set_array([0, 1])
cb = pl.colorbar(m, ticks=[0, 1], aspect=1000)
cb.set_ticklabels([labels['FEATURE_VALUE_LOW'], labels['FEATURE_VALUE_HIGH']])
cb.set_label(labels['FEATURE_VALUE'], size=12, labelpad=0)
cb.ax.tick_params(labelsize=11, length=0)
cb.set_alpha(1)
cb.outline.set_visible(False)
bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
cb.ax.set_aspect((bbox.height - 0.9) * 20) # depends on [control=['if'], data=[]]
# cb.draw_all()
pl.gca().xaxis.set_ticks_position('bottom')
pl.gca().yaxis.set_ticks_position('none')
pl.gca().spines['right'].set_visible(False)
pl.gca().spines['top'].set_visible(False)
pl.gca().spines['left'].set_visible(False)
pl.gca().tick_params(color=axis_color, labelcolor=axis_color)
pl.yticks(range(len(feature_order)), [feature_names[i] for i in feature_order], fontsize=13)
if plot_type != 'bar':
pl.gca().tick_params('y', length=20, width=0.5, which='major') # depends on [control=['if'], data=[]]
pl.gca().tick_params('x', labelsize=11)
pl.ylim(-1, len(feature_order))
if plot_type == 'bar':
pl.xlabel(labels['GLOBAL_VALUE'], fontsize=13) # depends on [control=['if'], data=[]]
else:
pl.xlabel(labels['VALUE'], fontsize=13)
if show:
pl.show() # depends on [control=['if'], data=[]] |
def to_nifti(obj, like=None, header=None, affine=None, extensions=Ellipsis, version=1):
'''
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
* header (default: None) may be a Nifti1 or Niti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
respectively.
'''
from neuropythy.mri import Subject
obj0 = obj
# First go from like to explicit versions of affine and header:
if like is not None:
if isinstance(like, nib.analyze.AnalyzeHeader) or \
isinstance(like, nib.freesurfer.mghformat.MGHHeader):
if header is None: header = like
elif isinstance(like, nib.analyze.SpatialImage):
if header is None: header = like.header
if affine is None: affine = like.affine
elif isinstance(like, Subject):
if affine is None: affine = like.voxel_to_native_matrix
else:
raise ValueError('Could not interpret like argument with type %s' % type(like))
# check to make sure that we have to change something:
elif ((version == 1 and isinstance(obj, nib.nifti1.Nifti1Image)) or
(version == 2 and isinstance(obj, nib.nifti2.Nifti2Image))):
if ((header is None or obj.header is header) and
(extensions is Ellipsis or extensions is obj.header.extensions or
(extensions is None and len(obj.header.extensions) == 0))):
return obj
# okay, now look at the header and affine etc.
if header is None:
if isinstance(obj, nib.analyze.SpatialImage):
header = obj.header
else:
header = nib.nifti1.Nifti1Header() if version == 1 else nib.nifti2.Nifti2Header()
if affine is None:
if isinstance(obj, nib.analyze.SpatialImage):
affine = obj.affine
else:
affine = np.eye(4)
if extensions is None:
extensions = nib.nifti1.Nifti1Extensions()
# Figure out what the data is
if isinstance(obj, nib.analyze.SpatialImage):
obj = obj.dataobj
elif not pimms.is_nparray(obj):
obj = np.asarray(obj)
if len(obj.shape) < 3: obj = np.asarray([[obj]])
# Okay, make a new object now...
if version == 1:
obj = nib.nifti1.Nifti1Image(obj, affine, header)
elif version == 2:
obj = nib.nifti2.Nifti2Image(obj, affine, header)
else:
raise ValueError('invalid version given (should be 1 or 2): %s' % version)
# add the extensions if they're needed
if extensions is not Ellipsis and (len(extensions) > 0 or len(obj.header.extensions) > 0):
obj.header.extensions = extensions
# Okay, that's it!
return obj | def function[to_nifti, parameter[obj, like, header, affine, extensions, version]]:
constant[
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
* header (default: None) may be a Nifti1 or Niti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
respectively.
]
from relative_module[neuropythy.mri] import module[Subject]
variable[obj0] assign[=] name[obj]
if compare[name[like] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da18c4cd000> begin[:]
if compare[name[header] is constant[None]] begin[:]
variable[header] assign[=] name[like]
if compare[name[header] is constant[None]] begin[:]
if call[name[isinstance], parameter[name[obj], name[nib].analyze.SpatialImage]] begin[:]
variable[header] assign[=] name[obj].header
if compare[name[affine] is constant[None]] begin[:]
if call[name[isinstance], parameter[name[obj], name[nib].analyze.SpatialImage]] begin[:]
variable[affine] assign[=] name[obj].affine
if compare[name[extensions] is constant[None]] begin[:]
variable[extensions] assign[=] call[name[nib].nifti1.Nifti1Extensions, parameter[]]
if call[name[isinstance], parameter[name[obj], name[nib].analyze.SpatialImage]] begin[:]
variable[obj] assign[=] name[obj].dataobj
if compare[call[name[len], parameter[name[obj].shape]] less[<] constant[3]] begin[:]
variable[obj] assign[=] call[name[np].asarray, parameter[list[[<ast.List object at 0x7da18c4cd0c0>]]]]
if compare[name[version] equal[==] constant[1]] begin[:]
variable[obj] assign[=] call[name[nib].nifti1.Nifti1Image, parameter[name[obj], name[affine], name[header]]]
if <ast.BoolOp object at 0x7da18c4cca60> begin[:]
name[obj].header.extensions assign[=] name[extensions]
return[name[obj]] | keyword[def] identifier[to_nifti] ( identifier[obj] , identifier[like] = keyword[None] , identifier[header] = keyword[None] , identifier[affine] = keyword[None] , identifier[extensions] = identifier[Ellipsis] , identifier[version] = literal[int] ):
literal[string]
keyword[from] identifier[neuropythy] . identifier[mri] keyword[import] identifier[Subject]
identifier[obj0] = identifier[obj]
keyword[if] identifier[like] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[like] , identifier[nib] . identifier[analyze] . identifier[AnalyzeHeader] ) keyword[or] identifier[isinstance] ( identifier[like] , identifier[nib] . identifier[freesurfer] . identifier[mghformat] . identifier[MGHHeader] ):
keyword[if] identifier[header] keyword[is] keyword[None] : identifier[header] = identifier[like]
keyword[elif] identifier[isinstance] ( identifier[like] , identifier[nib] . identifier[analyze] . identifier[SpatialImage] ):
keyword[if] identifier[header] keyword[is] keyword[None] : identifier[header] = identifier[like] . identifier[header]
keyword[if] identifier[affine] keyword[is] keyword[None] : identifier[affine] = identifier[like] . identifier[affine]
keyword[elif] identifier[isinstance] ( identifier[like] , identifier[Subject] ):
keyword[if] identifier[affine] keyword[is] keyword[None] : identifier[affine] = identifier[like] . identifier[voxel_to_native_matrix]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[type] ( identifier[like] ))
keyword[elif] (( identifier[version] == literal[int] keyword[and] identifier[isinstance] ( identifier[obj] , identifier[nib] . identifier[nifti1] . identifier[Nifti1Image] )) keyword[or]
( identifier[version] == literal[int] keyword[and] identifier[isinstance] ( identifier[obj] , identifier[nib] . identifier[nifti2] . identifier[Nifti2Image] ))):
keyword[if] (( identifier[header] keyword[is] keyword[None] keyword[or] identifier[obj] . identifier[header] keyword[is] identifier[header] ) keyword[and]
( identifier[extensions] keyword[is] identifier[Ellipsis] keyword[or] identifier[extensions] keyword[is] identifier[obj] . identifier[header] . identifier[extensions] keyword[or]
( identifier[extensions] keyword[is] keyword[None] keyword[and] identifier[len] ( identifier[obj] . identifier[header] . identifier[extensions] )== literal[int] ))):
keyword[return] identifier[obj]
keyword[if] identifier[header] keyword[is] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[nib] . identifier[analyze] . identifier[SpatialImage] ):
identifier[header] = identifier[obj] . identifier[header]
keyword[else] :
identifier[header] = identifier[nib] . identifier[nifti1] . identifier[Nifti1Header] () keyword[if] identifier[version] == literal[int] keyword[else] identifier[nib] . identifier[nifti2] . identifier[Nifti2Header] ()
keyword[if] identifier[affine] keyword[is] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[nib] . identifier[analyze] . identifier[SpatialImage] ):
identifier[affine] = identifier[obj] . identifier[affine]
keyword[else] :
identifier[affine] = identifier[np] . identifier[eye] ( literal[int] )
keyword[if] identifier[extensions] keyword[is] keyword[None] :
identifier[extensions] = identifier[nib] . identifier[nifti1] . identifier[Nifti1Extensions] ()
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[nib] . identifier[analyze] . identifier[SpatialImage] ):
identifier[obj] = identifier[obj] . identifier[dataobj]
keyword[elif] keyword[not] identifier[pimms] . identifier[is_nparray] ( identifier[obj] ):
identifier[obj] = identifier[np] . identifier[asarray] ( identifier[obj] )
keyword[if] identifier[len] ( identifier[obj] . identifier[shape] )< literal[int] : identifier[obj] = identifier[np] . identifier[asarray] ([[ identifier[obj] ]])
keyword[if] identifier[version] == literal[int] :
identifier[obj] = identifier[nib] . identifier[nifti1] . identifier[Nifti1Image] ( identifier[obj] , identifier[affine] , identifier[header] )
keyword[elif] identifier[version] == literal[int] :
identifier[obj] = identifier[nib] . identifier[nifti2] . identifier[Nifti2Image] ( identifier[obj] , identifier[affine] , identifier[header] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[version] )
keyword[if] identifier[extensions] keyword[is] keyword[not] identifier[Ellipsis] keyword[and] ( identifier[len] ( identifier[extensions] )> literal[int] keyword[or] identifier[len] ( identifier[obj] . identifier[header] . identifier[extensions] )> literal[int] ):
identifier[obj] . identifier[header] . identifier[extensions] = identifier[extensions]
keyword[return] identifier[obj] | def to_nifti(obj, like=None, header=None, affine=None, extensions=Ellipsis, version=1):
"""
to_nifti(obj) yields a Nifti2Image object that is as equivalent as possible to the given object
obj. If obj is a Nifti2Image already, then it is returned unmolested; other deduction rules
are described below.
The following options are accepted:
* like (default: None) may be provided to give a guide for the various header- and meta-data
that is included in the image. If this is a nifti image object, its meta-data are used; if
this is a subject, then the meta-data are deduced from the subject's voxel and native
orientation matrices. All other specific options below override anything deduced from the
like argument.
* header (default: None) may be a Nifti1 or Niti2 image header to be used as the nifti header
or to replace the header in a new image.
* affine (default: None) may specify the affine transform to be given to the image object.
* extensions (default: Ellipsis) may specify a nifti extensions object that should be included
in the header. The default value, Ellipsis, indicates that the extensions should not be
changed, and that None should be used if extensions are not implied in obj (if, for example,
obj is a data array rather than an image object with a header already.
* version (default: 2) may be specified as 1 or 2 for a Nifti1Image or Nifti2Image object,
respectively.
"""
from neuropythy.mri import Subject
obj0 = obj
# First go from like to explicit versions of affine and header:
if like is not None:
if isinstance(like, nib.analyze.AnalyzeHeader) or isinstance(like, nib.freesurfer.mghformat.MGHHeader):
if header is None:
header = like # depends on [control=['if'], data=['header']] # depends on [control=['if'], data=[]]
elif isinstance(like, nib.analyze.SpatialImage):
if header is None:
header = like.header # depends on [control=['if'], data=['header']]
if affine is None:
affine = like.affine # depends on [control=['if'], data=['affine']] # depends on [control=['if'], data=[]]
elif isinstance(like, Subject):
if affine is None:
affine = like.voxel_to_native_matrix # depends on [control=['if'], data=['affine']] # depends on [control=['if'], data=[]]
else:
raise ValueError('Could not interpret like argument with type %s' % type(like)) # depends on [control=['if'], data=['like']]
# check to make sure that we have to change something:
elif version == 1 and isinstance(obj, nib.nifti1.Nifti1Image) or (version == 2 and isinstance(obj, nib.nifti2.Nifti2Image)):
if (header is None or obj.header is header) and (extensions is Ellipsis or extensions is obj.header.extensions or (extensions is None and len(obj.header.extensions) == 0)):
return obj # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# okay, now look at the header and affine etc.
if header is None:
if isinstance(obj, nib.analyze.SpatialImage):
header = obj.header # depends on [control=['if'], data=[]]
else:
header = nib.nifti1.Nifti1Header() if version == 1 else nib.nifti2.Nifti2Header() # depends on [control=['if'], data=['header']]
if affine is None:
if isinstance(obj, nib.analyze.SpatialImage):
affine = obj.affine # depends on [control=['if'], data=[]]
else:
affine = np.eye(4) # depends on [control=['if'], data=['affine']]
if extensions is None:
extensions = nib.nifti1.Nifti1Extensions() # depends on [control=['if'], data=['extensions']]
# Figure out what the data is
if isinstance(obj, nib.analyze.SpatialImage):
obj = obj.dataobj # depends on [control=['if'], data=[]]
elif not pimms.is_nparray(obj):
obj = np.asarray(obj) # depends on [control=['if'], data=[]]
if len(obj.shape) < 3:
obj = np.asarray([[obj]]) # depends on [control=['if'], data=[]]
# Okay, make a new object now...
if version == 1:
obj = nib.nifti1.Nifti1Image(obj, affine, header) # depends on [control=['if'], data=[]]
elif version == 2:
obj = nib.nifti2.Nifti2Image(obj, affine, header) # depends on [control=['if'], data=[]]
else:
raise ValueError('invalid version given (should be 1 or 2): %s' % version)
# add the extensions if they're needed
if extensions is not Ellipsis and (len(extensions) > 0 or len(obj.header.extensions) > 0):
obj.header.extensions = extensions # depends on [control=['if'], data=[]]
# Okay, that's it!
return obj |
def save_file(client, bucket, data_file, items, dry_run=None):
"""Tries to write JSON data to data file in S3."""
logger.debug('Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(
number_items=len(items),
bucket=bucket,
key=data_file
))
if not dry_run:
return _put_to_s3(client, bucket, data_file, json.dumps(items)) | def function[save_file, parameter[client, bucket, data_file, items, dry_run]]:
constant[Tries to write JSON data to data file in S3.]
call[name[logger].debug, parameter[call[constant[Writing {number_items} items to s3. Bucket: {bucket} Key: {key}].format, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b07cceb0> begin[:]
return[call[name[_put_to_s3], parameter[name[client], name[bucket], name[data_file], call[name[json].dumps, parameter[name[items]]]]]] | keyword[def] identifier[save_file] ( identifier[client] , identifier[bucket] , identifier[data_file] , identifier[items] , identifier[dry_run] = keyword[None] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[number_items] = identifier[len] ( identifier[items] ),
identifier[bucket] = identifier[bucket] ,
identifier[key] = identifier[data_file]
))
keyword[if] keyword[not] identifier[dry_run] :
keyword[return] identifier[_put_to_s3] ( identifier[client] , identifier[bucket] , identifier[data_file] , identifier[json] . identifier[dumps] ( identifier[items] )) | def save_file(client, bucket, data_file, items, dry_run=None):
"""Tries to write JSON data to data file in S3."""
logger.debug('Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(number_items=len(items), bucket=bucket, key=data_file))
if not dry_run:
return _put_to_s3(client, bucket, data_file, json.dumps(items)) # depends on [control=['if'], data=[]] |
def JC69 (mu=1.0, alphabet="nuc", **kwargs):
"""
Jukes-Cantor 1969 model. This model assumes equal concentrations
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969). Evolution of Protein Molecules.
New York: Academic Press. pp. 21–132
Parameters
-----------
mu : float
substitution rate
alphabet : str
specify alphabet to use.
Available alphabets are:
'nuc' - nucleotides only, gaps ignored
'nuc_gap' - nucleotide alphabet with gaps, gaps can be ignored optionally
"""
num_chars = len(alphabets[alphabet])
W, pi = np.ones((num_chars,num_chars)), np.ones(num_chars)
gtr = GTR(alphabet=alphabet)
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr | def function[JC69, parameter[mu, alphabet]]:
constant[
Jukes-Cantor 1969 model. This model assumes equal concentrations
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969). Evolution of Protein Molecules.
New York: Academic Press. pp. 21–132
Parameters
-----------
mu : float
substitution rate
alphabet : str
specify alphabet to use.
Available alphabets are:
'nuc' - nucleotides only, gaps ignored
'nuc_gap' - nucleotide alphabet with gaps, gaps can be ignored optionally
]
variable[num_chars] assign[=] call[name[len], parameter[call[name[alphabets]][name[alphabet]]]]
<ast.Tuple object at 0x7da1b02df9a0> assign[=] tuple[[<ast.Call object at 0x7da1b02df640>, <ast.Call object at 0x7da1b02df310>]]
variable[gtr] assign[=] call[name[GTR], parameter[]]
call[name[gtr].assign_rates, parameter[]]
return[name[gtr]] | keyword[def] identifier[JC69] ( identifier[mu] = literal[int] , identifier[alphabet] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[num_chars] = identifier[len] ( identifier[alphabets] [ identifier[alphabet] ])
identifier[W] , identifier[pi] = identifier[np] . identifier[ones] (( identifier[num_chars] , identifier[num_chars] )), identifier[np] . identifier[ones] ( identifier[num_chars] )
identifier[gtr] = identifier[GTR] ( identifier[alphabet] = identifier[alphabet] )
identifier[gtr] . identifier[assign_rates] ( identifier[mu] = identifier[mu] , identifier[pi] = identifier[pi] , identifier[W] = identifier[W] )
keyword[return] identifier[gtr] | def JC69(mu=1.0, alphabet='nuc', **kwargs):
"""
Jukes-Cantor 1969 model. This model assumes equal concentrations
of the nucleotides and equal transition rates between nucleotide states.
For more info, see: Jukes and Cantor (1969). Evolution of Protein Molecules.
New York: Academic Press. pp. 21–132
Parameters
-----------
mu : float
substitution rate
alphabet : str
specify alphabet to use.
Available alphabets are:
'nuc' - nucleotides only, gaps ignored
'nuc_gap' - nucleotide alphabet with gaps, gaps can be ignored optionally
"""
num_chars = len(alphabets[alphabet])
(W, pi) = (np.ones((num_chars, num_chars)), np.ones(num_chars))
gtr = GTR(alphabet=alphabet)
gtr.assign_rates(mu=mu, pi=pi, W=W)
return gtr |
def build_block(self, format_string):
"""
Parse the format string into blocks containing Literals, Placeholders
etc that we can cache and reuse.
"""
first_block = Block(None, py3_wrapper=self.py3_wrapper)
block = first_block
# Tokenize the format string and process them
for token in self.tokens(format_string):
value = token.group(0)
if token.group("block_start"):
# Create new block
block = block.new_block()
elif token.group("block_end"):
# Close block setting any valid state as needed
# and return to parent block to continue
if not block.parent:
raise Exception("Too many `]`")
block = block.parent
elif token.group("switch"):
# a new option has been created
block = block.switch()
elif token.group("placeholder"):
# Found a {placeholder}
key = token.group("key")
format = token.group("format")
block.add(Placeholder(key, format))
elif token.group("literal"):
block.add(Literal(value))
elif token.group("lost_brace"):
# due to how parsing happens we can get a lonesome }
# eg in format_string '{{something}' this fixes that issue
block.add(Literal(value))
elif token.group("command"):
# a block command has been found
block.set_commands(token.group("command"))
elif token.group("escaped"):
# escaped characters add unescaped values
if value[0] in ["\\", "{", "}"]:
value = value[1:]
block.add(Literal(value))
if block.parent:
raise Exception("Block not closed")
# add to the cache
self.block_cache[format_string] = first_block | def function[build_block, parameter[self, format_string]]:
constant[
Parse the format string into blocks containing Literals, Placeholders
etc that we can cache and reuse.
]
variable[first_block] assign[=] call[name[Block], parameter[constant[None]]]
variable[block] assign[=] name[first_block]
for taget[name[token]] in starred[call[name[self].tokens, parameter[name[format_string]]]] begin[:]
variable[value] assign[=] call[name[token].group, parameter[constant[0]]]
if call[name[token].group, parameter[constant[block_start]]] begin[:]
variable[block] assign[=] call[name[block].new_block, parameter[]]
if name[block].parent begin[:]
<ast.Raise object at 0x7da1b2088820>
call[name[self].block_cache][name[format_string]] assign[=] name[first_block] | keyword[def] identifier[build_block] ( identifier[self] , identifier[format_string] ):
literal[string]
identifier[first_block] = identifier[Block] ( keyword[None] , identifier[py3_wrapper] = identifier[self] . identifier[py3_wrapper] )
identifier[block] = identifier[first_block]
keyword[for] identifier[token] keyword[in] identifier[self] . identifier[tokens] ( identifier[format_string] ):
identifier[value] = identifier[token] . identifier[group] ( literal[int] )
keyword[if] identifier[token] . identifier[group] ( literal[string] ):
identifier[block] = identifier[block] . identifier[new_block] ()
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
keyword[if] keyword[not] identifier[block] . identifier[parent] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[block] = identifier[block] . identifier[parent]
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[block] = identifier[block] . identifier[switch] ()
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[key] = identifier[token] . identifier[group] ( literal[string] )
identifier[format] = identifier[token] . identifier[group] ( literal[string] )
identifier[block] . identifier[add] ( identifier[Placeholder] ( identifier[key] , identifier[format] ))
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[block] . identifier[add] ( identifier[Literal] ( identifier[value] ))
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[block] . identifier[add] ( identifier[Literal] ( identifier[value] ))
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[block] . identifier[set_commands] ( identifier[token] . identifier[group] ( literal[string] ))
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
keyword[if] identifier[value] [ literal[int] ] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[value] = identifier[value] [ literal[int] :]
identifier[block] . identifier[add] ( identifier[Literal] ( identifier[value] ))
keyword[if] identifier[block] . identifier[parent] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[self] . identifier[block_cache] [ identifier[format_string] ]= identifier[first_block] | def build_block(self, format_string):
"""
Parse the format string into blocks containing Literals, Placeholders
etc that we can cache and reuse.
"""
first_block = Block(None, py3_wrapper=self.py3_wrapper)
block = first_block
# Tokenize the format string and process them
for token in self.tokens(format_string):
value = token.group(0)
if token.group('block_start'):
# Create new block
block = block.new_block() # depends on [control=['if'], data=[]]
elif token.group('block_end'):
# Close block setting any valid state as needed
# and return to parent block to continue
if not block.parent:
raise Exception('Too many `]`') # depends on [control=['if'], data=[]]
block = block.parent # depends on [control=['if'], data=[]]
elif token.group('switch'):
# a new option has been created
block = block.switch() # depends on [control=['if'], data=[]]
elif token.group('placeholder'):
# Found a {placeholder}
key = token.group('key')
format = token.group('format')
block.add(Placeholder(key, format)) # depends on [control=['if'], data=[]]
elif token.group('literal'):
block.add(Literal(value)) # depends on [control=['if'], data=[]]
elif token.group('lost_brace'):
# due to how parsing happens we can get a lonesome }
# eg in format_string '{{something}' this fixes that issue
block.add(Literal(value)) # depends on [control=['if'], data=[]]
elif token.group('command'):
# a block command has been found
block.set_commands(token.group('command')) # depends on [control=['if'], data=[]]
elif token.group('escaped'):
# escaped characters add unescaped values
if value[0] in ['\\', '{', '}']:
value = value[1:] # depends on [control=['if'], data=[]]
block.add(Literal(value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['token']]
if block.parent:
raise Exception('Block not closed') # depends on [control=['if'], data=[]]
# add to the cache
self.block_cache[format_string] = first_block |
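
The build_block code above turns a format string into a tree of nested blocks, one stack level per `[` ... `]` pair. The following standalone sketch illustrates the same parsing idea with plain lists and tuples in place of the Block/Placeholder/Literal classes; the token regex and all names here are illustrative assumptions, not the module's actual grammar.

import re

# One alternation per token kind, mirroring the named groups the parser checks.
TOKEN = re.compile(
    r"(?P<block_start>\[)"
    r"|(?P<block_end>\])"
    r"|\{(?P<key>\w+)\}"
    r"|(?P<literal>[^\[\]{}]+)"
)

def build_tree(format_string):
    root = []          # top-level block
    stack = [root]     # stack[-1] is the innermost open block
    for token in TOKEN.finditer(format_string):
        if token.group("block_start"):
            child = []
            stack[-1].append(child)
            stack.append(child)            # descend into the new block
        elif token.group("block_end"):
            if len(stack) == 1:
                raise ValueError("Too many `]`")
            stack.pop()                    # return to the parent block
        elif token.group("key"):
            stack[-1].append(("placeholder", token.group("key")))
        else:
            stack[-1].append(("literal", token.group("literal")))
    if len(stack) > 1:
        raise ValueError("Block not closed")
    return root

print(build_tree("cpu {load}[ temp {temp}]"))
# [('literal', 'cpu '), ('placeholder', 'load'),
#  [('literal', ' temp '), ('placeholder', 'temp')]]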
def reformat_schema(model):
""" Reformat schema to be in a more displayable format. """
if not hasattr(model, 'schema'):
return "Model '{}' does not have a schema".format(model)
if 'properties' not in model.schema:
return "Schema in unexpected format."
ret = copy.deepcopy(model.schema['properties'])
if 'type' in ret:
        del ret['type']
for key in model.schema.get('required', []):
if key in ret:
ret[key]['required'] = True
return ret | def function[reformat_schema, parameter[model]]:
constant[ Reformat schema to be in a more displayable format. ]
if <ast.UnaryOp object at 0x7da1b1f26e90> begin[:]
return[call[constant[Model '{}' does not have a schema].format, parameter[name[model]]]]
if compare[constant[properties] <ast.NotIn object at 0x7da2590d7190> name[model].schema] begin[:]
return[constant[Schema in unexpected format.]]
variable[ret] assign[=] call[name[copy].deepcopy, parameter[call[name[model].schema][constant[properties]]]]
if compare[constant[type] in name[ret]] begin[:]
<ast.Delete object at 0x7da1b1f27940>
for taget[name[key]] in starred[call[name[model].schema.get, parameter[constant[required], list[[]]]]] begin[:]
if compare[name[key] in name[ret]] begin[:]
call[call[name[ret]][name[key]]][constant[required]] assign[=] constant[True]
return[name[ret]] | keyword[def] identifier[reformat_schema] ( identifier[model] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[model] , literal[string] ):
keyword[return] literal[string] . identifier[format] ( identifier[model] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[model] . identifier[schema] :
keyword[return] literal[string]
identifier[ret] = identifier[copy] . identifier[deepcopy] ( identifier[model] . identifier[schema] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[ret] :
keyword[del] ( identifier[ret] [ literal[string] ])
keyword[for] identifier[key] keyword[in] identifier[model] . identifier[schema] . identifier[get] ( literal[string] ,[]):
keyword[if] identifier[key] keyword[in] identifier[ret] :
identifier[ret] [ identifier[key] ][ literal[string] ]= keyword[True]
keyword[return] identifier[ret] | def reformat_schema(model):
""" Reformat schema to be in a more displayable format. """
if not hasattr(model, 'schema'):
return "Model '{}' does not have a schema".format(model) # depends on [control=['if'], data=[]]
if 'properties' not in model.schema:
return 'Schema in unexpected format.' # depends on [control=['if'], data=[]]
ret = copy.deepcopy(model.schema['properties'])
if 'type' in ret:
del ret['type'] # depends on [control=['if'], data=['ret']]
for key in model.schema.get('required', []):
if key in ret:
ret[key]['required'] = True # depends on [control=['if'], data=['key', 'ret']] # depends on [control=['for'], data=['key']]
return ret |
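
To make the reformat_schema transformation concrete, here is a runnable sketch applied to a hypothetical stand-in model; the Model class and its schema contents are invented for illustration, and the error branches are condensed into one.

import copy

class Model:
    # hypothetical stand-in; only the `schema` attribute matters
    schema = {
        "type": "object",
        "properties": {
            "id": {"type": "integer"},
            "name": {"type": "string"},
        },
        "required": ["id"],
    }

def reformat_schema(model):
    # same logic as the function above, condensed for the demo
    if not hasattr(model, "schema") or "properties" not in model.schema:
        return "Schema in unexpected format."
    ret = copy.deepcopy(model.schema["properties"])
    ret.pop("type", None)
    for key in model.schema.get("required", []):
        if key in ret:
            ret[key]["required"] = True
    return ret

print(reformat_schema(Model()))
# {'id': {'type': 'integer', 'required': True}, 'name': {'type': 'string'}}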
def load_module(self, fullname):
"""import a notebook as a module"""
path = find_notebook(fullname, self.path)
    print("importing Jupyter notebook from %s" % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = read(f, 4)
    # create the module and add it to sys.modules
    # if name in sys.modules:
    #     return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb.cells:
if cell.cell_type == 'code':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell.source)
                # run the code in the module
exec(code, mod.__dict__)
finally:
self.shell.user_ns = save_user_ns
return mod | def function[load_module, parameter[self, fullname]]:
constant[import a notebook as a module]
variable[path] assign[=] call[name[find_notebook], parameter[name[fullname], name[self].path]]
call[name[print], parameter[binary_operation[constant[importing Jupyter notebook from %s] <ast.Mod object at 0x7da2590d6920> name[path]]]]
with call[name[io].open, parameter[name[path], constant[r]]] begin[:]
variable[nb] assign[=] call[name[read], parameter[name[f], constant[4]]]
variable[mod] assign[=] call[name[types].ModuleType, parameter[name[fullname]]]
name[mod].__file__ assign[=] name[path]
name[mod].__loader__ assign[=] name[self]
call[name[mod].__dict__][constant[get_ipython]] assign[=] name[get_ipython]
call[name[sys].modules][name[fullname]] assign[=] name[mod]
variable[save_user_ns] assign[=] name[self].shell.user_ns
name[self].shell.user_ns assign[=] name[mod].__dict__
<ast.Try object at 0x7da18c4cf550>
return[name[mod]] | keyword[def] identifier[load_module] ( identifier[self] , identifier[fullname] ):
literal[string]
identifier[path] = identifier[find_notebook] ( identifier[fullname] , identifier[self] . identifier[path] )
identifier[print] ( literal[string] % identifier[path] )
keyword[with] identifier[io] . identifier[open] ( identifier[path] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[nb] = identifier[read] ( identifier[f] , literal[int] )
identifier[mod] = identifier[types] . identifier[ModuleType] ( identifier[fullname] )
identifier[mod] . identifier[__file__] = identifier[path]
identifier[mod] . identifier[__loader__] = identifier[self]
identifier[mod] . identifier[__dict__] [ literal[string] ]= identifier[get_ipython]
identifier[sys] . identifier[modules] [ identifier[fullname] ]= identifier[mod]
identifier[save_user_ns] = identifier[self] . identifier[shell] . identifier[user_ns]
identifier[self] . identifier[shell] . identifier[user_ns] = identifier[mod] . identifier[__dict__]
keyword[try] :
keyword[for] identifier[cell] keyword[in] identifier[nb] . identifier[cells] :
keyword[if] identifier[cell] . identifier[cell_type] == literal[string] :
identifier[code] = identifier[self] . identifier[shell] . identifier[input_transformer_manager] . identifier[transform_cell] ( identifier[cell] . identifier[source] )
identifier[exec] ( identifier[code] , identifier[mod] . identifier[__dict__] )
keyword[finally] :
identifier[self] . identifier[shell] . identifier[user_ns] = identifier[save_user_ns]
keyword[return] identifier[mod] | def load_module(self, fullname):
"""import a notebook as a module"""
path = find_notebook(fullname, self.path)
print('importing Jupyter notebook from %s' % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = read(f, 4) # depends on [control=['with'], data=['f']]
    # create the module and add it to sys.modules
    # if name in sys.modules:
    #     return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb.cells:
if cell.cell_type == 'code':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell.source)
                # run the code in the module
exec(code, mod.__dict__) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cell']] # depends on [control=['try'], data=[]]
finally:
self.shell.user_ns = save_user_ns
return mod |
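
The core trick in load_module is to construct a module object by hand, register it in sys.modules, and exec source code into the module's namespace. Below is a minimal self-contained sketch of that technique, using plain Python source in place of notebook cells; the helper name is hypothetical.

import sys
import types

def module_from_source(fullname, source, filename="<generated>"):
    # build the module object by hand, as load_module does for notebooks
    mod = types.ModuleType(fullname)
    mod.__file__ = filename
    # register before executing so recursive imports can find the module
    sys.modules[fullname] = mod
    try:
        exec(compile(source, filename, "exec"), mod.__dict__)
    except BaseException:
        # do not leave a half-initialised module behind
        del sys.modules[fullname]
        raise
    return mod

demo = module_from_source("demo_mod", "answer = 21 * 2\ndef hello():\n    return 'hi'\n")
print(demo.answer, demo.hello())  # -> 42 hi
import demo_mod                   # resolved from sys.modules, no file needed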