code (stringlengths 75–104k) | code_sememe (stringlengths 47–309k) | token_type (stringlengths 215–214k) | code_dependency (stringlengths 75–155k)
---|---|---|---|
def eventFilter(self, object, event):
"""
Listens for tab/backtab to modify list information.
:param event | <QKeyPressEvent>
"""
if event.type() != event.KeyPress:
return super(XRichTextEdit, self).eventFilter(object, event)
cursor = object.textCursor()
curr_list = cursor.currentList()
# make sure we're in a current list
if not curr_list:
return super(XRichTextEdit, self).eventFilter(object, event)
# unindent for Backtab (Shift+Tab)
if event.key() == Qt.Key_Backtab:
delta = -1
# indent for Tab
elif event.key() == Qt.Key_Tab:
delta = 1
# otherwise, don't bother calculating
else:
return super(XRichTextEdit, self).eventFilter(object, event)
# look for the proper list to move to
curr_block = cursor.block()
curr_indent = curr_list.format().indent()
curr_style = curr_list.format().style()
prev_block = curr_block
next_block = curr_block
add_to_list = None
add_to_indent = curr_indent + delta
while prev_block or next_block:
if prev_block:
prev_block = prev_block.previous()
prev_list = prev_block.textList()
if not prev_list:
prev_block = None
else:
prev_indent = prev_list.format().indent()
if prev_indent == add_to_indent:
add_to_list = prev_list
break
if next_block:
next_block = next_block.next()
next_list = next_block.textList()
if not next_list:
next_block = None
else:
next_indent = next_list.format().indent()
if next_indent == add_to_indent:
add_to_list = next_list
break
if add_to_list is None and 0 < delta:
if curr_style in (QTextListFormat.ListCircle,
QTextListFormat.ListDisc,
QTextListFormat.ListSquare):
self.insertUnorderedList()
else:
self.insertOrderedList()
elif add_to_list:
add_to_list.add(curr_block)
return True | def function[eventFilter, parameter[self, object, event]]:
constant[
Listens for tab/backtab to modify list information.
:param event | <QKeyPressEvent>
]
if compare[call[name[event].type, parameter[]] not_equal[!=] name[event].KeyPress] begin[:]
return[call[call[name[super], parameter[name[XRichTextEdit], name[self]]].eventFilter, parameter[name[object], name[event]]]]
variable[cursor] assign[=] call[name[object].textCursor, parameter[]]
variable[curr_list] assign[=] call[name[cursor].currentList, parameter[]]
if <ast.UnaryOp object at 0x7da1b24af550> begin[:]
return[call[call[name[super], parameter[name[XRichTextEdit], name[self]]].eventFilter, parameter[name[object], name[event]]]]
if compare[call[name[event].key, parameter[]] equal[==] name[Qt].Key_Backtab] begin[:]
variable[delta] assign[=] <ast.UnaryOp object at 0x7da1b24ac160>
variable[curr_block] assign[=] call[name[cursor].block, parameter[]]
variable[curr_indent] assign[=] call[call[name[curr_list].format, parameter[]].indent, parameter[]]
variable[curr_style] assign[=] call[call[name[curr_list].format, parameter[]].style, parameter[]]
variable[prev_block] assign[=] name[curr_block]
variable[next_block] assign[=] name[curr_block]
variable[add_to_list] assign[=] constant[None]
variable[add_to_indent] assign[=] binary_operation[name[curr_indent] + name[delta]]
while <ast.BoolOp object at 0x7da1b24ae920> begin[:]
if name[prev_block] begin[:]
variable[prev_block] assign[=] call[name[prev_block].previous, parameter[]]
variable[prev_list] assign[=] call[name[prev_block].textList, parameter[]]
if <ast.UnaryOp object at 0x7da1b24ad780> begin[:]
variable[prev_block] assign[=] constant[None]
if name[next_block] begin[:]
variable[next_block] assign[=] call[name[next_block].next, parameter[]]
variable[next_list] assign[=] call[name[next_block].textList, parameter[]]
if <ast.UnaryOp object at 0x7da1b24ade70> begin[:]
variable[next_block] assign[=] constant[None]
if <ast.BoolOp object at 0x7da1b24ad8d0> begin[:]
if compare[name[curr_style] in tuple[[<ast.Attribute object at 0x7da1b24ae1a0>, <ast.Attribute object at 0x7da1b24afbe0>, <ast.Attribute object at 0x7da1b24ae170>]]] begin[:]
call[name[self].insertUnorderedList, parameter[]]
return[constant[True]] | keyword[def] identifier[eventFilter] ( identifier[self] , identifier[object] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[type] ()!= identifier[event] . identifier[KeyPress] :
keyword[return] identifier[super] ( identifier[XRichTextEdit] , identifier[self] ). identifier[eventFilter] ( identifier[object] , identifier[event] )
identifier[cursor] = identifier[object] . identifier[textCursor] ()
identifier[curr_list] = identifier[cursor] . identifier[currentList] ()
keyword[if] keyword[not] identifier[curr_list] :
keyword[return] identifier[super] ( identifier[XRichTextEdit] , identifier[self] ). identifier[eventFilter] ( identifier[object] , identifier[event] )
keyword[if] identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Backtab] :
identifier[delta] =- literal[int]
keyword[elif] identifier[event] . identifier[key] ()== identifier[Qt] . identifier[Key_Tab] :
identifier[delta] = literal[int]
keyword[else] :
keyword[return] identifier[super] ( identifier[XRichTextEdit] , identifier[self] ). identifier[eventFilter] ( identifier[object] , identifier[event] )
identifier[curr_block] = identifier[cursor] . identifier[block] ()
identifier[curr_indent] = identifier[curr_list] . identifier[format] (). identifier[indent] ()
identifier[curr_style] = identifier[curr_list] . identifier[format] (). identifier[style] ()
identifier[prev_block] = identifier[curr_block]
identifier[next_block] = identifier[curr_block]
identifier[add_to_list] = keyword[None]
identifier[add_to_indent] = identifier[curr_indent] + identifier[delta]
keyword[while] identifier[prev_block] keyword[or] identifier[next_block] :
keyword[if] identifier[prev_block] :
identifier[prev_block] = identifier[prev_block] . identifier[previous] ()
identifier[prev_list] = identifier[prev_block] . identifier[textList] ()
keyword[if] keyword[not] identifier[prev_list] :
identifier[prev_block] = keyword[None]
keyword[else] :
identifier[prev_indent] = identifier[prev_list] . identifier[format] (). identifier[indent] ()
keyword[if] identifier[prev_indent] == identifier[add_to_indent] :
identifier[add_to_list] = identifier[prev_list]
keyword[break]
keyword[if] identifier[next_block] :
identifier[next_block] = identifier[next_block] . identifier[next] ()
identifier[next_list] = identifier[next_block] . identifier[textList] ()
keyword[if] keyword[not] identifier[next_list] :
identifier[next_block] = keyword[None]
keyword[else] :
identifier[next_indent] = identifier[next_list] . identifier[format] (). identifier[indent] ()
keyword[if] identifier[next_indent] == identifier[add_to_indent] :
identifier[add_to_list] = identifier[next_list]
keyword[break]
keyword[if] identifier[add_to_list] keyword[is] keyword[None] keyword[and] literal[int] < identifier[delta] :
keyword[if] identifier[curr_style] keyword[in] ( identifier[QTextListFormat] . identifier[ListCircle] ,
identifier[QTextListFormat] . identifier[ListDisc] ,
identifier[QTextListFormat] . identifier[ListSquare] ):
identifier[self] . identifier[insertUnorderedList] ()
keyword[else] :
identifier[self] . identifier[insertOrderedList] ()
keyword[elif] identifier[add_to_list] :
identifier[add_to_list] . identifier[add] ( identifier[curr_block] )
keyword[return] keyword[True] | def eventFilter(self, object, event):
"""
Listens for tab/backtab to modify list information.
:param event | <QKeyPressEvent>
"""
if event.type() != event.KeyPress:
return super(XRichTextEdit, self).eventFilter(object, event) # depends on [control=['if'], data=[]]
cursor = object.textCursor()
curr_list = cursor.currentList() # make sure we're in a current list
if not curr_list:
return super(XRichTextEdit, self).eventFilter(object, event) # depends on [control=['if'], data=[]] # unindent for Backtab (Shift+Tab)
if event.key() == Qt.Key_Backtab:
delta = -1 # depends on [control=['if'], data=[]] # indent for Tab
elif event.key() == Qt.Key_Tab:
delta = 1 # depends on [control=['if'], data=[]]
else: # otherwise, don't bother calculating
return super(XRichTextEdit, self).eventFilter(object, event) # look for the proper list to move to
curr_block = cursor.block()
curr_indent = curr_list.format().indent()
curr_style = curr_list.format().style()
prev_block = curr_block
next_block = curr_block
add_to_list = None
add_to_indent = curr_indent + delta
while prev_block or next_block:
if prev_block:
prev_block = prev_block.previous()
prev_list = prev_block.textList()
if not prev_list:
prev_block = None # depends on [control=['if'], data=[]]
else:
prev_indent = prev_list.format().indent()
if prev_indent == add_to_indent:
add_to_list = prev_list
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if next_block:
next_block = next_block.next()
next_list = next_block.textList()
if not next_list:
next_block = None # depends on [control=['if'], data=[]]
else:
next_indent = next_list.format().indent()
if next_indent == add_to_indent:
add_to_list = next_list
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
if add_to_list is None and 0 < delta:
if curr_style in (QTextListFormat.ListCircle, QTextListFormat.ListDisc, QTextListFormat.ListSquare):
self.insertUnorderedList() # depends on [control=['if'], data=[]]
else:
self.insertOrderedList() # depends on [control=['if'], data=[]]
elif add_to_list:
add_to_list.add(curr_block) # depends on [control=['if'], data=[]]
return True |
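The filter above only runs if it has been installed on the widget whose events it should intercept. A minimal wiring sketch, assuming a PyQt4-era API to match the QtGui.QTextCursor / Qt.Key_Tab usage in this row; the class name is a hypothetical stand-in, not the original widget:

```python
# Sketch only: how an event filter like the one in this row is typically hooked up.
# Assumes PyQt4; ListAwareTextEdit is a placeholder name.
from PyQt4 import QtGui
from PyQt4.QtCore import Qt

class ListAwareTextEdit(QtGui.QTextEdit):
    def __init__(self, parent=None):
        super(ListAwareTextEdit, self).__init__(parent)
        self.setTabChangesFocus(False)   # keep Tab key presses inside the editor
        self.installEventFilter(self)    # route our own events through eventFilter()

    def eventFilter(self, object, event):
        if event.type() == event.KeyPress and event.key() in (Qt.Key_Tab, Qt.Key_Backtab):
            # indent/unindent logic like the row above would go here
            return True                  # swallow the key press
        return super(ListAwareTextEdit, self).eventFilter(object, event)
```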
def from_nodes(cls, nodes, _copy=True):
"""Create a :class:`.Surface` from nodes.
Computes the ``degree`` based on the shape of ``nodes``.
Args:
nodes (numpy.ndarray): The nodes in the surface. The columns
represent each node while the rows are the dimension
of the ambient space.
_copy (bool): Flag indicating if the nodes should be copied before
being stored. Defaults to :data:`True` since callers may
freely mutate ``nodes`` after passing in.
Returns:
Surface: The constructed surface.
"""
_, num_nodes = nodes.shape
degree = cls._get_degree(num_nodes)
return cls(nodes, degree, _copy=_copy) | def function[from_nodes, parameter[cls, nodes, _copy]]:
constant[Create a :class:`.Surface` from nodes.
Computes the ``degree`` based on the shape of ``nodes``.
Args:
nodes (numpy.ndarray): The nodes in the surface. The columns
represent each node while the rows are the dimension
of the ambient space.
_copy (bool): Flag indicating if the nodes should be copied before
being stored. Defaults to :data:`True` since callers may
freely mutate ``nodes`` after passing in.
Returns:
Surface: The constructed surface.
]
<ast.Tuple object at 0x7da2046235e0> assign[=] name[nodes].shape
variable[degree] assign[=] call[name[cls]._get_degree, parameter[name[num_nodes]]]
return[call[name[cls], parameter[name[nodes], name[degree]]]] | keyword[def] identifier[from_nodes] ( identifier[cls] , identifier[nodes] , identifier[_copy] = keyword[True] ):
literal[string]
identifier[_] , identifier[num_nodes] = identifier[nodes] . identifier[shape]
identifier[degree] = identifier[cls] . identifier[_get_degree] ( identifier[num_nodes] )
keyword[return] identifier[cls] ( identifier[nodes] , identifier[degree] , identifier[_copy] = identifier[_copy] ) | def from_nodes(cls, nodes, _copy=True):
"""Create a :class:`.Surface` from nodes.
Computes the ``degree`` based on the shape of ``nodes``.
Args:
nodes (numpy.ndarray): The nodes in the surface. The columns
represent each node while the rows are the dimension
of the ambient space.
_copy (bool): Flag indicating if the nodes should be copied before
being stored. Defaults to :data:`True` since callers may
freely mutate ``nodes`` after passing in.
Returns:
Surface: The constructed surface.
"""
(_, num_nodes) = nodes.shape
degree = cls._get_degree(num_nodes)
return cls(nodes, degree, _copy=_copy) |
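A usage sketch for the classmethod above, assuming it belongs to the bezier package's Surface type (nodes are stored column-wise, so each column is one control node); the coordinates are arbitrary example values:

```python
# Sketch under the assumption that this is bezier's Surface.from_nodes();
# three node columns -> degree 1 is inferred from the node count.
import numpy as np
import bezier

nodes = np.asfortranarray([
    [0.0, 1.0, 0.0],   # x coordinates, one column per node
    [0.0, 0.0, 1.0],   # y coordinates
])
surface = bezier.Surface.from_nodes(nodes)
print(surface.degree)  # -> 1
```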
def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd="", tabix_args=None, out_dir=None):
"""bgzip and tabix index an input file, handling VCF and BED.
"""
if config is None:
config = {}
out_file = in_file if in_file.endswith(".gz") else in_file + ".gz"
if out_dir:
remove_orig = False
out_file = os.path.join(out_dir, os.path.basename(out_file))
if (not utils.file_exists(out_file) or not os.path.lexists(out_file)
or (utils.file_exists(in_file) and not utils.file_uptodate(out_file, in_file))):
assert not in_file == out_file, "Input file is bgzipped but not found: %s" % in_file
assert os.path.exists(in_file), "Input file %s not found" % in_file
if not utils.file_uptodate(out_file, in_file):
with file_transaction(config, out_file) as tx_out_file:
bgzip = tools.get_bgzip_cmd(config)
cat_cmd = "zcat" if in_file.endswith(".gz") else "cat"
if prep_cmd:
prep_cmd = "| %s " % prep_cmd
cmd = "{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}"
try:
do.run(cmd.format(**locals()), "bgzip %s" % os.path.basename(in_file))
except subprocess.CalledProcessError:
# Race conditions: ignore errors where the file has been deleted by another process
if os.path.exists(in_file) and not os.path.exists(out_file):
raise
if remove_orig:
try:
os.remove(in_file)
except OSError: # Handle cases where run in parallel and file has been deleted
pass
tabix_index(out_file, config, tabix_args=tabix_args)
return out_file | def function[bgzip_and_index, parameter[in_file, config, remove_orig, prep_cmd, tabix_args, out_dir]]:
constant[bgzip and tabix index an input file, handling VCF and BED.
]
if compare[name[config] is constant[None]] begin[:]
variable[config] assign[=] dictionary[[], []]
variable[out_file] assign[=] <ast.IfExp object at 0x7da1b18be5c0>
if name[out_dir] begin[:]
variable[remove_orig] assign[=] constant[False]
variable[out_file] assign[=] call[name[os].path.join, parameter[name[out_dir], call[name[os].path.basename, parameter[name[out_file]]]]]
if <ast.BoolOp object at 0x7da1b18bf880> begin[:]
assert[<ast.UnaryOp object at 0x7da1b18beef0>]
assert[call[name[os].path.exists, parameter[name[in_file]]]]
if <ast.UnaryOp object at 0x7da1b18bdd80> begin[:]
with call[name[file_transaction], parameter[name[config], name[out_file]]] begin[:]
variable[bgzip] assign[=] call[name[tools].get_bgzip_cmd, parameter[name[config]]]
variable[cat_cmd] assign[=] <ast.IfExp object at 0x7da1b18bcc70>
if name[prep_cmd] begin[:]
variable[prep_cmd] assign[=] binary_operation[constant[| %s ] <ast.Mod object at 0x7da2590d6920> name[prep_cmd]]
variable[cmd] assign[=] constant[{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}]
<ast.Try object at 0x7da1b18be710>
if name[remove_orig] begin[:]
<ast.Try object at 0x7da1b17907c0>
call[name[tabix_index], parameter[name[out_file], name[config]]]
return[name[out_file]] | keyword[def] identifier[bgzip_and_index] ( identifier[in_file] , identifier[config] = keyword[None] , identifier[remove_orig] = keyword[True] , identifier[prep_cmd] = literal[string] , identifier[tabix_args] = keyword[None] , identifier[out_dir] = keyword[None] ):
literal[string]
keyword[if] identifier[config] keyword[is] keyword[None] :
identifier[config] ={}
identifier[out_file] = identifier[in_file] keyword[if] identifier[in_file] . identifier[endswith] ( literal[string] ) keyword[else] identifier[in_file] + literal[string]
keyword[if] identifier[out_dir] :
identifier[remove_orig] = keyword[False]
identifier[out_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , identifier[os] . identifier[path] . identifier[basename] ( identifier[out_file] ))
keyword[if] ( keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ) keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[lexists] ( identifier[out_file] )
keyword[or] ( identifier[utils] . identifier[file_exists] ( identifier[in_file] ) keyword[and] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[in_file] ))):
keyword[assert] keyword[not] identifier[in_file] == identifier[out_file] , literal[string] % identifier[in_file]
keyword[assert] identifier[os] . identifier[path] . identifier[exists] ( identifier[in_file] ), literal[string] % identifier[in_file]
keyword[if] keyword[not] identifier[utils] . identifier[file_uptodate] ( identifier[out_file] , identifier[in_file] ):
keyword[with] identifier[file_transaction] ( identifier[config] , identifier[out_file] ) keyword[as] identifier[tx_out_file] :
identifier[bgzip] = identifier[tools] . identifier[get_bgzip_cmd] ( identifier[config] )
identifier[cat_cmd] = literal[string] keyword[if] identifier[in_file] . identifier[endswith] ( literal[string] ) keyword[else] literal[string]
keyword[if] identifier[prep_cmd] :
identifier[prep_cmd] = literal[string] % identifier[prep_cmd]
identifier[cmd] = literal[string]
keyword[try] :
identifier[do] . identifier[run] ( identifier[cmd] . identifier[format] (** identifier[locals] ()), literal[string] % identifier[os] . identifier[path] . identifier[basename] ( identifier[in_file] ))
keyword[except] identifier[subprocess] . identifier[CalledProcessError] :
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[in_file] ) keyword[and] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[out_file] ):
keyword[raise]
keyword[if] identifier[remove_orig] :
keyword[try] :
identifier[os] . identifier[remove] ( identifier[in_file] )
keyword[except] identifier[OSError] :
keyword[pass]
identifier[tabix_index] ( identifier[out_file] , identifier[config] , identifier[tabix_args] = identifier[tabix_args] )
keyword[return] identifier[out_file] | def bgzip_and_index(in_file, config=None, remove_orig=True, prep_cmd='', tabix_args=None, out_dir=None):
"""bgzip and tabix index an input file, handling VCF and BED.
"""
if config is None:
config = {} # depends on [control=['if'], data=['config']]
out_file = in_file if in_file.endswith('.gz') else in_file + '.gz'
if out_dir:
remove_orig = False
out_file = os.path.join(out_dir, os.path.basename(out_file)) # depends on [control=['if'], data=[]]
if not utils.file_exists(out_file) or not os.path.lexists(out_file) or (utils.file_exists(in_file) and (not utils.file_uptodate(out_file, in_file))):
assert not in_file == out_file, 'Input file is bgzipped but not found: %s' % in_file
assert os.path.exists(in_file), 'Input file %s not found' % in_file
if not utils.file_uptodate(out_file, in_file):
with file_transaction(config, out_file) as tx_out_file:
bgzip = tools.get_bgzip_cmd(config)
cat_cmd = 'zcat' if in_file.endswith('.gz') else 'cat'
if prep_cmd:
prep_cmd = '| %s ' % prep_cmd # depends on [control=['if'], data=[]]
cmd = '{cat_cmd} {in_file} {prep_cmd} | {bgzip} -c > {tx_out_file}'
try:
do.run(cmd.format(**locals()), 'bgzip %s' % os.path.basename(in_file)) # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError:
# Race conditions: ignore errors where the file has been deleted by another process
if os.path.exists(in_file) and (not os.path.exists(out_file)):
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['with'], data=[]]
if remove_orig:
try:
os.remove(in_file) # depends on [control=['try'], data=[]]
except OSError: # Handle cases where run in parallel and file has been deleted
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
tabix_index(out_file, config, tabix_args=tabix_args)
return out_file |
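An illustrative call for the helper above. The import path and file name are assumptions (this reads like a bcbio-style utility), and the bgzip/tabix binaries must be on PATH for do.run to succeed:

```python
# Hypothetical usage; the module path and VCF path are placeholders, and an
# empty config dict falls back to whatever tools.get_bgzip_cmd() finds on PATH.
from bcbio.variation.vcfutils import bgzip_and_index  # assumed import location

out_vcf = bgzip_and_index("/data/sample-calls.vcf", config={})
# -> "/data/sample-calls.vcf.gz", bgzipped and tabix-indexed; the original
#    uncompressed file is removed because remove_orig defaults to True
```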
def delete_image(image_id, profile, **libcloud_kwargs):
'''
Delete an image of a node
:param image_id: Image to delete
:type image_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's delete_image method
:type libcloud_kwargs: ``dict``
CLI Example:
.. code-block:: bash
salt myminion libcloud_compute.delete_image image1 profile1
'''
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
image = _get_by_id(conn.list_images(), image_id)
return conn.delete_image(image, **libcloud_kwargs) | def function[delete_image, parameter[image_id, profile]]:
constant[
Delete an image of a node
:param image_id: Image to delete
:type image_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's delete_image method
:type libcloud_kwargs: ``dict``
CLI Example:
.. code-block:: bash
salt myminion libcloud_compute.delete_image image1 profile1
]
variable[conn] assign[=] call[name[_get_driver], parameter[]]
variable[libcloud_kwargs] assign[=] call[name[salt].utils.args.clean_kwargs, parameter[]]
variable[image] assign[=] call[name[_get_by_id], parameter[call[name[conn].list_images, parameter[]], name[image_id]]]
return[call[name[conn].delete_image, parameter[name[image]]]] | keyword[def] identifier[delete_image] ( identifier[image_id] , identifier[profile] ,** identifier[libcloud_kwargs] ):
literal[string]
identifier[conn] = identifier[_get_driver] ( identifier[profile] = identifier[profile] )
identifier[libcloud_kwargs] = identifier[salt] . identifier[utils] . identifier[args] . identifier[clean_kwargs] (** identifier[libcloud_kwargs] )
identifier[image] = identifier[_get_by_id] ( identifier[conn] . identifier[list_images] (), identifier[image_id] )
keyword[return] identifier[conn] . identifier[delete_image] ( identifier[image] ,** identifier[libcloud_kwargs] ) | def delete_image(image_id, profile, **libcloud_kwargs):
"""
Delete an image of a node
:param image_id: Image to delete
:type image_id: ``str``
:param profile: The profile key
:type profile: ``str``
:param libcloud_kwargs: Extra arguments for the driver's delete_image method
:type libcloud_kwargs: ``dict``
CLI Example:
.. code-block:: bash
salt myminion libcloud_compute.delete_image image1 profile1
"""
conn = _get_driver(profile=profile)
libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
image = _get_by_id(conn.list_images(), image_id)
return conn.delete_image(image, **libcloud_kwargs) |
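The docstring above already shows the CLI form; the same execution-module function can also be driven from Python through Salt's local client. A sketch, assuming a running master/minion pair and a libcloud profile named profile1 in the minion config:

```python
# Sketch: invoking libcloud_compute.delete_image programmatically instead of
# the `salt myminion ...` CLI example in the docstring above.
import salt.client

local = salt.client.LocalClient()
result = local.cmd('myminion', 'libcloud_compute.delete_image', ['image1', 'profile1'])
print(result)   # e.g. {'myminion': True} on success
```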
def addText(self, text):
"""append text in the chosen color"""
# move to the end of the doc
self.moveCursor(QtGui.QTextCursor.End)
# insert the text
self.setTextColor(self._currentColor)
self.textCursor().insertText(text) | def function[addText, parameter[self, text]]:
constant[append text in the chosen color]
call[name[self].moveCursor, parameter[name[QtGui].QTextCursor.End]]
call[name[self].setTextColor, parameter[name[self]._currentColor]]
call[call[name[self].textCursor, parameter[]].insertText, parameter[name[text]]] | keyword[def] identifier[addText] ( identifier[self] , identifier[text] ):
literal[string]
identifier[self] . identifier[moveCursor] ( identifier[QtGui] . identifier[QTextCursor] . identifier[End] )
identifier[self] . identifier[setTextColor] ( identifier[self] . identifier[_currentColor] )
identifier[self] . identifier[textCursor] (). identifier[insertText] ( identifier[text] ) | def addText(self, text):
"""append text in the chosen color"""
# move to the end of the doc
self.moveCursor(QtGui.QTextCursor.End)
# insert the text
self.setTextColor(self._currentColor)
self.textCursor().insertText(text) |
def del_permission(self, role, name):
""" revoke authorization of a group """
if not self.has_permission(role, name):
return True
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True
target.delete()
return True | def function[del_permission, parameter[self, role, name]]:
constant[ revoke authorization of a group ]
if <ast.UnaryOp object at 0x7da1b0fddd50> begin[:]
return[constant[True]]
variable[targetGroup] assign[=] call[call[name[AuthGroup].objects, parameter[]].first, parameter[]]
variable[target] assign[=] call[call[name[AuthPermission].objects, parameter[]].first, parameter[]]
if <ast.UnaryOp object at 0x7da2054a7460> begin[:]
return[constant[True]]
call[name[target].delete, parameter[]]
return[constant[True]] | keyword[def] identifier[del_permission] ( identifier[self] , identifier[role] , identifier[name] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[has_permission] ( identifier[role] , identifier[name] ):
keyword[return] keyword[True]
identifier[targetGroup] = identifier[AuthGroup] . identifier[objects] ( identifier[role] = identifier[role] , identifier[creator] = identifier[self] . identifier[client] ). identifier[first] ()
identifier[target] = identifier[AuthPermission] . identifier[objects] ( identifier[groups] = identifier[targetGroup] , identifier[name] = identifier[name] , identifier[creator] = identifier[self] . identifier[client] ). identifier[first] ()
keyword[if] keyword[not] identifier[target] :
keyword[return] keyword[True]
identifier[target] . identifier[delete] ()
keyword[return] keyword[True] | def del_permission(self, role, name):
""" revoke authorization of a group """
if not self.has_permission(role, name):
return True # depends on [control=['if'], data=[]]
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first()
if not target:
return True # depends on [control=['if'], data=[]]
target.delete()
return True |
def get_mac_address(
interface=None, ip=None, ip6=None,
hostname=None, network_request=True
):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], bool) -> Optional[str]
"""Get a Unicast IEEE 802 MAC-48 address from a local interface or remote host.
You must only use one of the first four arguments. If none of the arguments
are selected, the default network interface for the system will be used.
Exceptions will be handled silently and returned as a None.
For the time being, it assumes you are using Ethernet.
NOTES:
* You MUST provide str-typed arguments, REGARDLESS of Python version.
* localhost/127.0.0.1 will always return '00:00:00:00:00:00'
Args:
interface (str): Name of a local network interface (e.g "Ethernet 3", "eth0", "ens32")
ip (str): Canonical dotted decimal IPv4 address of a remote host (e.g 192.168.0.1)
ip6 (str): Canonical shortened IPv6 address of a remote host (e.g ff02::1:ffe7:7f19)
hostname (str): DNS hostname of a remote host (e.g "router1.mycorp.com", "localhost")
network_request (bool): Send a UDP packet to a remote host to populate
the ARP/NDP tables for IPv4/IPv6. The port this packet is sent to can
be configured using the module variable `getmac.PORT`.
Returns:
Lowercase colon-separated MAC address, or None if one could not be
found or there was an error.
"""
if (hostname and hostname == 'localhost') or (ip and ip == '127.0.0.1'):
return '00:00:00:00:00:00'
# Resolve hostname to an IP address
if hostname:
ip = socket.gethostbyname(hostname)
# Populate the ARP table by sending an empty UDP packet to a high port
if network_request and (ip or ip6):
try:
if ip:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(b'', (ip, PORT))
else:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.sendto(b'', (ip6, PORT))
except Exception:
log.error("Failed to send ARP table population packet")
if DEBUG:
log.debug(traceback.format_exc())
# Setup the address hunt based on the arguments specified
if ip6:
if not socket.has_ipv6:
log.error("Cannot get the MAC address of a IPv6 host: "
"IPv6 is not supported on this system")
return None
elif ':' not in ip6:
log.error("Invalid IPv6 address: %s", ip6)
return None
to_find = ip6
typ = IP6
elif ip:
to_find = ip
typ = IP4
else: # Default to searching for interface
typ = INTERFACE
if interface:
to_find = interface
else:
# Default to finding MAC of the interface with the default route
if WINDOWS and network_request:
to_find = _fetch_ip_using_dns()
typ = IP4
elif WINDOWS:
to_find = 'Ethernet'
elif BSD:
if OPENBSD:
to_find = _get_default_iface_openbsd() # type: ignore
else:
to_find = _get_default_iface_freebsd() # type: ignore
if not to_find:
to_find = 'em0'
else:
to_find = _hunt_linux_default_iface() # type: ignore
if not to_find:
to_find = 'en0'
mac = _hunt_for_mac(to_find, typ, network_request)
log.debug("Raw MAC found: %s", mac)
# Check and format the result to be lowercase, colon-separated
if mac is not None:
mac = str(mac)
if not PY2: # Strip bytestring conversion artifacts
mac = mac.replace("b'", '').replace("'", '')\
.replace('\\n', '').replace('\\r', '')
mac = mac.strip().lower().replace(' ', '').replace('-', ':')
# Fix cases where there are no colons
if ':' not in mac and len(mac) == 12:
log.debug("Adding colons to MAC %s", mac)
mac = ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
# Pad single-character octets with a leading zero (e.g Darwin's ARP output)
elif len(mac) < 17:
log.debug("Length of MAC %s is %d, padding single-character "
"octets with zeros", mac, len(mac))
parts = mac.split(':')
new_mac = []
for part in parts:
if len(part) == 1:
new_mac.append('0' + part)
else:
new_mac.append(part)
mac = ':'.join(new_mac)
# MAC address should ALWAYS be 17 characters before being returned
if len(mac) != 17:
log.warning("MAC address %s is not 17 characters long!", mac)
mac = None
elif mac.count(':') != 5:
log.warning("MAC address %s is missing ':' characters", mac)
mac = None
return mac | def function[get_mac_address, parameter[interface, ip, ip6, hostname, network_request]]:
constant[Get a Unicast IEEE 802 MAC-48 address from a local interface or remote host.
You must only use one of the first four arguments. If none of the arguments
are selected, the default network interface for the system will be used.
Exceptions will be handled silently and returned as a None.
For the time being, it assumes you are using Ethernet.
NOTES:
* You MUST provide str-typed arguments, REGARDLESS of Python version.
* localhost/127.0.0.1 will always return '00:00:00:00:00:00'
Args:
interface (str): Name of a local network interface (e.g "Ethernet 3", "eth0", "ens32")
ip (str): Canonical dotted decimal IPv4 address of a remote host (e.g 192.168.0.1)
ip6 (str): Canonical shortened IPv6 address of a remote host (e.g ff02::1:ffe7:7f19)
hostname (str): DNS hostname of a remote host (e.g "router1.mycorp.com", "localhost")
network_request (bool): Send a UDP packet to a remote host to populate
the ARP/NDP tables for IPv4/IPv6. The port this packet is sent to can
be configured using the module variable `getmac.PORT`.
Returns:
Lowercase colon-separated MAC address, or None if one could not be
found or there was an error.
]
if <ast.BoolOp object at 0x7da1b1290070> begin[:]
return[constant[00:00:00:00:00:00]]
if name[hostname] begin[:]
variable[ip] assign[=] call[name[socket].gethostbyname, parameter[name[hostname]]]
if <ast.BoolOp object at 0x7da1b1293610> begin[:]
<ast.Try object at 0x7da1b1292c20>
if name[ip6] begin[:]
if <ast.UnaryOp object at 0x7da1b1291390> begin[:]
call[name[log].error, parameter[constant[Cannot get the MAC address of a IPv6 host: IPv6 is not supported on this system]]]
return[constant[None]]
variable[to_find] assign[=] name[ip6]
variable[typ] assign[=] name[IP6]
variable[mac] assign[=] call[name[_hunt_for_mac], parameter[name[to_find], name[typ], name[network_request]]]
call[name[log].debug, parameter[constant[Raw MAC found: %s], name[mac]]]
if compare[name[mac] is_not constant[None]] begin[:]
variable[mac] assign[=] call[name[str], parameter[name[mac]]]
if <ast.UnaryOp object at 0x7da1b12f08e0> begin[:]
variable[mac] assign[=] call[call[call[call[name[mac].replace, parameter[constant[b'], constant[]]].replace, parameter[constant['], constant[]]].replace, parameter[constant[\n], constant[]]].replace, parameter[constant[\r], constant[]]]
variable[mac] assign[=] call[call[call[call[name[mac].strip, parameter[]].lower, parameter[]].replace, parameter[constant[ ], constant[]]].replace, parameter[constant[-], constant[:]]]
if <ast.BoolOp object at 0x7da1b12f2e30> begin[:]
call[name[log].debug, parameter[constant[Adding colons to MAC %s], name[mac]]]
variable[mac] assign[=] call[constant[:].join, parameter[<ast.GeneratorExp object at 0x7da1b12f1f60>]]
if compare[call[name[len], parameter[name[mac]]] not_equal[!=] constant[17]] begin[:]
call[name[log].warning, parameter[constant[MAC address %s is not 17 characters long!], name[mac]]]
variable[mac] assign[=] constant[None]
return[name[mac]] | keyword[def] identifier[get_mac_address] (
identifier[interface] = keyword[None] , identifier[ip] = keyword[None] , identifier[ip6] = keyword[None] ,
identifier[hostname] = keyword[None] , identifier[network_request] = keyword[True]
):
literal[string]
keyword[if] ( identifier[hostname] keyword[and] identifier[hostname] == literal[string] ) keyword[or] ( identifier[ip] keyword[and] identifier[ip] == literal[string] ):
keyword[return] literal[string]
keyword[if] identifier[hostname] :
identifier[ip] = identifier[socket] . identifier[gethostbyname] ( identifier[hostname] )
keyword[if] identifier[network_request] keyword[and] ( identifier[ip] keyword[or] identifier[ip6] ):
keyword[try] :
keyword[if] identifier[ip] :
identifier[s] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_DGRAM] )
identifier[s] . identifier[sendto] ( literal[string] ,( identifier[ip] , identifier[PORT] ))
keyword[else] :
identifier[s] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET6] , identifier[socket] . identifier[SOCK_DGRAM] )
identifier[s] . identifier[sendto] ( literal[string] ,( identifier[ip6] , identifier[PORT] ))
keyword[except] identifier[Exception] :
identifier[log] . identifier[error] ( literal[string] )
keyword[if] identifier[DEBUG] :
identifier[log] . identifier[debug] ( identifier[traceback] . identifier[format_exc] ())
keyword[if] identifier[ip6] :
keyword[if] keyword[not] identifier[socket] . identifier[has_ipv6] :
identifier[log] . identifier[error] ( literal[string]
literal[string] )
keyword[return] keyword[None]
keyword[elif] literal[string] keyword[not] keyword[in] identifier[ip6] :
identifier[log] . identifier[error] ( literal[string] , identifier[ip6] )
keyword[return] keyword[None]
identifier[to_find] = identifier[ip6]
identifier[typ] = identifier[IP6]
keyword[elif] identifier[ip] :
identifier[to_find] = identifier[ip]
identifier[typ] = identifier[IP4]
keyword[else] :
identifier[typ] = identifier[INTERFACE]
keyword[if] identifier[interface] :
identifier[to_find] = identifier[interface]
keyword[else] :
keyword[if] identifier[WINDOWS] keyword[and] identifier[network_request] :
identifier[to_find] = identifier[_fetch_ip_using_dns] ()
identifier[typ] = identifier[IP4]
keyword[elif] identifier[WINDOWS] :
identifier[to_find] = literal[string]
keyword[elif] identifier[BSD] :
keyword[if] identifier[OPENBSD] :
identifier[to_find] = identifier[_get_default_iface_openbsd] ()
keyword[else] :
identifier[to_find] = identifier[_get_default_iface_freebsd] ()
keyword[if] keyword[not] identifier[to_find] :
identifier[to_find] = literal[string]
keyword[else] :
identifier[to_find] = identifier[_hunt_linux_default_iface] ()
keyword[if] keyword[not] identifier[to_find] :
identifier[to_find] = literal[string]
identifier[mac] = identifier[_hunt_for_mac] ( identifier[to_find] , identifier[typ] , identifier[network_request] )
identifier[log] . identifier[debug] ( literal[string] , identifier[mac] )
keyword[if] identifier[mac] keyword[is] keyword[not] keyword[None] :
identifier[mac] = identifier[str] ( identifier[mac] )
keyword[if] keyword[not] identifier[PY2] :
identifier[mac] = identifier[mac] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[mac] = identifier[mac] . identifier[strip] (). identifier[lower] (). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[mac] keyword[and] identifier[len] ( identifier[mac] )== literal[int] :
identifier[log] . identifier[debug] ( literal[string] , identifier[mac] )
identifier[mac] = literal[string] . identifier[join] ( identifier[mac] [ identifier[i] : identifier[i] + literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[mac] ), literal[int] ))
keyword[elif] identifier[len] ( identifier[mac] )< literal[int] :
identifier[log] . identifier[debug] ( literal[string]
literal[string] , identifier[mac] , identifier[len] ( identifier[mac] ))
identifier[parts] = identifier[mac] . identifier[split] ( literal[string] )
identifier[new_mac] =[]
keyword[for] identifier[part] keyword[in] identifier[parts] :
keyword[if] identifier[len] ( identifier[part] )== literal[int] :
identifier[new_mac] . identifier[append] ( literal[string] + identifier[part] )
keyword[else] :
identifier[new_mac] . identifier[append] ( identifier[part] )
identifier[mac] = literal[string] . identifier[join] ( identifier[new_mac] )
keyword[if] identifier[len] ( identifier[mac] )!= literal[int] :
identifier[log] . identifier[warning] ( literal[string] , identifier[mac] )
identifier[mac] = keyword[None]
keyword[elif] identifier[mac] . identifier[count] ( literal[string] )!= literal[int] :
identifier[log] . identifier[warning] ( literal[string] , identifier[mac] )
identifier[mac] = keyword[None]
keyword[return] identifier[mac] | def get_mac_address(interface=None, ip=None, ip6=None, hostname=None, network_request=True):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], bool) -> Optional[str]
'Get a Unicast IEEE 802 MAC-48 address from a local interface or remote host.\n\n You must only use one of the first four arguments. If none of the arguments\n are selected, the default network interface for the system will be used.\n\n Exceptions will be handled silently and returned as a None.\n For the time being, it assumes you are using Ethernet.\n\n NOTES:\n * You MUST provide str-typed arguments, REGARDLESS of Python version.\n * localhost/127.0.0.1 will always return \'00:00:00:00:00:00\'\n\n Args:\n interface (str): Name of a local network interface (e.g "Ethernet 3", "eth0", "ens32")\n ip (str): Canonical dotted decimal IPv4 address of a remote host (e.g 192.168.0.1)\n ip6 (str): Canonical shortened IPv6 address of a remote host (e.g ff02::1:ffe7:7f19)\n hostname (str): DNS hostname of a remote host (e.g "router1.mycorp.com", "localhost")\n network_request (bool): Send a UDP packet to a remote host to populate\n the ARP/NDP tables for IPv4/IPv6. The port this packet is sent to can\n be configured using the module variable `getmac.PORT`.\n Returns:\n Lowercase colon-separated MAC address, or None if one could not be\n found or there was an error.\n '
if hostname and hostname == 'localhost' or (ip and ip == '127.0.0.1'):
return '00:00:00:00:00:00' # depends on [control=['if'], data=[]]
# Resolve hostname to an IP address
if hostname:
ip = socket.gethostbyname(hostname) # depends on [control=['if'], data=[]]
# Populate the ARP table by sending an empty UDP packet to a high port
if network_request and (ip or ip6):
try:
if ip:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto(b'', (ip, PORT)) # depends on [control=['if'], data=[]]
else:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.sendto(b'', (ip6, PORT)) # depends on [control=['try'], data=[]]
except Exception:
log.error('Failed to send ARP table population packet')
if DEBUG:
log.debug(traceback.format_exc()) # depends on [control=['if'], data=[]] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Setup the address hunt based on the arguments specified
if ip6:
if not socket.has_ipv6:
log.error('Cannot get the MAC address of a IPv6 host: IPv6 is not supported on this system')
return None # depends on [control=['if'], data=[]]
elif ':' not in ip6:
log.error('Invalid IPv6 address: %s', ip6)
return None # depends on [control=['if'], data=['ip6']]
to_find = ip6
typ = IP6 # depends on [control=['if'], data=[]]
elif ip:
to_find = ip
typ = IP4 # depends on [control=['if'], data=[]]
else: # Default to searching for interface
typ = INTERFACE
if interface:
to_find = interface # depends on [control=['if'], data=[]]
# Default to finding MAC of the interface with the default route
elif WINDOWS and network_request:
to_find = _fetch_ip_using_dns()
typ = IP4 # depends on [control=['if'], data=[]]
elif WINDOWS:
to_find = 'Ethernet' # depends on [control=['if'], data=[]]
elif BSD:
if OPENBSD:
to_find = _get_default_iface_openbsd() # type: ignore # depends on [control=['if'], data=[]]
else:
to_find = _get_default_iface_freebsd() # type: ignore
if not to_find:
to_find = 'em0' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
to_find = _hunt_linux_default_iface() # type: ignore
if not to_find:
to_find = 'en0' # depends on [control=['if'], data=[]]
mac = _hunt_for_mac(to_find, typ, network_request)
log.debug('Raw MAC found: %s', mac)
# Check and format the result to be lowercase, colon-separated
if mac is not None:
mac = str(mac)
if not PY2: # Strip bytestring conversion artifacts
mac = mac.replace("b'", '').replace("'", '').replace('\\n', '').replace('\\r', '') # depends on [control=['if'], data=[]]
mac = mac.strip().lower().replace(' ', '').replace('-', ':')
# Fix cases where there are no colons
if ':' not in mac and len(mac) == 12:
log.debug('Adding colons to MAC %s', mac)
mac = ':'.join((mac[i:i + 2] for i in range(0, len(mac), 2))) # depends on [control=['if'], data=[]]
# Pad single-character octets with a leading zero (e.g Darwin's ARP output)
elif len(mac) < 17:
log.debug('Length of MAC %s is %d, padding single-character octets with zeros', mac, len(mac))
parts = mac.split(':')
new_mac = []
for part in parts:
if len(part) == 1:
new_mac.append('0' + part) # depends on [control=['if'], data=[]]
else:
new_mac.append(part) # depends on [control=['for'], data=['part']]
mac = ':'.join(new_mac) # depends on [control=['if'], data=[]]
# MAC address should ALWAYS be 17 characters before being returned
if len(mac) != 17:
log.warning('MAC address %s is not 17 characters long!', mac)
mac = None # depends on [control=['if'], data=[]]
elif mac.count(':') != 5:
log.warning("MAC address %s is missing ':' characters", mac)
mac = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['mac']]
return mac |
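Usage sketch for the function above. Its docstring names the module variable `getmac.PORT`, so this is the getmac package's public entry point; the interface name and IP address are placeholders for whatever exists on the local network:

```python
# Each call returns a lowercase colon-separated MAC string, or None on failure.
from getmac import get_mac_address

print(get_mac_address())                      # default interface's MAC
print(get_mac_address(interface="eth0"))      # a named local interface
print(get_mac_address(ip="192.168.0.1"))      # remote IPv4 host via ARP
print(get_mac_address(hostname="localhost"))  # -> '00:00:00:00:00:00'
```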
def parse(number, region=None, keep_raw_input=False,
numobj=None, _check_region=True):
"""Parse a string and return a corresponding PhoneNumber object.
The method is quite lenient and looks for a number in the input text
(raw input) and does not check whether the string is definitely only a
phone number. To do this, it ignores punctuation and white-space, as
well as any text before the number (e.g. a leading "Tel: ") and trims
the non-number bits. It will accept a number in any format (E164,
national, international etc), assuming it can be interpreted with the
defaultRegion supplied. It also attempts to convert any alpha characters
into digits if it thinks this is a vanity number of the type "1800
MICROSOFT".
This method will throw a NumberParseException if the number is not
considered to be a possible number. Note that validation of whether the
number is actually a valid number for a particular region is not
performed. This can be done separately with is_valid_number.
Note this method canonicalizes the phone number such that different
representations can be easily compared, no matter what form it was
originally entered in (e.g. national, international). If you want to
record context about the number being parsed, such as the raw input that
was entered, how the country code was derived etc. then ensure
keep_raw_input is set.
Note if any new field is added to this method that should always be filled
in, even when keep_raw_input is False, it should also be handled in the
_copy_core_fields_only() function.
Arguments:
number -- The number that we are attempting to parse. This can
contain formatting such as +, ( and -, as well as a phone
number extension. It can also be provided in RFC3966 format.
region -- The region that we are expecting the number to be from. This
is only used if the number being parsed is not written in
international format. The country_code for the number in
this case would be stored as that of the default region
supplied. If the number is guaranteed to start with a '+'
followed by the country calling code, then None or
UNKNOWN_REGION can be supplied.
keep_raw_input -- Whether to populate the raw_input field of the
PhoneNumber object with number (as well as the
country_code_source field).
numobj -- An optional existing PhoneNumber object to receive the
parsing results
_check_region -- Whether to check the supplied region parameter;
should always be True for external callers.
Returns a PhoneNumber object filled with the parsed number.
Raises:
NumberParseException if the string is not considered to be a viable
phone number (e.g. too few or too many digits) or if no default
region was supplied and the number is not in international format
(does not start with +).
"""
if numobj is None:
numobj = PhoneNumber()
if number is None:
raise NumberParseException(NumberParseException.NOT_A_NUMBER,
"The phone number supplied was None.")
elif len(number) > _MAX_INPUT_STRING_LENGTH:
raise NumberParseException(NumberParseException.TOO_LONG,
"The string supplied was too long to parse.")
national_number = _build_national_number_for_parsing(number)
if not _is_viable_phone_number(national_number):
raise NumberParseException(NumberParseException.NOT_A_NUMBER,
"The string supplied did not seem to be a phone number.")
# Check the region supplied is valid, or that the extracted number starts
# with some sort of + sign so the number's region can be determined.
if _check_region and not _check_region_for_parsing(national_number, region):
raise NumberParseException(NumberParseException.INVALID_COUNTRY_CODE,
"Missing or invalid default region.")
if keep_raw_input:
numobj.raw_input = number
# Attempt to parse extension first, since it doesn't require
# region-specific data and we want to have the non-normalised number here.
extension, national_number = _maybe_strip_extension(national_number)
if len(extension) > 0:
numobj.extension = extension
if region is None:
metadata = None
else:
metadata = PhoneMetadata.metadata_for_region(region.upper(), None)
country_code = 0
try:
country_code, normalized_national_number = _maybe_extract_country_code(national_number,
metadata,
keep_raw_input,
numobj)
except NumberParseException:
_, e, _ = sys.exc_info()
matchobj = _PLUS_CHARS_PATTERN.match(national_number)
if (e.error_type == NumberParseException.INVALID_COUNTRY_CODE and
matchobj is not None):
# Strip the plus-char, and try again.
country_code, normalized_national_number = _maybe_extract_country_code(national_number[matchobj.end():],
metadata,
keep_raw_input,
numobj)
if country_code == 0:
raise NumberParseException(NumberParseException.INVALID_COUNTRY_CODE,
"Could not interpret numbers after plus-sign.")
else:
raise
if country_code != 0:
number_region = region_code_for_country_code(country_code)
if number_region != region:
# Metadata cannot be null because the country calling code is valid.
metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, number_region)
else:
# If no extracted country calling code, use the region supplied
# instead. The national number is just the normalized version of the
# number we were given to parse.
normalized_national_number += _normalize(national_number)
if region is not None:
country_code = metadata.country_code
numobj.country_code = country_code
elif keep_raw_input:
numobj.country_code_source = CountryCodeSource.UNSPECIFIED
if len(normalized_national_number) < _MIN_LENGTH_FOR_NSN:
raise NumberParseException(NumberParseException.TOO_SHORT_NSN,
"The string supplied is too short to be a phone number.")
if metadata is not None:
potential_national_number = normalized_national_number
carrier_code, potential_national_number, _ = _maybe_strip_national_prefix_carrier_code(potential_national_number,
metadata)
# We require that the NSN remaining after stripping the national
# prefix and carrier code be long enough to be a possible length for
# the region. Otherwise, we don't do the stripping, since the original
# number could be a valid short number.
validation_result = _test_number_length(potential_national_number, metadata)
if validation_result not in (ValidationResult.TOO_SHORT,
ValidationResult.IS_POSSIBLE_LOCAL_ONLY,
ValidationResult.INVALID_LENGTH):
normalized_national_number = potential_national_number
if keep_raw_input and carrier_code is not None and len(carrier_code) > 0:
numobj.preferred_domestic_carrier_code = carrier_code
len_national_number = len(normalized_national_number)
if len_national_number < _MIN_LENGTH_FOR_NSN: # pragma no cover
# Check of _is_viable_phone_number() at the top of this function makes
# this effectively unhittable.
raise NumberParseException(NumberParseException.TOO_SHORT_NSN,
"The string supplied is too short to be a phone number.")
if len_national_number > _MAX_LENGTH_FOR_NSN:
raise NumberParseException(NumberParseException.TOO_LONG,
"The string supplied is too long to be a phone number.")
_set_italian_leading_zeros_for_phone_number(normalized_national_number, numobj)
numobj.national_number = to_long(normalized_national_number)
return numobj | def function[parse, parameter[number, region, keep_raw_input, numobj, _check_region]]:
constant[Parse a string and return a corresponding PhoneNumber object.
The method is quite lenient and looks for a number in the input text
(raw input) and does not check whether the string is definitely only a
phone number. To do this, it ignores punctuation and white-space, as
well as any text before the number (e.g. a leading "Tel: ") and trims
the non-number bits. It will accept a number in any format (E164,
national, international etc), assuming it can be interpreted with the
defaultRegion supplied. It also attempts to convert any alpha characters
into digits if it thinks this is a vanity number of the type "1800
MICROSOFT".
This method will throw a NumberParseException if the number is not
considered to be a possible number. Note that validation of whether the
number is actually a valid number for a particular region is not
performed. This can be done separately with is_valid_number.
Note this method canonicalizes the phone number such that different
representations can be easily compared, no matter what form it was
originally entered in (e.g. national, international). If you want to
record context about the number being parsed, such as the raw input that
was entered, how the country code was derived etc. then ensure
keep_raw_input is set.
Note if any new field is added to this method that should always be filled
in, even when keep_raw_input is False, it should also be handled in the
_copy_core_fields_only() function.
Arguments:
number -- The number that we are attempting to parse. This can
contain formatting such as +, ( and -, as well as a phone
number extension. It can also be provided in RFC3966 format.
region -- The region that we are expecting the number to be from. This
is only used if the number being parsed is not written in
international format. The country_code for the number in
this case would be stored as that of the default region
supplied. If the number is guaranteed to start with a '+'
followed by the country calling code, then None or
UNKNOWN_REGION can be supplied.
keep_raw_input -- Whether to populate the raw_input field of the
PhoneNumber object with number (as well as the
country_code_source field).
numobj -- An optional existing PhoneNumber object to receive the
parsing results
_check_region -- Whether to check the supplied region parameter;
should always be True for external callers.
Returns a PhoneNumber object filled with the parsed number.
Raises:
NumberParseException if the string is not considered to be a viable
phone number (e.g. too few or too many digits) or if no default
region was supplied and the number is not in international format
(does not start with +).
]
if compare[name[numobj] is constant[None]] begin[:]
variable[numobj] assign[=] call[name[PhoneNumber], parameter[]]
if compare[name[number] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b194cf10>
variable[national_number] assign[=] call[name[_build_national_number_for_parsing], parameter[name[number]]]
if <ast.UnaryOp object at 0x7da1b194c310> begin[:]
<ast.Raise object at 0x7da1b194d360>
if <ast.BoolOp object at 0x7da1b194c490> begin[:]
<ast.Raise object at 0x7da1b194cd30>
if name[keep_raw_input] begin[:]
name[numobj].raw_input assign[=] name[number]
<ast.Tuple object at 0x7da1b194d3c0> assign[=] call[name[_maybe_strip_extension], parameter[name[national_number]]]
if compare[call[name[len], parameter[name[extension]]] greater[>] constant[0]] begin[:]
name[numobj].extension assign[=] name[extension]
if compare[name[region] is constant[None]] begin[:]
variable[metadata] assign[=] constant[None]
variable[country_code] assign[=] constant[0]
<ast.Try object at 0x7da1b194df60>
if compare[name[country_code] not_equal[!=] constant[0]] begin[:]
variable[number_region] assign[=] call[name[region_code_for_country_code], parameter[name[country_code]]]
if compare[name[number_region] not_equal[!=] name[region]] begin[:]
variable[metadata] assign[=] call[name[PhoneMetadata].metadata_for_region_or_calling_code, parameter[name[country_code], name[number_region]]]
if compare[call[name[len], parameter[name[normalized_national_number]]] less[<] name[_MIN_LENGTH_FOR_NSN]] begin[:]
<ast.Raise object at 0x7da1b1950640>
if compare[name[metadata] is_not constant[None]] begin[:]
variable[potential_national_number] assign[=] name[normalized_national_number]
<ast.Tuple object at 0x7da1b1950d30> assign[=] call[name[_maybe_strip_national_prefix_carrier_code], parameter[name[potential_national_number], name[metadata]]]
variable[validation_result] assign[=] call[name[_test_number_length], parameter[name[potential_national_number], name[metadata]]]
if compare[name[validation_result] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da1b19da500>, <ast.Attribute object at 0x7da1b19d99f0>, <ast.Attribute object at 0x7da1b19daad0>]]] begin[:]
variable[normalized_national_number] assign[=] name[potential_national_number]
if <ast.BoolOp object at 0x7da1b19dbbe0> begin[:]
name[numobj].preferred_domestic_carrier_code assign[=] name[carrier_code]
variable[len_national_number] assign[=] call[name[len], parameter[name[normalized_national_number]]]
if compare[name[len_national_number] less[<] name[_MIN_LENGTH_FOR_NSN]] begin[:]
<ast.Raise object at 0x7da1b19d8880>
if compare[name[len_national_number] greater[>] name[_MAX_LENGTH_FOR_NSN]] begin[:]
<ast.Raise object at 0x7da1b19d9210>
call[name[_set_italian_leading_zeros_for_phone_number], parameter[name[normalized_national_number], name[numobj]]]
name[numobj].national_number assign[=] call[name[to_long], parameter[name[normalized_national_number]]]
return[name[numobj]] | keyword[def] identifier[parse] ( identifier[number] , identifier[region] = keyword[None] , identifier[keep_raw_input] = keyword[False] ,
identifier[numobj] = keyword[None] , identifier[_check_region] = keyword[True] ):
literal[string]
keyword[if] identifier[numobj] keyword[is] keyword[None] :
identifier[numobj] = identifier[PhoneNumber] ()
keyword[if] identifier[number] keyword[is] keyword[None] :
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[NOT_A_NUMBER] ,
literal[string] )
keyword[elif] identifier[len] ( identifier[number] )> identifier[_MAX_INPUT_STRING_LENGTH] :
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[TOO_LONG] ,
literal[string] )
identifier[national_number] = identifier[_build_national_number_for_parsing] ( identifier[number] )
keyword[if] keyword[not] identifier[_is_viable_phone_number] ( identifier[national_number] ):
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[NOT_A_NUMBER] ,
literal[string] )
keyword[if] identifier[_check_region] keyword[and] keyword[not] identifier[_check_region_for_parsing] ( identifier[national_number] , identifier[region] ):
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[INVALID_COUNTRY_CODE] ,
literal[string] )
keyword[if] identifier[keep_raw_input] :
identifier[numobj] . identifier[raw_input] = identifier[number]
identifier[extension] , identifier[national_number] = identifier[_maybe_strip_extension] ( identifier[national_number] )
keyword[if] identifier[len] ( identifier[extension] )> literal[int] :
identifier[numobj] . identifier[extension] = identifier[extension]
keyword[if] identifier[region] keyword[is] keyword[None] :
identifier[metadata] = keyword[None]
keyword[else] :
identifier[metadata] = identifier[PhoneMetadata] . identifier[metadata_for_region] ( identifier[region] . identifier[upper] (), keyword[None] )
identifier[country_code] = literal[int]
keyword[try] :
identifier[country_code] , identifier[normalized_national_number] = identifier[_maybe_extract_country_code] ( identifier[national_number] ,
identifier[metadata] ,
identifier[keep_raw_input] ,
identifier[numobj] )
keyword[except] identifier[NumberParseException] :
identifier[_] , identifier[e] , identifier[_] = identifier[sys] . identifier[exc_info] ()
identifier[matchobj] = identifier[_PLUS_CHARS_PATTERN] . identifier[match] ( identifier[national_number] )
keyword[if] ( identifier[e] . identifier[error_type] == identifier[NumberParseException] . identifier[INVALID_COUNTRY_CODE] keyword[and]
identifier[matchobj] keyword[is] keyword[not] keyword[None] ):
identifier[country_code] , identifier[normalized_national_number] = identifier[_maybe_extract_country_code] ( identifier[national_number] [ identifier[matchobj] . identifier[end] ():],
identifier[metadata] ,
identifier[keep_raw_input] ,
identifier[numobj] )
keyword[if] identifier[country_code] == literal[int] :
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[INVALID_COUNTRY_CODE] ,
literal[string] )
keyword[else] :
keyword[raise]
keyword[if] identifier[country_code] != literal[int] :
identifier[number_region] = identifier[region_code_for_country_code] ( identifier[country_code] )
keyword[if] identifier[number_region] != identifier[region] :
identifier[metadata] = identifier[PhoneMetadata] . identifier[metadata_for_region_or_calling_code] ( identifier[country_code] , identifier[number_region] )
keyword[else] :
identifier[normalized_national_number] += identifier[_normalize] ( identifier[national_number] )
keyword[if] identifier[region] keyword[is] keyword[not] keyword[None] :
identifier[country_code] = identifier[metadata] . identifier[country_code]
identifier[numobj] . identifier[country_code] = identifier[country_code]
keyword[elif] identifier[keep_raw_input] :
identifier[numobj] . identifier[country_code_source] = identifier[CountryCodeSource] . identifier[UNSPECIFIED]
keyword[if] identifier[len] ( identifier[normalized_national_number] )< identifier[_MIN_LENGTH_FOR_NSN] :
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[TOO_SHORT_NSN] ,
literal[string] )
keyword[if] identifier[metadata] keyword[is] keyword[not] keyword[None] :
identifier[potential_national_number] = identifier[normalized_national_number]
identifier[carrier_code] , identifier[potential_national_number] , identifier[_] = identifier[_maybe_strip_national_prefix_carrier_code] ( identifier[potential_national_number] ,
identifier[metadata] )
identifier[validation_result] = identifier[_test_number_length] ( identifier[potential_national_number] , identifier[metadata] )
keyword[if] identifier[validation_result] keyword[not] keyword[in] ( identifier[ValidationResult] . identifier[TOO_SHORT] ,
identifier[ValidationResult] . identifier[IS_POSSIBLE_LOCAL_ONLY] ,
identifier[ValidationResult] . identifier[INVALID_LENGTH] ):
identifier[normalized_national_number] = identifier[potential_national_number]
keyword[if] identifier[keep_raw_input] keyword[and] identifier[carrier_code] keyword[is] keyword[not] keyword[None] keyword[and] identifier[len] ( identifier[carrier_code] )> literal[int] :
identifier[numobj] . identifier[preferred_domestic_carrier_code] = identifier[carrier_code]
identifier[len_national_number] = identifier[len] ( identifier[normalized_national_number] )
keyword[if] identifier[len_national_number] < identifier[_MIN_LENGTH_FOR_NSN] :
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[TOO_SHORT_NSN] ,
literal[string] )
keyword[if] identifier[len_national_number] > identifier[_MAX_LENGTH_FOR_NSN] :
keyword[raise] identifier[NumberParseException] ( identifier[NumberParseException] . identifier[TOO_LONG] ,
literal[string] )
identifier[_set_italian_leading_zeros_for_phone_number] ( identifier[normalized_national_number] , identifier[numobj] )
identifier[numobj] . identifier[national_number] = identifier[to_long] ( identifier[normalized_national_number] )
keyword[return] identifier[numobj] | def parse(number, region=None, keep_raw_input=False, numobj=None, _check_region=True):
"""Parse a string and return a corresponding PhoneNumber object.
The method is quite lenient and looks for a number in the input text
(raw input) and does not check whether the string is definitely only a
phone number. To do this, it ignores punctuation and white-space, as
well as any text before the number (e.g. a leading "Tel: ") and trims
the non-number bits. It will accept a number in any format (E164,
national, international etc), assuming it can be interpreted with the
defaultRegion supplied. It also attempts to convert any alpha characters
into digits if it thinks this is a vanity number of the type "1800
MICROSOFT".
This method will throw a NumberParseException if the number is not
considered to be a possible number. Note that validation of whether the
number is actually a valid number for a particular region is not
performed. This can be done separately with is_valid_number.
Note this method canonicalizes the phone number such that different
representations can be easily compared, no matter what form it was
originally entered in (e.g. national, international). If you want to
record context about the number being parsed, such as the raw input that
was entered, how the country code was derived etc. then ensure
keep_raw_input is set.
Note if any new field is added to this method that should always be filled
in, even when keep_raw_input is False, it should also be handled in the
_copy_core_fields_only() function.
Arguments:
number -- The number that we are attempting to parse. This can
contain formatting such as +, ( and -, as well as a phone
number extension. It can also be provided in RFC3966 format.
region -- The region that we are expecting the number to be from. This
is only used if the number being parsed is not written in
international format. The country_code for the number in
this case would be stored as that of the default region
supplied. If the number is guaranteed to start with a '+'
followed by the country calling code, then None or
UNKNOWN_REGION can be supplied.
keep_raw_input -- Whether to populate the raw_input field of the
PhoneNumber object with number (as well as the
country_code_source field).
numobj -- An optional existing PhoneNumber object to receive the
parsing results
_check_region -- Whether to check the supplied region parameter;
should always be True for external callers.
    Returns a PhoneNumber object filled with the parsed number.
Raises:
NumberParseException if the string is not considered to be a viable
phone number (e.g. too few or too many digits) or if no default
region was supplied and the number is not in international format
(does not start with +).
"""
if numobj is None:
numobj = PhoneNumber() # depends on [control=['if'], data=['numobj']]
if number is None:
raise NumberParseException(NumberParseException.NOT_A_NUMBER, 'The phone number supplied was None.') # depends on [control=['if'], data=[]]
elif len(number) > _MAX_INPUT_STRING_LENGTH:
raise NumberParseException(NumberParseException.TOO_LONG, 'The string supplied was too long to parse.') # depends on [control=['if'], data=[]]
national_number = _build_national_number_for_parsing(number)
if not _is_viable_phone_number(national_number):
raise NumberParseException(NumberParseException.NOT_A_NUMBER, 'The string supplied did not seem to be a phone number.') # depends on [control=['if'], data=[]]
# Check the region supplied is valid, or that the extracted number starts
# with some sort of + sign so the number's region can be determined.
if _check_region and (not _check_region_for_parsing(national_number, region)):
raise NumberParseException(NumberParseException.INVALID_COUNTRY_CODE, 'Missing or invalid default region.') # depends on [control=['if'], data=[]]
if keep_raw_input:
numobj.raw_input = number # depends on [control=['if'], data=[]]
# Attempt to parse extension first, since it doesn't require
# region-specific data and we want to have the non-normalised number here.
(extension, national_number) = _maybe_strip_extension(national_number)
if len(extension) > 0:
numobj.extension = extension # depends on [control=['if'], data=[]]
if region is None:
metadata = None # depends on [control=['if'], data=[]]
else:
metadata = PhoneMetadata.metadata_for_region(region.upper(), None)
country_code = 0
try:
(country_code, normalized_national_number) = _maybe_extract_country_code(national_number, metadata, keep_raw_input, numobj) # depends on [control=['try'], data=[]]
except NumberParseException:
(_, e, _) = sys.exc_info()
matchobj = _PLUS_CHARS_PATTERN.match(national_number)
if e.error_type == NumberParseException.INVALID_COUNTRY_CODE and matchobj is not None:
# Strip the plus-char, and try again.
(country_code, normalized_national_number) = _maybe_extract_country_code(national_number[matchobj.end():], metadata, keep_raw_input, numobj)
if country_code == 0:
raise NumberParseException(NumberParseException.INVALID_COUNTRY_CODE, 'Could not interpret numbers after plus-sign.') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=[]]
if country_code != 0:
number_region = region_code_for_country_code(country_code)
if number_region != region:
# Metadata cannot be null because the country calling code is valid.
metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, number_region) # depends on [control=['if'], data=['number_region']] # depends on [control=['if'], data=['country_code']]
else:
# If no extracted country calling code, use the region supplied
# instead. The national number is just the normalized version of the
# number we were given to parse.
normalized_national_number += _normalize(national_number)
if region is not None:
country_code = metadata.country_code
numobj.country_code = country_code # depends on [control=['if'], data=[]]
elif keep_raw_input:
numobj.country_code_source = CountryCodeSource.UNSPECIFIED # depends on [control=['if'], data=[]]
if len(normalized_national_number) < _MIN_LENGTH_FOR_NSN:
raise NumberParseException(NumberParseException.TOO_SHORT_NSN, 'The string supplied is too short to be a phone number.') # depends on [control=['if'], data=[]]
if metadata is not None:
potential_national_number = normalized_national_number
(carrier_code, potential_national_number, _) = _maybe_strip_national_prefix_carrier_code(potential_national_number, metadata)
# We require that the NSN remaining after stripping the national
# prefix and carrier code be long enough to be a possible length for
# the region. Otherwise, we don't do the stripping, since the original
# number could be a valid short number.
validation_result = _test_number_length(potential_national_number, metadata)
if validation_result not in (ValidationResult.TOO_SHORT, ValidationResult.IS_POSSIBLE_LOCAL_ONLY, ValidationResult.INVALID_LENGTH):
normalized_national_number = potential_national_number
if keep_raw_input and carrier_code is not None and (len(carrier_code) > 0):
numobj.preferred_domestic_carrier_code = carrier_code # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['metadata']]
len_national_number = len(normalized_national_number)
if len_national_number < _MIN_LENGTH_FOR_NSN: # pragma no cover
# Check of _is_viable_phone_number() at the top of this function makes
# this effectively unhittable.
raise NumberParseException(NumberParseException.TOO_SHORT_NSN, 'The string supplied is too short to be a phone number.') # depends on [control=['if'], data=[]]
if len_national_number > _MAX_LENGTH_FOR_NSN:
raise NumberParseException(NumberParseException.TOO_LONG, 'The string supplied is too long to be a phone number.') # depends on [control=['if'], data=[]]
_set_italian_leading_zeros_for_phone_number(normalized_national_number, numobj)
numobj.national_number = to_long(normalized_national_number)
return numobj |
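
A minimal usage sketch for the parse() routine above, assuming this row comes from the python-phonenumbers port (the dump itself does not name the package); the phone numbers used are illustrative only.

import phonenumbers

try:
    # international format: no default region is needed
    numobj = phonenumbers.parse("+442083661177", None)
    # national format: a default region such as "GB" is required
    numobj_gb = phonenumbers.parse("020 8366 1177", "GB")
    print(numobj.country_code, numobj.national_number)   # 44 2083661177
except phonenumbers.NumberParseException as exc:
    print("not a viable phone number:", exc)
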
def run_experiment(methods, data, n_classes, true_labels, n_runs=10, use_purity=True, use_nmi=False, use_ari=False, use_nne=False, consensus=False):
"""
runs a pre-processing + clustering experiment...
exactly one of use_purity, use_nmi, or use_ari can be true
Args:
methods: list of 2-tuples. The first element is either a single Preprocess object or a list of Preprocess objects, to be applied in sequence to the data. The second element is either a single Cluster object, a list of Cluster objects, or a list of lists, where each list is a sequence of Preprocess objects with the final element being a Cluster object.
data: genes x cells array
true_labels: 1d array of length cells
consensus: if true, runs a consensus on cluster results for each method at the very end.
use_purity, use_nmi, use_ari, use_nne: which error metric to use (at most one can be True)
Returns:
purities (list of lists)
names (list of lists)
other (dict): keys: timing, preprocessing, clusterings
"""
results = []
names = []
clusterings = {}
other_results = {}
other_results['timing'] = {}
other_results['preprocessing'] = {}
if use_purity:
purity_method = purity
elif use_nmi:
purity_method = nmi
elif use_ari:
purity_method = ari
elif use_nne:
purity_method = nne
for i in range(n_runs):
print('run {0}'.format(i))
purities = []
r = 0
method_index = 0
for preproc, cluster in methods:
t0 = time.time()
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
if output_names[0] != '':
output_names[0] = output_names[0] + '_' + p.output_names[0]
else:
output_names[0] = p.output_names[0]
preprocessed = [p1]
t1 = time.time() - t0
for name, pre in zip(output_names, preprocessed):
starting_index = method_index
if isinstance(cluster, Cluster):
#try:
t0 = time.time()
labels = cluster.run(pre)
t2 = t1 + time.time() - t0
if use_nne:
purities.append(purity_method(pre, true_labels))
else:
purities.append(purity_method(labels, true_labels))
if i==0:
names.append(name + '_' + cluster.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
print(names[r])
clusterings[names[r]].append(labels)
print('time: ' + str(t2))
other_results['timing'][names[r]].append(t2)
print(purities[-1])
r += 1
method_index += 1
#except:
# print('failed to do clustering')
elif type(cluster) == list:
for c in cluster:
if isinstance(c, list):
t2 = t1
name2 = name
sub_data = pre.copy()
for subproc in c[:-1]:
t0 = time.time()
subproc_out, ll = subproc.run(sub_data)
sub_data = subproc_out[0]
name2 = name2 + '_' + subproc.output_names[0]
t2 += time.time() - t0
t0 = time.time()
labels = c[-1].run(sub_data)
t2 += time.time() - t0
if use_nne:
purities.append(purity_method(sub_data, true_labels))
else:
purities.append(purity_method(labels, true_labels))
if i==0:
names.append(name2 + '_' + c[-1].name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1
else:
try:
t0 = time.time()
labels = c.run(pre)
t2 = t1 + time.time() - t0
if i==0:
names.append(name + '_' + c.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = []
if use_nne:
purities.append(purity_method(pre, true_labels))
else:
purities.append(purity_method(labels, true_labels))
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1
except:
print('failed to do clustering')
# find the highest purity for the pre-processing method
# save the preprocessing result with the highest NMI
num_clustering_results = method_index - starting_index
clustering_results = purities[-num_clustering_results:]
if i > 0 and len(clustering_results) > 0:
old_clustering_results = results[-1][starting_index:method_index]
if max(old_clustering_results) < max(clustering_results):
other_results['preprocessing'][name] = pre
else:
other_results['preprocessing'][name] = pre
print('\t'.join(names))
print('purities: ' + '\t'.join(map(str, purities)))
results.append(purities)
consensus_purities = []
if consensus:
other_results['consensus'] = {}
k = len(np.unique(true_labels))
for name, clusts in clusterings.items():
print(name)
clusts = np.vstack(clusts)
consensus_clust = CE.cluster_ensembles(clusts, verbose=False, N_clusters_max=k)
other_results['consensus'][name] = consensus_clust
if use_purity:
consensus_purity = purity(consensus_clust.flatten(), true_labels)
print('consensus purity: ' + str(consensus_purity))
consensus_purities.append(consensus_purity)
if use_nmi:
consensus_nmi = nmi(true_labels, consensus_clust)
print('consensus NMI: ' + str(consensus_nmi))
consensus_purities.append(consensus_nmi)
if use_ari:
consensus_ari = ari(true_labels, consensus_clust)
print('consensus ARI: ' + str(consensus_ari))
consensus_purities.append(consensus_ari)
print('consensus results: ' + '\t'.join(map(str, consensus_purities)))
other_results['clusterings'] = clusterings
return results, names, other_results | def function[run_experiment, parameter[methods, data, n_classes, true_labels, n_runs, use_purity, use_nmi, use_ari, use_nne, consensus]]:
constant[
runs a pre-processing + clustering experiment...
exactly one of use_purity, use_nmi, or use_ari can be true
Args:
methods: list of 2-tuples. The first element is either a single Preprocess object or a list of Preprocess objects, to be applied in sequence to the data. The second element is either a single Cluster object, a list of Cluster objects, or a list of lists, where each list is a sequence of Preprocess objects with the final element being a Cluster object.
data: genes x cells array
true_labels: 1d array of length cells
consensus: if true, runs a consensus on cluster results for each method at the very end.
use_purity, use_nmi, use_ari, use_nne: which error metric to use (at most one can be True)
Returns:
purities (list of lists)
names (list of lists)
other (dict): keys: timing, preprocessing, clusterings
]
variable[results] assign[=] list[[]]
variable[names] assign[=] list[[]]
variable[clusterings] assign[=] dictionary[[], []]
variable[other_results] assign[=] dictionary[[], []]
call[name[other_results]][constant[timing]] assign[=] dictionary[[], []]
call[name[other_results]][constant[preprocessing]] assign[=] dictionary[[], []]
if name[use_purity] begin[:]
variable[purity_method] assign[=] name[purity]
for taget[name[i]] in starred[call[name[range], parameter[name[n_runs]]]] begin[:]
call[name[print], parameter[call[constant[run {0}].format, parameter[name[i]]]]]
variable[purities] assign[=] list[[]]
variable[r] assign[=] constant[0]
variable[method_index] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b1a0ef20>, <ast.Name object at 0x7da1b1a0eef0>]]] in starred[name[methods]] begin[:]
variable[t0] assign[=] call[name[time].time, parameter[]]
if call[name[isinstance], parameter[name[preproc], name[Preprocess]]] begin[:]
<ast.Tuple object at 0x7da1b1a0ec80> assign[=] call[name[preproc].run, parameter[name[data]]]
variable[output_names] assign[=] name[preproc].output_names
variable[t1] assign[=] binary_operation[call[name[time].time, parameter[]] - name[t0]]
for taget[tuple[[<ast.Name object at 0x7da1b1a0de70>, <ast.Name object at 0x7da1b1a0de40>]]] in starred[call[name[zip], parameter[name[output_names], name[preprocessed]]]] begin[:]
variable[starting_index] assign[=] name[method_index]
if call[name[isinstance], parameter[name[cluster], name[Cluster]]] begin[:]
variable[t0] assign[=] call[name[time].time, parameter[]]
variable[labels] assign[=] call[name[cluster].run, parameter[name[pre]]]
variable[t2] assign[=] binary_operation[binary_operation[name[t1] + call[name[time].time, parameter[]]] - name[t0]]
if name[use_nne] begin[:]
call[name[purities].append, parameter[call[name[purity_method], parameter[name[pre], name[true_labels]]]]]
if compare[name[i] equal[==] constant[0]] begin[:]
call[name[names].append, parameter[binary_operation[binary_operation[name[name] + constant[_]] + name[cluster].name]]]
call[name[clusterings]][call[name[names]][<ast.UnaryOp object at 0x7da1b1a0d060>]] assign[=] list[[]]
call[call[name[other_results]][constant[timing]]][call[name[names]][<ast.UnaryOp object at 0x7da1b1a0ce80>]] assign[=] list[[]]
call[name[print], parameter[call[name[names]][name[r]]]]
call[call[name[clusterings]][call[name[names]][name[r]]].append, parameter[name[labels]]]
call[name[print], parameter[binary_operation[constant[time: ] + call[name[str], parameter[name[t2]]]]]]
call[call[call[name[other_results]][constant[timing]]][call[name[names]][name[r]]].append, parameter[name[t2]]]
call[name[print], parameter[call[name[purities]][<ast.UnaryOp object at 0x7da1b1a2e5f0>]]]
<ast.AugAssign object at 0x7da1b1a2d6c0>
<ast.AugAssign object at 0x7da1b1a2d690>
variable[num_clustering_results] assign[=] binary_operation[name[method_index] - name[starting_index]]
variable[clustering_results] assign[=] call[name[purities]][<ast.Slice object at 0x7da18f09cd30>]
if <ast.BoolOp object at 0x7da18f09eec0> begin[:]
variable[old_clustering_results] assign[=] call[call[name[results]][<ast.UnaryOp object at 0x7da18f09db40>]][<ast.Slice object at 0x7da18f09e6e0>]
if compare[call[name[max], parameter[name[old_clustering_results]]] less[<] call[name[max], parameter[name[clustering_results]]]] begin[:]
call[call[name[other_results]][constant[preprocessing]]][name[name]] assign[=] name[pre]
call[name[print], parameter[call[constant[ ].join, parameter[name[names]]]]]
call[name[print], parameter[binary_operation[constant[purities: ] + call[constant[ ].join, parameter[call[name[map], parameter[name[str], name[purities]]]]]]]]
call[name[results].append, parameter[name[purities]]]
variable[consensus_purities] assign[=] list[[]]
if name[consensus] begin[:]
call[name[other_results]][constant[consensus]] assign[=] dictionary[[], []]
variable[k] assign[=] call[name[len], parameter[call[name[np].unique, parameter[name[true_labels]]]]]
for taget[tuple[[<ast.Name object at 0x7da2044c2d40>, <ast.Name object at 0x7da2044c18a0>]]] in starred[call[name[clusterings].items, parameter[]]] begin[:]
call[name[print], parameter[name[name]]]
variable[clusts] assign[=] call[name[np].vstack, parameter[name[clusts]]]
variable[consensus_clust] assign[=] call[name[CE].cluster_ensembles, parameter[name[clusts]]]
call[call[name[other_results]][constant[consensus]]][name[name]] assign[=] name[consensus_clust]
if name[use_purity] begin[:]
variable[consensus_purity] assign[=] call[name[purity], parameter[call[name[consensus_clust].flatten, parameter[]], name[true_labels]]]
call[name[print], parameter[binary_operation[constant[consensus purity: ] + call[name[str], parameter[name[consensus_purity]]]]]]
call[name[consensus_purities].append, parameter[name[consensus_purity]]]
if name[use_nmi] begin[:]
variable[consensus_nmi] assign[=] call[name[nmi], parameter[name[true_labels], name[consensus_clust]]]
call[name[print], parameter[binary_operation[constant[consensus NMI: ] + call[name[str], parameter[name[consensus_nmi]]]]]]
call[name[consensus_purities].append, parameter[name[consensus_nmi]]]
if name[use_ari] begin[:]
variable[consensus_ari] assign[=] call[name[ari], parameter[name[true_labels], name[consensus_clust]]]
call[name[print], parameter[binary_operation[constant[consensus ARI: ] + call[name[str], parameter[name[consensus_ari]]]]]]
call[name[consensus_purities].append, parameter[name[consensus_ari]]]
call[name[print], parameter[binary_operation[constant[consensus results: ] + call[constant[ ].join, parameter[call[name[map], parameter[name[str], name[consensus_purities]]]]]]]]
call[name[other_results]][constant[clusterings]] assign[=] name[clusterings]
return[tuple[[<ast.Name object at 0x7da2044c2110>, <ast.Name object at 0x7da2044c1780>, <ast.Name object at 0x7da2044c1060>]]] | keyword[def] identifier[run_experiment] ( identifier[methods] , identifier[data] , identifier[n_classes] , identifier[true_labels] , identifier[n_runs] = literal[int] , identifier[use_purity] = keyword[True] , identifier[use_nmi] = keyword[False] , identifier[use_ari] = keyword[False] , identifier[use_nne] = keyword[False] , identifier[consensus] = keyword[False] ):
literal[string]
identifier[results] =[]
identifier[names] =[]
identifier[clusterings] ={}
identifier[other_results] ={}
identifier[other_results] [ literal[string] ]={}
identifier[other_results] [ literal[string] ]={}
keyword[if] identifier[use_purity] :
identifier[purity_method] = identifier[purity]
keyword[elif] identifier[use_nmi] :
identifier[purity_method] = identifier[nmi]
keyword[elif] identifier[use_ari] :
identifier[purity_method] = identifier[ari]
keyword[elif] identifier[use_nne] :
identifier[purity_method] = identifier[nne]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[n_runs] ):
identifier[print] ( literal[string] . identifier[format] ( identifier[i] ))
identifier[purities] =[]
identifier[r] = literal[int]
identifier[method_index] = literal[int]
keyword[for] identifier[preproc] , identifier[cluster] keyword[in] identifier[methods] :
identifier[t0] = identifier[time] . identifier[time] ()
keyword[if] identifier[isinstance] ( identifier[preproc] , identifier[Preprocess] ):
identifier[preprocessed] , identifier[ll] = identifier[preproc] . identifier[run] ( identifier[data] )
identifier[output_names] = identifier[preproc] . identifier[output_names]
keyword[else] :
identifier[p1] = identifier[data]
identifier[output_names] =[ literal[string] ]
keyword[for] identifier[p] keyword[in] identifier[preproc] :
identifier[p1] , identifier[ll] = identifier[p] . identifier[run] ( identifier[p1] )
identifier[p1] = identifier[p1] [ literal[int] ]
keyword[if] identifier[output_names] [ literal[int] ]!= literal[string] :
identifier[output_names] [ literal[int] ]= identifier[output_names] [ literal[int] ]+ literal[string] + identifier[p] . identifier[output_names] [ literal[int] ]
keyword[else] :
identifier[output_names] [ literal[int] ]= identifier[p] . identifier[output_names] [ literal[int] ]
identifier[preprocessed] =[ identifier[p1] ]
identifier[t1] = identifier[time] . identifier[time] ()- identifier[t0]
keyword[for] identifier[name] , identifier[pre] keyword[in] identifier[zip] ( identifier[output_names] , identifier[preprocessed] ):
identifier[starting_index] = identifier[method_index]
keyword[if] identifier[isinstance] ( identifier[cluster] , identifier[Cluster] ):
identifier[t0] = identifier[time] . identifier[time] ()
identifier[labels] = identifier[cluster] . identifier[run] ( identifier[pre] )
identifier[t2] = identifier[t1] + identifier[time] . identifier[time] ()- identifier[t0]
keyword[if] identifier[use_nne] :
identifier[purities] . identifier[append] ( identifier[purity_method] ( identifier[pre] , identifier[true_labels] ))
keyword[else] :
identifier[purities] . identifier[append] ( identifier[purity_method] ( identifier[labels] , identifier[true_labels] ))
keyword[if] identifier[i] == literal[int] :
identifier[names] . identifier[append] ( identifier[name] + literal[string] + identifier[cluster] . identifier[name] )
identifier[clusterings] [ identifier[names] [- literal[int] ]]=[]
identifier[other_results] [ literal[string] ][ identifier[names] [- literal[int] ]]=[]
identifier[print] ( identifier[names] [ identifier[r] ])
identifier[clusterings] [ identifier[names] [ identifier[r] ]]. identifier[append] ( identifier[labels] )
identifier[print] ( literal[string] + identifier[str] ( identifier[t2] ))
identifier[other_results] [ literal[string] ][ identifier[names] [ identifier[r] ]]. identifier[append] ( identifier[t2] )
identifier[print] ( identifier[purities] [- literal[int] ])
identifier[r] += literal[int]
identifier[method_index] += literal[int]
keyword[elif] identifier[type] ( identifier[cluster] )== identifier[list] :
keyword[for] identifier[c] keyword[in] identifier[cluster] :
keyword[if] identifier[isinstance] ( identifier[c] , identifier[list] ):
identifier[t2] = identifier[t1]
identifier[name2] = identifier[name]
identifier[sub_data] = identifier[pre] . identifier[copy] ()
keyword[for] identifier[subproc] keyword[in] identifier[c] [:- literal[int] ]:
identifier[t0] = identifier[time] . identifier[time] ()
identifier[subproc_out] , identifier[ll] = identifier[subproc] . identifier[run] ( identifier[sub_data] )
identifier[sub_data] = identifier[subproc_out] [ literal[int] ]
identifier[name2] = identifier[name2] + literal[string] + identifier[subproc] . identifier[output_names] [ literal[int] ]
identifier[t2] += identifier[time] . identifier[time] ()- identifier[t0]
identifier[t0] = identifier[time] . identifier[time] ()
identifier[labels] = identifier[c] [- literal[int] ]. identifier[run] ( identifier[sub_data] )
identifier[t2] += identifier[time] . identifier[time] ()- identifier[t0]
keyword[if] identifier[use_nne] :
identifier[purities] . identifier[append] ( identifier[purity_method] ( identifier[sub_data] , identifier[true_labels] ))
keyword[else] :
identifier[purities] . identifier[append] ( identifier[purity_method] ( identifier[labels] , identifier[true_labels] ))
keyword[if] identifier[i] == literal[int] :
identifier[names] . identifier[append] ( identifier[name2] + literal[string] + identifier[c] [- literal[int] ]. identifier[name] )
identifier[clusterings] [ identifier[names] [- literal[int] ]]=[]
identifier[other_results] [ literal[string] ][ identifier[names] [- literal[int] ]]=[]
identifier[print] ( identifier[names] [ identifier[r] ])
identifier[clusterings] [ identifier[names] [ identifier[r] ]]. identifier[append] ( identifier[labels] )
identifier[other_results] [ literal[string] ][ identifier[names] [ identifier[r] ]]. identifier[append] ( identifier[t2] )
identifier[print] ( literal[string] + identifier[str] ( identifier[t2] ))
identifier[print] ( identifier[purities] [- literal[int] ])
identifier[r] += literal[int]
identifier[method_index] += literal[int]
keyword[else] :
keyword[try] :
identifier[t0] = identifier[time] . identifier[time] ()
identifier[labels] = identifier[c] . identifier[run] ( identifier[pre] )
identifier[t2] = identifier[t1] + identifier[time] . identifier[time] ()- identifier[t0]
keyword[if] identifier[i] == literal[int] :
identifier[names] . identifier[append] ( identifier[name] + literal[string] + identifier[c] . identifier[name] )
identifier[clusterings] [ identifier[names] [- literal[int] ]]=[]
identifier[other_results] [ literal[string] ][ identifier[names] [- literal[int] ]]=[]
keyword[if] identifier[use_nne] :
identifier[purities] . identifier[append] ( identifier[purity_method] ( identifier[pre] , identifier[true_labels] ))
keyword[else] :
identifier[purities] . identifier[append] ( identifier[purity_method] ( identifier[labels] , identifier[true_labels] ))
identifier[print] ( identifier[names] [ identifier[r] ])
identifier[clusterings] [ identifier[names] [ identifier[r] ]]. identifier[append] ( identifier[labels] )
identifier[other_results] [ literal[string] ][ identifier[names] [ identifier[r] ]]. identifier[append] ( identifier[t2] )
identifier[print] ( literal[string] + identifier[str] ( identifier[t2] ))
identifier[print] ( identifier[purities] [- literal[int] ])
identifier[r] += literal[int]
identifier[method_index] += literal[int]
keyword[except] :
identifier[print] ( literal[string] )
identifier[num_clustering_results] = identifier[method_index] - identifier[starting_index]
identifier[clustering_results] = identifier[purities] [- identifier[num_clustering_results] :]
keyword[if] identifier[i] > literal[int] keyword[and] identifier[len] ( identifier[clustering_results] )> literal[int] :
identifier[old_clustering_results] = identifier[results] [- literal[int] ][ identifier[starting_index] : identifier[method_index] ]
keyword[if] identifier[max] ( identifier[old_clustering_results] )< identifier[max] ( identifier[clustering_results] ):
identifier[other_results] [ literal[string] ][ identifier[name] ]= identifier[pre]
keyword[else] :
identifier[other_results] [ literal[string] ][ identifier[name] ]= identifier[pre]
identifier[print] ( literal[string] . identifier[join] ( identifier[names] ))
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[purities] )))
identifier[results] . identifier[append] ( identifier[purities] )
identifier[consensus_purities] =[]
keyword[if] identifier[consensus] :
identifier[other_results] [ literal[string] ]={}
identifier[k] = identifier[len] ( identifier[np] . identifier[unique] ( identifier[true_labels] ))
keyword[for] identifier[name] , identifier[clusts] keyword[in] identifier[clusterings] . identifier[items] ():
identifier[print] ( identifier[name] )
identifier[clusts] = identifier[np] . identifier[vstack] ( identifier[clusts] )
identifier[consensus_clust] = identifier[CE] . identifier[cluster_ensembles] ( identifier[clusts] , identifier[verbose] = keyword[False] , identifier[N_clusters_max] = identifier[k] )
identifier[other_results] [ literal[string] ][ identifier[name] ]= identifier[consensus_clust]
keyword[if] identifier[use_purity] :
identifier[consensus_purity] = identifier[purity] ( identifier[consensus_clust] . identifier[flatten] (), identifier[true_labels] )
identifier[print] ( literal[string] + identifier[str] ( identifier[consensus_purity] ))
identifier[consensus_purities] . identifier[append] ( identifier[consensus_purity] )
keyword[if] identifier[use_nmi] :
identifier[consensus_nmi] = identifier[nmi] ( identifier[true_labels] , identifier[consensus_clust] )
identifier[print] ( literal[string] + identifier[str] ( identifier[consensus_nmi] ))
identifier[consensus_purities] . identifier[append] ( identifier[consensus_nmi] )
keyword[if] identifier[use_ari] :
identifier[consensus_ari] = identifier[ari] ( identifier[true_labels] , identifier[consensus_clust] )
identifier[print] ( literal[string] + identifier[str] ( identifier[consensus_ari] ))
identifier[consensus_purities] . identifier[append] ( identifier[consensus_ari] )
identifier[print] ( literal[string] + literal[string] . identifier[join] ( identifier[map] ( identifier[str] , identifier[consensus_purities] )))
identifier[other_results] [ literal[string] ]= identifier[clusterings]
keyword[return] identifier[results] , identifier[names] , identifier[other_results] | def run_experiment(methods, data, n_classes, true_labels, n_runs=10, use_purity=True, use_nmi=False, use_ari=False, use_nne=False, consensus=False):
"""
runs a pre-processing + clustering experiment...
exactly one of use_purity, use_nmi, or use_ari can be true
Args:
methods: list of 2-tuples. The first element is either a single Preprocess object or a list of Preprocess objects, to be applied in sequence to the data. The second element is either a single Cluster object, a list of Cluster objects, or a list of lists, where each list is a sequence of Preprocess objects with the final element being a Cluster object.
data: genes x cells array
true_labels: 1d array of length cells
consensus: if true, runs a consensus on cluster results for each method at the very end.
use_purity, use_nmi, use_ari, use_nne: which error metric to use (at most one can be True)
Returns:
purities (list of lists)
names (list of lists)
other (dict): keys: timing, preprocessing, clusterings
"""
results = []
names = []
clusterings = {}
other_results = {}
other_results['timing'] = {}
other_results['preprocessing'] = {}
if use_purity:
purity_method = purity # depends on [control=['if'], data=[]]
elif use_nmi:
purity_method = nmi # depends on [control=['if'], data=[]]
elif use_ari:
purity_method = ari # depends on [control=['if'], data=[]]
elif use_nne:
purity_method = nne # depends on [control=['if'], data=[]]
for i in range(n_runs):
print('run {0}'.format(i))
purities = []
r = 0
method_index = 0
for (preproc, cluster) in methods:
t0 = time.time()
if isinstance(preproc, Preprocess):
(preprocessed, ll) = preproc.run(data)
output_names = preproc.output_names # depends on [control=['if'], data=[]]
else:
# if the input is a list, only use the first preproc result
p1 = data
output_names = ['']
for p in preproc:
(p1, ll) = p.run(p1)
p1 = p1[0]
if output_names[0] != '':
output_names[0] = output_names[0] + '_' + p.output_names[0] # depends on [control=['if'], data=[]]
else:
output_names[0] = p.output_names[0] # depends on [control=['for'], data=['p']]
preprocessed = [p1]
t1 = time.time() - t0
for (name, pre) in zip(output_names, preprocessed):
starting_index = method_index
if isinstance(cluster, Cluster):
#try:
t0 = time.time()
labels = cluster.run(pre)
t2 = t1 + time.time() - t0
if use_nne:
purities.append(purity_method(pre, true_labels)) # depends on [control=['if'], data=[]]
else:
purities.append(purity_method(labels, true_labels))
if i == 0:
names.append(name + '_' + cluster.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = [] # depends on [control=['if'], data=[]]
print(names[r])
clusterings[names[r]].append(labels)
print('time: ' + str(t2))
other_results['timing'][names[r]].append(t2)
print(purities[-1])
r += 1
method_index += 1 # depends on [control=['if'], data=[]]
#except:
# print('failed to do clustering')
elif type(cluster) == list:
for c in cluster:
if isinstance(c, list):
t2 = t1
name2 = name
sub_data = pre.copy()
for subproc in c[:-1]:
t0 = time.time()
(subproc_out, ll) = subproc.run(sub_data)
sub_data = subproc_out[0]
name2 = name2 + '_' + subproc.output_names[0]
t2 += time.time() - t0 # depends on [control=['for'], data=['subproc']]
t0 = time.time()
labels = c[-1].run(sub_data)
t2 += time.time() - t0
if use_nne:
purities.append(purity_method(sub_data, true_labels)) # depends on [control=['if'], data=[]]
else:
purities.append(purity_method(labels, true_labels))
if i == 0:
names.append(name2 + '_' + c[-1].name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = [] # depends on [control=['if'], data=[]]
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1 # depends on [control=['if'], data=[]]
else:
try:
t0 = time.time()
labels = c.run(pre)
t2 = t1 + time.time() - t0
if i == 0:
names.append(name + '_' + c.name)
clusterings[names[-1]] = []
other_results['timing'][names[-1]] = [] # depends on [control=['if'], data=[]]
if use_nne:
purities.append(purity_method(pre, true_labels)) # depends on [control=['if'], data=[]]
else:
purities.append(purity_method(labels, true_labels))
print(names[r])
clusterings[names[r]].append(labels)
other_results['timing'][names[r]].append(t2)
print('time: ' + str(t2))
print(purities[-1])
r += 1
method_index += 1 # depends on [control=['try'], data=[]]
except:
print('failed to do clustering') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['c']] # depends on [control=['if'], data=['list']]
# find the highest purity for the pre-processing method
# save the preprocessing result with the highest NMI
num_clustering_results = method_index - starting_index
clustering_results = purities[-num_clustering_results:]
if i > 0 and len(clustering_results) > 0:
old_clustering_results = results[-1][starting_index:method_index]
if max(old_clustering_results) < max(clustering_results):
other_results['preprocessing'][name] = pre # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
other_results['preprocessing'][name] = pre # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
print('\t'.join(names))
print('purities: ' + '\t'.join(map(str, purities)))
results.append(purities) # depends on [control=['for'], data=['i']]
consensus_purities = []
if consensus:
other_results['consensus'] = {}
k = len(np.unique(true_labels))
for (name, clusts) in clusterings.items():
print(name)
clusts = np.vstack(clusts)
consensus_clust = CE.cluster_ensembles(clusts, verbose=False, N_clusters_max=k)
other_results['consensus'][name] = consensus_clust
if use_purity:
consensus_purity = purity(consensus_clust.flatten(), true_labels)
print('consensus purity: ' + str(consensus_purity))
consensus_purities.append(consensus_purity) # depends on [control=['if'], data=[]]
if use_nmi:
consensus_nmi = nmi(true_labels, consensus_clust)
print('consensus NMI: ' + str(consensus_nmi))
consensus_purities.append(consensus_nmi) # depends on [control=['if'], data=[]]
if use_ari:
consensus_ari = ari(true_labels, consensus_clust)
print('consensus ARI: ' + str(consensus_ari))
consensus_purities.append(consensus_ari) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
print('consensus results: ' + '\t'.join(map(str, consensus_purities))) # depends on [control=['if'], data=[]]
other_results['clusterings'] = clusterings
return (results, names, other_results) |
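
The metric callables used by run_experiment (purity, nmi, ari, nne) are defined elsewhere and not shown in this dump. The sketch below spells out the standard purity definition that the use_purity branch presumably relies on; it is an illustrative assumption, not the project's actual implementation.

import numpy as np

def purity_sketch(pred_labels, true_labels):
    # each predicted cluster is credited with its most frequent true label;
    # the score is the fraction of points matching that majority label
    pred_labels = np.asarray(pred_labels)
    true_labels = np.asarray(true_labels)
    matched = 0
    for c in np.unique(pred_labels):
        members = true_labels[pred_labels == c]
        matched += np.bincount(members).max()
    return matched / len(true_labels)

print(purity_sketch([0, 0, 1, 1], [1, 1, 0, 1]))   # 0.75
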
def get_event_canned_questions(self, id, **data):
"""
GET /events/:id/canned_questions/
This endpoint returns canned questions of a single event (examples: first name, last name, company, prefix, etc.). This endpoint will return :format:`question`.
"""
return self.get("/events/{0}/canned_questions/".format(id), data=data) | def function[get_event_canned_questions, parameter[self, id]]:
constant[
GET /events/:id/canned_questions/
This endpoint returns canned questions of a single event (examples: first name, last name, company, prefix, etc.). This endpoint will return :format:`question`.
]
return[call[name[self].get, parameter[call[constant[/events/{0}/canned_questions/].format, parameter[name[id]]]]]] | keyword[def] identifier[get_event_canned_questions] ( identifier[self] , identifier[id] ,** identifier[data] ):
literal[string]
keyword[return] identifier[self] . identifier[get] ( literal[string] . identifier[format] ( identifier[id] ), identifier[data] = identifier[data] ) | def get_event_canned_questions(self, id, **data):
"""
GET /events/:id/canned_questions/
This endpoint returns canned questions of a single event (examples: first name, last name, company, prefix, etc.). This endpoint will return :format:`question`.
"""
return self.get('/events/{0}/canned_questions/'.format(id), data=data) |
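
get_event_canned_questions() above is a thin wrapper around a single GET request. The raw equivalent is sketched below; the host, auth scheme, event id and token are assumptions (the dump only shows the relative path /events/:id/canned_questions/).

import requests

EVENT_ID = "1234567890"        # hypothetical event id
TOKEN = "YOUR_OAUTH_TOKEN"     # hypothetical OAuth token
resp = requests.get(
    "https://www.eventbriteapi.com/v3/events/{0}/canned_questions/".format(EVENT_ID),
    headers={"Authorization": "Bearer " + TOKEN},
)
print(resp.status_code, resp.json())
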
def find_cached_job(jid):
'''
Return the data for a specific cached job id. Note this only works if
cache_jobs has previously been set to True on the minion.
CLI Example:
.. code-block:: bash
salt '*' saltutil.find_cached_job <job id>
'''
serial = salt.payload.Serial(__opts__)
proc_dir = os.path.join(__opts__['cachedir'], 'minion_jobs')
job_dir = os.path.join(proc_dir, six.text_type(jid))
if not os.path.isdir(job_dir):
if not __opts__.get('cache_jobs'):
return ('Local jobs cache directory not found; you may need to'
' enable cache_jobs on this minion')
else:
return 'Local jobs cache directory {0} not found'.format(job_dir)
path = os.path.join(job_dir, 'return.p')
with salt.utils.files.fopen(path, 'rb') as fp_:
buf = fp_.read()
if buf:
try:
data = serial.loads(buf)
except NameError:
# msgpack error in salt-ssh
pass
else:
if isinstance(data, dict):
# if not a dict, this was an invalid serialized object
return data
return None | def function[find_cached_job, parameter[jid]]:
constant[
Return the data for a specific cached job id. Note this only works if
cache_jobs has previously been set to True on the minion.
CLI Example:
.. code-block:: bash
salt '*' saltutil.find_cached_job <job id>
]
variable[serial] assign[=] call[name[salt].payload.Serial, parameter[name[__opts__]]]
variable[proc_dir] assign[=] call[name[os].path.join, parameter[call[name[__opts__]][constant[cachedir]], constant[minion_jobs]]]
variable[job_dir] assign[=] call[name[os].path.join, parameter[name[proc_dir], call[name[six].text_type, parameter[name[jid]]]]]
if <ast.UnaryOp object at 0x7da2043468f0> begin[:]
if <ast.UnaryOp object at 0x7da204344af0> begin[:]
return[constant[Local jobs cache directory not found; you may need to enable cache_jobs on this minion]]
variable[path] assign[=] call[name[os].path.join, parameter[name[job_dir], constant[return.p]]]
with call[name[salt].utils.files.fopen, parameter[name[path], constant[rb]]] begin[:]
variable[buf] assign[=] call[name[fp_].read, parameter[]]
if name[buf] begin[:]
<ast.Try object at 0x7da2044c2e00>
return[constant[None]] | keyword[def] identifier[find_cached_job] ( identifier[jid] ):
literal[string]
identifier[serial] = identifier[salt] . identifier[payload] . identifier[Serial] ( identifier[__opts__] )
identifier[proc_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[__opts__] [ literal[string] ], literal[string] )
identifier[job_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[proc_dir] , identifier[six] . identifier[text_type] ( identifier[jid] ))
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[job_dir] ):
keyword[if] keyword[not] identifier[__opts__] . identifier[get] ( literal[string] ):
keyword[return] ( literal[string]
literal[string] )
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[job_dir] )
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[job_dir] , literal[string] )
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[path] , literal[string] ) keyword[as] identifier[fp_] :
identifier[buf] = identifier[fp_] . identifier[read] ()
keyword[if] identifier[buf] :
keyword[try] :
identifier[data] = identifier[serial] . identifier[loads] ( identifier[buf] )
keyword[except] identifier[NameError] :
keyword[pass]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[data] , identifier[dict] ):
keyword[return] identifier[data]
keyword[return] keyword[None] | def find_cached_job(jid):
"""
Return the data for a specific cached job id. Note this only works if
cache_jobs has previously been set to True on the minion.
CLI Example:
.. code-block:: bash
salt '*' saltutil.find_cached_job <job id>
"""
serial = salt.payload.Serial(__opts__)
proc_dir = os.path.join(__opts__['cachedir'], 'minion_jobs')
job_dir = os.path.join(proc_dir, six.text_type(jid))
if not os.path.isdir(job_dir):
if not __opts__.get('cache_jobs'):
return 'Local jobs cache directory not found; you may need to enable cache_jobs on this minion' # depends on [control=['if'], data=[]]
else:
return 'Local jobs cache directory {0} not found'.format(job_dir) # depends on [control=['if'], data=[]]
path = os.path.join(job_dir, 'return.p')
with salt.utils.files.fopen(path, 'rb') as fp_:
buf = fp_.read() # depends on [control=['with'], data=['fp_']]
if buf:
try:
data = serial.loads(buf) # depends on [control=['try'], data=[]]
except NameError:
# msgpack error in salt-ssh
pass # depends on [control=['except'], data=[]]
else:
if isinstance(data, dict):
# if not a dict, this was an invalid serialized object
return data # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return None |
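
find_cached_job() above reads <cachedir>/minion_jobs/<jid>/return.p and deserializes it. Below is a standalone sketch of that lookup; using msgpack mirrors salt's default payload serializer (an assumption here), and the cachedir and jid values are hypothetical.

import os
import msgpack

def read_cached_return_sketch(cachedir, jid):
    # same layout as the function above: one directory per job id,
    # with the serialized return stored in return.p
    path = os.path.join(cachedir, "minion_jobs", str(jid), "return.p")
    if not os.path.isfile(path):
        return None
    with open(path, "rb") as fp_:
        buf = fp_.read()
    if not buf:
        return None
    data = msgpack.unpackb(buf, raw=False)
    return data if isinstance(data, dict) else None

print(read_cached_return_sketch("/var/cache/salt/minion", "20190101000000000000"))
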
def _run_check(self):
"""Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
"""
cmd = shlex.split(self.config['check_cmd'])
self.log.info("running %s", ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
start_time = time.time()
try:
outs, errs = proc.communicate(timeout=self.config['check_timeout'])
except subprocess.TimeoutExpired:
self.log.error("check timed out")
if proc.poll() is None:
try:
proc.kill()
except PermissionError:
self.log.warning("failed to kill check due to adequate "
"access rights, check could be running "
"under another user(root) via sudo")
return False
else:
msg = "check duration {t:.3f}ms".format(
t=(time.time() - start_time) * 1000)
self.log.info(msg)
if proc.returncode != 0:
self.log.info("stderr from the check %s", errs)
self.log.info("stdout from the check %s", outs)
return proc.returncode == 0 | def function[_run_check, parameter[self]]:
constant[Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
]
variable[cmd] assign[=] call[name[shlex].split, parameter[call[name[self].config][constant[check_cmd]]]]
call[name[self].log.info, parameter[constant[running %s], call[constant[ ].join, parameter[name[cmd]]]]]
variable[proc] assign[=] call[name[subprocess].Popen, parameter[name[cmd]]]
variable[start_time] assign[=] call[name[time].time, parameter[]]
<ast.Try object at 0x7da1b0dc0bb0> | keyword[def] identifier[_run_check] ( identifier[self] ):
literal[string]
identifier[cmd] = identifier[shlex] . identifier[split] ( identifier[self] . identifier[config] [ literal[string] ])
identifier[self] . identifier[log] . identifier[info] ( literal[string] , literal[string] . identifier[join] ( identifier[cmd] ))
identifier[proc] = identifier[subprocess] . identifier[Popen] ( identifier[cmd] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
identifier[start_time] = identifier[time] . identifier[time] ()
keyword[try] :
identifier[outs] , identifier[errs] = identifier[proc] . identifier[communicate] ( identifier[timeout] = identifier[self] . identifier[config] [ literal[string] ])
keyword[except] identifier[subprocess] . identifier[TimeoutExpired] :
identifier[self] . identifier[log] . identifier[error] ( literal[string] )
keyword[if] identifier[proc] . identifier[poll] () keyword[is] keyword[None] :
keyword[try] :
identifier[proc] . identifier[kill] ()
keyword[except] identifier[PermissionError] :
identifier[self] . identifier[log] . identifier[warning] ( literal[string]
literal[string]
literal[string] )
keyword[return] keyword[False]
keyword[else] :
identifier[msg] = literal[string] . identifier[format] (
identifier[t] =( identifier[time] . identifier[time] ()- identifier[start_time] )* literal[int] )
identifier[self] . identifier[log] . identifier[info] ( identifier[msg] )
keyword[if] identifier[proc] . identifier[returncode] != literal[int] :
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[errs] )
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[outs] )
keyword[return] identifier[proc] . identifier[returncode] == literal[int] | def _run_check(self):
"""Execute a check command.
Returns:
True if the exit code of the command is 0 otherwise False.
"""
cmd = shlex.split(self.config['check_cmd'])
self.log.info('running %s', ' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
start_time = time.time()
try:
(outs, errs) = proc.communicate(timeout=self.config['check_timeout']) # depends on [control=['try'], data=[]]
except subprocess.TimeoutExpired:
self.log.error('check timed out')
if proc.poll() is None:
try:
proc.kill() # depends on [control=['try'], data=[]]
except PermissionError:
                    self.log.warning('failed to kill check due to inadequate access rights, check could be running under another user(root) via sudo') # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return False # depends on [control=['except'], data=[]]
else:
msg = 'check duration {t:.3f}ms'.format(t=(time.time() - start_time) * 1000)
self.log.info(msg)
if proc.returncode != 0:
self.log.info('stderr from the check %s', errs)
self.log.info('stdout from the check %s', outs) # depends on [control=['if'], data=[]]
return proc.returncode == 0 |
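
_run_check() above wraps a shell command with a timeout and treats exit code 0 as healthy. A standalone sketch of the same pattern, outside the class, is shown below; the command and timeout are placeholders for whatever check_cmd and check_timeout hold in the real config.

import shlex
import subprocess
import time

cmd = shlex.split("/bin/true")        # placeholder for config['check_cmd']
start = time.time()
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
    outs, errs = proc.communicate(timeout=5)   # placeholder for config['check_timeout']
    print("check duration {t:.3f}ms".format(t=(time.time() - start) * 1000))
    print("healthy" if proc.returncode == 0 else "unhealthy")
except subprocess.TimeoutExpired:
    proc.kill()
    print("check timed out")
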
def set_filter_raw(self, filter_raw):
"""Filter to be used when getting items from Ocean index"""
self.filter_raw = filter_raw
self.filter_raw_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_dict.append(fltr) | def function[set_filter_raw, parameter[self, filter_raw]]:
constant[Filter to be used when getting items from Ocean index]
name[self].filter_raw assign[=] name[filter_raw]
name[self].filter_raw_dict assign[=] list[[]]
variable[splitted] assign[=] call[call[name[re].compile, parameter[name[FILTER_SEPARATOR]]].split, parameter[name[filter_raw]]]
for taget[name[fltr_raw]] in starred[name[splitted]] begin[:]
variable[fltr] assign[=] call[name[self].__process_filter, parameter[name[fltr_raw]]]
call[name[self].filter_raw_dict.append, parameter[name[fltr]]] | keyword[def] identifier[set_filter_raw] ( identifier[self] , identifier[filter_raw] ):
literal[string]
identifier[self] . identifier[filter_raw] = identifier[filter_raw]
identifier[self] . identifier[filter_raw_dict] =[]
identifier[splitted] = identifier[re] . identifier[compile] ( identifier[FILTER_SEPARATOR] ). identifier[split] ( identifier[filter_raw] )
keyword[for] identifier[fltr_raw] keyword[in] identifier[splitted] :
identifier[fltr] = identifier[self] . identifier[__process_filter] ( identifier[fltr_raw] )
identifier[self] . identifier[filter_raw_dict] . identifier[append] ( identifier[fltr] ) | def set_filter_raw(self, filter_raw):
"""Filter to be used when getting items from Ocean index"""
self.filter_raw = filter_raw
self.filter_raw_dict = []
splitted = re.compile(FILTER_SEPARATOR).split(filter_raw)
for fltr_raw in splitted:
fltr = self.__process_filter(fltr_raw)
self.filter_raw_dict.append(fltr) # depends on [control=['for'], data=['fltr_raw']] |
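
Neither FILTER_SEPARATOR nor __process_filter() appears in this dump, so the sketch below fills both in with assumed behaviour (a comma separator and name:value pairs) purely to illustrate how set_filter_raw() turns a raw filter string into filter_raw_dict.

import re

FILTER_SEPARATOR = ","                      # assumed separator

def process_filter_sketch(fltr_raw):
    # assumed shape of the per-filter dict
    name, value = fltr_raw.split(":", 1)
    return {"name": name.strip(), "value": value.strip()}

filter_raw = "data.product:Firefox, data.component:General"
filter_raw_dict = [process_filter_sketch(f)
                   for f in re.compile(FILTER_SEPARATOR).split(filter_raw)]
print(filter_raw_dict)
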
def sections(self):
'''
List of section titles from the table of contents on the page.
'''
if not getattr(self, '_sections', False):
query_params = {
'action': 'parse',
'prop': 'sections',
}
query_params.update(self.__title_query_param)
request = _wiki_request(query_params)
self._sections = [section['line'] for section in request['parse']['sections']]
return self._sections | def function[sections, parameter[self]]:
constant[
List of section titles from the table of contents on the page.
]
if <ast.UnaryOp object at 0x7da204564430> begin[:]
variable[query_params] assign[=] dictionary[[<ast.Constant object at 0x7da2045656f0>, <ast.Constant object at 0x7da204564880>], [<ast.Constant object at 0x7da2045653c0>, <ast.Constant object at 0x7da2045656c0>]]
call[name[query_params].update, parameter[name[self].__title_query_param]]
variable[request] assign[=] call[name[_wiki_request], parameter[name[query_params]]]
name[self]._sections assign[=] <ast.ListComp object at 0x7da20c6a8f70>
return[name[self]._sections] | keyword[def] identifier[sections] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[getattr] ( identifier[self] , literal[string] , keyword[False] ):
identifier[query_params] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[query_params] . identifier[update] ( identifier[self] . identifier[__title_query_param] )
identifier[request] = identifier[_wiki_request] ( identifier[query_params] )
identifier[self] . identifier[_sections] =[ identifier[section] [ literal[string] ] keyword[for] identifier[section] keyword[in] identifier[request] [ literal[string] ][ literal[string] ]]
keyword[return] identifier[self] . identifier[_sections] | def sections(self):
"""
List of section titles from the table of contents on the page.
"""
if not getattr(self, '_sections', False):
query_params = {'action': 'parse', 'prop': 'sections'}
query_params.update(self.__title_query_param)
request = _wiki_request(query_params)
self._sections = [section['line'] for section in request['parse']['sections']] # depends on [control=['if'], data=[]]
return self._sections |
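
A short usage sketch for the sections property above, assuming this row comes from the wikipedia PyPI package (the dump does not say so explicitly); it needs network access to run.

import wikipedia

page = wikipedia.page("Python (programming language)")
print(page.sections)    # e.g. ['History', 'Design philosophy and features', ...]
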
def create_cluster(
self,
parent,
cluster_id,
cluster,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a cluster within an instance.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
>>>
>>> parent = client.instance_path('[PROJECT]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(parent, cluster_id, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): The unique name of the instance in which to create the new cluster.
Values are of the form ``projects/<project>/instances/<instance>``.
cluster_id (str): The ID to be used when referring to the new cluster within its instance,
e.g., just ``mycluster`` rather than
``projects/myproject/instances/myinstance/clusters/mycluster``.
cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. Fields marked ``OutputOnly`` must be left
blank.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.Cluster`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"create_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs["CreateCluster"].retry,
default_timeout=self._method_configs["CreateCluster"].timeout,
client_info=self._client_info,
)
request = bigtable_instance_admin_pb2.CreateClusterRequest(
parent=parent, cluster_id=cluster_id, cluster=cluster
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["create_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
instance_pb2.Cluster,
metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata,
) | def function[create_cluster, parameter[self, parent, cluster_id, cluster, retry, timeout, metadata]]:
constant[
Creates a cluster within an instance.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
>>>
>>> parent = client.instance_path('[PROJECT]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(parent, cluster_id, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): The unique name of the instance in which to create the new cluster.
Values are of the form ``projects/<project>/instances/<instance>``.
cluster_id (str): The ID to be used when referring to the new cluster within its instance,
e.g., just ``mycluster`` rather than
``projects/myproject/instances/myinstance/clusters/mycluster``.
cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. Fields marked ``OutputOnly`` must be left
blank.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.Cluster`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[create_cluster] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[create_cluster]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.create_cluster]]
variable[request] assign[=] call[name[bigtable_instance_admin_pb2].CreateClusterRequest, parameter[]]
if compare[name[metadata] is constant[None]] begin[:]
variable[metadata] assign[=] list[[]]
variable[metadata] assign[=] call[name[list], parameter[name[metadata]]]
<ast.Try object at 0x7da20e954ee0>
variable[operation] assign[=] call[call[name[self]._inner_api_calls][constant[create_cluster]], parameter[name[request]]]
return[call[name[google].api_core.operation.from_gapic, parameter[name[operation], name[self].transport._operations_client, name[instance_pb2].Cluster]]] | keyword[def] identifier[create_cluster] (
identifier[self] ,
identifier[parent] ,
identifier[cluster_id] ,
identifier[cluster] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[create_cluster] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[bigtable_instance_admin_pb2] . identifier[CreateClusterRequest] (
identifier[parent] = identifier[parent] , identifier[cluster_id] = identifier[cluster_id] , identifier[cluster] = identifier[cluster]
)
keyword[if] identifier[metadata] keyword[is] keyword[None] :
identifier[metadata] =[]
identifier[metadata] = identifier[list] ( identifier[metadata] )
keyword[try] :
identifier[routing_header] =[( literal[string] , identifier[parent] )]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
identifier[routing_metadata] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[routing_header] . identifier[to_grpc_metadata] (
identifier[routing_header]
)
identifier[metadata] . identifier[append] ( identifier[routing_metadata] )
identifier[operation] = identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
)
keyword[return] identifier[google] . identifier[api_core] . identifier[operation] . identifier[from_gapic] (
identifier[operation] ,
identifier[self] . identifier[transport] . identifier[_operations_client] ,
identifier[instance_pb2] . identifier[Cluster] ,
identifier[metadata_type] = identifier[bigtable_instance_admin_pb2] . identifier[CreateClusterMetadata] ,
) | def create_cluster(self, parent, cluster_id, cluster, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Creates a cluster within an instance.
Example:
>>> from google.cloud import bigtable_admin_v2
>>>
>>> client = bigtable_admin_v2.BigtableInstanceAdminClient()
>>>
>>> parent = client.instance_path('[PROJECT]', '[INSTANCE]')
>>>
>>> # TODO: Initialize `cluster_id`:
>>> cluster_id = ''
>>>
>>> # TODO: Initialize `cluster`:
>>> cluster = {}
>>>
>>> response = client.create_cluster(parent, cluster_id, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): The unique name of the instance in which to create the new cluster.
Values are of the form ``projects/<project>/instances/<instance>``.
cluster_id (str): The ID to be used when referring to the new cluster within its instance,
e.g., just ``mycluster`` rather than
``projects/myproject/instances/myinstance/clusters/mycluster``.
cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. Fields marked ``OutputOnly`` must be left
blank.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_admin_v2.types.Cluster`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_cluster' not in self._inner_api_calls:
self._inner_api_calls['create_cluster'] = google.api_core.gapic_v1.method.wrap_method(self.transport.create_cluster, default_retry=self._method_configs['CreateCluster'].retry, default_timeout=self._method_configs['CreateCluster'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = bigtable_instance_admin_pb2.CreateClusterRequest(parent=parent, cluster_id=cluster_id, cluster=cluster)
if metadata is None:
metadata = [] # depends on [control=['if'], data=['metadata']]
metadata = list(metadata)
try:
routing_header = [('parent', parent)] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
operation = self._inner_api_calls['create_cluster'](request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(operation, self.transport._operations_client, instance_pb2.Cluster, metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata) |
def get_metric_group_definitions(self):
"""
Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
          iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
metric group definitions, in the order they had been added.
"""
group_names = self.properties.get('metric-groups', None)
if not group_names:
group_names = self.manager.get_metric_group_definition_names()
mg_defs = []
for group_name in group_names:
try:
mg_def = self.manager.get_metric_group_definition(group_name)
mg_defs.append(mg_def)
except ValueError:
pass # ignore metric groups without metric group defs
return mg_defs | def function[get_metric_group_definitions, parameter[self]]:
constant[
Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
        iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
metric group definitions, in the order they had been added.
]
variable[group_names] assign[=] call[name[self].properties.get, parameter[constant[metric-groups], constant[None]]]
if <ast.UnaryOp object at 0x7da18dc99900> begin[:]
variable[group_names] assign[=] call[name[self].manager.get_metric_group_definition_names, parameter[]]
variable[mg_defs] assign[=] list[[]]
for taget[name[group_name]] in starred[name[group_names]] begin[:]
<ast.Try object at 0x7da18bc73bb0>
return[name[mg_defs]] | keyword[def] identifier[get_metric_group_definitions] ( identifier[self] ):
literal[string]
identifier[group_names] = identifier[self] . identifier[properties] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[group_names] :
identifier[group_names] = identifier[self] . identifier[manager] . identifier[get_metric_group_definition_names] ()
identifier[mg_defs] =[]
keyword[for] identifier[group_name] keyword[in] identifier[group_names] :
keyword[try] :
identifier[mg_def] = identifier[self] . identifier[manager] . identifier[get_metric_group_definition] ( identifier[group_name] )
identifier[mg_defs] . identifier[append] ( identifier[mg_def] )
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[return] identifier[mg_defs] | def get_metric_group_definitions(self):
"""
Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
        iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
metric group definitions, in the order they had been added.
"""
group_names = self.properties.get('metric-groups', None)
if not group_names:
group_names = self.manager.get_metric_group_definition_names() # depends on [control=['if'], data=[]]
mg_defs = []
for group_name in group_names:
try:
mg_def = self.manager.get_metric_group_definition(group_name)
mg_defs.append(mg_def) # depends on [control=['try'], data=[]]
except ValueError:
pass # ignore metric groups without metric group defs # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['group_name']]
return mg_defs |
def _process_all(self, limit):
"""
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
        it is deprecated and 'consider' annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
"""
omimids = self._get_omim_ids()
LOG.info('Have %i omim numbers to fetch records from their API', len(omimids))
LOG.info('Have %i omim types ', len(self.omim_type))
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
tax_label = 'Homo sapiens'
tax_id = self.globaltt[tax_label]
# add genome and taxon
geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere
model.addClassToGraph(tax_id, None) # label added elsewhere
includes = set()
includes.add('all')
self.process_entries(
omimids, self._transform_entry, includes, graph, limit, self.globaltt) | def function[_process_all, parameter[self, limit]]:
constant[
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
it is deprecated and consider annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
]
variable[omimids] assign[=] call[name[self]._get_omim_ids, parameter[]]
call[name[LOG].info, parameter[constant[Have %i omim numbers to fetch records from their API], call[name[len], parameter[name[omimids]]]]]
call[name[LOG].info, parameter[constant[Have %i omim types ], call[name[len], parameter[name[self].omim_type]]]]
if name[self].test_mode begin[:]
variable[graph] assign[=] name[self].testgraph
variable[geno] assign[=] call[name[Genotype], parameter[name[graph]]]
variable[model] assign[=] call[name[Model], parameter[name[graph]]]
variable[tax_label] assign[=] constant[Homo sapiens]
variable[tax_id] assign[=] call[name[self].globaltt][name[tax_label]]
call[name[geno].addGenome, parameter[name[tax_id], name[tax_label]]]
call[name[model].addClassToGraph, parameter[name[tax_id], constant[None]]]
variable[includes] assign[=] call[name[set], parameter[]]
call[name[includes].add, parameter[constant[all]]]
call[name[self].process_entries, parameter[name[omimids], name[self]._transform_entry, name[includes], name[graph], name[limit], name[self].globaltt]] | keyword[def] identifier[_process_all] ( identifier[self] , identifier[limit] ):
literal[string]
identifier[omimids] = identifier[self] . identifier[_get_omim_ids] ()
identifier[LOG] . identifier[info] ( literal[string] , identifier[len] ( identifier[omimids] ))
identifier[LOG] . identifier[info] ( literal[string] , identifier[len] ( identifier[self] . identifier[omim_type] ))
keyword[if] identifier[self] . identifier[test_mode] :
identifier[graph] = identifier[self] . identifier[testgraph]
keyword[else] :
identifier[graph] = identifier[self] . identifier[graph]
identifier[geno] = identifier[Genotype] ( identifier[graph] )
identifier[model] = identifier[Model] ( identifier[graph] )
identifier[tax_label] = literal[string]
identifier[tax_id] = identifier[self] . identifier[globaltt] [ identifier[tax_label] ]
identifier[geno] . identifier[addGenome] ( identifier[tax_id] , identifier[tax_label] )
identifier[model] . identifier[addClassToGraph] ( identifier[tax_id] , keyword[None] )
identifier[includes] = identifier[set] ()
identifier[includes] . identifier[add] ( literal[string] )
identifier[self] . identifier[process_entries] (
identifier[omimids] , identifier[self] . identifier[_transform_entry] , identifier[includes] , identifier[graph] , identifier[limit] , identifier[self] . identifier[globaltt] ) | def _process_all(self, limit):
"""
This takes the list of omim identifiers from the omim.txt.Z file,
and iteratively queries the omim api for the json-formatted data.
This will create OMIM classes, with the label,
definition, and some synonyms.
If an entry is "removed",
it is added as a deprecated class.
If an entry is "moved",
it is deprecated and consider annotations are added.
Additionally, we extract:
*phenotypicSeries ids as superclasses
*equivalent ids for Orphanet and UMLS
If set to testMode,
it will write only those items in the test_ids to the testgraph.
:param limit:
:return:
"""
omimids = self._get_omim_ids()
LOG.info('Have %i omim numbers to fetch records from their API', len(omimids))
LOG.info('Have %i omim types ', len(self.omim_type))
if self.test_mode:
graph = self.testgraph # depends on [control=['if'], data=[]]
else:
graph = self.graph
geno = Genotype(graph)
model = Model(graph)
tax_label = 'Homo sapiens'
tax_id = self.globaltt[tax_label]
# add genome and taxon
geno.addGenome(tax_id, tax_label) # tax label can get added elsewhere
model.addClassToGraph(tax_id, None) # label added elsewhere
includes = set()
includes.add('all')
self.process_entries(omimids, self._transform_entry, includes, graph, limit, self.globaltt) |
def run_snr(self):
"""Run the snr calculation.
        Takes the results from ``self.set_parameters`` and other inputs and feeds
        them into the snr calculator.
"""
if self.ecc:
required_kwargs = {'dist_type': self.dist_type,
'initial_cond_type': self.initial_cond_type,
'ecc': True}
input_args = [self.m1, self.m2, self.z_or_dist, self.initial_point,
self.eccentricity, self.observation_time]
else:
required_kwargs = {'dist_type': self.dist_type}
input_args = [self.m1, self.m2, self.spin_1, self.spin_2,
self.z_or_dist, self.start_time, self.end_time]
input_kwargs = {**required_kwargs,
**self.general,
**self.sensitivity_input,
**self.snr_input,
**self.parallel_input}
self.final_dict = snr(*input_args, **input_kwargs)
return | def function[run_snr, parameter[self]]:
constant[Run the snr calculation.
Takes results from ``self.set_parameters`` and other inputs and inputs these
into the snr calculator.
]
if name[self].ecc begin[:]
variable[required_kwargs] assign[=] dictionary[[<ast.Constant object at 0x7da2041da500>, <ast.Constant object at 0x7da2041dbc70>, <ast.Constant object at 0x7da2041d8400>], [<ast.Attribute object at 0x7da2041daad0>, <ast.Attribute object at 0x7da2041dbdf0>, <ast.Constant object at 0x7da18dc9a860>]]
variable[input_args] assign[=] list[[<ast.Attribute object at 0x7da18dc996c0>, <ast.Attribute object at 0x7da18dc9ac50>, <ast.Attribute object at 0x7da18dc9b940>, <ast.Attribute object at 0x7da18dc987f0>, <ast.Attribute object at 0x7da18dc98af0>, <ast.Attribute object at 0x7da18dc9a410>]]
variable[input_kwargs] assign[=] dictionary[[None, None, None, None, None], [<ast.Name object at 0x7da1b0a6dd20>, <ast.Attribute object at 0x7da1b0a6c670>, <ast.Attribute object at 0x7da1b0a6c100>, <ast.Attribute object at 0x7da1b0a6c340>, <ast.Attribute object at 0x7da1b0a6c640>]]
name[self].final_dict assign[=] call[name[snr], parameter[<ast.Starred object at 0x7da1b0a6d060>]]
return[None] | keyword[def] identifier[run_snr] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[ecc] :
identifier[required_kwargs] ={ literal[string] : identifier[self] . identifier[dist_type] ,
literal[string] : identifier[self] . identifier[initial_cond_type] ,
literal[string] : keyword[True] }
identifier[input_args] =[ identifier[self] . identifier[m1] , identifier[self] . identifier[m2] , identifier[self] . identifier[z_or_dist] , identifier[self] . identifier[initial_point] ,
identifier[self] . identifier[eccentricity] , identifier[self] . identifier[observation_time] ]
keyword[else] :
identifier[required_kwargs] ={ literal[string] : identifier[self] . identifier[dist_type] }
identifier[input_args] =[ identifier[self] . identifier[m1] , identifier[self] . identifier[m2] , identifier[self] . identifier[spin_1] , identifier[self] . identifier[spin_2] ,
identifier[self] . identifier[z_or_dist] , identifier[self] . identifier[start_time] , identifier[self] . identifier[end_time] ]
identifier[input_kwargs] ={** identifier[required_kwargs] ,
** identifier[self] . identifier[general] ,
** identifier[self] . identifier[sensitivity_input] ,
** identifier[self] . identifier[snr_input] ,
** identifier[self] . identifier[parallel_input] }
identifier[self] . identifier[final_dict] = identifier[snr] (* identifier[input_args] ,** identifier[input_kwargs] )
keyword[return] | def run_snr(self):
"""Run the snr calculation.
Takes results from ``self.set_parameters`` and other inputs and inputs these
into the snr calculator.
"""
if self.ecc:
required_kwargs = {'dist_type': self.dist_type, 'initial_cond_type': self.initial_cond_type, 'ecc': True}
input_args = [self.m1, self.m2, self.z_or_dist, self.initial_point, self.eccentricity, self.observation_time] # depends on [control=['if'], data=[]]
else:
required_kwargs = {'dist_type': self.dist_type}
input_args = [self.m1, self.m2, self.spin_1, self.spin_2, self.z_or_dist, self.start_time, self.end_time]
input_kwargs = {**required_kwargs, **self.general, **self.sensitivity_input, **self.snr_input, **self.parallel_input}
self.final_dict = snr(*input_args, **input_kwargs)
return |
def bell_set(self, collection, ordinal=False):
"""
        Generates the set partitions of ``collection`` (Bell-number many)
"""
if len(collection) == 1:
yield [ collection ]
return
first = collection[0]
for smaller in self.bell_set(collection[1:]):
for n, subset in enumerate(smaller):
if not ordinal or (ordinal and is_sorted(smaller[:n] + [[ first ] + subset] + smaller[n+1:], self._nan)):
yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]
if not ordinal or (ordinal and is_sorted([ [ first ] ] + smaller, self._nan)):
yield [ [ first ] ] + smaller | def function[bell_set, parameter[self, collection, ordinal]]:
constant[
Calculates the Bell set
]
if compare[call[name[len], parameter[name[collection]]] equal[==] constant[1]] begin[:]
<ast.Yield object at 0x7da1b064ead0>
return[None]
variable[first] assign[=] call[name[collection]][constant[0]]
for taget[name[smaller]] in starred[call[name[self].bell_set, parameter[call[name[collection]][<ast.Slice object at 0x7da1b064c5e0>]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b064c070>, <ast.Name object at 0x7da1b064ca60>]]] in starred[call[name[enumerate], parameter[name[smaller]]]] begin[:]
if <ast.BoolOp object at 0x7da1b064da20> begin[:]
<ast.Yield object at 0x7da1b064e080>
if <ast.BoolOp object at 0x7da1b064cd00> begin[:]
<ast.Yield object at 0x7da1b064c760> | keyword[def] identifier[bell_set] ( identifier[self] , identifier[collection] , identifier[ordinal] = keyword[False] ):
literal[string]
keyword[if] identifier[len] ( identifier[collection] )== literal[int] :
keyword[yield] [ identifier[collection] ]
keyword[return]
identifier[first] = identifier[collection] [ literal[int] ]
keyword[for] identifier[smaller] keyword[in] identifier[self] . identifier[bell_set] ( identifier[collection] [ literal[int] :]):
keyword[for] identifier[n] , identifier[subset] keyword[in] identifier[enumerate] ( identifier[smaller] ):
keyword[if] keyword[not] identifier[ordinal] keyword[or] ( identifier[ordinal] keyword[and] identifier[is_sorted] ( identifier[smaller] [: identifier[n] ]+[[ identifier[first] ]+ identifier[subset] ]+ identifier[smaller] [ identifier[n] + literal[int] :], identifier[self] . identifier[_nan] )):
keyword[yield] identifier[smaller] [: identifier[n] ]+[[ identifier[first] ]+ identifier[subset] ]+ identifier[smaller] [ identifier[n] + literal[int] :]
keyword[if] keyword[not] identifier[ordinal] keyword[or] ( identifier[ordinal] keyword[and] identifier[is_sorted] ([[ identifier[first] ]]+ identifier[smaller] , identifier[self] . identifier[_nan] )):
keyword[yield] [[ identifier[first] ]]+ identifier[smaller] | def bell_set(self, collection, ordinal=False):
"""
Calculates the Bell set
"""
if len(collection) == 1:
yield [collection]
return # depends on [control=['if'], data=[]]
first = collection[0]
for smaller in self.bell_set(collection[1:]):
for (n, subset) in enumerate(smaller):
if not ordinal or (ordinal and is_sorted(smaller[:n] + [[first] + subset] + smaller[n + 1:], self._nan)):
yield (smaller[:n] + [[first] + subset] + smaller[n + 1:]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not ordinal or (ordinal and is_sorted([[first]] + smaller, self._nan)):
yield ([[first]] + smaller) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['smaller']] |
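To make the recursion above concrete, here is a standalone sketch of the non-ordinal case (the ordinal/is_sorted/self._nan machinery is dropped); for three elements it yields the Bell number B(3) = 5 partitions.

def bell_partitions(collection):
    # Yield every set partition of `collection` (non-ordinal sketch).
    if len(collection) == 1:
        yield [collection]
        return
    first = collection[0]
    for smaller in bell_partitions(collection[1:]):
        # Insert `first` into each existing block in turn...
        for n, subset in enumerate(smaller):
            yield smaller[:n] + [[first] + subset] + smaller[n + 1:]
        # ...or give it a block of its own.
        yield [[first]] + smaller

print(sum(1 for _ in bell_partitions([1, 2, 3])))   # 5 == Bell(3)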
def compare_3PC_keys(key1, key2) -> int:
"""
Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
"""
if key1[0] == key2[0]:
return key2[1] - key1[1]
else:
return key2[0] - key1[0] | def function[compare_3PC_keys, parameter[key1, key2]]:
constant[
Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
]
if compare[call[name[key1]][constant[0]] equal[==] call[name[key2]][constant[0]]] begin[:]
return[binary_operation[call[name[key2]][constant[1]] - call[name[key1]][constant[1]]]] | keyword[def] identifier[compare_3PC_keys] ( identifier[key1] , identifier[key2] )-> identifier[int] :
literal[string]
keyword[if] identifier[key1] [ literal[int] ]== identifier[key2] [ literal[int] ]:
keyword[return] identifier[key2] [ literal[int] ]- identifier[key1] [ literal[int] ]
keyword[else] :
keyword[return] identifier[key2] [ literal[int] ]- identifier[key1] [ literal[int] ] | def compare_3PC_keys(key1, key2) -> int:
"""
Return >0 if key2 is greater than key1, <0 if lesser, 0 otherwise
"""
if key1[0] == key2[0]:
return key2[1] - key1[1] # depends on [control=['if'], data=[]]
else:
return key2[0] - key1[0] |
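A few checks of the comparator above, assuming the keys are the usual (view_no, pp_seq_no) pairs (that reading of the tuple is an assumption here):

assert compare_3PC_keys((0, 5), (0, 9)) > 0     # same view: (0, 9) is ahead of (0, 5)
assert compare_3PC_keys((2, 10), (1, 50)) < 0   # view number dominates: (1, 50) is earlier
assert compare_3PC_keys((1, 7), (1, 7)) == 0    # identical keys compare equal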
def _ParseFilterOptions(self, options):
"""Parses the filter options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._event_filter_expression = self.ParseStringOption(options, 'filter')
if self._event_filter_expression:
self._event_filter = event_filter.EventObjectFilter()
try:
self._event_filter.CompileFilter(self._event_filter_expression)
except errors.ParseError as exception:
raise errors.BadConfigOption((
'Unable to compile filter expression with error: '
'{0!s}').format(exception))
time_slice_event_time_string = getattr(options, 'slice', None)
time_slice_duration = getattr(options, 'slice_size', 5)
self._use_time_slicer = getattr(options, 'slicer', False)
# The slice and slicer cannot be set at the same time.
if time_slice_event_time_string and self._use_time_slicer:
raise errors.BadConfigOption(
'Time slice and slicer cannot be used at the same time.')
time_slice_event_timestamp = None
if time_slice_event_time_string:
# Note self._preferred_time_zone is None when not set but represents UTC.
preferred_time_zone = self._preferred_time_zone or 'UTC'
timezone = pytz.timezone(preferred_time_zone)
time_slice_event_timestamp = timelib.Timestamp.FromTimeString(
time_slice_event_time_string, timezone=timezone)
if time_slice_event_timestamp is None:
raise errors.BadConfigOption(
'Unsupported time slice event date and time: {0:s}'.format(
time_slice_event_time_string))
if time_slice_event_timestamp is not None or self._use_time_slicer:
# Note that time slicer uses the time slice to determine the duration.
self._time_slice = time_slices.TimeSlice(
time_slice_event_timestamp, duration=time_slice_duration) | def function[_ParseFilterOptions, parameter[self, options]]:
constant[Parses the filter options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
]
name[self]._event_filter_expression assign[=] call[name[self].ParseStringOption, parameter[name[options], constant[filter]]]
if name[self]._event_filter_expression begin[:]
name[self]._event_filter assign[=] call[name[event_filter].EventObjectFilter, parameter[]]
<ast.Try object at 0x7da20e9572b0>
variable[time_slice_event_time_string] assign[=] call[name[getattr], parameter[name[options], constant[slice], constant[None]]]
variable[time_slice_duration] assign[=] call[name[getattr], parameter[name[options], constant[slice_size], constant[5]]]
name[self]._use_time_slicer assign[=] call[name[getattr], parameter[name[options], constant[slicer], constant[False]]]
if <ast.BoolOp object at 0x7da207f9bdf0> begin[:]
<ast.Raise object at 0x7da18c4cce50>
variable[time_slice_event_timestamp] assign[=] constant[None]
if name[time_slice_event_time_string] begin[:]
variable[preferred_time_zone] assign[=] <ast.BoolOp object at 0x7da18c4cd1b0>
variable[timezone] assign[=] call[name[pytz].timezone, parameter[name[preferred_time_zone]]]
variable[time_slice_event_timestamp] assign[=] call[name[timelib].Timestamp.FromTimeString, parameter[name[time_slice_event_time_string]]]
if compare[name[time_slice_event_timestamp] is constant[None]] begin[:]
<ast.Raise object at 0x7da18c4cc520>
if <ast.BoolOp object at 0x7da18c4ce5f0> begin[:]
name[self]._time_slice assign[=] call[name[time_slices].TimeSlice, parameter[name[time_slice_event_timestamp]]] | keyword[def] identifier[_ParseFilterOptions] ( identifier[self] , identifier[options] ):
literal[string]
identifier[self] . identifier[_event_filter_expression] = identifier[self] . identifier[ParseStringOption] ( identifier[options] , literal[string] )
keyword[if] identifier[self] . identifier[_event_filter_expression] :
identifier[self] . identifier[_event_filter] = identifier[event_filter] . identifier[EventObjectFilter] ()
keyword[try] :
identifier[self] . identifier[_event_filter] . identifier[CompileFilter] ( identifier[self] . identifier[_event_filter_expression] )
keyword[except] identifier[errors] . identifier[ParseError] keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[BadConfigOption] ((
literal[string]
literal[string] ). identifier[format] ( identifier[exception] ))
identifier[time_slice_event_time_string] = identifier[getattr] ( identifier[options] , literal[string] , keyword[None] )
identifier[time_slice_duration] = identifier[getattr] ( identifier[options] , literal[string] , literal[int] )
identifier[self] . identifier[_use_time_slicer] = identifier[getattr] ( identifier[options] , literal[string] , keyword[False] )
keyword[if] identifier[time_slice_event_time_string] keyword[and] identifier[self] . identifier[_use_time_slicer] :
keyword[raise] identifier[errors] . identifier[BadConfigOption] (
literal[string] )
identifier[time_slice_event_timestamp] = keyword[None]
keyword[if] identifier[time_slice_event_time_string] :
identifier[preferred_time_zone] = identifier[self] . identifier[_preferred_time_zone] keyword[or] literal[string]
identifier[timezone] = identifier[pytz] . identifier[timezone] ( identifier[preferred_time_zone] )
identifier[time_slice_event_timestamp] = identifier[timelib] . identifier[Timestamp] . identifier[FromTimeString] (
identifier[time_slice_event_time_string] , identifier[timezone] = identifier[timezone] )
keyword[if] identifier[time_slice_event_timestamp] keyword[is] keyword[None] :
keyword[raise] identifier[errors] . identifier[BadConfigOption] (
literal[string] . identifier[format] (
identifier[time_slice_event_time_string] ))
keyword[if] identifier[time_slice_event_timestamp] keyword[is] keyword[not] keyword[None] keyword[or] identifier[self] . identifier[_use_time_slicer] :
identifier[self] . identifier[_time_slice] = identifier[time_slices] . identifier[TimeSlice] (
identifier[time_slice_event_timestamp] , identifier[duration] = identifier[time_slice_duration] ) | def _ParseFilterOptions(self, options):
"""Parses the filter options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._event_filter_expression = self.ParseStringOption(options, 'filter')
if self._event_filter_expression:
self._event_filter = event_filter.EventObjectFilter()
try:
self._event_filter.CompileFilter(self._event_filter_expression) # depends on [control=['try'], data=[]]
except errors.ParseError as exception:
raise errors.BadConfigOption('Unable to compile filter expression with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']] # depends on [control=['if'], data=[]]
time_slice_event_time_string = getattr(options, 'slice', None)
time_slice_duration = getattr(options, 'slice_size', 5)
self._use_time_slicer = getattr(options, 'slicer', False)
# The slice and slicer cannot be set at the same time.
if time_slice_event_time_string and self._use_time_slicer:
raise errors.BadConfigOption('Time slice and slicer cannot be used at the same time.') # depends on [control=['if'], data=[]]
time_slice_event_timestamp = None
if time_slice_event_time_string:
# Note self._preferred_time_zone is None when not set but represents UTC.
preferred_time_zone = self._preferred_time_zone or 'UTC'
timezone = pytz.timezone(preferred_time_zone)
time_slice_event_timestamp = timelib.Timestamp.FromTimeString(time_slice_event_time_string, timezone=timezone)
if time_slice_event_timestamp is None:
raise errors.BadConfigOption('Unsupported time slice event date and time: {0:s}'.format(time_slice_event_time_string)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if time_slice_event_timestamp is not None or self._use_time_slicer:
# Note that time slicer uses the time slice to determine the duration.
self._time_slice = time_slices.TimeSlice(time_slice_event_timestamp, duration=time_slice_duration) # depends on [control=['if'], data=[]] |
def ximshow_rectified(self, slitlet2d_rect):
"""Display rectified image with spectrails and frontiers.
Parameters
----------
slitlet2d_rect : numpy array
Array containing the rectified slitlet image
"""
title = "Slitlet#" + str(self.islitlet) + " (rectify)"
ax = ximshow(slitlet2d_rect, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
# grid with fitted transformation: spectrum trails
xx = np.arange(0, self.bb_nc2_orig - self.bb_nc1_orig + 1,
dtype=np.float)
for spectrail in self.list_spectrails:
yy0 = self.corr_yrect_a + \
self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b")
for spectrail in self.list_frontiers:
yy0 = self.corr_yrect_a +\
self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b:")
# show plot
pause_debugplot(self.debugplot, pltshow=True) | def function[ximshow_rectified, parameter[self, slitlet2d_rect]]:
constant[Display rectified image with spectrails and frontiers.
Parameters
----------
slitlet2d_rect : numpy array
Array containing the rectified slitlet image
]
variable[title] assign[=] binary_operation[binary_operation[constant[Slitlet#] + call[name[str], parameter[name[self].islitlet]]] + constant[ (rectify)]]
variable[ax] assign[=] call[name[ximshow], parameter[name[slitlet2d_rect]]]
variable[xx] assign[=] call[name[np].arange, parameter[constant[0], binary_operation[binary_operation[name[self].bb_nc2_orig - name[self].bb_nc1_orig] + constant[1]]]]
for taget[name[spectrail]] in starred[name[self].list_spectrails] begin[:]
variable[yy0] assign[=] binary_operation[name[self].corr_yrect_a + binary_operation[name[self].corr_yrect_b * call[name[spectrail], parameter[name[self].x0_reference]]]]
variable[yy] assign[=] call[name[np].tile, parameter[list[[<ast.BinOp object at 0x7da18f7237c0>]], name[xx].size]]
call[name[ax].plot, parameter[binary_operation[name[xx] + name[self].bb_nc1_orig], binary_operation[name[yy] + name[self].bb_ns1_orig], constant[b]]]
for taget[name[spectrail]] in starred[name[self].list_frontiers] begin[:]
variable[yy0] assign[=] binary_operation[name[self].corr_yrect_a + binary_operation[name[self].corr_yrect_b * call[name[spectrail], parameter[name[self].x0_reference]]]]
variable[yy] assign[=] call[name[np].tile, parameter[list[[<ast.BinOp object at 0x7da18f720700>]], name[xx].size]]
call[name[ax].plot, parameter[binary_operation[name[xx] + name[self].bb_nc1_orig], binary_operation[name[yy] + name[self].bb_ns1_orig], constant[b:]]]
call[name[pause_debugplot], parameter[name[self].debugplot]] | keyword[def] identifier[ximshow_rectified] ( identifier[self] , identifier[slitlet2d_rect] ):
literal[string]
identifier[title] = literal[string] + identifier[str] ( identifier[self] . identifier[islitlet] )+ literal[string]
identifier[ax] = identifier[ximshow] ( identifier[slitlet2d_rect] , identifier[title] = identifier[title] ,
identifier[first_pixel] =( identifier[self] . identifier[bb_nc1_orig] , identifier[self] . identifier[bb_ns1_orig] ),
identifier[show] = keyword[False] )
identifier[xx] = identifier[np] . identifier[arange] ( literal[int] , identifier[self] . identifier[bb_nc2_orig] - identifier[self] . identifier[bb_nc1_orig] + literal[int] ,
identifier[dtype] = identifier[np] . identifier[float] )
keyword[for] identifier[spectrail] keyword[in] identifier[self] . identifier[list_spectrails] :
identifier[yy0] = identifier[self] . identifier[corr_yrect_a] + identifier[self] . identifier[corr_yrect_b] * identifier[spectrail] ( identifier[self] . identifier[x0_reference] )
identifier[yy] = identifier[np] . identifier[tile] ([ identifier[yy0] - identifier[self] . identifier[bb_ns1_orig] ], identifier[xx] . identifier[size] )
identifier[ax] . identifier[plot] ( identifier[xx] + identifier[self] . identifier[bb_nc1_orig] , identifier[yy] + identifier[self] . identifier[bb_ns1_orig] , literal[string] )
keyword[for] identifier[spectrail] keyword[in] identifier[self] . identifier[list_frontiers] :
identifier[yy0] = identifier[self] . identifier[corr_yrect_a] + identifier[self] . identifier[corr_yrect_b] * identifier[spectrail] ( identifier[self] . identifier[x0_reference] )
identifier[yy] = identifier[np] . identifier[tile] ([ identifier[yy0] - identifier[self] . identifier[bb_ns1_orig] ], identifier[xx] . identifier[size] )
identifier[ax] . identifier[plot] ( identifier[xx] + identifier[self] . identifier[bb_nc1_orig] , identifier[yy] + identifier[self] . identifier[bb_ns1_orig] , literal[string] )
identifier[pause_debugplot] ( identifier[self] . identifier[debugplot] , identifier[pltshow] = keyword[True] ) | def ximshow_rectified(self, slitlet2d_rect):
"""Display rectified image with spectrails and frontiers.
Parameters
----------
slitlet2d_rect : numpy array
Array containing the rectified slitlet image
"""
title = 'Slitlet#' + str(self.islitlet) + ' (rectify)'
ax = ximshow(slitlet2d_rect, title=title, first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig), show=False)
# grid with fitted transformation: spectrum trails
xx = np.arange(0, self.bb_nc2_orig - self.bb_nc1_orig + 1, dtype=np.float)
for spectrail in self.list_spectrails:
yy0 = self.corr_yrect_a + self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, 'b') # depends on [control=['for'], data=['spectrail']]
for spectrail in self.list_frontiers:
yy0 = self.corr_yrect_a + self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, 'b:') # depends on [control=['for'], data=['spectrail']]
# show plot
pause_debugplot(self.debugplot, pltshow=True) |
def create_ecs_service_role(provider, context, **kwargs):
"""Used to create the ecsServieRole, which has to be named exactly that
currently, so cannot be created via CloudFormation. See:
http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
"""
role_name = kwargs.get("role_name", "ecsServiceRole")
client = get_session(provider.region).client('iam')
try:
client.create_role(
RoleName=role_name,
AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()
)
except ClientError as e:
if "already exists" in str(e):
pass
else:
raise
policy = Policy(
Statement=[
Statement(
Effect=Allow,
Resource=["*"],
Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance,
ecs.DiscoverPollEndpoint, ecs.Poll,
ecs.Action("Submit*")]
)
])
client.put_role_policy(
RoleName=role_name,
PolicyName="AmazonEC2ContainerServiceRolePolicy",
PolicyDocument=policy.to_json()
)
return True | def function[create_ecs_service_role, parameter[provider, context]]:
constant[Used to create the ecsServieRole, which has to be named exactly that
currently, so cannot be created via CloudFormation. See:
http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
]
variable[role_name] assign[=] call[name[kwargs].get, parameter[constant[role_name], constant[ecsServiceRole]]]
variable[client] assign[=] call[call[name[get_session], parameter[name[provider].region]].client, parameter[constant[iam]]]
<ast.Try object at 0x7da20c9921d0>
variable[policy] assign[=] call[name[Policy], parameter[]]
call[name[client].put_role_policy, parameter[]]
return[constant[True]] | keyword[def] identifier[create_ecs_service_role] ( identifier[provider] , identifier[context] ,** identifier[kwargs] ):
literal[string]
identifier[role_name] = identifier[kwargs] . identifier[get] ( literal[string] , literal[string] )
identifier[client] = identifier[get_session] ( identifier[provider] . identifier[region] ). identifier[client] ( literal[string] )
keyword[try] :
identifier[client] . identifier[create_role] (
identifier[RoleName] = identifier[role_name] ,
identifier[AssumeRolePolicyDocument] = identifier[get_ecs_assumerole_policy] (). identifier[to_json] ()
)
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[e] ):
keyword[pass]
keyword[else] :
keyword[raise]
identifier[policy] = identifier[Policy] (
identifier[Statement] =[
identifier[Statement] (
identifier[Effect] = identifier[Allow] ,
identifier[Resource] =[ literal[string] ],
identifier[Action] =[ identifier[ecs] . identifier[CreateCluster] , identifier[ecs] . identifier[DeregisterContainerInstance] ,
identifier[ecs] . identifier[DiscoverPollEndpoint] , identifier[ecs] . identifier[Poll] ,
identifier[ecs] . identifier[Action] ( literal[string] )]
)
])
identifier[client] . identifier[put_role_policy] (
identifier[RoleName] = identifier[role_name] ,
identifier[PolicyName] = literal[string] ,
identifier[PolicyDocument] = identifier[policy] . identifier[to_json] ()
)
keyword[return] keyword[True] | def create_ecs_service_role(provider, context, **kwargs):
"""Used to create the ecsServieRole, which has to be named exactly that
currently, so cannot be created via CloudFormation. See:
http://docs.aws.amazon.com/AmazonECS/latest/developerguide/IAM_policies.html#service_IAM_role
Args:
provider (:class:`stacker.providers.base.BaseProvider`): provider
instance
context (:class:`stacker.context.Context`): context instance
Returns: boolean for whether or not the hook succeeded.
"""
role_name = kwargs.get('role_name', 'ecsServiceRole')
client = get_session(provider.region).client('iam')
try:
client.create_role(RoleName=role_name, AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json()) # depends on [control=['try'], data=[]]
except ClientError as e:
if 'already exists' in str(e):
pass # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['e']]
policy = Policy(Statement=[Statement(Effect=Allow, Resource=['*'], Action=[ecs.CreateCluster, ecs.DeregisterContainerInstance, ecs.DiscoverPollEndpoint, ecs.Poll, ecs.Action('Submit*')])])
client.put_role_policy(RoleName=role_name, PolicyName='AmazonEC2ContainerServiceRolePolicy', PolicyDocument=policy.to_json())
return True |
def with_claims(self, issuer=None, subject=None, audience=None,
additional_claims=None):
"""Returns a copy of these credentials with modified claims.
Args:
issuer (str): The `iss` claim. If unspecified the current issuer
claim will be used.
subject (str): The `sub` claim. If unspecified the current subject
claim will be used.
audience (str): the `aud` claim. If unspecified the current
audience claim will be used.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload. This will be merged with the current
additional claims.
Returns:
google.auth.jwt.Credentials: A new credentials instance.
"""
new_additional_claims = copy.deepcopy(self._additional_claims)
new_additional_claims.update(additional_claims or {})
return self.__class__(
self._signer,
issuer=issuer if issuer is not None else self._issuer,
subject=subject if subject is not None else self._subject,
audience=audience if audience is not None else self._audience,
additional_claims=new_additional_claims) | def function[with_claims, parameter[self, issuer, subject, audience, additional_claims]]:
constant[Returns a copy of these credentials with modified claims.
Args:
issuer (str): The `iss` claim. If unspecified the current issuer
claim will be used.
subject (str): The `sub` claim. If unspecified the current subject
claim will be used.
audience (str): the `aud` claim. If unspecified the current
audience claim will be used.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload. This will be merged with the current
additional claims.
Returns:
google.auth.jwt.Credentials: A new credentials instance.
]
variable[new_additional_claims] assign[=] call[name[copy].deepcopy, parameter[name[self]._additional_claims]]
call[name[new_additional_claims].update, parameter[<ast.BoolOp object at 0x7da18dc9a800>]]
return[call[name[self].__class__, parameter[name[self]._signer]]] | keyword[def] identifier[with_claims] ( identifier[self] , identifier[issuer] = keyword[None] , identifier[subject] = keyword[None] , identifier[audience] = keyword[None] ,
identifier[additional_claims] = keyword[None] ):
literal[string]
identifier[new_additional_claims] = identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[_additional_claims] )
identifier[new_additional_claims] . identifier[update] ( identifier[additional_claims] keyword[or] {})
keyword[return] identifier[self] . identifier[__class__] (
identifier[self] . identifier[_signer] ,
identifier[issuer] = identifier[issuer] keyword[if] identifier[issuer] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[_issuer] ,
identifier[subject] = identifier[subject] keyword[if] identifier[subject] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[_subject] ,
identifier[audience] = identifier[audience] keyword[if] identifier[audience] keyword[is] keyword[not] keyword[None] keyword[else] identifier[self] . identifier[_audience] ,
identifier[additional_claims] = identifier[new_additional_claims] ) | def with_claims(self, issuer=None, subject=None, audience=None, additional_claims=None):
"""Returns a copy of these credentials with modified claims.
Args:
issuer (str): The `iss` claim. If unspecified the current issuer
claim will be used.
subject (str): The `sub` claim. If unspecified the current subject
claim will be used.
audience (str): the `aud` claim. If unspecified the current
audience claim will be used.
additional_claims (Mapping[str, str]): Any additional claims for
the JWT payload. This will be merged with the current
additional claims.
Returns:
google.auth.jwt.Credentials: A new credentials instance.
"""
new_additional_claims = copy.deepcopy(self._additional_claims)
new_additional_claims.update(additional_claims or {})
return self.__class__(self._signer, issuer=issuer if issuer is not None else self._issuer, subject=subject if subject is not None else self._subject, audience=audience if audience is not None else self._audience, additional_claims=new_additional_claims) |
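A hedged sketch of how with_claims is typically used: creds below is assumed to be an existing jwt Credentials instance of the class above, and the audience URL and extra claim are made up.

scoped = creds.with_claims(
    audience="https://example.com/hypothetical-api",
    additional_claims={"request_reason": "reporting"},
)
# `scoped` keeps the signer, issuer and subject of `creds`; only the audience
# and the merged additional claims differ.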
def addApplication(self, name, version=None, path=None, disk_num=0, soft=-1):
"""Add a new application in some disk."""
fapp = Features()
fapp.features.append(Feature("name", "=", name))
if version:
fapp.features.append(Feature("version", "=", version))
if path:
fapp.features.append(Feature("path", "=", path))
self.features.append(Feature("disk.%d.applications" % disk_num, "contains", fapp, soft > 0)) | def function[addApplication, parameter[self, name, version, path, disk_num, soft]]:
constant[Add a new application in some disk.]
variable[fapp] assign[=] call[name[Features], parameter[]]
call[name[fapp].features.append, parameter[call[name[Feature], parameter[constant[name], constant[=], name[name]]]]]
if name[version] begin[:]
call[name[fapp].features.append, parameter[call[name[Feature], parameter[constant[version], constant[=], name[version]]]]]
if name[path] begin[:]
call[name[fapp].features.append, parameter[call[name[Feature], parameter[constant[path], constant[=], name[path]]]]]
call[name[self].features.append, parameter[call[name[Feature], parameter[binary_operation[constant[disk.%d.applications] <ast.Mod object at 0x7da2590d6920> name[disk_num]], constant[contains], name[fapp], compare[name[soft] greater[>] constant[0]]]]]] | keyword[def] identifier[addApplication] ( identifier[self] , identifier[name] , identifier[version] = keyword[None] , identifier[path] = keyword[None] , identifier[disk_num] = literal[int] , identifier[soft] =- literal[int] ):
literal[string]
identifier[fapp] = identifier[Features] ()
identifier[fapp] . identifier[features] . identifier[append] ( identifier[Feature] ( literal[string] , literal[string] , identifier[name] ))
keyword[if] identifier[version] :
identifier[fapp] . identifier[features] . identifier[append] ( identifier[Feature] ( literal[string] , literal[string] , identifier[version] ))
keyword[if] identifier[path] :
identifier[fapp] . identifier[features] . identifier[append] ( identifier[Feature] ( literal[string] , literal[string] , identifier[path] ))
identifier[self] . identifier[features] . identifier[append] ( identifier[Feature] ( literal[string] % identifier[disk_num] , literal[string] , identifier[fapp] , identifier[soft] > literal[int] )) | def addApplication(self, name, version=None, path=None, disk_num=0, soft=-1):
"""Add a new application in some disk."""
fapp = Features()
fapp.features.append(Feature('name', '=', name))
if version:
fapp.features.append(Feature('version', '=', version)) # depends on [control=['if'], data=[]]
if path:
fapp.features.append(Feature('path', '=', path)) # depends on [control=['if'], data=[]]
self.features.append(Feature('disk.%d.applications' % disk_num, 'contains', fapp, soft > 0)) |
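A brief sketch of what the call builds; `system` below stands in for whatever object defines addApplication (a RADL-style feature container), and the application names are illustrative.

system.addApplication("tomcat", version="8.5", disk_num=0, soft=1)   # soft requirement
system.addApplication("docker", disk_num=1)                          # hard requirement (soft=-1)
# Roughly the feature structure that results:
#   disk.0.applications contains (name = 'tomcat' and version = '8.5')
#   disk.1.applications contains (name = 'docker')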
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.Timestamp = reader.ReadUInt32()
self.Services = reader.ReadUInt64()
addr = bytearray(reader.ReadFixedString(16))
addr.reverse()
addr.strip(b'\x00')
nums = []
for i in range(0, 4):
nums.append(str(addr[i]))
nums.reverse()
adddd = '.'.join(nums)
self.Address = adddd
self.Port = reader.ReadUInt16(endian='>') | def function[Deserialize, parameter[self, reader]]:
constant[
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
]
name[self].Timestamp assign[=] call[name[reader].ReadUInt32, parameter[]]
name[self].Services assign[=] call[name[reader].ReadUInt64, parameter[]]
variable[addr] assign[=] call[name[bytearray], parameter[call[name[reader].ReadFixedString, parameter[constant[16]]]]]
call[name[addr].reverse, parameter[]]
call[name[addr].strip, parameter[constant[b'\x00']]]
variable[nums] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], constant[4]]]] begin[:]
call[name[nums].append, parameter[call[name[str], parameter[call[name[addr]][name[i]]]]]]
call[name[nums].reverse, parameter[]]
variable[adddd] assign[=] call[constant[.].join, parameter[name[nums]]]
name[self].Address assign[=] name[adddd]
name[self].Port assign[=] call[name[reader].ReadUInt16, parameter[]] | keyword[def] identifier[Deserialize] ( identifier[self] , identifier[reader] ):
literal[string]
identifier[self] . identifier[Timestamp] = identifier[reader] . identifier[ReadUInt32] ()
identifier[self] . identifier[Services] = identifier[reader] . identifier[ReadUInt64] ()
identifier[addr] = identifier[bytearray] ( identifier[reader] . identifier[ReadFixedString] ( literal[int] ))
identifier[addr] . identifier[reverse] ()
identifier[addr] . identifier[strip] ( literal[string] )
identifier[nums] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , literal[int] ):
identifier[nums] . identifier[append] ( identifier[str] ( identifier[addr] [ identifier[i] ]))
identifier[nums] . identifier[reverse] ()
identifier[adddd] = literal[string] . identifier[join] ( identifier[nums] )
identifier[self] . identifier[Address] = identifier[adddd]
identifier[self] . identifier[Port] = identifier[reader] . identifier[ReadUInt16] ( identifier[endian] = literal[string] ) | def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.Timestamp = reader.ReadUInt32()
self.Services = reader.ReadUInt64()
addr = bytearray(reader.ReadFixedString(16))
addr.reverse()
addr.strip(b'\x00')
nums = []
for i in range(0, 4):
nums.append(str(addr[i])) # depends on [control=['for'], data=['i']]
nums.reverse()
adddd = '.'.join(nums)
self.Address = adddd
self.Port = reader.ReadUInt16(endian='>') |
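The address handling above is easier to follow on a concrete buffer. The sketch below assumes a hypothetical 16-byte field whose last four bytes carry the IPv4 octets; note that bytearray.strip() returns a new object, so the strip call in the original leaves addr itself unchanged.

raw = b"\x00" * 10 + b"\xff\xff" + bytes([10, 0, 2, 15])   # hypothetical field contents
addr = bytearray(raw)
addr.reverse()                            # addr[0:4] is now 15, 2, 0, 10
nums = [str(addr[i]) for i in range(4)]
nums.reverse()                            # restore the original octet order
print(".".join(nums))                     # -> 10.0.2.15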
def _check_and_convert_bools(self):
"""Replace boolean variables by the characters 'F'/'T'
"""
replacements = {
True: 'T',
False: 'F',
}
for key in self.bools:
if isinstance(self[key], bool):
self[key] = replacements[self[key]] | def function[_check_and_convert_bools, parameter[self]]:
constant[Replace boolean variables by the characters 'F'/'T'
]
variable[replacements] assign[=] dictionary[[<ast.Constant object at 0x7da1b225d3c0>, <ast.Constant object at 0x7da1b225df60>], [<ast.Constant object at 0x7da1b225f3a0>, <ast.Constant object at 0x7da1b225cc70>]]
for taget[name[key]] in starred[name[self].bools] begin[:]
if call[name[isinstance], parameter[call[name[self]][name[key]], name[bool]]] begin[:]
call[name[self]][name[key]] assign[=] call[name[replacements]][call[name[self]][name[key]]] | keyword[def] identifier[_check_and_convert_bools] ( identifier[self] ):
literal[string]
identifier[replacements] ={
keyword[True] : literal[string] ,
keyword[False] : literal[string] ,
}
keyword[for] identifier[key] keyword[in] identifier[self] . identifier[bools] :
keyword[if] identifier[isinstance] ( identifier[self] [ identifier[key] ], identifier[bool] ):
identifier[self] [ identifier[key] ]= identifier[replacements] [ identifier[self] [ identifier[key] ]] | def _check_and_convert_bools(self):
"""Replace boolean variables by the characters 'F'/'T'
"""
replacements = {True: 'T', False: 'F'}
for key in self.bools:
if isinstance(self[key], bool):
self[key] = replacements[self[key]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] |
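A small stand-alone sketch of the substitution performed above; the settings dict and key names are hypothetical, only the True -> 'T' / False -> 'F' mapping comes from the method itself:
settings = {'lwave': True, 'lcharg': False, 'encut': 520}   # invented example values
bools = ['lwave', 'lcharg']                                 # keys that must become 'T'/'F'
replacements = {True: 'T', False: 'F'}
for key in bools:
    if isinstance(settings[key], bool):
        settings[key] = replacements[settings[key]]
assert settings == {'lwave': 'T', 'lcharg': 'F', 'encut': 520}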
def end(self):
"""Write all JSON data to files."""
for comic in self.data:
with codecs.open(self.jsonFn(comic), 'w', self.encoding) as f:
json.dump(self.data[comic], f, indent=2, separators=(',', ': '), sort_keys=True) | def function[end, parameter[self]]:
constant[Write all JSON data to files.]
for taget[name[comic]] in starred[name[self].data] begin[:]
with call[name[codecs].open, parameter[call[name[self].jsonFn, parameter[name[comic]]], constant[w], name[self].encoding]] begin[:]
call[name[json].dump, parameter[call[name[self].data][name[comic]], name[f]]] | keyword[def] identifier[end] ( identifier[self] ):
literal[string]
keyword[for] identifier[comic] keyword[in] identifier[self] . identifier[data] :
keyword[with] identifier[codecs] . identifier[open] ( identifier[self] . identifier[jsonFn] ( identifier[comic] ), literal[string] , identifier[self] . identifier[encoding] ) keyword[as] identifier[f] :
identifier[json] . identifier[dump] ( identifier[self] . identifier[data] [ identifier[comic] ], identifier[f] , identifier[indent] = literal[int] , identifier[separators] =( literal[string] , literal[string] ), identifier[sort_keys] = keyword[True] ) | def end(self):
"""Write all JSON data to files."""
for comic in self.data:
with codecs.open(self.jsonFn(comic), 'w', self.encoding) as f:
json.dump(self.data[comic], f, indent=2, separators=(',', ': '), sort_keys=True) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=['comic']] |
def plot_groups_unplaced(self, fout_dir=".", **kws_usr):
"""Plot each GO group."""
# kws: go2color max_gos upper_trigger max_upper
plotobj = PltGroupedGos(self)
return plotobj.plot_groups_unplaced(fout_dir, **kws_usr) | def function[plot_groups_unplaced, parameter[self, fout_dir]]:
constant[Plot each GO group.]
variable[plotobj] assign[=] call[name[PltGroupedGos], parameter[name[self]]]
return[call[name[plotobj].plot_groups_unplaced, parameter[name[fout_dir]]]] | keyword[def] identifier[plot_groups_unplaced] ( identifier[self] , identifier[fout_dir] = literal[string] ,** identifier[kws_usr] ):
literal[string]
identifier[plotobj] = identifier[PltGroupedGos] ( identifier[self] )
keyword[return] identifier[plotobj] . identifier[plot_groups_unplaced] ( identifier[fout_dir] ,** identifier[kws_usr] ) | def plot_groups_unplaced(self, fout_dir='.', **kws_usr):
"""Plot each GO group."""
# kws: go2color max_gos upper_trigger max_upper
plotobj = PltGroupedGos(self)
return plotobj.plot_groups_unplaced(fout_dir, **kws_usr) |
def get_principal_name(graph_object):
"""Attempts to resolve a principal name.
:param graph_object: the Azure AD Graph Object
:return: The resolved value or an empty string if unsuccessful.
"""
if hasattr(graph_object, 'user_principal_name'):
return graph_object.user_principal_name
elif hasattr(graph_object, 'service_principal_names'):
return graph_object.service_principal_names[0]
elif hasattr(graph_object, 'display_name'):
return graph_object.display_name
return '' | def function[get_principal_name, parameter[graph_object]]:
constant[Attempts to resolve a principal name.
:param graph_object: the Azure AD Graph Object
:return: The resolved value or an empty string if unsuccessful.
]
if call[name[hasattr], parameter[name[graph_object], constant[user_principal_name]]] begin[:]
return[name[graph_object].user_principal_name]
return[constant[]] | keyword[def] identifier[get_principal_name] ( identifier[graph_object] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[graph_object] , literal[string] ):
keyword[return] identifier[graph_object] . identifier[user_principal_name]
keyword[elif] identifier[hasattr] ( identifier[graph_object] , literal[string] ):
keyword[return] identifier[graph_object] . identifier[service_principal_names] [ literal[int] ]
keyword[elif] identifier[hasattr] ( identifier[graph_object] , literal[string] ):
keyword[return] identifier[graph_object] . identifier[display_name]
keyword[return] literal[string] | def get_principal_name(graph_object):
"""Attempts to resolve a principal name.
:param graph_object: the Azure AD Graph Object
:return: The resolved value or an empty string if unsuccessful.
"""
if hasattr(graph_object, 'user_principal_name'):
return graph_object.user_principal_name # depends on [control=['if'], data=[]]
elif hasattr(graph_object, 'service_principal_names'):
return graph_object.service_principal_names[0] # depends on [control=['if'], data=[]]
elif hasattr(graph_object, 'display_name'):
return graph_object.display_name # depends on [control=['if'], data=[]]
return '' |
def stop_gracefully(self):
'''Refuse to start more processes.
This runs in response to SIGINT or SIGTERM; if this isn't a
background process, control-C and a normal ``kill`` command
cause this.
'''
if self.shutting_down:
self.log(logging.INFO,
'second shutdown request, shutting down now')
self.scram()
else:
self.log(logging.INFO, 'shutting down after current jobs finish')
self.shutting_down = True | def function[stop_gracefully, parameter[self]]:
constant[Refuse to start more processes.
This runs in response to SIGINT or SIGTERM; if this isn't a
background process, control-C and a normal ``kill`` command
cause this.
]
if name[self].shutting_down begin[:]
call[name[self].log, parameter[name[logging].INFO, constant[second shutdown request, shutting down now]]]
call[name[self].scram, parameter[]] | keyword[def] identifier[stop_gracefully] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[shutting_down] :
identifier[self] . identifier[log] ( identifier[logging] . identifier[INFO] ,
literal[string] )
identifier[self] . identifier[scram] ()
keyword[else] :
identifier[self] . identifier[log] ( identifier[logging] . identifier[INFO] , literal[string] )
identifier[self] . identifier[shutting_down] = keyword[True] | def stop_gracefully(self):
"""Refuse to start more processes.
This runs in response to SIGINT or SIGTERM; if this isn't a
background process, control-C and a normal ``kill`` command
cause this.
"""
if self.shutting_down:
self.log(logging.INFO, 'second shutdown request, shutting down now')
self.scram() # depends on [control=['if'], data=[]]
else:
self.log(logging.INFO, 'shutting down after current jobs finish')
self.shutting_down = True |
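The docstring above refers to SIGINT/SIGTERM, but how those signals reach stop_gracefully is not shown in this snippet, so the wiring below is only an assumed sketch using the standard signal module; a second delivery would then fall into the scram() branch:
import signal

def install_shutdown_handlers(daemon):
    # Hypothetical registration point; 'daemon' stands for the object defined above.
    def handler(signum, frame):
        daemon.stop_gracefully()
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)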
def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
BDEFileEntry: file entry or None.
"""
path_spec = bde_path_spec.BDEPathSpec(parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) | def function[GetRootFileEntry, parameter[self]]:
constant[Retrieves the root file entry.
Returns:
BDEFileEntry: file entry or None.
]
variable[path_spec] assign[=] call[name[bde_path_spec].BDEPathSpec, parameter[]]
return[call[name[self].GetFileEntryByPathSpec, parameter[name[path_spec]]]] | keyword[def] identifier[GetRootFileEntry] ( identifier[self] ):
literal[string]
identifier[path_spec] = identifier[bde_path_spec] . identifier[BDEPathSpec] ( identifier[parent] = identifier[self] . identifier[_path_spec] . identifier[parent] )
keyword[return] identifier[self] . identifier[GetFileEntryByPathSpec] ( identifier[path_spec] ) | def GetRootFileEntry(self):
"""Retrieves the root file entry.
Returns:
BDEFileEntry: file entry or None.
"""
path_spec = bde_path_spec.BDEPathSpec(parent=self._path_spec.parent)
return self.GetFileEntryByPathSpec(path_spec) |
def unpublish(self):
"""
Un-publish the current object.
"""
if self.is_draft and self.publishing_linked:
publishing_signals.publishing_pre_unpublish.send(
sender=type(self), instance=self)
# Unlink draft and published copies then delete published.
# NOTE: This indirect dance is necessary to avoid triggering
# unwanted MPTT tree structure updates via `delete`.
type(self.publishing_linked).objects \
.filter(pk=self.publishing_linked.pk) \
.delete() # Instead of self.publishing_linked.delete()
# NOTE: We update and save the object *after* deleting the
# published version, in case the `save()` method does some
# validation that breaks when unlinked published objects exist.
self.publishing_linked = None
self.publishing_published_at = None
# Save the draft to remove its relationship with the published copy
publishing_signals.publishing_unpublish_save_draft.send(
sender=type(self), instance=self)
publishing_signals.publishing_post_unpublish.send(
sender=type(self), instance=self) | def function[unpublish, parameter[self]]:
constant[
Un-publish the current object.
]
if <ast.BoolOp object at 0x7da2043451b0> begin[:]
call[name[publishing_signals].publishing_pre_unpublish.send, parameter[]]
call[call[call[name[type], parameter[name[self].publishing_linked]].objects.filter, parameter[]].delete, parameter[]]
name[self].publishing_linked assign[=] constant[None]
name[self].publishing_published_at assign[=] constant[None]
call[name[publishing_signals].publishing_unpublish_save_draft.send, parameter[]]
call[name[publishing_signals].publishing_post_unpublish.send, parameter[]] | keyword[def] identifier[unpublish] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_draft] keyword[and] identifier[self] . identifier[publishing_linked] :
identifier[publishing_signals] . identifier[publishing_pre_unpublish] . identifier[send] (
identifier[sender] = identifier[type] ( identifier[self] ), identifier[instance] = identifier[self] )
identifier[type] ( identifier[self] . identifier[publishing_linked] ). identifier[objects] . identifier[filter] ( identifier[pk] = identifier[self] . identifier[publishing_linked] . identifier[pk] ). identifier[delete] ()
identifier[self] . identifier[publishing_linked] = keyword[None]
identifier[self] . identifier[publishing_published_at] = keyword[None]
identifier[publishing_signals] . identifier[publishing_unpublish_save_draft] . identifier[send] (
identifier[sender] = identifier[type] ( identifier[self] ), identifier[instance] = identifier[self] )
identifier[publishing_signals] . identifier[publishing_post_unpublish] . identifier[send] (
identifier[sender] = identifier[type] ( identifier[self] ), identifier[instance] = identifier[self] ) | def unpublish(self):
"""
Un-publish the current object.
"""
if self.is_draft and self.publishing_linked:
publishing_signals.publishing_pre_unpublish.send(sender=type(self), instance=self)
# Unlink draft and published copies then delete published.
# NOTE: This indirect dance is necessary to avoid triggering
# unwanted MPTT tree structure updates via `delete`.
type(self.publishing_linked).objects.filter(pk=self.publishing_linked.pk).delete() # Instead of self.publishing_linked.delete()
# NOTE: We update and save the object *after* deleting the
# published version, in case the `save()` method does some
# validation that breaks when unlinked published objects exist.
self.publishing_linked = None
self.publishing_published_at = None
# Save the draft to remove its relationship with the published copy
publishing_signals.publishing_unpublish_save_draft.send(sender=type(self), instance=self)
publishing_signals.publishing_post_unpublish.send(sender=type(self), instance=self) # depends on [control=['if'], data=[]] |
def _attach_params(self, params, **kwargs):
"""Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags)
"""
lst = params.to_list() if isinstance(params, ParameterSet) else params
for param in lst:
param._bundle = self
for k, v in kwargs.items():
# Here we'll set the attributes (_context, _qualifier, etc)
if getattr(param, '_{}'.format(k)) is None:
setattr(param, '_{}'.format(k), v)
self._params.append(param)
self._check_copy_for()
return | def function[_attach_params, parameter[self, params]]:
constant[Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags)
]
variable[lst] assign[=] <ast.IfExp object at 0x7da18eb55570>
for taget[name[param]] in starred[name[lst]] begin[:]
name[param]._bundle assign[=] name[self]
for taget[tuple[[<ast.Name object at 0x7da18eb56110>, <ast.Name object at 0x7da18eb55ae0>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if compare[call[name[getattr], parameter[name[param], call[constant[_{}].format, parameter[name[k]]]]] is constant[None]] begin[:]
call[name[setattr], parameter[name[param], call[constant[_{}].format, parameter[name[k]]], name[v]]]
call[name[self]._params.append, parameter[name[param]]]
call[name[self]._check_copy_for, parameter[]]
return[None] | keyword[def] identifier[_attach_params] ( identifier[self] , identifier[params] ,** identifier[kwargs] ):
literal[string]
identifier[lst] = identifier[params] . identifier[to_list] () keyword[if] identifier[isinstance] ( identifier[params] , identifier[ParameterSet] ) keyword[else] identifier[params]
keyword[for] identifier[param] keyword[in] identifier[lst] :
identifier[param] . identifier[_bundle] = identifier[self]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[getattr] ( identifier[param] , literal[string] . identifier[format] ( identifier[k] )) keyword[is] keyword[None] :
identifier[setattr] ( identifier[param] , literal[string] . identifier[format] ( identifier[k] ), identifier[v] )
identifier[self] . identifier[_params] . identifier[append] ( identifier[param] )
identifier[self] . identifier[_check_copy_for] ()
keyword[return] | def _attach_params(self, params, **kwargs):
"""Attach a list of parameters (or ParameterSet) to this ParameterSet.
:parameter list params: list of parameters, or ParameterSet
:parameter **kwargs: attributes to set for each parameter (ie tags)
"""
lst = params.to_list() if isinstance(params, ParameterSet) else params
for param in lst:
param._bundle = self
for (k, v) in kwargs.items():
# Here we'll set the attributes (_context, _qualifier, etc)
if getattr(param, '_{}'.format(k)) is None:
setattr(param, '_{}'.format(k), v) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
self._params.append(param) # depends on [control=['for'], data=['param']]
self._check_copy_for()
return |
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i) | def function[insert_rows, parameter[self, table, rows, target_fields, commit_every, replace]]:
constant[
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
]
if name[target_fields] begin[:]
variable[target_fields] assign[=] call[constant[, ].join, parameter[name[target_fields]]]
variable[target_fields] assign[=] call[constant[({})].format, parameter[name[target_fields]]]
variable[i] assign[=] constant[0]
with call[name[closing], parameter[call[name[self].get_conn, parameter[]]]] begin[:]
if name[self].supports_autocommit begin[:]
call[name[self].set_autocommit, parameter[name[conn], constant[False]]]
call[name[conn].commit, parameter[]]
with call[name[closing], parameter[call[name[conn].cursor, parameter[]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2054a5450>, <ast.Name object at 0x7da2054a7310>]]] in starred[call[name[enumerate], parameter[name[rows], constant[1]]]] begin[:]
variable[lst] assign[=] list[[]]
for taget[name[cell]] in starred[name[row]] begin[:]
call[name[lst].append, parameter[call[name[self]._serialize_cell, parameter[name[cell], name[conn]]]]]
variable[values] assign[=] call[name[tuple], parameter[name[lst]]]
variable[placeholders] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20c6c7310>]] * call[name[len], parameter[name[values]]]]
if <ast.UnaryOp object at 0x7da20c6c6bf0> begin[:]
variable[sql] assign[=] constant[INSERT INTO ]
<ast.AugAssign object at 0x7da20c6c67d0>
call[name[cur].execute, parameter[name[sql], name[values]]]
if <ast.BoolOp object at 0x7da20c6c59f0> begin[:]
call[name[conn].commit, parameter[]]
call[name[self].log.info, parameter[constant[Loaded %s into %s rows so far], name[i], name[table]]]
call[name[conn].commit, parameter[]]
call[name[self].log.info, parameter[constant[Done loading. Loaded a total of %s rows], name[i]]] | keyword[def] identifier[insert_rows] ( identifier[self] , identifier[table] , identifier[rows] , identifier[target_fields] = keyword[None] , identifier[commit_every] = literal[int] ,
identifier[replace] = keyword[False] ):
literal[string]
keyword[if] identifier[target_fields] :
identifier[target_fields] = literal[string] . identifier[join] ( identifier[target_fields] )
identifier[target_fields] = literal[string] . identifier[format] ( identifier[target_fields] )
keyword[else] :
identifier[target_fields] = literal[string]
identifier[i] = literal[int]
keyword[with] identifier[closing] ( identifier[self] . identifier[get_conn] ()) keyword[as] identifier[conn] :
keyword[if] identifier[self] . identifier[supports_autocommit] :
identifier[self] . identifier[set_autocommit] ( identifier[conn] , keyword[False] )
identifier[conn] . identifier[commit] ()
keyword[with] identifier[closing] ( identifier[conn] . identifier[cursor] ()) keyword[as] identifier[cur] :
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[rows] , literal[int] ):
identifier[lst] =[]
keyword[for] identifier[cell] keyword[in] identifier[row] :
identifier[lst] . identifier[append] ( identifier[self] . identifier[_serialize_cell] ( identifier[cell] , identifier[conn] ))
identifier[values] = identifier[tuple] ( identifier[lst] )
identifier[placeholders] =[ literal[string] ,]* identifier[len] ( identifier[values] )
keyword[if] keyword[not] identifier[replace] :
identifier[sql] = literal[string]
keyword[else] :
identifier[sql] = literal[string]
identifier[sql] += literal[string] . identifier[format] (
identifier[table] ,
identifier[target_fields] ,
literal[string] . identifier[join] ( identifier[placeholders] ))
identifier[cur] . identifier[execute] ( identifier[sql] , identifier[values] )
keyword[if] identifier[commit_every] keyword[and] identifier[i] % identifier[commit_every] == literal[int] :
identifier[conn] . identifier[commit] ()
identifier[self] . identifier[log] . identifier[info] (
literal[string] , identifier[i] , identifier[table]
)
identifier[conn] . identifier[commit] ()
identifier[self] . identifier[log] . identifier[info] ( literal[string] , identifier[i] ) | def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ', '.join(target_fields)
target_fields = '({})'.format(target_fields) # depends on [control=['if'], data=[]]
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False) # depends on [control=['if'], data=[]]
conn.commit()
with closing(conn.cursor()) as cur:
for (i, row) in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn)) # depends on [control=['for'], data=['cell']]
values = tuple(lst)
placeholders = ['%s'] * len(values)
if not replace:
sql = 'INSERT INTO ' # depends on [control=['if'], data=[]]
else:
sql = 'REPLACE INTO '
sql += '{0} {1} VALUES ({2})'.format(table, target_fields, ','.join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info('Loaded %s into %s rows so far', i, table) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['cur']]
conn.commit() # depends on [control=['with'], data=['closing', 'conn']]
self.log.info('Done loading. Loaded a total of %s rows', i) |
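A hedged usage sketch for the method above; MyDbApiHook, the table and the column names are placeholders, only the parameter semantics come from the docstring:
hook = MyDbApiHook()                          # hypothetical concrete hook supplying get_conn()
hook.insert_rows(
    table='users',
    rows=[(1, 'alice'), (2, 'bob')],
    target_fields=['id', 'name'],
    commit_every=500,                         # one transaction per 500 rows
    replace=False,                            # plain INSERT INTO rather than REPLACE INTO
)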
def _config(self, **kargs):
""" ReConfigure Package """
for key, value in kargs.items():
setattr(self, key, value) | def function[_config, parameter[self]]:
constant[ ReConfigure Package ]
for taget[tuple[[<ast.Name object at 0x7da18bcc9330>, <ast.Name object at 0x7da18bcca590>]]] in starred[call[name[kargs].items, parameter[]]] begin[:]
call[name[setattr], parameter[name[self], name[key], name[value]]] | keyword[def] identifier[_config] ( identifier[self] ,** identifier[kargs] ):
literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kargs] . identifier[items] ():
identifier[setattr] ( identifier[self] , identifier[key] , identifier[value] ) | def _config(self, **kargs):
""" ReConfigure Package """
for (key, value) in kargs.items():
setattr(self, key, value) # depends on [control=['for'], data=[]] |
def reset_tag(self, name):
"""
Reset the tag and return the new tag identifier.
:param name: The tag
:type name: str
:rtype: str
"""
id_ = str(uuid.uuid4()).replace('-', '')
self._store.forever(self.tag_key(name), id_)
return id_ | def function[reset_tag, parameter[self, name]]:
constant[
Reset the tag and return the new tag identifier.
:param name: The tag
:type name: str
:rtype: str
]
variable[id_] assign[=] call[call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]].replace, parameter[constant[-], constant[]]]
call[name[self]._store.forever, parameter[call[name[self].tag_key, parameter[name[name]]], name[id_]]]
return[name[id_]] | keyword[def] identifier[reset_tag] ( identifier[self] , identifier[name] ):
literal[string]
identifier[id_] = identifier[str] ( identifier[uuid] . identifier[uuid4] ()). identifier[replace] ( literal[string] , literal[string] )
identifier[self] . identifier[_store] . identifier[forever] ( identifier[self] . identifier[tag_key] ( identifier[name] ), identifier[id_] )
keyword[return] identifier[id_] | def reset_tag(self, name):
"""
Reset the tag and return the new tag identifier.
:param name: The tag
:type name: str
:rtype: str
"""
id_ = str(uuid.uuid4()).replace('-', '')
self._store.forever(self.tag_key(name), id_)
return id_ |
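The fresh identifier returned above is what makes tag-based invalidation work: keys built from the old identifier simply stop being generated. The key-composition scheme below is an assumption for illustration, not this store's actual layout:
tags = TaggedCache(store, names=['users'])    # hypothetical owner of reset_tag()
old_id = tags.reset_tag('users')              # uuid4 hex, stored forever under the tag key
stale_key = 'users:{}:profile:42'.format(old_id)
new_id = tags.reset_tag('users')              # rotate the namespace
assert new_id != old_id                       # entries built from old_id are now orphaned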
def do_next(self, line):
"""Jump to the next entities (ontology, class or property) depending on context"""
if not self.current:
print("Please select an ontology first. E.g. use the 'ls ontologies' or 'get ontology <name>' commands.")
elif self.currentEntity:
g = self.current['graph']
if self.currentEntity['type'] == 'class':
nextentity = g.nextClass(self.currentEntity['object'].uri)
self._select_class(str(nextentity.uri))
elif self.currentEntity['type'] == 'property':
nextentity = g.nextProperty(self.currentEntity['object'].uri)
self._select_property(str(nextentity.uri))
elif self.currentEntity['type'] == 'concept':
nextentity = g.nextConcept(self.currentEntity['object'].uri)
self._select_concept(str(nextentity.uri))
else:
print("Not implemented")
else:
if len(self.all_ontologies) > 1:
nextonto = self._next_ontology()
self._load_ontology(nextonto)
else:
self._print("Only one ontology available in repository.") | def function[do_next, parameter[self, line]]:
    constant[Jump to the next entity (ontology, class or property) depending on context]
if <ast.UnaryOp object at 0x7da1b11ab970> begin[:]
call[name[print], parameter[constant[Please select an ontology first. E.g. use the 'ls ontologies' or 'get ontology <name>' commands.]]] | keyword[def] identifier[do_next] ( identifier[self] , identifier[line] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[current] :
identifier[print] ( literal[string] )
keyword[elif] identifier[self] . identifier[currentEntity] :
identifier[g] = identifier[self] . identifier[current] [ literal[string] ]
keyword[if] identifier[self] . identifier[currentEntity] [ literal[string] ]== literal[string] :
identifier[nextentity] = identifier[g] . identifier[nextClass] ( identifier[self] . identifier[currentEntity] [ literal[string] ]. identifier[uri] )
identifier[self] . identifier[_select_class] ( identifier[str] ( identifier[nextentity] . identifier[uri] ))
keyword[elif] identifier[self] . identifier[currentEntity] [ literal[string] ]== literal[string] :
identifier[nextentity] = identifier[g] . identifier[nextProperty] ( identifier[self] . identifier[currentEntity] [ literal[string] ]. identifier[uri] )
identifier[self] . identifier[_select_property] ( identifier[str] ( identifier[nextentity] . identifier[uri] ))
keyword[elif] identifier[self] . identifier[currentEntity] [ literal[string] ]== literal[string] :
identifier[nextentity] = identifier[g] . identifier[nextConcept] ( identifier[self] . identifier[currentEntity] [ literal[string] ]. identifier[uri] )
identifier[self] . identifier[_select_concept] ( identifier[str] ( identifier[nextentity] . identifier[uri] ))
keyword[else] :
identifier[print] ( literal[string] )
keyword[else] :
keyword[if] identifier[len] ( identifier[self] . identifier[all_ontologies] )> literal[int] :
identifier[nextonto] = identifier[self] . identifier[_next_ontology] ()
identifier[self] . identifier[_load_ontology] ( identifier[nextonto] )
keyword[else] :
identifier[self] . identifier[_print] ( literal[string] ) | def do_next(self, line):
"""Jump to the next entities (ontology, class or property) depending on context"""
if not self.current:
print("Please select an ontology first. E.g. use the 'ls ontologies' or 'get ontology <name>' commands.") # depends on [control=['if'], data=[]]
elif self.currentEntity:
g = self.current['graph']
if self.currentEntity['type'] == 'class':
nextentity = g.nextClass(self.currentEntity['object'].uri)
self._select_class(str(nextentity.uri)) # depends on [control=['if'], data=[]]
elif self.currentEntity['type'] == 'property':
nextentity = g.nextProperty(self.currentEntity['object'].uri)
self._select_property(str(nextentity.uri)) # depends on [control=['if'], data=[]]
elif self.currentEntity['type'] == 'concept':
nextentity = g.nextConcept(self.currentEntity['object'].uri)
self._select_concept(str(nextentity.uri)) # depends on [control=['if'], data=[]]
else:
print('Not implemented') # depends on [control=['if'], data=[]]
elif len(self.all_ontologies) > 1:
nextonto = self._next_ontology()
self._load_ontology(nextonto) # depends on [control=['if'], data=[]]
else:
self._print('Only one ontology available in repository.') |
def shutdown_abort():
'''
Abort a shutdown. Only available while the dialog box is being
displayed to the user. Once the shutdown has initiated, it cannot be
aborted.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.shutdown_abort
'''
try:
win32api.AbortSystemShutdown('127.0.0.1')
return True
except pywintypes.error as exc:
(number, context, message) = exc.args
log.error('Failed to abort system shutdown')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
log.error('msg: %s', message)
return False | def function[shutdown_abort, parameter[]]:
constant[
Abort a shutdown. Only available while the dialog box is being
displayed to the user. Once the shutdown has initiated, it cannot be
aborted.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.shutdown_abort
]
<ast.Try object at 0x7da18dc99c00> | keyword[def] identifier[shutdown_abort] ():
literal[string]
keyword[try] :
identifier[win32api] . identifier[AbortSystemShutdown] ( literal[string] )
keyword[return] keyword[True]
keyword[except] identifier[pywintypes] . identifier[error] keyword[as] identifier[exc] :
( identifier[number] , identifier[context] , identifier[message] )= identifier[exc] . identifier[args]
identifier[log] . identifier[error] ( literal[string] )
identifier[log] . identifier[error] ( literal[string] , identifier[number] )
identifier[log] . identifier[error] ( literal[string] , identifier[context] )
identifier[log] . identifier[error] ( literal[string] , identifier[message] )
keyword[return] keyword[False] | def shutdown_abort():
"""
Abort a shutdown. Only available while the dialog box is being
displayed to the user. Once the shutdown has initiated, it cannot be
aborted.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt 'minion-id' system.shutdown_abort
"""
try:
win32api.AbortSystemShutdown('127.0.0.1')
return True # depends on [control=['try'], data=[]]
except pywintypes.error as exc:
(number, context, message) = exc.args
log.error('Failed to abort system shutdown')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
log.error('msg: %s', message)
return False # depends on [control=['except'], data=['exc']] |
def product_path(cls, project, location, product):
"""Return a fully-qualified product string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/products/{product}",
project=project,
location=location,
product=product,
) | def function[product_path, parameter[cls, project, location, product]]:
constant[Return a fully-qualified product string.]
return[call[name[google].api_core.path_template.expand, parameter[constant[projects/{project}/locations/{location}/products/{product}]]]] | keyword[def] identifier[product_path] ( identifier[cls] , identifier[project] , identifier[location] , identifier[product] ):
literal[string]
keyword[return] identifier[google] . identifier[api_core] . identifier[path_template] . identifier[expand] (
literal[string] ,
identifier[project] = identifier[project] ,
identifier[location] = identifier[location] ,
identifier[product] = identifier[product] ,
) | def product_path(cls, project, location, product):
"""Return a fully-qualified product string."""
return google.api_core.path_template.expand('projects/{project}/locations/{location}/products/{product}', project=project, location=location, product=product) |
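Expected shape of the result, assuming the surrounding class is used as a classmethod helper (the client name and argument values are illustrative only):
path = ProductSearchClient.product_path('my-project', 'us-west1', 'sku-123')
assert path == 'projects/my-project/locations/us-west1/products/sku-123'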
def get_median(data_np):
"""Like :func:`get_mean` but for median."""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan
return np.median(data_np[i]) | def function[get_median, parameter[data_np]]:
constant[Like :func:`get_mean` but for median.]
variable[i] assign[=] call[name[np].isfinite, parameter[name[data_np]]]
if <ast.UnaryOp object at 0x7da1b0c27220> begin[:]
return[name[np].nan]
return[call[name[np].median, parameter[call[name[data_np]][name[i]]]]] | keyword[def] identifier[get_median] ( identifier[data_np] ):
literal[string]
identifier[i] = identifier[np] . identifier[isfinite] ( identifier[data_np] )
keyword[if] keyword[not] identifier[np] . identifier[any] ( identifier[i] ):
keyword[return] identifier[np] . identifier[nan]
keyword[return] identifier[np] . identifier[median] ( identifier[data_np] [ identifier[i] ]) | def get_median(data_np):
"""Like :func:`get_mean` but for median."""
i = np.isfinite(data_np)
if not np.any(i):
return np.nan # depends on [control=['if'], data=[]]
return np.median(data_np[i]) |
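A quick check of the NaN handling: non-finite values are masked out before taking the median, and an input with nothing finite falls through to NaN rather than raising.
import numpy as np

data = np.array([1.0, np.nan, 3.0, np.inf])
assert get_median(data) == 2.0                    # median of the finite values {1.0, 3.0}
assert np.isnan(get_median(np.array([np.nan])))   # no finite entries -> NaN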
def calc_mean_time_deviation(timepoints, weights, mean_time=None):
"""Return the weighted deviation of the given timepoints from their mean
time.
    With equal given weights, this is simply the standard deviation of the
given time points:
>>> from hydpy import calc_mean_time_deviation
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[2., 2.])
2.0
One can pass a precalculated or alternate mean time:
>>> from hydpy import round_
>>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[2., 2.],
... mean_time=4.))
2.236068
>>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[1., 3.]))
1.732051
Or, in the most extreme case:
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[0., 4.])
0.0
    There will be some checks for input plausibility performed, e.g.:
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[-2., 2.])
Traceback (most recent call last):
...
ValueError: While trying to calculate the weighted time deviation \
from mean time, the following error occurred: For the following objects, \
at least one value is negative: weights.
"""
timepoints = numpy.array(timepoints)
weights = numpy.array(weights)
validtools.test_equal_shape(timepoints=timepoints, weights=weights)
validtools.test_non_negative(weights=weights)
if mean_time is None:
mean_time = calc_mean_time(timepoints, weights)
return (numpy.sqrt(numpy.dot(weights, (timepoints-mean_time)**2) /
numpy.sum(weights))) | def function[calc_mean_time_deviation, parameter[timepoints, weights, mean_time]]:
constant[Return the weighted deviation of the given timepoints from their mean
time.
    With equal given weights, this is simply the standard deviation of the
given time points:
>>> from hydpy import calc_mean_time_deviation
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[2., 2.])
2.0
One can pass a precalculated or alternate mean time:
>>> from hydpy import round_
>>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[2., 2.],
... mean_time=4.))
2.236068
>>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[1., 3.]))
1.732051
Or, in the most extreme case:
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[0., 4.])
0.0
    There will be some checks for input plausibility performed, e.g.:
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[-2., 2.])
Traceback (most recent call last):
...
ValueError: While trying to calculate the weighted time deviation from mean time, the following error occurred: For the following objects, at least one value is negative: weights.
]
variable[timepoints] assign[=] call[name[numpy].array, parameter[name[timepoints]]]
variable[weights] assign[=] call[name[numpy].array, parameter[name[weights]]]
call[name[validtools].test_equal_shape, parameter[]]
call[name[validtools].test_non_negative, parameter[]]
if compare[name[mean_time] is constant[None]] begin[:]
variable[mean_time] assign[=] call[name[calc_mean_time], parameter[name[timepoints], name[weights]]]
return[call[name[numpy].sqrt, parameter[binary_operation[call[name[numpy].dot, parameter[name[weights], binary_operation[binary_operation[name[timepoints] - name[mean_time]] ** constant[2]]]] / call[name[numpy].sum, parameter[name[weights]]]]]]] | keyword[def] identifier[calc_mean_time_deviation] ( identifier[timepoints] , identifier[weights] , identifier[mean_time] = keyword[None] ):
literal[string]
identifier[timepoints] = identifier[numpy] . identifier[array] ( identifier[timepoints] )
identifier[weights] = identifier[numpy] . identifier[array] ( identifier[weights] )
identifier[validtools] . identifier[test_equal_shape] ( identifier[timepoints] = identifier[timepoints] , identifier[weights] = identifier[weights] )
identifier[validtools] . identifier[test_non_negative] ( identifier[weights] = identifier[weights] )
keyword[if] identifier[mean_time] keyword[is] keyword[None] :
identifier[mean_time] = identifier[calc_mean_time] ( identifier[timepoints] , identifier[weights] )
keyword[return] ( identifier[numpy] . identifier[sqrt] ( identifier[numpy] . identifier[dot] ( identifier[weights] ,( identifier[timepoints] - identifier[mean_time] )** literal[int] )/
identifier[numpy] . identifier[sum] ( identifier[weights] ))) | def calc_mean_time_deviation(timepoints, weights, mean_time=None):
"""Return the weighted deviation of the given timepoints from their mean
time.
    With equal given weights, this is simply the standard deviation of the
given time points:
>>> from hydpy import calc_mean_time_deviation
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[2., 2.])
2.0
One can pass a precalculated or alternate mean time:
>>> from hydpy import round_
>>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[2., 2.],
... mean_time=4.))
2.236068
>>> round_(calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[1., 3.]))
1.732051
Or, in the most extreme case:
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[0., 4.])
0.0
    There will be some checks for input plausibility performed, e.g.:
>>> calc_mean_time_deviation(timepoints=[3., 7.],
... weights=[-2., 2.])
Traceback (most recent call last):
...
ValueError: While trying to calculate the weighted time deviation from mean time, the following error occurred: For the following objects, at least one value is negative: weights.
"""
timepoints = numpy.array(timepoints)
weights = numpy.array(weights)
validtools.test_equal_shape(timepoints=timepoints, weights=weights)
validtools.test_non_negative(weights=weights)
if mean_time is None:
mean_time = calc_mean_time(timepoints, weights) # depends on [control=['if'], data=['mean_time']]
return numpy.sqrt(numpy.dot(weights, (timepoints - mean_time) ** 2) / numpy.sum(weights)) |
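The third doctest value can be checked by hand from the weighted formula sqrt(sum(w*(t-mean)**2)/sum(w)): with weights [1, 3] the mean time is (1*3 + 3*7)/4 = 6, so the deviation is sqrt((1*9 + 3*1)/4) = sqrt(3) ~ 1.732051. The same arithmetic, spelled out with plain numpy:
import numpy as np

weights = np.array([1., 3.])
timepoints = np.array([3., 7.])
mean_time = np.dot(weights, timepoints) / weights.sum()                       # 6.0
deviation = np.sqrt(np.dot(weights, (timepoints - mean_time) ** 2) / weights.sum())
assert abs(deviation - np.sqrt(3.)) < 1e-12                                   # ~1.732051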
def finish():
# type: () -> None
""" Merge current feature into develop. """
pretend = context.get('pretend', False)
if not pretend and (git.staged() or git.unstaged()):
log.err(
"You have uncommitted changes in your repo!\n"
"You need to stash them before you merge the hotfix branch"
)
sys.exit(1)
develop = conf.get('git.devel_branch', 'develop')
master = conf.get('git.master_branch', 'master')
branch = git.current_branch(refresh=True)
common.assert_branch_type('hotfix')
# Merge hotfix into master
common.git_checkout(master)
common.git_pull(master)
common.git_merge(master, branch.name)
# Merge hotfix into develop
common.git_checkout(develop)
common.git_pull(develop)
common.git_merge(develop, branch.name)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(master) | def function[finish, parameter[]]:
    constant[ Merge the current hotfix branch into master and develop. ]
variable[pretend] assign[=] call[name[context].get, parameter[constant[pretend], constant[False]]]
if <ast.BoolOp object at 0x7da1b10afac0> begin[:]
call[name[log].err, parameter[constant[You have uncommitted changes in your repo!
You need to stash them before you merge the hotfix branch]]]
call[name[sys].exit, parameter[constant[1]]]
variable[develop] assign[=] call[name[conf].get, parameter[constant[git.devel_branch], constant[develop]]]
variable[master] assign[=] call[name[conf].get, parameter[constant[git.master_branch], constant[master]]]
variable[branch] assign[=] call[name[git].current_branch, parameter[]]
call[name[common].assert_branch_type, parameter[constant[hotfix]]]
call[name[common].git_checkout, parameter[name[master]]]
call[name[common].git_pull, parameter[name[master]]]
call[name[common].git_merge, parameter[name[master], name[branch].name]]
call[name[common].git_checkout, parameter[name[develop]]]
call[name[common].git_pull, parameter[name[develop]]]
call[name[common].git_merge, parameter[name[develop], name[branch].name]]
call[name[common].git_branch_delete, parameter[name[branch].name]]
call[name[common].git_prune, parameter[]]
call[name[common].git_checkout, parameter[name[master]]] | keyword[def] identifier[finish] ():
literal[string]
identifier[pretend] = identifier[context] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] keyword[not] identifier[pretend] keyword[and] ( identifier[git] . identifier[staged] () keyword[or] identifier[git] . identifier[unstaged] ()):
identifier[log] . identifier[err] (
literal[string]
literal[string]
)
identifier[sys] . identifier[exit] ( literal[int] )
identifier[develop] = identifier[conf] . identifier[get] ( literal[string] , literal[string] )
identifier[master] = identifier[conf] . identifier[get] ( literal[string] , literal[string] )
identifier[branch] = identifier[git] . identifier[current_branch] ( identifier[refresh] = keyword[True] )
identifier[common] . identifier[assert_branch_type] ( literal[string] )
identifier[common] . identifier[git_checkout] ( identifier[master] )
identifier[common] . identifier[git_pull] ( identifier[master] )
identifier[common] . identifier[git_merge] ( identifier[master] , identifier[branch] . identifier[name] )
identifier[common] . identifier[git_checkout] ( identifier[develop] )
identifier[common] . identifier[git_pull] ( identifier[develop] )
identifier[common] . identifier[git_merge] ( identifier[develop] , identifier[branch] . identifier[name] )
identifier[common] . identifier[git_branch_delete] ( identifier[branch] . identifier[name] )
identifier[common] . identifier[git_prune] ()
identifier[common] . identifier[git_checkout] ( identifier[master] ) | def finish():
# type: () -> None
    ' Merge the current hotfix branch into master and develop. '
pretend = context.get('pretend', False)
if not pretend and (git.staged() or git.unstaged()):
log.err('You have uncommitted changes in your repo!\nYou need to stash them before you merge the hotfix branch')
sys.exit(1) # depends on [control=['if'], data=[]]
develop = conf.get('git.devel_branch', 'develop')
master = conf.get('git.master_branch', 'master')
branch = git.current_branch(refresh=True)
common.assert_branch_type('hotfix')
# Merge hotfix into master
common.git_checkout(master)
common.git_pull(master)
common.git_merge(master, branch.name)
# Merge hotfix into develop
common.git_checkout(develop)
common.git_pull(develop)
common.git_merge(develop, branch.name)
# Cleanup
common.git_branch_delete(branch.name)
common.git_prune()
common.git_checkout(master) |
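For orientation, a hedged sketch of what the helper calls above roughly correspond to in raw git; the exact behaviour of common.git_pull and common.git_prune is not shown here, so the pull and prune commands below are assumptions:
import subprocess

def finish_hotfix_cli(branch, master='master', develop='develop'):
    # Approximate plain-git equivalent of finish(); illustrative only.
    steps = [
        ['git', 'checkout', master], ['git', 'pull'], ['git', 'merge', branch],
        ['git', 'checkout', develop], ['git', 'pull'], ['git', 'merge', branch],
        ['git', 'branch', '-d', branch], ['git', 'fetch', '--prune'],
        ['git', 'checkout', master],
    ]
    for args in steps:
        subprocess.check_call(args)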
def position_to_value(self, y):
"""Convert position in pixels to value"""
vsb = self.editor.verticalScrollBar()
return vsb.minimum()+max([0, (y-self.offset)/self.get_scale_factor()]) | def function[position_to_value, parameter[self, y]]:
constant[Convert position in pixels to value]
variable[vsb] assign[=] call[name[self].editor.verticalScrollBar, parameter[]]
return[binary_operation[call[name[vsb].minimum, parameter[]] + call[name[max], parameter[list[[<ast.Constant object at 0x7da1b1f74580>, <ast.BinOp object at 0x7da1b1f76620>]]]]]] | keyword[def] identifier[position_to_value] ( identifier[self] , identifier[y] ):
literal[string]
identifier[vsb] = identifier[self] . identifier[editor] . identifier[verticalScrollBar] ()
keyword[return] identifier[vsb] . identifier[minimum] ()+ identifier[max] ([ literal[int] ,( identifier[y] - identifier[self] . identifier[offset] )/ identifier[self] . identifier[get_scale_factor] ()]) | def position_to_value(self, y):
"""Convert position in pixels to value"""
vsb = self.editor.verticalScrollBar()
return vsb.minimum() + max([0, (y - self.offset) / self.get_scale_factor()]) |
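The conversion is just the inverse of a linear value-to-pixel mapping; a stand-alone check with made-up numbers (scrollbar minimum, panel offset and scale factor are all assumptions here):
minimum, offset, scale = 0, 20, 0.5        # scale: pixels per scrollbar unit
y = 120                                     # pixel position inside the scrollbar area
value = minimum + max([0, (y - offset) / scale])
assert value == 200.0                       # (120 - 20) / 0.5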
def evaluate_emb(emb, labels):
"""Evaluate embeddings based on Recall@k."""
d_mat = get_distance_matrix(emb)
d_mat = d_mat.asnumpy()
labels = labels.asnumpy()
names = []
accs = []
for k in [1, 2, 4, 8, 16]:
names.append('Recall@%d' % k)
correct, cnt = 0.0, 0.0
for i in range(emb.shape[0]):
d_mat[i, i] = 1e10
nns = argpartition(d_mat[i], k)[:k]
if any(labels[i] == labels[nn] for nn in nns):
correct += 1
cnt += 1
accs.append(correct/cnt)
return names, accs | def function[evaluate_emb, parameter[emb, labels]]:
constant[Evaluate embeddings based on Recall@k.]
variable[d_mat] assign[=] call[name[get_distance_matrix], parameter[name[emb]]]
variable[d_mat] assign[=] call[name[d_mat].asnumpy, parameter[]]
variable[labels] assign[=] call[name[labels].asnumpy, parameter[]]
variable[names] assign[=] list[[]]
variable[accs] assign[=] list[[]]
for taget[name[k]] in starred[list[[<ast.Constant object at 0x7da1b1fa2ec0>, <ast.Constant object at 0x7da1b1fa1db0>, <ast.Constant object at 0x7da1b1fa1660>, <ast.Constant object at 0x7da1b1fa0730>, <ast.Constant object at 0x7da1b1fa06a0>]]] begin[:]
call[name[names].append, parameter[binary_operation[constant[Recall@%d] <ast.Mod object at 0x7da2590d6920> name[k]]]]
<ast.Tuple object at 0x7da1b1fa1300> assign[=] tuple[[<ast.Constant object at 0x7da1b1fa05b0>, <ast.Constant object at 0x7da1b1fa07f0>]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[emb].shape][constant[0]]]]] begin[:]
call[name[d_mat]][tuple[[<ast.Name object at 0x7da1b1fa2a40>, <ast.Name object at 0x7da1b1fa31c0>]]] assign[=] constant[10000000000.0]
variable[nns] assign[=] call[call[name[argpartition], parameter[call[name[d_mat]][name[i]], name[k]]]][<ast.Slice object at 0x7da1b204ef80>]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b204d390>]] begin[:]
<ast.AugAssign object at 0x7da1b204ffd0>
<ast.AugAssign object at 0x7da1b204d780>
call[name[accs].append, parameter[binary_operation[name[correct] / name[cnt]]]]
return[tuple[[<ast.Name object at 0x7da1b204eb60>, <ast.Name object at 0x7da1b204f880>]]] | keyword[def] identifier[evaluate_emb] ( identifier[emb] , identifier[labels] ):
literal[string]
identifier[d_mat] = identifier[get_distance_matrix] ( identifier[emb] )
identifier[d_mat] = identifier[d_mat] . identifier[asnumpy] ()
identifier[labels] = identifier[labels] . identifier[asnumpy] ()
identifier[names] =[]
identifier[accs] =[]
keyword[for] identifier[k] keyword[in] [ literal[int] , literal[int] , literal[int] , literal[int] , literal[int] ]:
identifier[names] . identifier[append] ( literal[string] % identifier[k] )
identifier[correct] , identifier[cnt] = literal[int] , literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[emb] . identifier[shape] [ literal[int] ]):
identifier[d_mat] [ identifier[i] , identifier[i] ]= literal[int]
identifier[nns] = identifier[argpartition] ( identifier[d_mat] [ identifier[i] ], identifier[k] )[: identifier[k] ]
keyword[if] identifier[any] ( identifier[labels] [ identifier[i] ]== identifier[labels] [ identifier[nn] ] keyword[for] identifier[nn] keyword[in] identifier[nns] ):
identifier[correct] += literal[int]
identifier[cnt] += literal[int]
identifier[accs] . identifier[append] ( identifier[correct] / identifier[cnt] )
keyword[return] identifier[names] , identifier[accs] | def evaluate_emb(emb, labels):
"""Evaluate embeddings based on Recall@k."""
d_mat = get_distance_matrix(emb)
d_mat = d_mat.asnumpy()
labels = labels.asnumpy()
names = []
accs = []
for k in [1, 2, 4, 8, 16]:
names.append('Recall@%d' % k)
(correct, cnt) = (0.0, 0.0)
for i in range(emb.shape[0]):
d_mat[i, i] = 10000000000.0
nns = argpartition(d_mat[i], k)[:k]
if any((labels[i] == labels[nn] for nn in nns)):
correct += 1 # depends on [control=['if'], data=[]]
cnt += 1 # depends on [control=['for'], data=['i']]
accs.append(correct / cnt) # depends on [control=['for'], data=['k']]
return (names, accs) |
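Recall@k as computed above counts a query as a hit when any of its k nearest neighbours (itself excluded via the 1e10 self-distance) carries the same label; a tiny hand-checkable version of that loop with an invented 3-point distance matrix:
import numpy as np

d_mat = np.array([[0., 4., 1.],
                  [4., 0., 5.],
                  [1., 5., 0.]])
labels = np.array([0, 1, 0])                # points 0 and 2 share a label
k, correct = 1, 0.0
for i in range(3):
    d_mat[i, i] = 1e10                      # never count the point itself
    nns = np.argpartition(d_mat[i], k)[:k]
    correct += any(labels[i] == labels[nn] for nn in nns)
print(correct / 3)                          # Recall@1 = 2/3; only point 1 has no same-label neighbour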
def get_node(self, name, memory=False, binary=False):
"""
An individual node in the RabbitMQ cluster. Set "memory=true" to get
memory statistics, and "binary=true" to get a breakdown of binary
memory use (may be expensive if there are many small binaries in the
system).
"""
return self._api_get(
url='/api/nodes/{0}'.format(name),
params=dict(
binary=binary,
memory=memory,
),
) | def function[get_node, parameter[self, name, memory, binary]]:
constant[
An individual node in the RabbitMQ cluster. Set "memory=true" to get
memory statistics, and "binary=true" to get a breakdown of binary
memory use (may be expensive if there are many small binaries in the
system).
]
return[call[name[self]._api_get, parameter[]]] | keyword[def] identifier[get_node] ( identifier[self] , identifier[name] , identifier[memory] = keyword[False] , identifier[binary] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[_api_get] (
identifier[url] = literal[string] . identifier[format] ( identifier[name] ),
identifier[params] = identifier[dict] (
identifier[binary] = identifier[binary] ,
identifier[memory] = identifier[memory] ,
),
) | def get_node(self, name, memory=False, binary=False):
"""
An individual node in the RabbitMQ cluster. Set "memory=true" to get
memory statistics, and "binary=true" to get a breakdown of binary
memory use (may be expensive if there are many small binaries in the
system).
"""
return self._api_get(url='/api/nodes/{0}'.format(name), params=dict(binary=binary, memory=memory)) |
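A hedged usage sketch; the client construction below is a guess at the surrounding API (only the endpoint shape and the memory/binary query parameters follow from the method above), and mem_used is one of the node fields the RabbitMQ management plugin usually reports:
client = ManagementClient('localhost:15672', 'guest', 'guest')   # placeholder constructor
stats = client.get_node('rabbit@node-1', memory=True, binary=True)
# issues GET /api/nodes/rabbit@node-1 with memory/binary passed as query parameters
print(stats.get('mem_used'))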
def Validate(self, value):
"""Validate an RDFValue instance.
Args:
value: An RDFValue instance or something which may be used to instantiate
the correct instance.
Raises:
TypeValueError: If the value is not a valid RDFValue instance or the
required type.
Returns:
A Valid RDFValue instance.
"""
# Allow None as a default.
if value is None:
return
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
return self.rdfclass(value)
except rdfvalue.InitializeError:
raise TypeValueError("Value for arg %s should be an %s" %
(self.name, self.rdfclass.__name__))
return value | def function[Validate, parameter[self, value]]:
constant[Validate an RDFValue instance.
Args:
value: An RDFValue instance or something which may be used to instantiate
the correct instance.
Raises:
TypeValueError: If the value is not a valid RDFValue instance or the
required type.
Returns:
A Valid RDFValue instance.
]
if compare[name[value] is constant[None]] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da18fe913c0> begin[:]
<ast.Try object at 0x7da18fe928c0>
return[name[value]] | keyword[def] identifier[Validate] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[self] . identifier[rdfclass] ):
keyword[try] :
keyword[return] identifier[self] . identifier[rdfclass] ( identifier[value] )
keyword[except] identifier[rdfvalue] . identifier[InitializeError] :
keyword[raise] identifier[TypeValueError] ( literal[string] %
( identifier[self] . identifier[name] , identifier[self] . identifier[rdfclass] . identifier[__name__] ))
keyword[return] identifier[value] | def Validate(self, value):
"""Validate an RDFValue instance.
Args:
value: An RDFValue instance or something which may be used to instantiate
the correct instance.
Raises:
TypeValueError: If the value is not a valid RDFValue instance or the
required type.
Returns:
A Valid RDFValue instance.
"""
# Allow None as a default.
if value is None:
return # depends on [control=['if'], data=[]]
if not isinstance(value, self.rdfclass):
# Try to coerce the type to the correct rdf_class.
try:
return self.rdfclass(value) # depends on [control=['try'], data=[]]
except rdfvalue.InitializeError:
raise TypeValueError('Value for arg %s should be an %s' % (self.name, self.rdfclass.__name__)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return value |
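Behaviourally: None is allowed through as a default, an existing instance of rdfclass is returned untouched, and anything else is handed to rdfclass(value), with failures re-raised as TypeValueError. A hedged sketch; the descriptor class name and the RDFURN example are assumptions:
arg = RDFValueType(rdfclass=rdfvalue.RDFURN, name='urn')   # hypothetical descriptor instance
assert arg.Validate(None) is None                          # None accepted as a default
urn = arg.Validate('aff4:/some/path')                      # coerced via rdfclass(value)
assert isinstance(urn, rdfvalue.RDFURN)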
def _logic(self, value=None):
# type: (Any) -> Tuple[Union[bool, None], str]
"""Process the inner logic of the validator.
The validation results are returned as tuple (boolean (true/false), reasontext)
"""
self._validation_result, self._validation_reason = None, 'No reason'
return self._validation_result, self._validation_reason | def function[_logic, parameter[self, value]]:
constant[Process the inner logic of the validator.
The validation results are returned as tuple (boolean (true/false), reasontext)
]
<ast.Tuple object at 0x7da2049605b0> assign[=] tuple[[<ast.Constant object at 0x7da204960580>, <ast.Constant object at 0x7da2049605e0>]]
return[tuple[[<ast.Attribute object at 0x7da204962b60>, <ast.Attribute object at 0x7da2049612a0>]]] | keyword[def] identifier[_logic] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
identifier[self] . identifier[_validation_result] , identifier[self] . identifier[_validation_reason] = keyword[None] , literal[string]
keyword[return] identifier[self] . identifier[_validation_result] , identifier[self] . identifier[_validation_reason] | def _logic(self, value=None):
# type: (Any) -> Tuple[Union[bool, None], str]
'Process the inner logic of the validator.\n\n The validation results are returned as tuple (boolean (true/false), reasontext)\n '
(self._validation_result, self._validation_reason) = (None, 'No reason')
return (self._validation_result, self._validation_reason) |
def write_packages(self, reqs_file):
"""
Dump the packages in the catalog in a requirements file
"""
write_file_lines(reqs_file, ('{}\n'.format(package) for package in self.packages)) | def function[write_packages, parameter[self, reqs_file]]:
constant[
Dump the packages in the catalog in a requirements file
]
call[name[write_file_lines], parameter[name[reqs_file], <ast.GeneratorExp object at 0x7da20c6c5300>]] | keyword[def] identifier[write_packages] ( identifier[self] , identifier[reqs_file] ):
literal[string]
identifier[write_file_lines] ( identifier[reqs_file] ,( literal[string] . identifier[format] ( identifier[package] ) keyword[for] identifier[package] keyword[in] identifier[self] . identifier[packages] )) | def write_packages(self, reqs_file):
"""
Dump the packages in the catalog in a requirements file
"""
write_file_lines(reqs_file, ('{}\n'.format(package) for package in self.packages)) |
def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the app
:type tags: array
Adds the specified application name tags (aliases) to this app.
The current user must be a developer of the app.
"""
if self._dxid is not None:
return dxpy.api.app_add_tags(self._dxid, input_params={"tags": tags}, **kwargs)
else:
return dxpy.api.app_add_tags('app-' + self._name, alias=self._alias,
input_params={"tags": tags}, **kwargs) | def function[add_tags, parameter[self, tags]]:
constant[
:param tags: Tags to add to the app
:type tags: array
Adds the specified application name tags (aliases) to this app.
The current user must be a developer of the app.
]
if compare[name[self]._dxid is_not constant[None]] begin[:]
return[call[name[dxpy].api.app_add_tags, parameter[name[self]._dxid]]] | keyword[def] identifier[add_tags] ( identifier[self] , identifier[tags] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[_dxid] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[dxpy] . identifier[api] . identifier[app_add_tags] ( identifier[self] . identifier[_dxid] , identifier[input_params] ={ literal[string] : identifier[tags] },** identifier[kwargs] )
keyword[else] :
keyword[return] identifier[dxpy] . identifier[api] . identifier[app_add_tags] ( literal[string] + identifier[self] . identifier[_name] , identifier[alias] = identifier[self] . identifier[_alias] ,
identifier[input_params] ={ literal[string] : identifier[tags] },** identifier[kwargs] ) | def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the app
:type tags: array
Adds the specified application name tags (aliases) to this app.
The current user must be a developer of the app.
"""
if self._dxid is not None:
return dxpy.api.app_add_tags(self._dxid, input_params={'tags': tags}, **kwargs) # depends on [control=['if'], data=[]]
else:
return dxpy.api.app_add_tags('app-' + self._name, alias=self._alias, input_params={'tags': tags}, **kwargs) |
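Illustrative aside (hypothetical, dxpy is not used here): the id-versus-name dispatch that add_tags performs, reduced to a plain function so the branch is visible.

def add_tags_request(dxid, name, alias, tags):
    # Address the app by object id when one is cached, otherwise by
    # "app-<name>" plus an alias/version, as the method above does.
    if dxid is not None:
        return ('app_add_tags', dxid, {'tags': tags})
    return ('app_add_tags', 'app-' + name, alias, {'tags': tags})

print(add_tags_request('app-000000000000000000000000', None, None, ['nightly']))
print(add_tags_request(None, 'variant-caller', '1.2.0', ['nightly']))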
def set_cluster_info(self,
disallow_cluster_termination=None,
enable_ganglia_monitoring=None,
datadog_api_token=None,
datadog_app_token=None,
node_bootstrap=None,
master_instance_type=None,
slave_instance_type=None,
min_nodes=None,
max_nodes=None,
slave_request_type=None,
fallback_to_ondemand=None,
node_base_cooldown_period=None,
node_spot_cooldown_period=None,
custom_tags=None,
heterogeneous_config=None,
maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None,
stable_maximum_bid_price_percentage=None,
stable_timeout_for_request=None,
stable_spot_fallback=None,
spot_block_duration=None,
idle_cluster_timeout=None,
disk_count=None,
disk_type=None,
disk_size=None,
root_disk_size=None,
upscaling_config=None,
enable_encryption=None,
customer_ssh_key=None,
cluster_name=None,
force_tunnel=None,
image_uri_overrides=None,
env_name=None,
python_version=None,
r_version=None,
disable_cluster_pause=None,
paused_cluster_timeout_mins=None,
disable_autoscale_node_pause=None,
paused_autoscale_node_timeout_mins=None):
"""
Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
                cluster. It should be stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
"""
self.cluster_info['master_instance_type'] = master_instance_type
self.cluster_info['slave_instance_type'] = slave_instance_type
self.cluster_info['min_nodes'] = min_nodes
self.cluster_info['max_nodes'] = max_nodes
self.cluster_info['cluster_name'] = cluster_name
self.cluster_info['node_bootstrap'] = node_bootstrap
self.cluster_info['disallow_cluster_termination'] = disallow_cluster_termination
self.cluster_info['force_tunnel'] = force_tunnel
self.cluster_info['fallback_to_ondemand'] = fallback_to_ondemand
self.cluster_info['node_base_cooldown_period'] = node_base_cooldown_period
self.cluster_info['node_spot_cooldown_period'] = node_spot_cooldown_period
self.cluster_info['customer_ssh_key'] = customer_ssh_key
if custom_tags and custom_tags.strip():
try:
self.cluster_info['custom_tags'] = json.loads(custom_tags.strip())
except Exception as e:
raise Exception("Invalid JSON string for custom ec2 tags: %s" % e.message)
self.cluster_info['heterogeneous_config'] = heterogeneous_config
self.cluster_info['slave_request_type'] = slave_request_type
self.cluster_info['idle_cluster_timeout'] = idle_cluster_timeout
self.cluster_info['spot_settings'] = {}
self.cluster_info['rootdisk'] = {}
self.cluster_info['rootdisk']['size'] = root_disk_size
self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback)
self.set_spot_block_settings(spot_block_duration)
self.set_data_disk(disk_size, disk_count, disk_type, upscaling_config, enable_encryption)
self.set_monitoring(enable_ganglia_monitoring, datadog_api_token, datadog_app_token)
self.set_internal(image_uri_overrides)
self.set_env_settings(env_name, python_version, r_version)
self.set_start_stop_settings(disable_cluster_pause, paused_cluster_timeout_mins,
disable_autoscale_node_pause, paused_autoscale_node_timeout_mins) | def function[set_cluster_info, parameter[self, disallow_cluster_termination, enable_ganglia_monitoring, datadog_api_token, datadog_app_token, node_bootstrap, master_instance_type, slave_instance_type, min_nodes, max_nodes, slave_request_type, fallback_to_ondemand, node_base_cooldown_period, node_spot_cooldown_period, custom_tags, heterogeneous_config, maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage, stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback, spot_block_duration, idle_cluster_timeout, disk_count, disk_type, disk_size, root_disk_size, upscaling_config, enable_encryption, customer_ssh_key, cluster_name, force_tunnel, image_uri_overrides, env_name, python_version, r_version, disable_cluster_pause, paused_cluster_timeout_mins, disable_autoscale_node_pause, paused_autoscale_node_timeout_mins]]:
constant[
Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
cluster. It should be in stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
]
call[name[self].cluster_info][constant[master_instance_type]] assign[=] name[master_instance_type]
call[name[self].cluster_info][constant[slave_instance_type]] assign[=] name[slave_instance_type]
call[name[self].cluster_info][constant[min_nodes]] assign[=] name[min_nodes]
call[name[self].cluster_info][constant[max_nodes]] assign[=] name[max_nodes]
call[name[self].cluster_info][constant[cluster_name]] assign[=] name[cluster_name]
call[name[self].cluster_info][constant[node_bootstrap]] assign[=] name[node_bootstrap]
call[name[self].cluster_info][constant[disallow_cluster_termination]] assign[=] name[disallow_cluster_termination]
call[name[self].cluster_info][constant[force_tunnel]] assign[=] name[force_tunnel]
call[name[self].cluster_info][constant[fallback_to_ondemand]] assign[=] name[fallback_to_ondemand]
call[name[self].cluster_info][constant[node_base_cooldown_period]] assign[=] name[node_base_cooldown_period]
call[name[self].cluster_info][constant[node_spot_cooldown_period]] assign[=] name[node_spot_cooldown_period]
call[name[self].cluster_info][constant[customer_ssh_key]] assign[=] name[customer_ssh_key]
if <ast.BoolOp object at 0x7da20e9b1ea0> begin[:]
<ast.Try object at 0x7da20e9b1930>
call[name[self].cluster_info][constant[heterogeneous_config]] assign[=] name[heterogeneous_config]
call[name[self].cluster_info][constant[slave_request_type]] assign[=] name[slave_request_type]
call[name[self].cluster_info][constant[idle_cluster_timeout]] assign[=] name[idle_cluster_timeout]
call[name[self].cluster_info][constant[spot_settings]] assign[=] dictionary[[], []]
call[name[self].cluster_info][constant[rootdisk]] assign[=] dictionary[[], []]
call[call[name[self].cluster_info][constant[rootdisk]]][constant[size]] assign[=] name[root_disk_size]
call[name[self].set_spot_instance_settings, parameter[name[maximum_bid_price_percentage], name[timeout_for_request], name[maximum_spot_instance_percentage]]]
call[name[self].set_stable_spot_bid_settings, parameter[name[stable_maximum_bid_price_percentage], name[stable_timeout_for_request], name[stable_spot_fallback]]]
call[name[self].set_spot_block_settings, parameter[name[spot_block_duration]]]
call[name[self].set_data_disk, parameter[name[disk_size], name[disk_count], name[disk_type], name[upscaling_config], name[enable_encryption]]]
call[name[self].set_monitoring, parameter[name[enable_ganglia_monitoring], name[datadog_api_token], name[datadog_app_token]]]
call[name[self].set_internal, parameter[name[image_uri_overrides]]]
call[name[self].set_env_settings, parameter[name[env_name], name[python_version], name[r_version]]]
call[name[self].set_start_stop_settings, parameter[name[disable_cluster_pause], name[paused_cluster_timeout_mins], name[disable_autoscale_node_pause], name[paused_autoscale_node_timeout_mins]]] | keyword[def] identifier[set_cluster_info] ( identifier[self] ,
identifier[disallow_cluster_termination] = keyword[None] ,
identifier[enable_ganglia_monitoring] = keyword[None] ,
identifier[datadog_api_token] = keyword[None] ,
identifier[datadog_app_token] = keyword[None] ,
identifier[node_bootstrap] = keyword[None] ,
identifier[master_instance_type] = keyword[None] ,
identifier[slave_instance_type] = keyword[None] ,
identifier[min_nodes] = keyword[None] ,
identifier[max_nodes] = keyword[None] ,
identifier[slave_request_type] = keyword[None] ,
identifier[fallback_to_ondemand] = keyword[None] ,
identifier[node_base_cooldown_period] = keyword[None] ,
identifier[node_spot_cooldown_period] = keyword[None] ,
identifier[custom_tags] = keyword[None] ,
identifier[heterogeneous_config] = keyword[None] ,
identifier[maximum_bid_price_percentage] = keyword[None] ,
identifier[timeout_for_request] = keyword[None] ,
identifier[maximum_spot_instance_percentage] = keyword[None] ,
identifier[stable_maximum_bid_price_percentage] = keyword[None] ,
identifier[stable_timeout_for_request] = keyword[None] ,
identifier[stable_spot_fallback] = keyword[None] ,
identifier[spot_block_duration] = keyword[None] ,
identifier[idle_cluster_timeout] = keyword[None] ,
identifier[disk_count] = keyword[None] ,
identifier[disk_type] = keyword[None] ,
identifier[disk_size] = keyword[None] ,
identifier[root_disk_size] = keyword[None] ,
identifier[upscaling_config] = keyword[None] ,
identifier[enable_encryption] = keyword[None] ,
identifier[customer_ssh_key] = keyword[None] ,
identifier[cluster_name] = keyword[None] ,
identifier[force_tunnel] = keyword[None] ,
identifier[image_uri_overrides] = keyword[None] ,
identifier[env_name] = keyword[None] ,
identifier[python_version] = keyword[None] ,
identifier[r_version] = keyword[None] ,
identifier[disable_cluster_pause] = keyword[None] ,
identifier[paused_cluster_timeout_mins] = keyword[None] ,
identifier[disable_autoscale_node_pause] = keyword[None] ,
identifier[paused_autoscale_node_timeout_mins] = keyword[None] ):
literal[string]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[master_instance_type]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[slave_instance_type]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[min_nodes]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[max_nodes]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[cluster_name]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[node_bootstrap]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[disallow_cluster_termination]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[force_tunnel]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[fallback_to_ondemand]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[node_base_cooldown_period]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[node_spot_cooldown_period]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[customer_ssh_key]
keyword[if] identifier[custom_tags] keyword[and] identifier[custom_tags] . identifier[strip] ():
keyword[try] :
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[json] . identifier[loads] ( identifier[custom_tags] . identifier[strip] ())
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[Exception] ( literal[string] % identifier[e] . identifier[message] )
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[heterogeneous_config]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[slave_request_type]
identifier[self] . identifier[cluster_info] [ literal[string] ]= identifier[idle_cluster_timeout]
identifier[self] . identifier[cluster_info] [ literal[string] ]={}
identifier[self] . identifier[cluster_info] [ literal[string] ]={}
identifier[self] . identifier[cluster_info] [ literal[string] ][ literal[string] ]= identifier[root_disk_size]
identifier[self] . identifier[set_spot_instance_settings] ( identifier[maximum_bid_price_percentage] , identifier[timeout_for_request] , identifier[maximum_spot_instance_percentage] )
identifier[self] . identifier[set_stable_spot_bid_settings] ( identifier[stable_maximum_bid_price_percentage] , identifier[stable_timeout_for_request] , identifier[stable_spot_fallback] )
identifier[self] . identifier[set_spot_block_settings] ( identifier[spot_block_duration] )
identifier[self] . identifier[set_data_disk] ( identifier[disk_size] , identifier[disk_count] , identifier[disk_type] , identifier[upscaling_config] , identifier[enable_encryption] )
identifier[self] . identifier[set_monitoring] ( identifier[enable_ganglia_monitoring] , identifier[datadog_api_token] , identifier[datadog_app_token] )
identifier[self] . identifier[set_internal] ( identifier[image_uri_overrides] )
identifier[self] . identifier[set_env_settings] ( identifier[env_name] , identifier[python_version] , identifier[r_version] )
identifier[self] . identifier[set_start_stop_settings] ( identifier[disable_cluster_pause] , identifier[paused_cluster_timeout_mins] ,
identifier[disable_autoscale_node_pause] , identifier[paused_autoscale_node_timeout_mins] ) | def set_cluster_info(self, disallow_cluster_termination=None, enable_ganglia_monitoring=None, datadog_api_token=None, datadog_app_token=None, node_bootstrap=None, master_instance_type=None, slave_instance_type=None, min_nodes=None, max_nodes=None, slave_request_type=None, fallback_to_ondemand=None, node_base_cooldown_period=None, node_spot_cooldown_period=None, custom_tags=None, heterogeneous_config=None, maximum_bid_price_percentage=None, timeout_for_request=None, maximum_spot_instance_percentage=None, stable_maximum_bid_price_percentage=None, stable_timeout_for_request=None, stable_spot_fallback=None, spot_block_duration=None, idle_cluster_timeout=None, disk_count=None, disk_type=None, disk_size=None, root_disk_size=None, upscaling_config=None, enable_encryption=None, customer_ssh_key=None, cluster_name=None, force_tunnel=None, image_uri_overrides=None, env_name=None, python_version=None, r_version=None, disable_cluster_pause=None, paused_cluster_timeout_mins=None, disable_autoscale_node_pause=None, paused_autoscale_node_timeout_mins=None):
"""
Args:
`disallow_cluster_termination`: Set this to True if you don't want
qubole to auto-terminate idle clusters. Use this option with
extreme caution.
`enable_ganglia_monitoring`: Set this to True if you want to enable
ganglia monitoring for the cluster.
`node_bootstrap`: name of the node bootstrap file for this
cluster. It should be in stored in S3 at
<your-default-location>/scripts/hadoop/
`master_instance_type`: The instance type to use for the Hadoop master
node.
`slave_instance_type`: The instance type to use for the Hadoop slave
nodes.
`min_nodes`: Number of nodes to start the cluster with.
`max_nodes`: Maximum number of nodes the cluster may be auto-scaled up
to.
`slave_request_type`: Purchasing option for slave instances.
Valid values: "ondemand", "hybrid", "spot".
`fallback_to_ondemand`: Fallback to on-demand nodes if spot nodes could not be
obtained. Valid only if slave_request_type is 'spot'.
`node_base_cooldown_period`: Time for which an on-demand node waits before termination (Unit: minutes)
`node_spot_cooldown_period`: Time for which a spot node waits before termination (Unit: minutes)
`maximum_bid_price_percentage`: ( Valid only when `slave_request_type`
is hybrid or spot.) Maximum value to bid for spot
instances, expressed as a percentage of the base price
for the slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
`stable_maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`stable_timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`stable_spot_fallback`: Whether to fallback to on-demand instances for
stable nodes if spot instances are not available
`spot_block_duration`: Time for which the spot block instance is provisioned (Unit:
minutes)
`disk_count`: Number of EBS volumes to attach
to each instance of the cluster.
`disk_type`: Type of the EBS volume. Valid
values are 'standard' (magnetic) and 'ssd'.
`disk_size`: Size of each EBS volume, in GB.
`root_disk_size`: Size of root volume, in GB.
`enable_encryption`: Encrypt the ephemeral drives on the instance.
`customer_ssh_key`: SSH key to use to login to the instances.
`idle_cluster_timeout`: The buffer time (range in 0-6 hrs) after a cluster goes idle
and gets terminated, given cluster auto termination is on and no cluster specific
timeout has been set (default is 2 hrs)
`heterogeneous_config` : Configuring heterogeneous nodes in Hadoop 2 and Spark clusters.
It implies that slave nodes can be of different instance types
`custom_tags` : Custom tags to be set on all instances
of the cluster. Specified as JSON object (key-value pairs)
`datadog_api_token` : Specify the Datadog API token to use the Datadog monitoring service
`datadog_app_token` : Specify the Datadog APP token to use the Datadog monitoring service
`image_uri_overrides` : Override the image name provided
`env_name`: Name of python and R environment. (For Spark clusters)
`python_version`: Version of Python for environment. (For Spark clusters)
`r_version`: Version of R for environment. (For Spark clusters)
`disable_cluster_pause`: Disable cluster pause
`paused_cluster_timeout_mins`: Paused cluster timeout in mins
`disable_autoscale_node_pause`: Disable autoscale node pause
`paused_autoscale_node_timeout_mins`: Paused autoscale node timeout in mins
Doc: For getting details about arguments
http://docs.qubole.com/en/latest/rest-api/cluster_api/create-new-cluster.html#parameters
"""
self.cluster_info['master_instance_type'] = master_instance_type
self.cluster_info['slave_instance_type'] = slave_instance_type
self.cluster_info['min_nodes'] = min_nodes
self.cluster_info['max_nodes'] = max_nodes
self.cluster_info['cluster_name'] = cluster_name
self.cluster_info['node_bootstrap'] = node_bootstrap
self.cluster_info['disallow_cluster_termination'] = disallow_cluster_termination
self.cluster_info['force_tunnel'] = force_tunnel
self.cluster_info['fallback_to_ondemand'] = fallback_to_ondemand
self.cluster_info['node_base_cooldown_period'] = node_base_cooldown_period
self.cluster_info['node_spot_cooldown_period'] = node_spot_cooldown_period
self.cluster_info['customer_ssh_key'] = customer_ssh_key
if custom_tags and custom_tags.strip():
try:
self.cluster_info['custom_tags'] = json.loads(custom_tags.strip()) # depends on [control=['try'], data=[]]
except Exception as e:
raise Exception('Invalid JSON string for custom ec2 tags: %s' % e.message) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
self.cluster_info['heterogeneous_config'] = heterogeneous_config
self.cluster_info['slave_request_type'] = slave_request_type
self.cluster_info['idle_cluster_timeout'] = idle_cluster_timeout
self.cluster_info['spot_settings'] = {}
self.cluster_info['rootdisk'] = {}
self.cluster_info['rootdisk']['size'] = root_disk_size
self.set_spot_instance_settings(maximum_bid_price_percentage, timeout_for_request, maximum_spot_instance_percentage)
self.set_stable_spot_bid_settings(stable_maximum_bid_price_percentage, stable_timeout_for_request, stable_spot_fallback)
self.set_spot_block_settings(spot_block_duration)
self.set_data_disk(disk_size, disk_count, disk_type, upscaling_config, enable_encryption)
self.set_monitoring(enable_ganglia_monitoring, datadog_api_token, datadog_app_token)
self.set_internal(image_uri_overrides)
self.set_env_settings(env_name, python_version, r_version)
self.set_start_stop_settings(disable_cluster_pause, paused_cluster_timeout_mins, disable_autoscale_node_pause, paused_autoscale_node_timeout_mins) |
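Illustrative aside (stdlib-only, tag values made up): the custom_tags handling above reduced to a standalone helper, showing the parse-or-raise behaviour for the JSON key/value string.

import json

def parse_custom_tags(custom_tags):
    # A JSON object string of key/value pairs is parsed; anything that does
    # not decode raises with the decoder's message, as in set_cluster_info.
    if not (custom_tags and custom_tags.strip()):
        return None
    try:
        return json.loads(custom_tags.strip())
    except ValueError as e:
        raise Exception('Invalid JSON string for custom ec2 tags: %s' % e)

print(parse_custom_tags('{"team": "data", "env": "prod"}'))
print(parse_custom_tags('   '))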
def _valid_numpy_subdtype(x, numpy_types):
"""
Is any dtype from numpy_types superior to the dtype of x?
"""
# If any of the types given in numpy_types is understood as numpy.generic,
# all possible x will be considered valid. This is probably unwanted.
for t in numpy_types:
assert not np.issubdtype(np.generic, t)
return any(np.issubdtype(x.dtype, t) for t in numpy_types) | def function[_valid_numpy_subdtype, parameter[x, numpy_types]]:
constant[
Is any dtype from numpy_types superior to the dtype of x?
]
for taget[name[t]] in starred[name[numpy_types]] begin[:]
assert[<ast.UnaryOp object at 0x7da204347a00>]
return[call[name[any], parameter[<ast.GeneratorExp object at 0x7da204346710>]]] | keyword[def] identifier[_valid_numpy_subdtype] ( identifier[x] , identifier[numpy_types] ):
literal[string]
keyword[for] identifier[t] keyword[in] identifier[numpy_types] :
keyword[assert] keyword[not] identifier[np] . identifier[issubdtype] ( identifier[np] . identifier[generic] , identifier[t] )
keyword[return] identifier[any] ( identifier[np] . identifier[issubdtype] ( identifier[x] . identifier[dtype] , identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[numpy_types] ) | def _valid_numpy_subdtype(x, numpy_types):
"""
Is any dtype from numpy_types superior to the dtype of x?
"""
# If any of the types given in numpy_types is understood as numpy.generic,
# all possible x will be considered valid. This is probably unwanted.
for t in numpy_types:
assert not np.issubdtype(np.generic, t) # depends on [control=['for'], data=['t']]
return any((np.issubdtype(x.dtype, t) for t in numpy_types)) |
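Illustrative aside (requires only numpy; the array values are made up): what np.issubdtype reports for a float array, which is the per-type test the helper above folds together with any().

import numpy as np

x = np.array([1.0, 2.0, 3.0])

# np.issubdtype(a, b) asks whether dtype a sits under b in numpy's type
# hierarchy; the helper above applies that test against each allowed type.
print(np.issubdtype(x.dtype, np.floating))                                 # True
print(np.issubdtype(x.dtype, np.integer))                                  # False
print(any(np.issubdtype(x.dtype, t) for t in (np.integer, np.floating)))   # True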
def run(self, timeout=None):
"""
Run the map/reduce operation synchronously. Returns a list of
results, or a list of links if the last phase is a link phase.
Shortcut for :meth:`riak.client.RiakClient.mapred`.
:param timeout: Timeout in milliseconds
:type timeout: integer, None
:rtype: list
"""
query, link_results_flag = self._normalize_query()
try:
result = self._client.mapred(self._inputs, query, timeout)
except riak.RiakError as e:
if 'worker_startup_failed' in e.value:
for phase in self._phases:
if phase._language == 'erlang':
if type(phase._function) is str:
raise riak.RiakError(
'May have tried erlang strfun '
'when not allowed\n'
'original error: ' + e.value)
raise e
# If the last phase is NOT a link phase, then return the result.
if not (link_results_flag or
isinstance(self._phases[-1], RiakLinkPhase)):
return result
# If there are no results, then return an empty list.
if result is None:
return []
# Otherwise, if the last phase IS a link phase, then convert the
# results to link tuples.
a = []
for r in result:
if (len(r) == 2):
link = (r[0], r[1], None)
elif (len(r) == 3):
link = (r[0], r[1], r[2])
a.append(link)
return a | def function[run, parameter[self, timeout]]:
constant[
Run the map/reduce operation synchronously. Returns a list of
results, or a list of links if the last phase is a link phase.
Shortcut for :meth:`riak.client.RiakClient.mapred`.
:param timeout: Timeout in milliseconds
:type timeout: integer, None
:rtype: list
]
<ast.Tuple object at 0x7da20c7cb4c0> assign[=] call[name[self]._normalize_query, parameter[]]
<ast.Try object at 0x7da20c7c99c0>
if <ast.UnaryOp object at 0x7da20c7c9810> begin[:]
return[name[result]]
if compare[name[result] is constant[None]] begin[:]
return[list[[]]]
variable[a] assign[=] list[[]]
for taget[name[r]] in starred[name[result]] begin[:]
if compare[call[name[len], parameter[name[r]]] equal[==] constant[2]] begin[:]
variable[link] assign[=] tuple[[<ast.Subscript object at 0x7da20c992290>, <ast.Subscript object at 0x7da18f722020>, <ast.Constant object at 0x7da18f7234c0>]]
call[name[a].append, parameter[name[link]]]
return[name[a]] | keyword[def] identifier[run] ( identifier[self] , identifier[timeout] = keyword[None] ):
literal[string]
identifier[query] , identifier[link_results_flag] = identifier[self] . identifier[_normalize_query] ()
keyword[try] :
identifier[result] = identifier[self] . identifier[_client] . identifier[mapred] ( identifier[self] . identifier[_inputs] , identifier[query] , identifier[timeout] )
keyword[except] identifier[riak] . identifier[RiakError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[e] . identifier[value] :
keyword[for] identifier[phase] keyword[in] identifier[self] . identifier[_phases] :
keyword[if] identifier[phase] . identifier[_language] == literal[string] :
keyword[if] identifier[type] ( identifier[phase] . identifier[_function] ) keyword[is] identifier[str] :
keyword[raise] identifier[riak] . identifier[RiakError] (
literal[string]
literal[string]
literal[string] + identifier[e] . identifier[value] )
keyword[raise] identifier[e]
keyword[if] keyword[not] ( identifier[link_results_flag] keyword[or]
identifier[isinstance] ( identifier[self] . identifier[_phases] [- literal[int] ], identifier[RiakLinkPhase] )):
keyword[return] identifier[result]
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[return] []
identifier[a] =[]
keyword[for] identifier[r] keyword[in] identifier[result] :
keyword[if] ( identifier[len] ( identifier[r] )== literal[int] ):
identifier[link] =( identifier[r] [ literal[int] ], identifier[r] [ literal[int] ], keyword[None] )
keyword[elif] ( identifier[len] ( identifier[r] )== literal[int] ):
identifier[link] =( identifier[r] [ literal[int] ], identifier[r] [ literal[int] ], identifier[r] [ literal[int] ])
identifier[a] . identifier[append] ( identifier[link] )
keyword[return] identifier[a] | def run(self, timeout=None):
"""
Run the map/reduce operation synchronously. Returns a list of
results, or a list of links if the last phase is a link phase.
Shortcut for :meth:`riak.client.RiakClient.mapred`.
:param timeout: Timeout in milliseconds
:type timeout: integer, None
:rtype: list
"""
(query, link_results_flag) = self._normalize_query()
try:
result = self._client.mapred(self._inputs, query, timeout) # depends on [control=['try'], data=[]]
except riak.RiakError as e:
if 'worker_startup_failed' in e.value:
for phase in self._phases:
if phase._language == 'erlang':
if type(phase._function) is str:
raise riak.RiakError('May have tried erlang strfun when not allowed\noriginal error: ' + e.value) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['phase']] # depends on [control=['if'], data=[]]
raise e # depends on [control=['except'], data=['e']]
# If the last phase is NOT a link phase, then return the result.
if not (link_results_flag or isinstance(self._phases[-1], RiakLinkPhase)):
return result # depends on [control=['if'], data=[]]
# If there are no results, then return an empty list.
if result is None:
return [] # depends on [control=['if'], data=[]]
# Otherwise, if the last phase IS a link phase, then convert the
# results to link tuples.
a = []
for r in result:
if len(r) == 2:
link = (r[0], r[1], None) # depends on [control=['if'], data=[]]
elif len(r) == 3:
link = (r[0], r[1], r[2]) # depends on [control=['if'], data=[]]
a.append(link) # depends on [control=['for'], data=['r']]
return a |
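Illustrative aside (standalone; the sample rows are made up): the link-phase normalization at the end of run(), isolated so the 2-tuple/3-tuple padding is easy to see.

def to_link_tuples(result):
    # Pad 2-element rows with None so every link comes back as a
    # (bucket, key, tag) triple, as the loop above does.
    links = []
    for r in result or []:
        if len(r) == 2:
            links.append((r[0], r[1], None))
        elif len(r) == 3:
            links.append((r[0], r[1], r[2]))
    return links

print(to_link_tuples([['people', 'alice'], ['people', 'bob', 'friend']]))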
def list_subnets(auth=None, **kwargs):
'''
List subnets
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.list_subnets
salt '*' neutronng.list_subnets \
filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}'
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.list_subnets(**kwargs) | def function[list_subnets, parameter[auth]]:
constant[
List subnets
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.list_subnets
salt '*' neutronng.list_subnets filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}'
]
variable[cloud] assign[=] call[name[get_operator_cloud], parameter[name[auth]]]
variable[kwargs] assign[=] call[name[_clean_kwargs], parameter[]]
return[call[name[cloud].list_subnets, parameter[]]] | keyword[def] identifier[list_subnets] ( identifier[auth] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[cloud] = identifier[get_operator_cloud] ( identifier[auth] )
identifier[kwargs] = identifier[_clean_kwargs] (** identifier[kwargs] )
keyword[return] identifier[cloud] . identifier[list_subnets] (** identifier[kwargs] ) | def list_subnets(auth=None, **kwargs):
"""
List subnets
filters
A Python dictionary of filter conditions to push down
CLI Example:
.. code-block:: bash
salt '*' neutronng.list_subnets
salt '*' neutronng.list_subnets filters='{"tenant_id": "1dcac318a83b4610b7a7f7ba01465548"}'
"""
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.list_subnets(**kwargs) |
def removeChild(self, child_id):
"""Remove a child from current workitem
:param child_id: the child workitem id/number
(integer or equivalent string)
"""
self.log.debug("Try to remove a child <Workitem %s> from current "
"<Workitem %s>",
child_id,
self)
self._removeChildren([child_id])
self.log.info("Successfully remove a child <Workitem %s> from "
"current <Workitem %s>",
child_id,
self) | def function[removeChild, parameter[self, child_id]]:
constant[Remove a child from current workitem
:param child_id: the child workitem id/number
(integer or equivalent string)
]
call[name[self].log.debug, parameter[constant[Try to remove a child <Workitem %s> from current <Workitem %s>], name[child_id], name[self]]]
call[name[self]._removeChildren, parameter[list[[<ast.Name object at 0x7da2044c3a60>]]]]
call[name[self].log.info, parameter[constant[Successfully remove a child <Workitem %s> from current <Workitem %s>], name[child_id], name[self]]] | keyword[def] identifier[removeChild] ( identifier[self] , identifier[child_id] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] ( literal[string]
literal[string] ,
identifier[child_id] ,
identifier[self] )
identifier[self] . identifier[_removeChildren] ([ identifier[child_id] ])
identifier[self] . identifier[log] . identifier[info] ( literal[string]
literal[string] ,
identifier[child_id] ,
identifier[self] ) | def removeChild(self, child_id):
"""Remove a child from current workitem
:param child_id: the child workitem id/number
(integer or equivalent string)
"""
self.log.debug('Try to remove a child <Workitem %s> from current <Workitem %s>', child_id, self)
self._removeChildren([child_id])
self.log.info('Successfully remove a child <Workitem %s> from current <Workitem %s>', child_id, self) |
def recreate_relationship(self, attribute_name, key):
'''
Recreates one-to-one relationship
'''
iterable = self.record_keeper.foreign_to_foreign_map["banner_link_page"] # noqa
for foreign_page_id, linked_page_foreign_id in iteritems(iterable):
# get local banner page
local_page_id = self.record_keeper.get_local_page(foreign_page_id)
local_page = Page.objects.get(id=local_page_id).specific
# get local linked page
local_id = self.record_keeper.get_local_page(
linked_page_foreign_id)
linked_page = Page.objects.get(id=local_id).specific
# link the two together
setattr(local_page, attribute_name, linked_page)
# TODO: review publishing and saving revisions
local_page.save_revision().publish() | def function[recreate_relationship, parameter[self, attribute_name, key]]:
constant[
Recreates one-to-one relationship
]
variable[iterable] assign[=] call[name[self].record_keeper.foreign_to_foreign_map][constant[banner_link_page]]
for taget[tuple[[<ast.Name object at 0x7da1b05fb160>, <ast.Name object at 0x7da1b05fa8f0>]]] in starred[call[name[iteritems], parameter[name[iterable]]]] begin[:]
variable[local_page_id] assign[=] call[name[self].record_keeper.get_local_page, parameter[name[foreign_page_id]]]
variable[local_page] assign[=] call[name[Page].objects.get, parameter[]].specific
variable[local_id] assign[=] call[name[self].record_keeper.get_local_page, parameter[name[linked_page_foreign_id]]]
variable[linked_page] assign[=] call[name[Page].objects.get, parameter[]].specific
call[name[setattr], parameter[name[local_page], name[attribute_name], name[linked_page]]]
call[call[name[local_page].save_revision, parameter[]].publish, parameter[]] | keyword[def] identifier[recreate_relationship] ( identifier[self] , identifier[attribute_name] , identifier[key] ):
literal[string]
identifier[iterable] = identifier[self] . identifier[record_keeper] . identifier[foreign_to_foreign_map] [ literal[string] ]
keyword[for] identifier[foreign_page_id] , identifier[linked_page_foreign_id] keyword[in] identifier[iteritems] ( identifier[iterable] ):
identifier[local_page_id] = identifier[self] . identifier[record_keeper] . identifier[get_local_page] ( identifier[foreign_page_id] )
identifier[local_page] = identifier[Page] . identifier[objects] . identifier[get] ( identifier[id] = identifier[local_page_id] ). identifier[specific]
identifier[local_id] = identifier[self] . identifier[record_keeper] . identifier[get_local_page] (
identifier[linked_page_foreign_id] )
identifier[linked_page] = identifier[Page] . identifier[objects] . identifier[get] ( identifier[id] = identifier[local_id] ). identifier[specific]
identifier[setattr] ( identifier[local_page] , identifier[attribute_name] , identifier[linked_page] )
identifier[local_page] . identifier[save_revision] (). identifier[publish] () | def recreate_relationship(self, attribute_name, key):
"""
Recreates one-to-one relationship
"""
iterable = self.record_keeper.foreign_to_foreign_map['banner_link_page'] # noqa
for (foreign_page_id, linked_page_foreign_id) in iteritems(iterable):
# get local banner page
local_page_id = self.record_keeper.get_local_page(foreign_page_id)
local_page = Page.objects.get(id=local_page_id).specific
# get local linked page
local_id = self.record_keeper.get_local_page(linked_page_foreign_id)
linked_page = Page.objects.get(id=local_id).specific
# link the two together
setattr(local_page, attribute_name, linked_page)
# TODO: review publishing and saving revisions
local_page.save_revision().publish() # depends on [control=['for'], data=[]] |
def get_sub_comp_info(source_info, comp):
"""Build and return information about a sub-component for a particular selection
"""
sub_comps = source_info.get('components', None)
if sub_comps is None:
return source_info.copy()
moving = source_info.get('moving', False)
selection_dependent = source_info.get('selection_dependent', False)
if selection_dependent:
key = comp.make_key('{ebin_name}_{evtype_name}')
elif moving:
key = "zmax%i" % comp.zmax
ret_dict = source_info.copy()
ret_dict.update(sub_comps[key])
return ret_dict | def function[get_sub_comp_info, parameter[source_info, comp]]:
constant[Build and return information about a sub-component for a particular selection
]
variable[sub_comps] assign[=] call[name[source_info].get, parameter[constant[components], constant[None]]]
if compare[name[sub_comps] is constant[None]] begin[:]
return[call[name[source_info].copy, parameter[]]]
variable[moving] assign[=] call[name[source_info].get, parameter[constant[moving], constant[False]]]
variable[selection_dependent] assign[=] call[name[source_info].get, parameter[constant[selection_dependent], constant[False]]]
if name[selection_dependent] begin[:]
variable[key] assign[=] call[name[comp].make_key, parameter[constant[{ebin_name}_{evtype_name}]]]
variable[ret_dict] assign[=] call[name[source_info].copy, parameter[]]
call[name[ret_dict].update, parameter[call[name[sub_comps]][name[key]]]]
return[name[ret_dict]] | keyword[def] identifier[get_sub_comp_info] ( identifier[source_info] , identifier[comp] ):
literal[string]
identifier[sub_comps] = identifier[source_info] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[sub_comps] keyword[is] keyword[None] :
keyword[return] identifier[source_info] . identifier[copy] ()
identifier[moving] = identifier[source_info] . identifier[get] ( literal[string] , keyword[False] )
identifier[selection_dependent] = identifier[source_info] . identifier[get] ( literal[string] , keyword[False] )
keyword[if] identifier[selection_dependent] :
identifier[key] = identifier[comp] . identifier[make_key] ( literal[string] )
keyword[elif] identifier[moving] :
identifier[key] = literal[string] % identifier[comp] . identifier[zmax]
identifier[ret_dict] = identifier[source_info] . identifier[copy] ()
identifier[ret_dict] . identifier[update] ( identifier[sub_comps] [ identifier[key] ])
keyword[return] identifier[ret_dict] | def get_sub_comp_info(source_info, comp):
"""Build and return information about a sub-component for a particular selection
"""
sub_comps = source_info.get('components', None)
if sub_comps is None:
return source_info.copy() # depends on [control=['if'], data=[]]
moving = source_info.get('moving', False)
selection_dependent = source_info.get('selection_dependent', False)
if selection_dependent:
key = comp.make_key('{ebin_name}_{evtype_name}') # depends on [control=['if'], data=[]]
elif moving:
key = 'zmax%i' % comp.zmax # depends on [control=['if'], data=[]]
ret_dict = source_info.copy()
ret_dict.update(sub_comps[key])
return ret_dict |
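Illustrative aside (keys and file names are invented): the copy-then-overlay merge that get_sub_comp_info performs once it has picked a sub-component key.

source_info = {
    'model': 'galprop',
    'moving': True,
    'components': {
        'zmax100': {'mapcube': 'mc_zmax100.fits'},
        'zmax105': {'mapcube': 'mc_zmax105.fits'},
    },
}

# Copy the parent description, then overlay the sub-component block whose
# key matches this selection (the key is built as in the zmax branch above).
key = 'zmax%i' % 105
merged = source_info.copy()
merged.update(source_info['components'][key])
print(merged['model'], merged['mapcube'])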
def remove(self,
entity_id,
property_uri,
value):
        Method removes a triple for the given subject.
Args:
entity_id(string): Fedora Object ID, ideally URI of the subject
property_uri(string):
value(string):
Return:
boolean: True if triple was removed from the object
"""
if not entity_id.startswith("http"):
entity_uri = urllib.parse.urljoin(self.base_url, entity_id)
else:
entity_uri = entity_id
sparql_template = Template("""$prefix
DELETE {
<$entity> $prop_name $value_str
} WHERE {
<$entity> $prop_name $value_str
}""")
sparql = sparql_template.substitute(
prefix=build_prefixes(self.namespaces),
entity=entity_uri,
prop_name=property_uri,
value_str=self.__value_format__(value))
delete_property_request = urllib.request.Request(
entity_uri,
data=sparql.encode(),
method='PATCH',
headers={'Content-Type': 'application/sparql-update'})
response = urllib.request.urlopen(delete_property_request)
if response.code < 400:
return True
return False | def function[remove, parameter[self, entity_id, property_uri, value]]:
constant[Method removes a triple for the given/subject.
Args:
entity_id(string): Fedora Object ID, ideally URI of the subject
property_uri(string):
value(string):
Return:
boolean: True if triple was removed from the object
]
if <ast.UnaryOp object at 0x7da1b13208e0> begin[:]
variable[entity_uri] assign[=] call[name[urllib].parse.urljoin, parameter[name[self].base_url, name[entity_id]]]
variable[sparql_template] assign[=] call[name[Template], parameter[constant[$prefix
DELETE {
<$entity> $prop_name $value_str
} WHERE {
<$entity> $prop_name $value_str
}]]]
variable[sparql] assign[=] call[name[sparql_template].substitute, parameter[]]
variable[delete_property_request] assign[=] call[name[urllib].request.Request, parameter[name[entity_uri]]]
variable[response] assign[=] call[name[urllib].request.urlopen, parameter[name[delete_property_request]]]
if compare[name[response].code less[<] constant[400]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[remove] ( identifier[self] ,
identifier[entity_id] ,
identifier[property_uri] ,
identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[entity_id] . identifier[startswith] ( literal[string] ):
identifier[entity_uri] = identifier[urllib] . identifier[parse] . identifier[urljoin] ( identifier[self] . identifier[base_url] , identifier[entity_id] )
keyword[else] :
identifier[entity_uri] = identifier[entity_id]
identifier[sparql_template] = identifier[Template] ( literal[string] )
identifier[sparql] = identifier[sparql_template] . identifier[substitute] (
identifier[prefix] = identifier[build_prefixes] ( identifier[self] . identifier[namespaces] ),
identifier[entity] = identifier[entity_uri] ,
identifier[prop_name] = identifier[property_uri] ,
identifier[value_str] = identifier[self] . identifier[__value_format__] ( identifier[value] ))
identifier[delete_property_request] = identifier[urllib] . identifier[request] . identifier[Request] (
identifier[entity_uri] ,
identifier[data] = identifier[sparql] . identifier[encode] (),
identifier[method] = literal[string] ,
identifier[headers] ={ literal[string] : literal[string] })
identifier[response] = identifier[urllib] . identifier[request] . identifier[urlopen] ( identifier[delete_property_request] )
keyword[if] identifier[response] . identifier[code] < literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False] | def remove(self, entity_id, property_uri, value):
"""Method removes a triple for the given/subject.
Args:
entity_id(string): Fedora Object ID, ideally URI of the subject
property_uri(string):
value(string):
Return:
boolean: True if triple was removed from the object
"""
if not entity_id.startswith('http'):
entity_uri = urllib.parse.urljoin(self.base_url, entity_id) # depends on [control=['if'], data=[]]
else:
entity_uri = entity_id
sparql_template = Template('$prefix\n DELETE {\n <$entity> $prop_name $value_str\n } WHERE {\n <$entity> $prop_name $value_str\n }')
sparql = sparql_template.substitute(prefix=build_prefixes(self.namespaces), entity=entity_uri, prop_name=property_uri, value_str=self.__value_format__(value))
delete_property_request = urllib.request.Request(entity_uri, data=sparql.encode(), method='PATCH', headers={'Content-Type': 'application/sparql-update'})
response = urllib.request.urlopen(delete_property_request)
if response.code < 400:
return True # depends on [control=['if'], data=[]]
return False |
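Illustrative aside (stdlib-only; prefix, subject, predicate, and object are sample values): rendering the DELETE/WHERE pattern that remove() PATCHes to the repository, so the substituted SPARQL is visible.

from string import Template

sparql_template = Template("""$prefix
DELETE {
  <$entity> $prop_name $value_str
} WHERE {
  <$entity> $prop_name $value_str
}""")

print(sparql_template.substitute(
    prefix='PREFIX dc: <http://purl.org/dc/elements/1.1/>',
    entity='http://localhost:8080/rest/objects/1',
    prop_name='dc:title',
    value_str='"Old title"'))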
def diff_values(value_a, value_b, raw=False):
"""Returns a human-readable diff between two values
:param value_a: First value to compare
:param value_b: Second value to compare
:param raw: True to compare the raw values, e.g. UIDs
:returns a list of diff tuples
"""
if not raw:
value_a = _process_value(value_a)
value_b = _process_value(value_b)
# No changes
if value_a == value_b:
return None
diffs = []
# N.B.: the choice for the tuple data structure is to enable in the future
# more granular diffs, e.g. the changed values within a dictionary etc.
diffs.append((value_a, value_b))
return diffs | def function[diff_values, parameter[value_a, value_b, raw]]:
constant[Returns a human-readable diff between two values
:param value_a: First value to compare
:param value_b: Second value to compare
:param raw: True to compare the raw values, e.g. UIDs
:returns a list of diff tuples
]
if <ast.UnaryOp object at 0x7da204963520> begin[:]
variable[value_a] assign[=] call[name[_process_value], parameter[name[value_a]]]
variable[value_b] assign[=] call[name[_process_value], parameter[name[value_b]]]
if compare[name[value_a] equal[==] name[value_b]] begin[:]
return[constant[None]]
variable[diffs] assign[=] list[[]]
call[name[diffs].append, parameter[tuple[[<ast.Name object at 0x7da204962920>, <ast.Name object at 0x7da2049638b0>]]]]
return[name[diffs]] | keyword[def] identifier[diff_values] ( identifier[value_a] , identifier[value_b] , identifier[raw] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[raw] :
identifier[value_a] = identifier[_process_value] ( identifier[value_a] )
identifier[value_b] = identifier[_process_value] ( identifier[value_b] )
keyword[if] identifier[value_a] == identifier[value_b] :
keyword[return] keyword[None]
identifier[diffs] =[]
identifier[diffs] . identifier[append] (( identifier[value_a] , identifier[value_b] ))
keyword[return] identifier[diffs] | def diff_values(value_a, value_b, raw=False):
"""Returns a human-readable diff between two values
:param value_a: First value to compare
:param value_b: Second value to compare
:param raw: True to compare the raw values, e.g. UIDs
:returns a list of diff tuples
"""
if not raw:
value_a = _process_value(value_a)
value_b = _process_value(value_b) # depends on [control=['if'], data=[]]
# No changes
if value_a == value_b:
return None # depends on [control=['if'], data=[]]
diffs = []
# N.B.: the choice for the tuple data structure is to enable in the future
# more granular diffs, e.g. the changed values within a dictionary etc.
diffs.append((value_a, value_b))
return diffs |
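Illustrative aside (raw-value path only; the sample strings are made up): the diff-tuple shape that diff_values returns.

def diff_values_demo(value_a, value_b):
    # None when the values match, otherwise a list holding one (old, new)
    # tuple, matching the structure built above.
    if value_a == value_b:
        return None
    return [(value_a, value_b)]

print(diff_values_demo('draft', 'draft'))       # None
print(diff_values_demo('draft', 'published'))   # [('draft', 'published')]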
def export_mesh(mesh, file_obj, file_type=None, **kwargs):
"""
    Export a Trimesh object to a file-like object, or to a filename
Parameters
---------
file_obj : str, file-like
Where should mesh be exported to
file_type : str or None
Represents file type (eg: 'stl')
Returns
----------
exported : bytes or str
Result of exporter
"""
# if we opened a file object in this function
# we will want to close it when we're done
was_opened = False
if util.is_string(file_obj):
if file_type is None:
file_type = (str(file_obj).split('.')[-1]).lower()
if file_type in _mesh_exporters:
was_opened = True
file_obj = open(file_obj, 'wb')
file_type = str(file_type).lower()
if not (file_type in _mesh_exporters):
        raise ValueError('%s exporter not available!' % file_type)
if isinstance(mesh, (list, tuple, set, np.ndarray)):
faces = 0
for m in mesh:
faces += len(m.faces)
log.debug('Exporting %d meshes with a total of %d faces as %s',
len(mesh), faces, file_type.upper())
else:
log.debug('Exporting %d faces as %s', len(mesh.faces),
file_type.upper())
export = _mesh_exporters[file_type](mesh, **kwargs)
if hasattr(file_obj, 'write'):
result = util.write_encoded(file_obj, export)
else:
result = export
if was_opened:
file_obj.close()
return result | def function[export_mesh, parameter[mesh, file_obj, file_type]]:
constant[
Export a Trimesh object to a file- like object, or to a filename
Parameters
---------
file_obj : str, file-like
Where should mesh be exported to
file_type : str or None
Represents file type (eg: 'stl')
Returns
----------
exported : bytes or str
Result of exporter
]
variable[was_opened] assign[=] constant[False]
if call[name[util].is_string, parameter[name[file_obj]]] begin[:]
if compare[name[file_type] is constant[None]] begin[:]
variable[file_type] assign[=] call[call[call[call[name[str], parameter[name[file_obj]]].split, parameter[constant[.]]]][<ast.UnaryOp object at 0x7da1b22d61a0>].lower, parameter[]]
if compare[name[file_type] in name[_mesh_exporters]] begin[:]
variable[was_opened] assign[=] constant[True]
variable[file_obj] assign[=] call[name[open], parameter[name[file_obj], constant[wb]]]
variable[file_type] assign[=] call[call[name[str], parameter[name[file_type]]].lower, parameter[]]
if <ast.UnaryOp object at 0x7da1b22d5ba0> begin[:]
<ast.Raise object at 0x7da1b22d45e0>
if call[name[isinstance], parameter[name[mesh], tuple[[<ast.Name object at 0x7da1b22d5030>, <ast.Name object at 0x7da1b22d51e0>, <ast.Name object at 0x7da1b22d52a0>, <ast.Attribute object at 0x7da1b22d5540>]]]] begin[:]
variable[faces] assign[=] constant[0]
for taget[name[m]] in starred[name[mesh]] begin[:]
<ast.AugAssign object at 0x7da1b22d60e0>
call[name[log].debug, parameter[constant[Exporting %d meshes with a total of %d faces as %s], call[name[len], parameter[name[mesh]]], name[faces], call[name[file_type].upper, parameter[]]]]
variable[export] assign[=] call[call[name[_mesh_exporters]][name[file_type]], parameter[name[mesh]]]
if call[name[hasattr], parameter[name[file_obj], constant[write]]] begin[:]
variable[result] assign[=] call[name[util].write_encoded, parameter[name[file_obj], name[export]]]
if name[was_opened] begin[:]
call[name[file_obj].close, parameter[]]
return[name[result]] | keyword[def] identifier[export_mesh] ( identifier[mesh] , identifier[file_obj] , identifier[file_type] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[was_opened] = keyword[False]
keyword[if] identifier[util] . identifier[is_string] ( identifier[file_obj] ):
keyword[if] identifier[file_type] keyword[is] keyword[None] :
identifier[file_type] =( identifier[str] ( identifier[file_obj] ). identifier[split] ( literal[string] )[- literal[int] ]). identifier[lower] ()
keyword[if] identifier[file_type] keyword[in] identifier[_mesh_exporters] :
identifier[was_opened] = keyword[True]
identifier[file_obj] = identifier[open] ( identifier[file_obj] , literal[string] )
identifier[file_type] = identifier[str] ( identifier[file_type] ). identifier[lower] ()
keyword[if] keyword[not] ( identifier[file_type] keyword[in] identifier[_mesh_exporters] ):
keyword[raise] identifier[ValueError] ( literal[string] , identifier[file_type] )
keyword[if] identifier[isinstance] ( identifier[mesh] ,( identifier[list] , identifier[tuple] , identifier[set] , identifier[np] . identifier[ndarray] )):
identifier[faces] = literal[int]
keyword[for] identifier[m] keyword[in] identifier[mesh] :
identifier[faces] += identifier[len] ( identifier[m] . identifier[faces] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[len] ( identifier[mesh] ), identifier[faces] , identifier[file_type] . identifier[upper] ())
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[len] ( identifier[mesh] . identifier[faces] ),
identifier[file_type] . identifier[upper] ())
identifier[export] = identifier[_mesh_exporters] [ identifier[file_type] ]( identifier[mesh] ,** identifier[kwargs] )
keyword[if] identifier[hasattr] ( identifier[file_obj] , literal[string] ):
identifier[result] = identifier[util] . identifier[write_encoded] ( identifier[file_obj] , identifier[export] )
keyword[else] :
identifier[result] = identifier[export]
keyword[if] identifier[was_opened] :
identifier[file_obj] . identifier[close] ()
keyword[return] identifier[result] | def export_mesh(mesh, file_obj, file_type=None, **kwargs):
"""
Export a Trimesh object to a file- like object, or to a filename
Parameters
---------
file_obj : str, file-like
Where should mesh be exported to
file_type : str or None
Represents file type (eg: 'stl')
Returns
----------
exported : bytes or str
Result of exporter
"""
# if we opened a file object in this function
# we will want to close it when we're done
was_opened = False
if util.is_string(file_obj):
if file_type is None:
file_type = str(file_obj).split('.')[-1].lower() # depends on [control=['if'], data=['file_type']]
if file_type in _mesh_exporters:
was_opened = True
file_obj = open(file_obj, 'wb') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
file_type = str(file_type).lower()
if not file_type in _mesh_exporters:
raise ValueError('%s exporter not available!', file_type) # depends on [control=['if'], data=[]]
if isinstance(mesh, (list, tuple, set, np.ndarray)):
faces = 0
for m in mesh:
faces += len(m.faces) # depends on [control=['for'], data=['m']]
log.debug('Exporting %d meshes with a total of %d faces as %s', len(mesh), faces, file_type.upper()) # depends on [control=['if'], data=[]]
else:
log.debug('Exporting %d faces as %s', len(mesh.faces), file_type.upper())
export = _mesh_exporters[file_type](mesh, **kwargs)
if hasattr(file_obj, 'write'):
result = util.write_encoded(file_obj, export) # depends on [control=['if'], data=[]]
else:
result = export
if was_opened:
file_obj.close() # depends on [control=['if'], data=[]]
return result |
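A hypothetical usage sketch for the export_mesh row above. The function looks like a trimesh-style exporter, so the trimesh import, the creation.box() call and the 'stl'/'ply' keys assumed to exist in _mesh_exporters are assumptions, not something this row guarantees.

import trimesh  # assumption: trimesh supplies the mesh objects this exporter expects

mesh = trimesh.creation.box()
export_mesh(mesh, 'box.stl')                         # file type inferred from the '.stl' extension
with open('box.ply', 'wb') as handle:                # or write into an already-open binary file object
    result = export_mesh(mesh, handle, file_type='ply')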
def delete(self, request, bot_id, id, format=None):
"""
Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(MessengerBotDetail, self).delete(request, bot_id, id, format) | def function[delete, parameter[self, request, bot_id, id, format]]:
constant[
Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated
]
return[call[call[name[super], parameter[name[MessengerBotDetail], name[self]]].delete, parameter[name[request], name[bot_id], name[id], name[format]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[request] , identifier[bot_id] , identifier[id] , identifier[format] = keyword[None] ):
literal[string]
keyword[return] identifier[super] ( identifier[MessengerBotDetail] , identifier[self] ). identifier[delete] ( identifier[request] , identifier[bot_id] , identifier[id] , identifier[format] ) | def delete(self, request, bot_id, id, format=None):
"""
Delete existing Messenger Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(MessengerBotDetail, self).delete(request, bot_id, id, format) |
def setup_prefix_logging(logdir):
"""
Sets up a file logger that will create a log in the given logdir (usually a
lago prefix)
Args:
logdir (str): path to create the log into, will be created if it does
not exist
Returns:
None
"""
if not os.path.exists(logdir):
os.mkdir(logdir)
file_handler = logging.FileHandler(
filename=os.path.join(logdir, 'lago.log'),
)
file_formatter = get_default_log_formatter()
file_handler.setFormatter(file_formatter)
logging.root.addHandler(file_handler)
hide_paramiko_logs()
hide_stevedore_logs() | def function[setup_prefix_logging, parameter[logdir]]:
constant[
Sets up a file logger that will create a log in the given logdir (usually a
lago prefix)
Args:
logdir (str): path to create the log into, will be created if it does
not exist
Returns:
None
]
if <ast.UnaryOp object at 0x7da2041d8e80> begin[:]
call[name[os].mkdir, parameter[name[logdir]]]
variable[file_handler] assign[=] call[name[logging].FileHandler, parameter[]]
variable[file_formatter] assign[=] call[name[get_default_log_formatter], parameter[]]
call[name[file_handler].setFormatter, parameter[name[file_formatter]]]
call[name[logging].root.addHandler, parameter[name[file_handler]]]
call[name[hide_paramiko_logs], parameter[]]
call[name[hide_stevedore_logs], parameter[]] | keyword[def] identifier[setup_prefix_logging] ( identifier[logdir] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[logdir] ):
identifier[os] . identifier[mkdir] ( identifier[logdir] )
identifier[file_handler] = identifier[logging] . identifier[FileHandler] (
identifier[filename] = identifier[os] . identifier[path] . identifier[join] ( identifier[logdir] , literal[string] ),
)
identifier[file_formatter] = identifier[get_default_log_formatter] ()
identifier[file_handler] . identifier[setFormatter] ( identifier[file_formatter] )
identifier[logging] . identifier[root] . identifier[addHandler] ( identifier[file_handler] )
identifier[hide_paramiko_logs] ()
identifier[hide_stevedore_logs] () | def setup_prefix_logging(logdir):
"""
Sets up a file logger that will create a log in the given logdir (usually a
lago prefix)
Args:
logdir (str): path to create the log into, will be created if it does
not exist
Returns:
None
"""
if not os.path.exists(logdir):
os.mkdir(logdir) # depends on [control=['if'], data=[]]
file_handler = logging.FileHandler(filename=os.path.join(logdir, 'lago.log'))
file_formatter = get_default_log_formatter()
file_handler.setFormatter(file_formatter)
logging.root.addHandler(file_handler)
hide_paramiko_logs()
hide_stevedore_logs() |
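A small usage sketch for setup_prefix_logging, assuming it and the helpers it calls (get_default_log_formatter, hide_paramiko_logs, hide_stevedore_logs) are importable from the same lago logging module.

import logging
import tempfile

prefix_dir = tempfile.mkdtemp()                  # stands in for a lago prefix directory
setup_prefix_logging(prefix_dir)                 # creates <prefix_dir>/lago.log and attaches the handler
logging.getLogger(__name__).info('prefix logging initialised in %s', prefix_dir)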
def parse_datetime(value):
"""Parses a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = datetime_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = kw.pop('tzinfo')
if tzinfo == 'Z':
tzinfo = utc
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset = 60 * int(tzinfo[1:3]) + offset_mins
if tzinfo[0] == '-':
offset = -offset
tzinfo = get_fixed_timezone(offset)
kw = {k: int(v) for k, v in kw.items() if v is not None}
kw['tzinfo'] = tzinfo
return datetime.datetime(**kw) | def function[parse_datetime, parameter[value]]:
constant[Parses a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
]
variable[match] assign[=] call[name[datetime_re].match, parameter[name[value]]]
if name[match] begin[:]
variable[kw] assign[=] call[name[match].groupdict, parameter[]]
if call[name[kw]][constant[microsecond]] begin[:]
call[name[kw]][constant[microsecond]] assign[=] call[call[name[kw]][constant[microsecond]].ljust, parameter[constant[6], constant[0]]]
variable[tzinfo] assign[=] call[name[kw].pop, parameter[constant[tzinfo]]]
if compare[name[tzinfo] equal[==] constant[Z]] begin[:]
variable[tzinfo] assign[=] name[utc]
variable[kw] assign[=] <ast.DictComp object at 0x7da1b1451810>
call[name[kw]][constant[tzinfo]] assign[=] name[tzinfo]
return[call[name[datetime].datetime, parameter[]]] | keyword[def] identifier[parse_datetime] ( identifier[value] ):
literal[string]
identifier[match] = identifier[datetime_re] . identifier[match] ( identifier[value] )
keyword[if] identifier[match] :
identifier[kw] = identifier[match] . identifier[groupdict] ()
keyword[if] identifier[kw] [ literal[string] ]:
identifier[kw] [ literal[string] ]= identifier[kw] [ literal[string] ]. identifier[ljust] ( literal[int] , literal[string] )
identifier[tzinfo] = identifier[kw] . identifier[pop] ( literal[string] )
keyword[if] identifier[tzinfo] == literal[string] :
identifier[tzinfo] = identifier[utc]
keyword[elif] identifier[tzinfo] keyword[is] keyword[not] keyword[None] :
identifier[offset_mins] = identifier[int] ( identifier[tzinfo] [- literal[int] :]) keyword[if] identifier[len] ( identifier[tzinfo] )> literal[int] keyword[else] literal[int]
identifier[offset] = literal[int] * identifier[int] ( identifier[tzinfo] [ literal[int] : literal[int] ])+ identifier[offset_mins]
keyword[if] identifier[tzinfo] [ literal[int] ]== literal[string] :
identifier[offset] =- identifier[offset]
identifier[tzinfo] = identifier[get_fixed_timezone] ( identifier[offset] )
identifier[kw] ={ identifier[k] : identifier[int] ( identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kw] . identifier[items] () keyword[if] identifier[v] keyword[is] keyword[not] keyword[None] }
identifier[kw] [ literal[string] ]= identifier[tzinfo]
keyword[return] identifier[datetime] . identifier[datetime] (** identifier[kw] ) | def parse_datetime(value):
"""Parses a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = datetime_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0') # depends on [control=['if'], data=[]]
tzinfo = kw.pop('tzinfo')
if tzinfo == 'Z':
tzinfo = utc # depends on [control=['if'], data=['tzinfo']]
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset = 60 * int(tzinfo[1:3]) + offset_mins
if tzinfo[0] == '-':
offset = -offset # depends on [control=['if'], data=[]]
tzinfo = get_fixed_timezone(offset) # depends on [control=['if'], data=['tzinfo']]
kw = {k: int(v) for (k, v) in kw.items() if v is not None}
kw['tzinfo'] = tzinfo
return datetime.datetime(**kw) # depends on [control=['if'], data=[]] |
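A usage sketch for the parse_datetime above; it assumes the module-level helpers it relies on (datetime_re, utc, get_fixed_timezone) are in scope, as they would be in a Django-style dateparse module.

print(parse_datetime('2019-05-01T12:30:00Z'))            # aware datetime with UTC tzinfo
print(parse_datetime('2019-05-01 12:30:00.5+0530'))      # fixed +05:30 offset, microseconds padded to 500000
print(parse_datetime('not a timestamp'))                 # None: input is not well formatted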
def setup_opt_parser():
"""
Setup the optparser
    @returns: optparse.OptionParser
"""
#pylint: disable-msg=C0301
#line too long
usage = "usage: %prog [options]"
opt_parser = optparse.OptionParser(usage=usage)
opt_parser.add_option("--version", action='store_true', dest=
"yolk_version", default=False, help=
"Show yolk version and exit.")
opt_parser.add_option("--debug", action='store_true', dest=
"debug", default=False, help=
"Show debugging information.")
opt_parser.add_option("-q", "--quiet", action='store_true', dest=
"quiet", default=False, help=
"Show less output.")
group_local = optparse.OptionGroup(opt_parser,
"Query installed Python packages",
"The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
group_local.add_option("-l", "--list", action='store_true', dest=
"show_all", default=False, help=
"List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.")
group_local.add_option("-a", "--activated", action='store_true',
dest="show_active", default=False, help=
'List activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-n", "--non-activated", action='store_true',
dest="show_non_active", default=False, help=
'List non-activated packages installed by distutils or ' +
'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option("-m", "--metadata", action='store_true', dest=
"metadata", default=False, help=
'Show all metadata for packages installed by ' +
'setuptools (use with -l -a or -n)')
group_local.add_option("-f", "--fields", action="store", dest=
"fields", default=False, help=
'Show specific metadata fields. ' +
'(use with -m or -M)')
group_local.add_option("-d", "--depends", action='store', dest=
"show_deps", metavar='PKG_SPEC',
help= "Show dependencies for a package installed by " +
"setuptools if they are available.")
group_local.add_option("--entry-points", action='store',
dest="show_entry_points", default=False, help=
'List entry points for a module. e.g. --entry-points nose.plugins',
metavar="MODULE")
group_local.add_option("--entry-map", action='store',
dest="show_entry_map", default=False, help=
'List entry map for a package. e.g. --entry-map yolk',
metavar="PACKAGE_NAME")
group_pypi = optparse.OptionGroup(opt_parser,
"PyPI (Cheese Shop) options",
"The following options query the Python Package Index:")
group_pypi.add_option("-C", "--changelog", action='store',
dest="show_pypi_changelog", metavar='HOURS',
default=False, help=
"Show detailed ChangeLog for PyPI for last n hours. ")
group_pypi.add_option("-D", "--download-links", action='store',
metavar="PKG_SPEC", dest="show_download_links",
default=False, help=
"Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
group_pypi.add_option("-F", "--fetch-package", action='store',
metavar="PKG_SPEC", dest="fetch",
default=False, help=
"Download package source or egg. You can specify a file type with -T")
group_pypi.add_option("-H", "--browse-homepage", action='store',
metavar="PKG_SPEC", dest="browse_website",
default=False, help=
"Launch web browser at home page for package.")
group_pypi.add_option("-I", "--pypi-index", action='store',
dest="pypi_index",
default=False, help=
"Specify PyPI mirror for package index.")
group_pypi.add_option("-L", "--latest-releases", action='store',
dest="show_pypi_releases", metavar="HOURS",
default=False, help=
"Show PyPI releases for last n hours. ")
group_pypi.add_option("-M", "--query-metadata", action='store',
dest="query_metadata_pypi", default=False,
metavar="PKG_SPEC", help=
"Show metadata for a package listed on PyPI. Use -f to show particular fields.")
group_pypi.add_option("-S", "", action="store", dest="pypi_search",
default=False, help=
"Search PyPI by spec and optional AND/OR operator.",
metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_option("-T", "--file-type", action="store", dest=
"file_type", default="all", help=
"You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_option("-U", "--show-updates", action='store_true',
dest="show_updates", metavar='<PKG_NAME>',
default=False, help=
"Check PyPI for updates on package(s).")
group_pypi.add_option("-V", "--versions-available", action=
'store', dest="versions_available",
default=False, metavar='PKG_SPEC',
help="Show available versions for given package " +
"listed on PyPI.")
opt_parser.add_option_group(group_local)
opt_parser.add_option_group(group_pypi)
# add opts from plugins
all_plugins = []
for plugcls in load_plugins(others=True):
plug = plugcls()
try:
plug.add_options(opt_parser)
except AttributeError:
pass
return opt_parser | def function[setup_opt_parser, parameter[]]:
constant[
Setup the optparser
@returns: opt_parser.OptionParser
]
variable[usage] assign[=] constant[usage: %prog [options]]
variable[opt_parser] assign[=] call[name[optparse].OptionParser, parameter[]]
call[name[opt_parser].add_option, parameter[constant[--version]]]
call[name[opt_parser].add_option, parameter[constant[--debug]]]
call[name[opt_parser].add_option, parameter[constant[-q], constant[--quiet]]]
variable[group_local] assign[=] call[name[optparse].OptionGroup, parameter[name[opt_parser], constant[Query installed Python packages], constant[The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9]]]
call[name[group_local].add_option, parameter[constant[-l], constant[--list]]]
call[name[group_local].add_option, parameter[constant[-a], constant[--activated]]]
call[name[group_local].add_option, parameter[constant[-n], constant[--non-activated]]]
call[name[group_local].add_option, parameter[constant[-m], constant[--metadata]]]
call[name[group_local].add_option, parameter[constant[-f], constant[--fields]]]
call[name[group_local].add_option, parameter[constant[-d], constant[--depends]]]
call[name[group_local].add_option, parameter[constant[--entry-points]]]
call[name[group_local].add_option, parameter[constant[--entry-map]]]
variable[group_pypi] assign[=] call[name[optparse].OptionGroup, parameter[name[opt_parser], constant[PyPI (Cheese Shop) options], constant[The following options query the Python Package Index:]]]
call[name[group_pypi].add_option, parameter[constant[-C], constant[--changelog]]]
call[name[group_pypi].add_option, parameter[constant[-D], constant[--download-links]]]
call[name[group_pypi].add_option, parameter[constant[-F], constant[--fetch-package]]]
call[name[group_pypi].add_option, parameter[constant[-H], constant[--browse-homepage]]]
call[name[group_pypi].add_option, parameter[constant[-I], constant[--pypi-index]]]
call[name[group_pypi].add_option, parameter[constant[-L], constant[--latest-releases]]]
call[name[group_pypi].add_option, parameter[constant[-M], constant[--query-metadata]]]
call[name[group_pypi].add_option, parameter[constant[-S], constant[]]]
call[name[group_pypi].add_option, parameter[constant[-T], constant[--file-type]]]
call[name[group_pypi].add_option, parameter[constant[-U], constant[--show-updates]]]
call[name[group_pypi].add_option, parameter[constant[-V], constant[--versions-available]]]
call[name[opt_parser].add_option_group, parameter[name[group_local]]]
call[name[opt_parser].add_option_group, parameter[name[group_pypi]]]
variable[all_plugins] assign[=] list[[]]
for taget[name[plugcls]] in starred[call[name[load_plugins], parameter[]]] begin[:]
variable[plug] assign[=] call[name[plugcls], parameter[]]
<ast.Try object at 0x7da18dc99180>
return[name[opt_parser]] | keyword[def] identifier[setup_opt_parser] ():
literal[string]
identifier[usage] = literal[string]
identifier[opt_parser] = identifier[optparse] . identifier[OptionParser] ( identifier[usage] = identifier[usage] )
identifier[opt_parser] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[opt_parser] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[opt_parser] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_local] = identifier[optparse] . identifier[OptionGroup] ( identifier[opt_parser] ,
literal[string] ,
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] +
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] +
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] +
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] +
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string] +
literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] ,
identifier[metavar] = literal[string] )
identifier[group_local] . identifier[add_option] ( literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[False] , identifier[help] =
literal[string] ,
identifier[metavar] = literal[string] )
identifier[group_pypi] = identifier[optparse] . identifier[OptionGroup] ( identifier[opt_parser] ,
literal[string] ,
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[metavar] = literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[metavar] = literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[metavar] = literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[default] = keyword[False] ,
identifier[metavar] = literal[string] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] ,
identifier[metavar] = literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] , identifier[dest] =
literal[string] , identifier[default] = literal[string] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] = literal[string] ,
identifier[dest] = literal[string] , identifier[metavar] = literal[string] ,
identifier[default] = keyword[False] , identifier[help] =
literal[string] )
identifier[group_pypi] . identifier[add_option] ( literal[string] , literal[string] , identifier[action] =
literal[string] , identifier[dest] = literal[string] ,
identifier[default] = keyword[False] , identifier[metavar] = literal[string] ,
identifier[help] = literal[string] +
literal[string] )
identifier[opt_parser] . identifier[add_option_group] ( identifier[group_local] )
identifier[opt_parser] . identifier[add_option_group] ( identifier[group_pypi] )
identifier[all_plugins] =[]
keyword[for] identifier[plugcls] keyword[in] identifier[load_plugins] ( identifier[others] = keyword[True] ):
identifier[plug] = identifier[plugcls] ()
keyword[try] :
identifier[plug] . identifier[add_options] ( identifier[opt_parser] )
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] identifier[opt_parser] | def setup_opt_parser():
"""
Setup the optparser
@returns: opt_parser.OptionParser
"""
#pylint: disable-msg=C0301
#line too long
usage = 'usage: %prog [options]'
opt_parser = optparse.OptionParser(usage=usage)
opt_parser.add_option('--version', action='store_true', dest='yolk_version', default=False, help='Show yolk version and exit.')
opt_parser.add_option('--debug', action='store_true', dest='debug', default=False, help='Show debugging information.')
opt_parser.add_option('-q', '--quiet', action='store_true', dest='quiet', default=False, help='Show less output.')
group_local = optparse.OptionGroup(opt_parser, 'Query installed Python packages', "The following options show information about installed Python packages. Activated packages are normal packages on sys.path that can be imported. Non-activated packages need 'pkg_resources.require()' before they can be imported, such as packages installed with 'easy_install --multi-version'. PKG_SPEC can be either a package name or package name and version e.g. Paste==0.9")
group_local.add_option('-l', '--list', action='store_true', dest='show_all', default=False, help='List all Python packages installed by distutils or setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option('-a', '--activated', action='store_true', dest='show_active', default=False, help='List activated packages installed by distutils or ' + 'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option('-n', '--non-activated', action='store_true', dest='show_non_active', default=False, help='List non-activated packages installed by distutils or ' + 'setuptools. Use PKG_SPEC to narrow results.')
group_local.add_option('-m', '--metadata', action='store_true', dest='metadata', default=False, help='Show all metadata for packages installed by ' + 'setuptools (use with -l -a or -n)')
group_local.add_option('-f', '--fields', action='store', dest='fields', default=False, help='Show specific metadata fields. ' + '(use with -m or -M)')
group_local.add_option('-d', '--depends', action='store', dest='show_deps', metavar='PKG_SPEC', help='Show dependencies for a package installed by ' + 'setuptools if they are available.')
group_local.add_option('--entry-points', action='store', dest='show_entry_points', default=False, help='List entry points for a module. e.g. --entry-points nose.plugins', metavar='MODULE')
group_local.add_option('--entry-map', action='store', dest='show_entry_map', default=False, help='List entry map for a package. e.g. --entry-map yolk', metavar='PACKAGE_NAME')
group_pypi = optparse.OptionGroup(opt_parser, 'PyPI (Cheese Shop) options', 'The following options query the Python Package Index:')
group_pypi.add_option('-C', '--changelog', action='store', dest='show_pypi_changelog', metavar='HOURS', default=False, help='Show detailed ChangeLog for PyPI for last n hours. ')
group_pypi.add_option('-D', '--download-links', action='store', metavar='PKG_SPEC', dest='show_download_links', default=False, help="Show download URL's for package listed on PyPI. Use with -T to specify egg, source etc.")
group_pypi.add_option('-F', '--fetch-package', action='store', metavar='PKG_SPEC', dest='fetch', default=False, help='Download package source or egg. You can specify a file type with -T')
group_pypi.add_option('-H', '--browse-homepage', action='store', metavar='PKG_SPEC', dest='browse_website', default=False, help='Launch web browser at home page for package.')
group_pypi.add_option('-I', '--pypi-index', action='store', dest='pypi_index', default=False, help='Specify PyPI mirror for package index.')
group_pypi.add_option('-L', '--latest-releases', action='store', dest='show_pypi_releases', metavar='HOURS', default=False, help='Show PyPI releases for last n hours. ')
group_pypi.add_option('-M', '--query-metadata', action='store', dest='query_metadata_pypi', default=False, metavar='PKG_SPEC', help='Show metadata for a package listed on PyPI. Use -f to show particular fields.')
group_pypi.add_option('-S', '', action='store', dest='pypi_search', default=False, help='Search PyPI by spec and optional AND/OR operator.', metavar='SEARCH_SPEC <AND/OR SEARCH_SPEC>')
group_pypi.add_option('-T', '--file-type', action='store', dest='file_type', default='all', help="You may specify 'source', 'egg', 'svn' or 'all' when using -D.")
group_pypi.add_option('-U', '--show-updates', action='store_true', dest='show_updates', metavar='<PKG_NAME>', default=False, help='Check PyPI for updates on package(s).')
group_pypi.add_option('-V', '--versions-available', action='store', dest='versions_available', default=False, metavar='PKG_SPEC', help='Show available versions for given package ' + 'listed on PyPI.')
opt_parser.add_option_group(group_local)
opt_parser.add_option_group(group_pypi)
# add opts from plugins
all_plugins = []
for plugcls in load_plugins(others=True):
plug = plugcls()
try:
plug.add_options(opt_parser) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['plugcls']]
return opt_parser |
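A quick sketch of driving the parser built above; it assumes the surrounding yolk module (including load_plugins) is importable so setup_opt_parser can run.

opt_parser = setup_opt_parser()
options, args = opt_parser.parse_args(['--list', '--metadata', 'Paste==0.9'])
print(options.show_all, options.metadata)   # True True
print(args)                                 # ['Paste==0.9']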
def plot_ppc(self, nsims=1000, T=np.mean, **kwargs):
""" Plots histogram of the discrepancy from draws of the posterior
Parameters
----------
nsims : int (default : 1000)
How many draws for the PPC
T : function
A discrepancy measure - e.g. np.mean, np.std, np.max
"""
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception("No latent variables estimated!")
else:
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
lv_draws = self.draw_latent_variables(nsims=nsims)
mus = [self._model(lv_draws[:,i])[0] for i in range(nsims)]
model_scale, model_shape, model_skewness = self._get_scale_and_shape_sim(lv_draws)
data_draws = np.array([self.family.draw_variable(self.link(mus[i]),
np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]),
np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)])
T_sim = T(self.sample(nsims=nsims), axis=1)
T_actual = T(self.data)
if T == np.mean:
description = " of the mean"
elif T == np.max:
description = " of the maximum"
elif T == np.min:
description = " of the minimum"
elif T == np.median:
description = " of the median"
else:
description = ""
plt.figure(figsize=figsize)
ax = plt.subplot()
ax.axvline(T_actual)
sns.distplot(T_sim, kde=False, ax=ax)
ax.set(title='Posterior predictive' + description, xlabel='T(x)', ylabel='Frequency');
plt.show() | def function[plot_ppc, parameter[self, nsims, T]]:
constant[ Plots histogram of the discrepancy from draws of the posterior
Parameters
----------
nsims : int (default : 1000)
How many draws for the PPC
T : function
A discrepancy measure - e.g. np.mean, np.std, np.max
]
if compare[name[self].latent_variables.estimation_method <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18bc70b20>, <ast.Constant object at 0x7da18bc72380>]]] begin[:]
<ast.Raise object at 0x7da18bc71570> | keyword[def] identifier[plot_ppc] ( identifier[self] , identifier[nsims] = literal[int] , identifier[T] = identifier[np] . identifier[mean] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[self] . identifier[latent_variables] . identifier[estimation_method] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[Exception] ( literal[string] )
keyword[else] :
keyword[import] identifier[matplotlib] . identifier[pyplot] keyword[as] identifier[plt]
keyword[import] identifier[seaborn] keyword[as] identifier[sns]
identifier[figsize] = identifier[kwargs] . identifier[get] ( literal[string] ,( literal[int] , literal[int] ))
identifier[lv_draws] = identifier[self] . identifier[draw_latent_variables] ( identifier[nsims] = identifier[nsims] )
identifier[mus] =[ identifier[self] . identifier[_model] ( identifier[lv_draws] [:, identifier[i] ])[ literal[int] ] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nsims] )]
identifier[model_scale] , identifier[model_shape] , identifier[model_skewness] = identifier[self] . identifier[_get_scale_and_shape_sim] ( identifier[lv_draws] )
identifier[data_draws] = identifier[np] . identifier[array] ([ identifier[self] . identifier[family] . identifier[draw_variable] ( identifier[self] . identifier[link] ( identifier[mus] [ identifier[i] ]),
identifier[np] . identifier[repeat] ( identifier[model_scale] [ identifier[i] ], identifier[mus] [ identifier[i] ]. identifier[shape] [ literal[int] ]), identifier[np] . identifier[repeat] ( identifier[model_shape] [ identifier[i] ], identifier[mus] [ identifier[i] ]. identifier[shape] [ literal[int] ]),
identifier[np] . identifier[repeat] ( identifier[model_skewness] [ identifier[i] ], identifier[mus] [ identifier[i] ]. identifier[shape] [ literal[int] ]), identifier[mus] [ identifier[i] ]. identifier[shape] [ literal[int] ]) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[nsims] )])
identifier[T_sim] = identifier[T] ( identifier[self] . identifier[sample] ( identifier[nsims] = identifier[nsims] ), identifier[axis] = literal[int] )
identifier[T_actual] = identifier[T] ( identifier[self] . identifier[data] )
keyword[if] identifier[T] == identifier[np] . identifier[mean] :
identifier[description] = literal[string]
keyword[elif] identifier[T] == identifier[np] . identifier[max] :
identifier[description] = literal[string]
keyword[elif] identifier[T] == identifier[np] . identifier[min] :
identifier[description] = literal[string]
keyword[elif] identifier[T] == identifier[np] . identifier[median] :
identifier[description] = literal[string]
keyword[else] :
identifier[description] = literal[string]
identifier[plt] . identifier[figure] ( identifier[figsize] = identifier[figsize] )
identifier[ax] = identifier[plt] . identifier[subplot] ()
identifier[ax] . identifier[axvline] ( identifier[T_actual] )
identifier[sns] . identifier[distplot] ( identifier[T_sim] , identifier[kde] = keyword[False] , identifier[ax] = identifier[ax] )
identifier[ax] . identifier[set] ( identifier[title] = literal[string] + identifier[description] , identifier[xlabel] = literal[string] , identifier[ylabel] = literal[string] );
identifier[plt] . identifier[show] () | def plot_ppc(self, nsims=1000, T=np.mean, **kwargs):
""" Plots histogram of the discrepancy from draws of the posterior
Parameters
----------
nsims : int (default : 1000)
How many draws for the PPC
T : function
A discrepancy measure - e.g. np.mean, np.std, np.max
"""
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception('No latent variables estimated!') # depends on [control=['if'], data=[]]
else:
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize', (10, 7))
lv_draws = self.draw_latent_variables(nsims=nsims)
mus = [self._model(lv_draws[:, i])[0] for i in range(nsims)]
(model_scale, model_shape, model_skewness) = self._get_scale_and_shape_sim(lv_draws)
data_draws = np.array([self.family.draw_variable(self.link(mus[i]), np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]), np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)])
T_sim = T(self.sample(nsims=nsims), axis=1)
T_actual = T(self.data)
if T == np.mean:
description = ' of the mean' # depends on [control=['if'], data=[]]
elif T == np.max:
description = ' of the maximum' # depends on [control=['if'], data=[]]
elif T == np.min:
description = ' of the minimum' # depends on [control=['if'], data=[]]
elif T == np.median:
description = ' of the median' # depends on [control=['if'], data=[]]
else:
description = ''
plt.figure(figsize=figsize)
ax = plt.subplot()
ax.axvline(T_actual)
sns.distplot(T_sim, kde=False, ax=ax)
ax.set(title='Posterior predictive' + description, xlabel='T(x)', ylabel='Frequency')
plt.show() |
def diag(ax=None, linecolor='0.0', linestyle='--', **kwargs):
"""Plot the diagonal."""
ax = get_ax(ax)
xy_min = np.min((ax.get_xlim(), ax.get_ylim()))
xy_max = np.max((ax.get_ylim(), ax.get_xlim()))
return ax.plot([xy_min, xy_max], [xy_min, xy_max],
ls=linestyle,
c=linecolor,
**kwargs) | def function[diag, parameter[ax, linecolor, linestyle]]:
constant[Plot the diagonal.]
variable[ax] assign[=] call[name[get_ax], parameter[name[ax]]]
variable[xy_min] assign[=] call[name[np].min, parameter[tuple[[<ast.Call object at 0x7da1b2351780>, <ast.Call object at 0x7da1b2351240>]]]]
variable[xy_max] assign[=] call[name[np].max, parameter[tuple[[<ast.Call object at 0x7da1b23526e0>, <ast.Call object at 0x7da1b2352e90>]]]]
return[call[name[ax].plot, parameter[list[[<ast.Name object at 0x7da1b2350e50>, <ast.Name object at 0x7da1b2351f60>]], list[[<ast.Name object at 0x7da1b2353fd0>, <ast.Name object at 0x7da1b2353e80>]]]]] | keyword[def] identifier[diag] ( identifier[ax] = keyword[None] , identifier[linecolor] = literal[string] , identifier[linestyle] = literal[string] ,** identifier[kwargs] ):
literal[string]
identifier[ax] = identifier[get_ax] ( identifier[ax] )
identifier[xy_min] = identifier[np] . identifier[min] (( identifier[ax] . identifier[get_xlim] (), identifier[ax] . identifier[get_ylim] ()))
identifier[xy_max] = identifier[np] . identifier[max] (( identifier[ax] . identifier[get_ylim] (), identifier[ax] . identifier[get_xlim] ()))
keyword[return] identifier[ax] . identifier[plot] ([ identifier[xy_min] , identifier[xy_max] ],[ identifier[xy_min] , identifier[xy_max] ],
identifier[ls] = identifier[linestyle] ,
identifier[c] = identifier[linecolor] ,
** identifier[kwargs] ) | def diag(ax=None, linecolor='0.0', linestyle='--', **kwargs):
"""Plot the diagonal."""
ax = get_ax(ax)
xy_min = np.min((ax.get_xlim(), ax.get_ylim()))
xy_max = np.max((ax.get_ylim(), ax.get_xlim()))
return ax.plot([xy_min, xy_max], [xy_min, xy_max], ls=linestyle, c=linecolor, **kwargs) |
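A minimal usage sketch for diag; it assumes get_ax(ax) returns the axis it is given (or the current axis when None), the usual pattern for helpers with this name.

import numpy as np
import matplotlib.pyplot as plt

x = np.random.rand(50)
y = x + 0.1 * np.random.randn(50)
fig, ax = plt.subplots()
ax.scatter(x, y)
diag(ax=ax, linecolor='red')    # reference line y = x spanning the current axis limits
plt.show()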
def find_keywords(string, parser, top=10, frequency={}, **kwargs):
""" Returns a sorted list of keywords in the given string.
The given parser (e.g., pattern.en.parser) is used to identify noun phrases.
The given frequency dictionary can be a reference corpus,
with relative document frequency (df, 0.0-1.0) for each lemma,
e.g., {"the": 0.8, "cat": 0.1, ...}
"""
lemmata = kwargs.pop("lemmata", kwargs.pop("stem", True))
# Parse the string and extract noun phrases (NP).
chunks = []
wordcount = 0
for sentence in parser.parse(string, chunks=True, lemmata=lemmata).split():
for w in sentence: # ["cats", "NNS", "I-NP", "O", "cat"]
if w[2] == "B-NP":
chunks.append([w])
wordcount += 1
elif w[2] == "I-NP" and w[1][:3] == chunks[-1][-1][1][:3] == "NNP":
chunks[-1][-1][+0] += " " + w[+0] # Collapse NNPs: "Ms Kitty".
chunks[-1][-1][-1] += " " + w[-1]
elif w[2] == "I-NP":
chunks[-1].append(w)
wordcount += 1
# Rate the nouns in noun phrases.
m = {}
for i, chunk in enumerate(chunks):
head = True
if parser.language not in ("ca", "es", "pt", "fr", "it", "pt", "ro"):
# Head of "cat hair" => "hair".
# Head of "poils de chat" => "poils".
chunk = list(reversed(chunk))
for w in chunk:
if w[1].startswith("NN"):
if lemmata:
k = w[-1]
else:
k = w[0].lower()
if not k in m:
m[k] = [0.0, set(), 1.0, 1.0, 1.0]
# Higher score for chunks that appear more frequently.
m[k][0] += 1 / float(wordcount)
# Higher score for chunks that appear in more contexts (semantic centrality).
m[k][1].add(" ".join(map(lambda x: x[0], chunk)).lower())
# Higher score for chunks at the start (25%) of the text.
m[k][2] += 1 if float(i) / len(chunks) <= 0.25 else 0
# Higher score for chunks not in a prepositional phrase.
m[k][3] += 1 if w[3] == "O" else 0
# Higher score for chunk head.
m[k][4] += 1 if head else 0
head = False
# Rate tf-idf if a frequency dict is given.
for k in m:
if frequency:
df = frequency.get(k, 0.0)
df = max(df, 1e-10)
df = log(1.0 / df, 2.71828)
else:
df = 1.0
m[k][0] = max(1e-10, m[k][0] * df)
m[k][1] = 1 + float(len(m[k][1]))
# Sort candidates alphabetically by total score
# The harmonic mean will emphasize tf-idf score.
hmean = lambda a: len(a) / sum(1.0 / x for x in a)
m = [(hmean(m[k]), k) for k in m]
m = sorted(m, key=lambda x: x[1])
m = sorted(m, key=lambda x: x[0], reverse=True)
m = [k for score, k in m]
return m[:top] | def function[find_keywords, parameter[string, parser, top, frequency]]:
constant[ Returns a sorted list of keywords in the given string.
The given parser (e.g., pattern.en.parser) is used to identify noun phrases.
The given frequency dictionary can be a reference corpus,
with relative document frequency (df, 0.0-1.0) for each lemma,
e.g., {"the": 0.8, "cat": 0.1, ...}
]
variable[lemmata] assign[=] call[name[kwargs].pop, parameter[constant[lemmata], call[name[kwargs].pop, parameter[constant[stem], constant[True]]]]]
variable[chunks] assign[=] list[[]]
variable[wordcount] assign[=] constant[0]
for taget[name[sentence]] in starred[call[call[name[parser].parse, parameter[name[string]]].split, parameter[]]] begin[:]
for taget[name[w]] in starred[name[sentence]] begin[:]
if compare[call[name[w]][constant[2]] equal[==] constant[B-NP]] begin[:]
call[name[chunks].append, parameter[list[[<ast.Name object at 0x7da20c6e6650>]]]]
<ast.AugAssign object at 0x7da20c6e6350>
variable[m] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da20c6e7160>, <ast.Name object at 0x7da20c6e7fa0>]]] in starred[call[name[enumerate], parameter[name[chunks]]]] begin[:]
variable[head] assign[=] constant[True]
if compare[name[parser].language <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c6e4ac0>, <ast.Constant object at 0x7da20c6e6080>, <ast.Constant object at 0x7da20c6e7250>, <ast.Constant object at 0x7da20c6e74c0>, <ast.Constant object at 0x7da20c6e4ee0>, <ast.Constant object at 0x7da20c6e7df0>, <ast.Constant object at 0x7da20c6e4340>]]] begin[:]
variable[chunk] assign[=] call[name[list], parameter[call[name[reversed], parameter[name[chunk]]]]]
for taget[name[w]] in starred[name[chunk]] begin[:]
if call[call[name[w]][constant[1]].startswith, parameter[constant[NN]]] begin[:]
if name[lemmata] begin[:]
variable[k] assign[=] call[name[w]][<ast.UnaryOp object at 0x7da207f008b0>]
if <ast.UnaryOp object at 0x7da207f015d0> begin[:]
call[name[m]][name[k]] assign[=] list[[<ast.Constant object at 0x7da207f03af0>, <ast.Call object at 0x7da207f00580>, <ast.Constant object at 0x7da207f00400>, <ast.Constant object at 0x7da207f002b0>, <ast.Constant object at 0x7da207f00f70>]]
<ast.AugAssign object at 0x7da207f02a70>
call[call[call[name[m]][name[k]]][constant[1]].add, parameter[call[call[constant[ ].join, parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20c6a9e10>, name[chunk]]]]].lower, parameter[]]]]
<ast.AugAssign object at 0x7da20c6a9f00>
<ast.AugAssign object at 0x7da20c6abe80>
<ast.AugAssign object at 0x7da18dc079a0>
variable[head] assign[=] constant[False]
for taget[name[k]] in starred[name[m]] begin[:]
if name[frequency] begin[:]
variable[df] assign[=] call[name[frequency].get, parameter[name[k], constant[0.0]]]
variable[df] assign[=] call[name[max], parameter[name[df], constant[1e-10]]]
variable[df] assign[=] call[name[log], parameter[binary_operation[constant[1.0] / name[df]], constant[2.71828]]]
call[call[name[m]][name[k]]][constant[0]] assign[=] call[name[max], parameter[constant[1e-10], binary_operation[call[call[name[m]][name[k]]][constant[0]] * name[df]]]]
call[call[name[m]][name[k]]][constant[1]] assign[=] binary_operation[constant[1] + call[name[float], parameter[call[name[len], parameter[call[call[name[m]][name[k]]][constant[1]]]]]]]
variable[hmean] assign[=] <ast.Lambda object at 0x7da207f9b4f0>
variable[m] assign[=] <ast.ListComp object at 0x7da207f9ad40>
variable[m] assign[=] call[name[sorted], parameter[name[m]]]
variable[m] assign[=] call[name[sorted], parameter[name[m]]]
variable[m] assign[=] <ast.ListComp object at 0x7da207f9b250>
return[call[name[m]][<ast.Slice object at 0x7da207f9b820>]] | keyword[def] identifier[find_keywords] ( identifier[string] , identifier[parser] , identifier[top] = literal[int] , identifier[frequency] ={},** identifier[kwargs] ):
literal[string]
identifier[lemmata] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[kwargs] . identifier[pop] ( literal[string] , keyword[True] ))
identifier[chunks] =[]
identifier[wordcount] = literal[int]
keyword[for] identifier[sentence] keyword[in] identifier[parser] . identifier[parse] ( identifier[string] , identifier[chunks] = keyword[True] , identifier[lemmata] = identifier[lemmata] ). identifier[split] ():
keyword[for] identifier[w] keyword[in] identifier[sentence] :
keyword[if] identifier[w] [ literal[int] ]== literal[string] :
identifier[chunks] . identifier[append] ([ identifier[w] ])
identifier[wordcount] += literal[int]
keyword[elif] identifier[w] [ literal[int] ]== literal[string] keyword[and] identifier[w] [ literal[int] ][: literal[int] ]== identifier[chunks] [- literal[int] ][- literal[int] ][ literal[int] ][: literal[int] ]== literal[string] :
identifier[chunks] [- literal[int] ][- literal[int] ][+ literal[int] ]+= literal[string] + identifier[w] [+ literal[int] ]
identifier[chunks] [- literal[int] ][- literal[int] ][- literal[int] ]+= literal[string] + identifier[w] [- literal[int] ]
keyword[elif] identifier[w] [ literal[int] ]== literal[string] :
identifier[chunks] [- literal[int] ]. identifier[append] ( identifier[w] )
identifier[wordcount] += literal[int]
identifier[m] ={}
keyword[for] identifier[i] , identifier[chunk] keyword[in] identifier[enumerate] ( identifier[chunks] ):
identifier[head] = keyword[True]
keyword[if] identifier[parser] . identifier[language] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
identifier[chunk] = identifier[list] ( identifier[reversed] ( identifier[chunk] ))
keyword[for] identifier[w] keyword[in] identifier[chunk] :
keyword[if] identifier[w] [ literal[int] ]. identifier[startswith] ( literal[string] ):
keyword[if] identifier[lemmata] :
identifier[k] = identifier[w] [- literal[int] ]
keyword[else] :
identifier[k] = identifier[w] [ literal[int] ]. identifier[lower] ()
keyword[if] keyword[not] identifier[k] keyword[in] identifier[m] :
identifier[m] [ identifier[k] ]=[ literal[int] , identifier[set] (), literal[int] , literal[int] , literal[int] ]
identifier[m] [ identifier[k] ][ literal[int] ]+= literal[int] / identifier[float] ( identifier[wordcount] )
identifier[m] [ identifier[k] ][ literal[int] ]. identifier[add] ( literal[string] . identifier[join] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[chunk] )). identifier[lower] ())
identifier[m] [ identifier[k] ][ literal[int] ]+= literal[int] keyword[if] identifier[float] ( identifier[i] )/ identifier[len] ( identifier[chunks] )<= literal[int] keyword[else] literal[int]
identifier[m] [ identifier[k] ][ literal[int] ]+= literal[int] keyword[if] identifier[w] [ literal[int] ]== literal[string] keyword[else] literal[int]
identifier[m] [ identifier[k] ][ literal[int] ]+= literal[int] keyword[if] identifier[head] keyword[else] literal[int]
identifier[head] = keyword[False]
keyword[for] identifier[k] keyword[in] identifier[m] :
keyword[if] identifier[frequency] :
identifier[df] = identifier[frequency] . identifier[get] ( identifier[k] , literal[int] )
identifier[df] = identifier[max] ( identifier[df] , literal[int] )
identifier[df] = identifier[log] ( literal[int] / identifier[df] , literal[int] )
keyword[else] :
identifier[df] = literal[int]
identifier[m] [ identifier[k] ][ literal[int] ]= identifier[max] ( literal[int] , identifier[m] [ identifier[k] ][ literal[int] ]* identifier[df] )
identifier[m] [ identifier[k] ][ literal[int] ]= literal[int] + identifier[float] ( identifier[len] ( identifier[m] [ identifier[k] ][ literal[int] ]))
identifier[hmean] = keyword[lambda] identifier[a] : identifier[len] ( identifier[a] )/ identifier[sum] ( literal[int] / identifier[x] keyword[for] identifier[x] keyword[in] identifier[a] )
identifier[m] =[( identifier[hmean] ( identifier[m] [ identifier[k] ]), identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[m] ]
identifier[m] = identifier[sorted] ( identifier[m] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])
identifier[m] = identifier[sorted] ( identifier[m] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ], identifier[reverse] = keyword[True] )
identifier[m] =[ identifier[k] keyword[for] identifier[score] , identifier[k] keyword[in] identifier[m] ]
keyword[return] identifier[m] [: identifier[top] ] | def find_keywords(string, parser, top=10, frequency={}, **kwargs):
""" Returns a sorted list of keywords in the given string.
The given parser (e.g., pattern.en.parser) is used to identify noun phrases.
The given frequency dictionary can be a reference corpus,
with relative document frequency (df, 0.0-1.0) for each lemma,
e.g., {"the": 0.8, "cat": 0.1, ...}
"""
lemmata = kwargs.pop('lemmata', kwargs.pop('stem', True))
# Parse the string and extract noun phrases (NP).
chunks = []
wordcount = 0
for sentence in parser.parse(string, chunks=True, lemmata=lemmata).split():
for w in sentence: # ["cats", "NNS", "I-NP", "O", "cat"]
if w[2] == 'B-NP':
chunks.append([w])
wordcount += 1 # depends on [control=['if'], data=[]]
elif w[2] == 'I-NP' and w[1][:3] == chunks[-1][-1][1][:3] == 'NNP':
chunks[-1][-1][+0] += ' ' + w[+0] # Collapse NNPs: "Ms Kitty".
chunks[-1][-1][-1] += ' ' + w[-1] # depends on [control=['if'], data=[]]
elif w[2] == 'I-NP':
chunks[-1].append(w)
wordcount += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] # depends on [control=['for'], data=['sentence']]
# Rate the nouns in noun phrases.
m = {}
for (i, chunk) in enumerate(chunks):
head = True
if parser.language not in ('ca', 'es', 'pt', 'fr', 'it', 'pt', 'ro'):
# Head of "cat hair" => "hair".
# Head of "poils de chat" => "poils".
chunk = list(reversed(chunk)) # depends on [control=['if'], data=[]]
for w in chunk:
if w[1].startswith('NN'):
if lemmata:
k = w[-1] # depends on [control=['if'], data=[]]
else:
k = w[0].lower()
if not k in m:
m[k] = [0.0, set(), 1.0, 1.0, 1.0] # depends on [control=['if'], data=[]]
# Higher score for chunks that appear more frequently.
m[k][0] += 1 / float(wordcount)
# Higher score for chunks that appear in more contexts (semantic centrality).
m[k][1].add(' '.join(map(lambda x: x[0], chunk)).lower())
# Higher score for chunks at the start (25%) of the text.
m[k][2] += 1 if float(i) / len(chunks) <= 0.25 else 0
# Higher score for chunks not in a prepositional phrase.
m[k][3] += 1 if w[3] == 'O' else 0
# Higher score for chunk head.
m[k][4] += 1 if head else 0
head = False # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['w']] # depends on [control=['for'], data=[]]
# Rate tf-idf if a frequency dict is given.
for k in m:
if frequency:
df = frequency.get(k, 0.0)
df = max(df, 1e-10)
df = log(1.0 / df, 2.71828) # depends on [control=['if'], data=[]]
else:
df = 1.0
m[k][0] = max(1e-10, m[k][0] * df)
m[k][1] = 1 + float(len(m[k][1])) # depends on [control=['for'], data=['k']]
# Sort candidates alphabetically by total score
# The harmonic mean will emphasize tf-idf score.
hmean = lambda a: len(a) / sum((1.0 / x for x in a))
m = [(hmean(m[k]), k) for k in m]
m = sorted(m, key=lambda x: x[1])
m = sorted(m, key=lambda x: x[0], reverse=True)
m = [k for (score, k) in m]
return m[:top] |
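A usage sketch following the docstring's own example of a parser, pattern.en.parser; the frequency dictionary below is a made-up reference corpus, not real document frequencies.

from pattern.en import parser   # the docstring cites pattern.en.parser as a typical parser

text = 'The black cat chased a small grey mouse around the old barn.'
reference_df = {'cat': 0.1, 'mouse': 0.05, 'barn': 0.01}    # hypothetical relative document frequencies
print(find_keywords(text, parser, top=5, frequency=reference_df))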
def ICALImporter(ctx, filename, all, owner, calendar, create_calendar, clear_calendar, dry, execfilter):
"""Calendar Importer for iCal (ics) files
"""
log('iCal importer running')
objectmodels = ctx.obj['db'].objectmodels
if objectmodels['user'].count({'name': owner}) > 0:
owner_object = objectmodels['user'].find_one({'name': owner})
elif objectmodels['user'].count({'uuid': owner}) > 0:
owner_object = objectmodels['user'].find_one({'uuid': owner})
else:
log('User unknown. Specify either uuid or name.', lvl=warn)
return
log('Found user')
if objectmodels['calendar'].count({'name': calendar}) > 0:
calendar = objectmodels['calendar'].find_one({'name': calendar})
elif objectmodels['calendar'].count({'uuid': owner}) > 0:
calendar = objectmodels['calendar'].find_one({'uuid': calendar})
elif create_calendar:
calendar = objectmodels['calendar']({
'uuid': std_uuid(),
'name': calendar
})
else:
log('Calendar unknown and no --create-calendar specified. Specify either uuid or name of an existing calendar.',
lvl=warn)
return
log('Found calendar')
if clear_calendar is True:
log('Clearing calendar events')
for item in objectmodels['event'].find({'calendar': calendar.uuid}):
item.delete()
with open(filename, 'rb') as file_object:
caldata = Calendar.from_ical(file_object.read())
keys = {
'class': 'str',
'created': 'dt',
'description': 'str',
'dtstart': 'dt',
'dtend': 'dt',
'timestamp': 'dt',
'modified': 'dt',
'location': 'str',
'status': 'str',
'summary': 'str',
'uid': 'str'
}
mapping = {
'description': 'summary',
'summary': 'name'
}
imports = []
def ical_import_filter(original, logfacilty):
log('Passthrough filter')
return original
if execfilter is not None:
import os
textFilePath = os.path.abspath(os.path.join(os.path.curdir, execfilter))
textFileFolder = os.path.dirname(textFilePath)
from importlib.machinery import SourceFileLoader
filter_module = SourceFileLoader("importfilter", textFilePath).load_module()
ical_import_filter = filter_module.ical_import_filter
for event in caldata.walk():
if event.name == 'VEVENT':
log(event, lvl=verbose, pretty=True)
initializer = {
'uuid': std_uuid(),
'calendar': calendar.uuid,
}
for item in keys:
thing = event.get(item, None)
if thing is None:
thing = 'NO-' + item
else:
if keys[item] == 'str':
thing = str(thing)
else:
thing = parser.parse(str(thing.dt))
thing = thing.isoformat()
if item in mapping:
item_assignment = mapping[item]
else:
item_assignment = item
initializer[item_assignment] = thing
new_event = objectmodels['event'](initializer)
new_event = ical_import_filter(new_event, log)
imports.append(new_event)
log(new_event, lvl=debug)
for ev in imports:
log(ev.summary)
if not dry:
log('Bulk creating events')
objectmodels['event'].bulk_create(imports)
calendar.save()
else:
log('Dry run - nothing stored.', lvl=warn) | def function[ICALImporter, parameter[ctx, filename, all, owner, calendar, create_calendar, clear_calendar, dry, execfilter]]:
constant[Calendar Importer for iCal (ics) files
]
call[name[log], parameter[constant[iCal importer running]]]
variable[objectmodels] assign[=] call[name[ctx].obj][constant[db]].objectmodels
if compare[call[call[name[objectmodels]][constant[user]].count, parameter[dictionary[[<ast.Constant object at 0x7da1b0ed86d0>], [<ast.Name object at 0x7da1b0ed8700>]]]] greater[>] constant[0]] begin[:]
variable[owner_object] assign[=] call[call[name[objectmodels]][constant[user]].find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0ed88e0>], [<ast.Name object at 0x7da1b0ed8910>]]]]
call[name[log], parameter[constant[Found user]]]
if compare[call[call[name[objectmodels]][constant[calendar]].count, parameter[dictionary[[<ast.Constant object at 0x7da1b0ed9150>], [<ast.Name object at 0x7da1b0ed9180>]]]] greater[>] constant[0]] begin[:]
variable[calendar] assign[=] call[call[name[objectmodels]][constant[calendar]].find_one, parameter[dictionary[[<ast.Constant object at 0x7da1b0ed9360>], [<ast.Name object at 0x7da1b0ed9390>]]]]
call[name[log], parameter[constant[Found calendar]]]
if compare[name[clear_calendar] is constant[True]] begin[:]
call[name[log], parameter[constant[Clearing calendar events]]]
for taget[name[item]] in starred[call[call[name[objectmodels]][constant[event]].find, parameter[dictionary[[<ast.Constant object at 0x7da1b0edb490>], [<ast.Attribute object at 0x7da1b0edb4c0>]]]]] begin[:]
call[name[item].delete, parameter[]]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
variable[caldata] assign[=] call[name[Calendar].from_ical, parameter[call[name[file_object].read, parameter[]]]]
variable[keys] assign[=] dictionary[[<ast.Constant object at 0x7da1b0edb9a0>, <ast.Constant object at 0x7da1b0edb9d0>, <ast.Constant object at 0x7da1b0edba00>, <ast.Constant object at 0x7da1b0edba30>, <ast.Constant object at 0x7da1b0edba60>, <ast.Constant object at 0x7da1b0edba90>, <ast.Constant object at 0x7da1b0edbac0>, <ast.Constant object at 0x7da1b0edbaf0>, <ast.Constant object at 0x7da1b0edbb20>, <ast.Constant object at 0x7da1b0edbb50>, <ast.Constant object at 0x7da1b0edbb80>], [<ast.Constant object at 0x7da1b0edbbb0>, <ast.Constant object at 0x7da1b0edbbe0>, <ast.Constant object at 0x7da1b0edbc10>, <ast.Constant object at 0x7da1b0edbc40>, <ast.Constant object at 0x7da1b0edbc70>, <ast.Constant object at 0x7da1b0edbca0>, <ast.Constant object at 0x7da1b0edbcd0>, <ast.Constant object at 0x7da1b0edbd00>, <ast.Constant object at 0x7da1b0edbd30>, <ast.Constant object at 0x7da1b0edbd60>, <ast.Constant object at 0x7da1b0edbd90>]]
variable[mapping] assign[=] dictionary[[<ast.Constant object at 0x7da1b0edbe50>, <ast.Constant object at 0x7da1b0edbe80>], [<ast.Constant object at 0x7da1b0edbeb0>, <ast.Constant object at 0x7da1b0edbee0>]]
variable[imports] assign[=] list[[]]
def function[ical_import_filter, parameter[original, logfacilty]]:
call[name[log], parameter[constant[Passthrough filter]]]
return[name[original]]
if compare[name[execfilter] is_not constant[None]] begin[:]
import module[os]
variable[textFilePath] assign[=] call[name[os].path.abspath, parameter[call[name[os].path.join, parameter[name[os].path.curdir, name[execfilter]]]]]
variable[textFileFolder] assign[=] call[name[os].path.dirname, parameter[name[textFilePath]]]
from relative_module[importlib.machinery] import module[SourceFileLoader]
variable[filter_module] assign[=] call[call[name[SourceFileLoader], parameter[constant[importfilter], name[textFilePath]]].load_module, parameter[]]
variable[ical_import_filter] assign[=] name[filter_module].ical_import_filter
for taget[name[event]] in starred[call[name[caldata].walk, parameter[]]] begin[:]
if compare[name[event].name equal[==] constant[VEVENT]] begin[:]
call[name[log], parameter[name[event]]]
variable[initializer] assign[=] dictionary[[<ast.Constant object at 0x7da1b0ffb6a0>, <ast.Constant object at 0x7da1b0ff8bb0>], [<ast.Call object at 0x7da1b0ffb520>, <ast.Attribute object at 0x7da1b0ef4460>]]
for taget[name[item]] in starred[name[keys]] begin[:]
variable[thing] assign[=] call[name[event].get, parameter[name[item], constant[None]]]
if compare[name[thing] is constant[None]] begin[:]
variable[thing] assign[=] binary_operation[constant[NO-] + name[item]]
if compare[name[item] in name[mapping]] begin[:]
variable[item_assignment] assign[=] call[name[mapping]][name[item]]
call[name[initializer]][name[item_assignment]] assign[=] name[thing]
variable[new_event] assign[=] call[call[name[objectmodels]][constant[event]], parameter[name[initializer]]]
variable[new_event] assign[=] call[name[ical_import_filter], parameter[name[new_event], name[log]]]
call[name[imports].append, parameter[name[new_event]]]
call[name[log], parameter[name[new_event]]]
for taget[name[ev]] in starred[name[imports]] begin[:]
call[name[log], parameter[name[ev].summary]]
if <ast.UnaryOp object at 0x7da1b0e2ea70> begin[:]
call[name[log], parameter[constant[Bulk creating events]]]
call[call[name[objectmodels]][constant[event]].bulk_create, parameter[name[imports]]]
call[name[calendar].save, parameter[]] | keyword[def] identifier[ICALImporter] ( identifier[ctx] , identifier[filename] , identifier[all] , identifier[owner] , identifier[calendar] , identifier[create_calendar] , identifier[clear_calendar] , identifier[dry] , identifier[execfilter] ):
literal[string]
identifier[log] ( literal[string] )
identifier[objectmodels] = identifier[ctx] . identifier[obj] [ literal[string] ]. identifier[objectmodels]
keyword[if] identifier[objectmodels] [ literal[string] ]. identifier[count] ({ literal[string] : identifier[owner] })> literal[int] :
identifier[owner_object] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : identifier[owner] })
keyword[elif] identifier[objectmodels] [ literal[string] ]. identifier[count] ({ literal[string] : identifier[owner] })> literal[int] :
identifier[owner_object] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : identifier[owner] })
keyword[else] :
identifier[log] ( literal[string] , identifier[lvl] = identifier[warn] )
keyword[return]
identifier[log] ( literal[string] )
keyword[if] identifier[objectmodels] [ literal[string] ]. identifier[count] ({ literal[string] : identifier[calendar] })> literal[int] :
identifier[calendar] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : identifier[calendar] })
keyword[elif] identifier[objectmodels] [ literal[string] ]. identifier[count] ({ literal[string] : identifier[owner] })> literal[int] :
identifier[calendar] = identifier[objectmodels] [ literal[string] ]. identifier[find_one] ({ literal[string] : identifier[calendar] })
keyword[elif] identifier[create_calendar] :
identifier[calendar] = identifier[objectmodels] [ literal[string] ]({
literal[string] : identifier[std_uuid] (),
literal[string] : identifier[calendar]
})
keyword[else] :
identifier[log] ( literal[string] ,
identifier[lvl] = identifier[warn] )
keyword[return]
identifier[log] ( literal[string] )
keyword[if] identifier[clear_calendar] keyword[is] keyword[True] :
identifier[log] ( literal[string] )
keyword[for] identifier[item] keyword[in] identifier[objectmodels] [ literal[string] ]. identifier[find] ({ literal[string] : identifier[calendar] . identifier[uuid] }):
identifier[item] . identifier[delete] ()
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[file_object] :
identifier[caldata] = identifier[Calendar] . identifier[from_ical] ( identifier[file_object] . identifier[read] ())
identifier[keys] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[mapping] ={
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[imports] =[]
keyword[def] identifier[ical_import_filter] ( identifier[original] , identifier[logfacilty] ):
identifier[log] ( literal[string] )
keyword[return] identifier[original]
keyword[if] identifier[execfilter] keyword[is] keyword[not] keyword[None] :
keyword[import] identifier[os]
identifier[textFilePath] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[curdir] , identifier[execfilter] ))
identifier[textFileFolder] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[textFilePath] )
keyword[from] identifier[importlib] . identifier[machinery] keyword[import] identifier[SourceFileLoader]
identifier[filter_module] = identifier[SourceFileLoader] ( literal[string] , identifier[textFilePath] ). identifier[load_module] ()
identifier[ical_import_filter] = identifier[filter_module] . identifier[ical_import_filter]
keyword[for] identifier[event] keyword[in] identifier[caldata] . identifier[walk] ():
keyword[if] identifier[event] . identifier[name] == literal[string] :
identifier[log] ( identifier[event] , identifier[lvl] = identifier[verbose] , identifier[pretty] = keyword[True] )
identifier[initializer] ={
literal[string] : identifier[std_uuid] (),
literal[string] : identifier[calendar] . identifier[uuid] ,
}
keyword[for] identifier[item] keyword[in] identifier[keys] :
identifier[thing] = identifier[event] . identifier[get] ( identifier[item] , keyword[None] )
keyword[if] identifier[thing] keyword[is] keyword[None] :
identifier[thing] = literal[string] + identifier[item]
keyword[else] :
keyword[if] identifier[keys] [ identifier[item] ]== literal[string] :
identifier[thing] = identifier[str] ( identifier[thing] )
keyword[else] :
identifier[thing] = identifier[parser] . identifier[parse] ( identifier[str] ( identifier[thing] . identifier[dt] ))
identifier[thing] = identifier[thing] . identifier[isoformat] ()
keyword[if] identifier[item] keyword[in] identifier[mapping] :
identifier[item_assignment] = identifier[mapping] [ identifier[item] ]
keyword[else] :
identifier[item_assignment] = identifier[item]
identifier[initializer] [ identifier[item_assignment] ]= identifier[thing]
identifier[new_event] = identifier[objectmodels] [ literal[string] ]( identifier[initializer] )
identifier[new_event] = identifier[ical_import_filter] ( identifier[new_event] , identifier[log] )
identifier[imports] . identifier[append] ( identifier[new_event] )
identifier[log] ( identifier[new_event] , identifier[lvl] = identifier[debug] )
keyword[for] identifier[ev] keyword[in] identifier[imports] :
identifier[log] ( identifier[ev] . identifier[summary] )
keyword[if] keyword[not] identifier[dry] :
identifier[log] ( literal[string] )
identifier[objectmodels] [ literal[string] ]. identifier[bulk_create] ( identifier[imports] )
identifier[calendar] . identifier[save] ()
keyword[else] :
identifier[log] ( literal[string] , identifier[lvl] = identifier[warn] ) | def ICALImporter(ctx, filename, all, owner, calendar, create_calendar, clear_calendar, dry, execfilter):
"""Calendar Importer for iCal (ics) files
"""
log('iCal importer running')
objectmodels = ctx.obj['db'].objectmodels
if objectmodels['user'].count({'name': owner}) > 0:
owner_object = objectmodels['user'].find_one({'name': owner}) # depends on [control=['if'], data=[]]
elif objectmodels['user'].count({'uuid': owner}) > 0:
owner_object = objectmodels['user'].find_one({'uuid': owner}) # depends on [control=['if'], data=[]]
else:
log('User unknown. Specify either uuid or name.', lvl=warn)
return
log('Found user')
if objectmodels['calendar'].count({'name': calendar}) > 0:
calendar = objectmodels['calendar'].find_one({'name': calendar}) # depends on [control=['if'], data=[]]
elif objectmodels['calendar'].count({'uuid': owner}) > 0:
calendar = objectmodels['calendar'].find_one({'uuid': calendar}) # depends on [control=['if'], data=[]]
elif create_calendar:
calendar = objectmodels['calendar']({'uuid': std_uuid(), 'name': calendar}) # depends on [control=['if'], data=[]]
else:
log('Calendar unknown and no --create-calendar specified. Specify either uuid or name of an existing calendar.', lvl=warn)
return
log('Found calendar')
if clear_calendar is True:
log('Clearing calendar events')
for item in objectmodels['event'].find({'calendar': calendar.uuid}):
item.delete() # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]]
with open(filename, 'rb') as file_object:
caldata = Calendar.from_ical(file_object.read()) # depends on [control=['with'], data=['file_object']]
keys = {'class': 'str', 'created': 'dt', 'description': 'str', 'dtstart': 'dt', 'dtend': 'dt', 'timestamp': 'dt', 'modified': 'dt', 'location': 'str', 'status': 'str', 'summary': 'str', 'uid': 'str'}
mapping = {'description': 'summary', 'summary': 'name'}
imports = []
def ical_import_filter(original, logfacilty):
log('Passthrough filter')
return original
if execfilter is not None:
import os
textFilePath = os.path.abspath(os.path.join(os.path.curdir, execfilter))
textFileFolder = os.path.dirname(textFilePath)
from importlib.machinery import SourceFileLoader
filter_module = SourceFileLoader('importfilter', textFilePath).load_module()
ical_import_filter = filter_module.ical_import_filter # depends on [control=['if'], data=['execfilter']]
for event in caldata.walk():
if event.name == 'VEVENT':
log(event, lvl=verbose, pretty=True)
initializer = {'uuid': std_uuid(), 'calendar': calendar.uuid}
for item in keys:
thing = event.get(item, None)
if thing is None:
thing = 'NO-' + item # depends on [control=['if'], data=['thing']]
elif keys[item] == 'str':
thing = str(thing) # depends on [control=['if'], data=[]]
else:
thing = parser.parse(str(thing.dt))
thing = thing.isoformat()
if item in mapping:
item_assignment = mapping[item] # depends on [control=['if'], data=['item', 'mapping']]
else:
item_assignment = item
initializer[item_assignment] = thing # depends on [control=['for'], data=['item']]
new_event = objectmodels['event'](initializer)
new_event = ical_import_filter(new_event, log)
imports.append(new_event)
log(new_event, lvl=debug) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['event']]
for ev in imports:
log(ev.summary) # depends on [control=['for'], data=['ev']]
if not dry:
log('Bulk creating events')
objectmodels['event'].bulk_create(imports)
calendar.save() # depends on [control=['if'], data=[]]
else:
log('Dry run - nothing stored.', lvl=warn) |
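The --execfilter hook in the ICALImporter row above only requires that the loaded file expose an ical_import_filter(event, log) callable that returns the (possibly modified) event. A minimal filter module might look like the sketch below; the file name and the name clean-up are illustrative assumptions, not part of the original code.

# my_filter.py -- hypothetical module passed via --execfilter
def ical_import_filter(event, log):
    # 'event' is the freshly built event object; return it, modified or not.
    log('Custom import filter: stripping whitespace from the event name')
    name = getattr(event, 'name', None)
    if isinstance(name, str):
        event.name = name.strip()
    return event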
def get_instance():
"""Return an instance of Client."""
global _instances
user_agents = _config['user-agents']
user_agent = user_agents[
random.randint(0, len(user_agents) - 1)
] if len(user_agents) > 0 else DEFAULT_UA
instance_key = user_agent
try:
instance = _instances[instance_key]
except KeyError:
instance = Client(user_agent, get_proxy)
_instances[instance_key] = instance
return instance | def function[get_instance, parameter[]]:
constant[Return an instance of Client.]
<ast.Global object at 0x7da1b14d9cf0>
variable[user_agents] assign[=] call[name[_config]][constant[user-agents]]
variable[user_agent] assign[=] <ast.IfExp object at 0x7da1b14d9ab0>
variable[instance_key] assign[=] name[user_agent]
<ast.Try object at 0x7da1b14d3220>
return[name[instance]] | keyword[def] identifier[get_instance] ():
literal[string]
keyword[global] identifier[_instances]
identifier[user_agents] = identifier[_config] [ literal[string] ]
identifier[user_agent] = identifier[user_agents] [
identifier[random] . identifier[randint] ( literal[int] , identifier[len] ( identifier[user_agents] )- literal[int] )
] keyword[if] identifier[len] ( identifier[user_agents] )> literal[int] keyword[else] identifier[DEFAULT_UA]
identifier[instance_key] = identifier[user_agent]
keyword[try] :
identifier[instance] = identifier[_instances] [ identifier[instance_key] ]
keyword[except] identifier[KeyError] :
identifier[instance] = identifier[Client] ( identifier[user_agent] , identifier[get_proxy] )
identifier[_instances] [ identifier[instance_key] ]= identifier[instance]
keyword[return] identifier[instance] | def get_instance():
"""Return an instance of Client."""
global _instances
user_agents = _config['user-agents']
user_agent = user_agents[random.randint(0, len(user_agents) - 1)] if len(user_agents) > 0 else DEFAULT_UA
instance_key = user_agent
try:
instance = _instances[instance_key] # depends on [control=['try'], data=[]]
except KeyError:
instance = Client(user_agent, get_proxy)
_instances[instance_key] = instance # depends on [control=['except'], data=[]]
return instance |
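A short sketch of the caching behaviour implemented above; the import path is a placeholder, and it assumes the module-level _config carries an empty 'user-agents' list so both calls fall back to DEFAULT_UA and hit the same _instances key.

from client_module import get_instance  # hypothetical import path

first = get_instance()
second = get_instance()
# The Client is created once for DEFAULT_UA and then reused.
assert first is second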
def get_author_tags(index_page):
"""
Parse `authors` from HTML ``<meta>`` and dublin core.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
"""
dom = dhtmlparser.parseString(index_page)
authors = [
get_html_authors(dom),
get_dc_authors(dom),
]
return sum(authors, []) | def function[get_author_tags, parameter[index_page]]:
constant[
Parse `authors` from HTML ``<meta>`` and dublin core.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
]
variable[dom] assign[=] call[name[dhtmlparser].parseString, parameter[name[index_page]]]
variable[authors] assign[=] list[[<ast.Call object at 0x7da18dc987f0>, <ast.Call object at 0x7da18dc9a500>]]
return[call[name[sum], parameter[name[authors], list[[]]]]] | keyword[def] identifier[get_author_tags] ( identifier[index_page] ):
literal[string]
identifier[dom] = identifier[dhtmlparser] . identifier[parseString] ( identifier[index_page] )
identifier[authors] =[
identifier[get_html_authors] ( identifier[dom] ),
identifier[get_dc_authors] ( identifier[dom] ),
]
keyword[return] identifier[sum] ( identifier[authors] ,[]) | def get_author_tags(index_page):
"""
Parse `authors` from HTML ``<meta>`` and dublin core.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
"""
dom = dhtmlparser.parseString(index_page)
authors = [get_html_authors(dom), get_dc_authors(dom)]
return sum(authors, []) |
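A hedged usage sketch for get_author_tags(); the HTML snippet is made up, and which tags are actually returned depends on the get_html_authors()/get_dc_authors() helpers that are not shown in this row.

page = """
<html><head>
  <meta name="author" content="Jane Doe">
  <meta name="DC.creator" content="John Roe">
</head><body></body></html>
"""
for author in get_author_tags(page):
    print(author)  # expected: one SourceString per recognised author tag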
def make_alembic_config(temporary_dir, migrations_dir):
"""
Alembic uses the `alembic.ini` file to configure where it looks for everything else.
Not only is this file an unnecessary complication around a single-valued configuration;
the single value it chooses to use (the alembic configuration directory) also hard-codes
the decision that there will be such a directory, making Alembic setup overly verbose.
Instead, generate a `Config` object with the values we care about.
:returns: a usable instance of `Alembic.config.Config`
"""
config = Config()
config.set_main_option("temporary_dir", temporary_dir)
config.set_main_option("migrations_dir", migrations_dir)
return config | def function[make_alembic_config, parameter[temporary_dir, migrations_dir]]:
constant[
Alembic uses the `alembic.ini` file to configure where it looks for everything else.
Not only is this file an unnecessary complication around a single-valued configuration;
the single value it chooses to use (the alembic configuration directory) also hard-codes
the decision that there will be such a directory, making Alembic setup overly verbose.
Instead, generate a `Config` object with the values we care about.
:returns: a usable instance of `Alembic.config.Config`
]
variable[config] assign[=] call[name[Config], parameter[]]
call[name[config].set_main_option, parameter[constant[temporary_dir], name[temporary_dir]]]
call[name[config].set_main_option, parameter[constant[migrations_dir], name[migrations_dir]]]
return[name[config]] | keyword[def] identifier[make_alembic_config] ( identifier[temporary_dir] , identifier[migrations_dir] ):
literal[string]
identifier[config] = identifier[Config] ()
identifier[config] . identifier[set_main_option] ( literal[string] , identifier[temporary_dir] )
identifier[config] . identifier[set_main_option] ( literal[string] , identifier[migrations_dir] )
keyword[return] identifier[config] | def make_alembic_config(temporary_dir, migrations_dir):
"""
Alembic uses the `alembic.ini` file to configure where it looks for everything else.
Not only is this file an unnecessary complication around a single-valued configuration;
the single value it chooses to use (the alembic configuration directory) also hard-codes
the decision that there will be such a directory, making Alembic setup overly verbose.
Instead, generate a `Config` object with the values we care about.
:returns: a usable instance of `Alembic.config.Config`
"""
config = Config()
config.set_main_option('temporary_dir', temporary_dir)
config.set_main_option('migrations_dir', migrations_dir)
return config |
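A usage sketch for make_alembic_config(); the paths are placeholders. set_main_option/get_main_option are standard alembic.config.Config methods, and the commented command call assumes the project's env.py reads these two options.

config = make_alembic_config("/tmp/alembic-scratch", "myproject/migrations")
assert config.get_main_option("migrations_dir") == "myproject/migrations"

# The object can then be handed to Alembic commands, e.g.:
# from alembic import command
# command.upgrade(config, "head")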
def get_notebook_app_versions():
"""
Get the valid version numbers of the notebook app.
"""
notebook_apps = dxpy.find_apps(name=NOTEBOOK_APP, all_versions=True)
versions = [str(dxpy.describe(app['id'])['version']) for app in notebook_apps]
return versions | def function[get_notebook_app_versions, parameter[]]:
constant[
Get the valid version numbers of the notebook app.
]
variable[notebook_apps] assign[=] call[name[dxpy].find_apps, parameter[]]
variable[versions] assign[=] <ast.ListComp object at 0x7da204960cd0>
return[name[versions]] | keyword[def] identifier[get_notebook_app_versions] ():
literal[string]
identifier[notebook_apps] = identifier[dxpy] . identifier[find_apps] ( identifier[name] = identifier[NOTEBOOK_APP] , identifier[all_versions] = keyword[True] )
identifier[versions] =[ identifier[str] ( identifier[dxpy] . identifier[describe] ( identifier[app] [ literal[string] ])[ literal[string] ]) keyword[for] identifier[app] keyword[in] identifier[notebook_apps] ]
keyword[return] identifier[versions] | def get_notebook_app_versions():
"""
Get the valid version numbers of the notebook app.
"""
notebook_apps = dxpy.find_apps(name=NOTEBOOK_APP, all_versions=True)
versions = [str(dxpy.describe(app['id'])['version']) for app in notebook_apps]
return versions |
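One plausible way the returned list could be used to validate a requested version; only get_notebook_app_versions() itself comes from the row above, the validation wrapper is illustrative.

def validate_notebook_version(requested):
    available = get_notebook_app_versions()
    if requested not in available:
        raise ValueError('Unknown notebook app version %s; valid versions: %s'
                         % (requested, ', '.join(available)))
    return requested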
def model_funcpointers(vk, model):
"""Fill the model with function pointer
model['funcpointers'] = {'pfn_name': 'struct_name'}
"""
model['funcpointers'] = {}
funcs = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'funcpointer']
structs = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'struct']
for f in funcs:
pfn_name = f['name']
for s in structs:
if 'member' not in s:
continue
for m in s['member']:
if m['type'] == pfn_name:
struct_name = s['@name']
model['funcpointers'][pfn_name] = struct_name | def function[model_funcpointers, parameter[vk, model]]:
constant[Fill the model with function pointer
model['funcpointers'] = {'pfn_name': 'struct_name'}
]
call[name[model]][constant[funcpointers]] assign[=] dictionary[[], []]
variable[funcs] assign[=] <ast.ListComp object at 0x7da1b0863880>
variable[structs] assign[=] <ast.ListComp object at 0x7da1b0863d00>
for taget[name[f]] in starred[name[funcs]] begin[:]
variable[pfn_name] assign[=] call[name[f]][constant[name]]
for taget[name[s]] in starred[name[structs]] begin[:]
if compare[constant[member] <ast.NotIn object at 0x7da2590d7190> name[s]] begin[:]
continue
for taget[name[m]] in starred[call[name[s]][constant[member]]] begin[:]
if compare[call[name[m]][constant[type]] equal[==] name[pfn_name]] begin[:]
variable[struct_name] assign[=] call[name[s]][constant[@name]]
call[call[name[model]][constant[funcpointers]]][name[pfn_name]] assign[=] name[struct_name] | keyword[def] identifier[model_funcpointers] ( identifier[vk] , identifier[model] ):
literal[string]
identifier[model] [ literal[string] ]={}
identifier[funcs] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[vk] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[if] identifier[x] . identifier[get] ( literal[string] )== literal[string] ]
identifier[structs] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[vk] [ literal[string] ][ literal[string] ][ literal[string] ]
keyword[if] identifier[x] . identifier[get] ( literal[string] )== literal[string] ]
keyword[for] identifier[f] keyword[in] identifier[funcs] :
identifier[pfn_name] = identifier[f] [ literal[string] ]
keyword[for] identifier[s] keyword[in] identifier[structs] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[s] :
keyword[continue]
keyword[for] identifier[m] keyword[in] identifier[s] [ literal[string] ]:
keyword[if] identifier[m] [ literal[string] ]== identifier[pfn_name] :
identifier[struct_name] = identifier[s] [ literal[string] ]
identifier[model] [ literal[string] ][ identifier[pfn_name] ]= identifier[struct_name] | def model_funcpointers(vk, model):
"""Fill the model with function pointer
model['funcpointers'] = {'pfn_name': 'struct_name'}
"""
model['funcpointers'] = {}
funcs = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'funcpointer']
structs = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'struct']
for f in funcs:
pfn_name = f['name']
for s in structs:
if 'member' not in s:
continue # depends on [control=['if'], data=[]]
for m in s['member']:
if m['type'] == pfn_name:
struct_name = s['@name'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['m']] # depends on [control=['for'], data=['s']]
model['funcpointers'][pfn_name] = struct_name # depends on [control=['for'], data=['f']] |
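A minimal input/output sketch for model_funcpointers(); the registry dict is a hand-built stand-in for a parsed vk.xml, trimmed to the keys the function actually reads.

vk = {'registry': {'types': {'type': [
    {'@category': 'funcpointer', 'name': 'PFN_vkAllocationFunction'},
    {'@category': 'struct', '@name': 'VkAllocationCallbacks',
     'member': [{'type': 'PFN_vkAllocationFunction'}]},
]}}}
model = {}
model_funcpointers(vk, model)
# model['funcpointers'] == {'PFN_vkAllocationFunction': 'VkAllocationCallbacks'}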
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Pairwise post hoc test for multiple comparisons of rank sums according to
Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of block design,
i.e. rows are blocks, and columns are groups. In this case you do
not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics,
3rd. edition, Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_durbin(x)
'''
if melted and not all([block_col, group_col, y_col]):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data')
def compare_stats(i, j):
dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df = df)
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True)
x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
t = len(groups)
b = x[block_col].unique().size
r = b
k = t
x['y_ranked'] = x.groupby(block_col)[y_col].rank()
Rj = x.groupby(group_col)['y_ranked'].sum()
A = (x['y_ranked'] ** 2).sum()
C = (b * k * (k + 1) ** 2) / 4.
D = (Rj ** 2).sum() - r * C
T1 = (t - 1) / (A - C) * D
denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k -1))))
df = b * k - b - t + 1
vs = np.zeros((t, t), dtype=np.float)
combs = it.combinations(range(t), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method = p_adjust)[1]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) | def function[posthoc_durbin, parameter[a, y_col, block_col, group_col, melted, sort, p_adjust]]:
constant[Pairwise post hoc test for multiple comparisons of rank sums according to
Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of block design,
i.e. rows are blocks, and columns are groups. In this case you do
not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics,
3rd. edition, Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_durbin(x)
]
if <ast.BoolOp object at 0x7da1b1253520> begin[:]
<ast.Raise object at 0x7da1b1250790>
def function[compare_stats, parameter[i, j]]:
variable[dif] assign[=] call[name[np].abs, parameter[binary_operation[call[name[Rj]][call[name[groups]][name[i]]] - call[name[Rj]][call[name[groups]][name[j]]]]]]
variable[tval] assign[=] binary_operation[name[dif] / name[denom]]
variable[pval] assign[=] binary_operation[constant[2.0] * call[name[ss].t.sf, parameter[call[name[np].abs, parameter[name[tval]]]]]]
return[name[pval]]
<ast.Tuple object at 0x7da1b1251ab0> assign[=] call[name[__convert_to_block_df], parameter[name[a], name[y_col], name[group_col], name[block_col], name[melted]]]
if <ast.UnaryOp object at 0x7da1b1195b10> begin[:]
call[name[x]][name[group_col]] assign[=] call[name[Categorical], parameter[call[name[x]][name[group_col]]]]
call[name[x]][name[block_col]] assign[=] call[name[Categorical], parameter[call[name[x]][name[block_col]]]]
call[name[x].sort_values, parameter[]]
call[name[x].dropna, parameter[]]
variable[groups] assign[=] call[call[name[x]][name[group_col]].unique, parameter[]]
variable[t] assign[=] call[name[len], parameter[name[groups]]]
variable[b] assign[=] call[call[name[x]][name[block_col]].unique, parameter[]].size
variable[r] assign[=] name[b]
variable[k] assign[=] name[t]
call[name[x]][constant[y_ranked]] assign[=] call[call[call[name[x].groupby, parameter[name[block_col]]]][name[y_col]].rank, parameter[]]
variable[Rj] assign[=] call[call[call[name[x].groupby, parameter[name[group_col]]]][constant[y_ranked]].sum, parameter[]]
variable[A] assign[=] call[binary_operation[call[name[x]][constant[y_ranked]] ** constant[2]].sum, parameter[]]
variable[C] assign[=] binary_operation[binary_operation[binary_operation[name[b] * name[k]] * binary_operation[binary_operation[name[k] + constant[1]] ** constant[2]]] / constant[4.0]]
variable[D] assign[=] binary_operation[call[binary_operation[name[Rj] ** constant[2]].sum, parameter[]] - binary_operation[name[r] * name[C]]]
variable[T1] assign[=] binary_operation[binary_operation[binary_operation[name[t] - constant[1]] / binary_operation[name[A] - name[C]]] * name[D]]
variable[denom] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[A] - name[C]] * constant[2]] * name[r]] / binary_operation[binary_operation[binary_operation[binary_operation[name[b] * name[k]] - name[b]] - name[t]] + constant[1]]] * binary_operation[constant[1] - binary_operation[name[T1] / binary_operation[name[b] * binary_operation[name[k] - constant[1]]]]]]]]
variable[df] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[b] * name[k]] - name[b]] - name[t]] + constant[1]]
variable[vs] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da1b11973a0>, <ast.Name object at 0x7da1b1195510>]]]]
variable[combs] assign[=] call[name[it].combinations, parameter[call[name[range], parameter[name[t]]], constant[2]]]
variable[tri_upper] assign[=] call[name[np].triu_indices, parameter[call[name[vs].shape][constant[0]], constant[1]]]
variable[tri_lower] assign[=] call[name[np].tril_indices, parameter[call[name[vs].shape][constant[0]], <ast.UnaryOp object at 0x7da1b1195000>]]
call[name[vs]][tuple[[<ast.Slice object at 0x7da1b1194d90>, <ast.Slice object at 0x7da1b1194ee0>]]] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b1194c40>, <ast.Name object at 0x7da1b1194d00>]]] in starred[name[combs]] begin[:]
call[name[vs]][tuple[[<ast.Name object at 0x7da1b11962f0>, <ast.Name object at 0x7da1b1196320>]]] assign[=] call[name[compare_stats], parameter[name[i], name[j]]]
if name[p_adjust] begin[:]
call[name[vs]][name[tri_upper]] assign[=] call[call[name[multipletests], parameter[call[name[vs]][name[tri_upper]]]]][constant[1]]
call[name[vs]][name[tri_lower]] assign[=] call[name[vs].T][name[tri_lower]]
call[name[np].fill_diagonal, parameter[name[vs], <ast.UnaryOp object at 0x7da1b1196170>]]
return[call[name[DataFrame], parameter[name[vs]]]] | keyword[def] identifier[posthoc_durbin] ( identifier[a] , identifier[y_col] = keyword[None] , identifier[block_col] = keyword[None] , identifier[group_col] = keyword[None] , identifier[melted] = keyword[False] , identifier[sort] = keyword[False] , identifier[p_adjust] = keyword[None] ):
literal[string]
keyword[if] identifier[melted] keyword[and] keyword[not] identifier[all] ([ identifier[block_col] , identifier[group_col] , identifier[y_col] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[def] identifier[compare_stats] ( identifier[i] , identifier[j] ):
identifier[dif] = identifier[np] . identifier[abs] ( identifier[Rj] [ identifier[groups] [ identifier[i] ]]- identifier[Rj] [ identifier[groups] [ identifier[j] ]])
identifier[tval] = identifier[dif] / identifier[denom]
identifier[pval] = literal[int] * identifier[ss] . identifier[t] . identifier[sf] ( identifier[np] . identifier[abs] ( identifier[tval] ), identifier[df] = identifier[df] )
keyword[return] identifier[pval]
identifier[x] , identifier[y_col] , identifier[group_col] , identifier[block_col] = identifier[__convert_to_block_df] ( identifier[a] , identifier[y_col] , identifier[group_col] , identifier[block_col] , identifier[melted] )
keyword[if] keyword[not] identifier[sort] :
identifier[x] [ identifier[group_col] ]= identifier[Categorical] ( identifier[x] [ identifier[group_col] ], identifier[categories] = identifier[x] [ identifier[group_col] ]. identifier[unique] (), identifier[ordered] = keyword[True] )
identifier[x] [ identifier[block_col] ]= identifier[Categorical] ( identifier[x] [ identifier[block_col] ], identifier[categories] = identifier[x] [ identifier[block_col] ]. identifier[unique] (), identifier[ordered] = keyword[True] )
identifier[x] . identifier[sort_values] ( identifier[by] =[ identifier[block_col] , identifier[group_col] ], identifier[ascending] = keyword[True] , identifier[inplace] = keyword[True] )
identifier[x] . identifier[dropna] ( identifier[inplace] = keyword[True] )
identifier[groups] = identifier[x] [ identifier[group_col] ]. identifier[unique] ()
identifier[t] = identifier[len] ( identifier[groups] )
identifier[b] = identifier[x] [ identifier[block_col] ]. identifier[unique] (). identifier[size]
identifier[r] = identifier[b]
identifier[k] = identifier[t]
identifier[x] [ literal[string] ]= identifier[x] . identifier[groupby] ( identifier[block_col] )[ identifier[y_col] ]. identifier[rank] ()
identifier[Rj] = identifier[x] . identifier[groupby] ( identifier[group_col] )[ literal[string] ]. identifier[sum] ()
identifier[A] =( identifier[x] [ literal[string] ]** literal[int] ). identifier[sum] ()
identifier[C] =( identifier[b] * identifier[k] *( identifier[k] + literal[int] )** literal[int] )/ literal[int]
identifier[D] =( identifier[Rj] ** literal[int] ). identifier[sum] ()- identifier[r] * identifier[C]
identifier[T1] =( identifier[t] - literal[int] )/( identifier[A] - identifier[C] )* identifier[D]
identifier[denom] = identifier[np] . identifier[sqrt] ((( identifier[A] - identifier[C] )* literal[int] * identifier[r] )/( identifier[b] * identifier[k] - identifier[b] - identifier[t] + literal[int] )*( literal[int] - identifier[T1] /( identifier[b] *( identifier[k] - literal[int] ))))
identifier[df] = identifier[b] * identifier[k] - identifier[b] - identifier[t] + literal[int]
identifier[vs] = identifier[np] . identifier[zeros] (( identifier[t] , identifier[t] ), identifier[dtype] = identifier[np] . identifier[float] )
identifier[combs] = identifier[it] . identifier[combinations] ( identifier[range] ( identifier[t] ), literal[int] )
identifier[tri_upper] = identifier[np] . identifier[triu_indices] ( identifier[vs] . identifier[shape] [ literal[int] ], literal[int] )
identifier[tri_lower] = identifier[np] . identifier[tril_indices] ( identifier[vs] . identifier[shape] [ literal[int] ],- literal[int] )
identifier[vs] [:,:]= literal[int]
keyword[for] identifier[i] , identifier[j] keyword[in] identifier[combs] :
identifier[vs] [ identifier[i] , identifier[j] ]= identifier[compare_stats] ( identifier[i] , identifier[j] )
keyword[if] identifier[p_adjust] :
identifier[vs] [ identifier[tri_upper] ]= identifier[multipletests] ( identifier[vs] [ identifier[tri_upper] ], identifier[method] = identifier[p_adjust] )[ literal[int] ]
identifier[vs] [ identifier[tri_lower] ]= identifier[vs] . identifier[T] [ identifier[tri_lower] ]
identifier[np] . identifier[fill_diagonal] ( identifier[vs] ,- literal[int] )
keyword[return] identifier[DataFrame] ( identifier[vs] , identifier[index] = identifier[groups] , identifier[columns] = identifier[groups] ) | def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
"""Pairwise post hoc test for multiple comparisons of rank sums according to
Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of block design,
i.e. rows are blocks, and columns are groups. In this case you do
not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics,
3rd. edition, Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_durbin(x)
"""
if melted and (not all([block_col, group_col, y_col])):
raise ValueError('block_col, group_col, y_col should be explicitly specified if using melted data') # depends on [control=['if'], data=[]]
def compare_stats(i, j):
dif = np.abs(Rj[groups[i]] - Rj[groups[j]])
tval = dif / denom
pval = 2.0 * ss.t.sf(np.abs(tval), df=df)
return pval
(x, y_col, group_col, block_col) = __convert_to_block_df(a, y_col, group_col, block_col, melted)
if not sort:
x[group_col] = Categorical(x[group_col], categories=x[group_col].unique(), ordered=True)
x[block_col] = Categorical(x[block_col], categories=x[block_col].unique(), ordered=True) # depends on [control=['if'], data=[]]
x.sort_values(by=[block_col, group_col], ascending=True, inplace=True)
x.dropna(inplace=True)
groups = x[group_col].unique()
t = len(groups)
b = x[block_col].unique().size
r = b
k = t
x['y_ranked'] = x.groupby(block_col)[y_col].rank()
Rj = x.groupby(group_col)['y_ranked'].sum()
A = (x['y_ranked'] ** 2).sum()
C = b * k * (k + 1) ** 2 / 4.0
D = (Rj ** 2).sum() - r * C
T1 = (t - 1) / (A - C) * D
denom = np.sqrt((A - C) * 2 * r / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
df = b * k - b - t + 1
vs = np.zeros((t, t), dtype=np.float)
combs = it.combinations(range(t), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for (i, j) in combs:
vs[i, j] = compare_stats(i, j) # depends on [control=['for'], data=[]]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1] # depends on [control=['if'], data=[]]
vs[tri_lower] = vs.T[tri_lower]
np.fill_diagonal(vs, -1)
return DataFrame(vs, index=groups, columns=groups) |
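A hedged sketch of the melted call path described in the docstring, reshaping the docstring's example matrix into y/blocks/groups columns; the column names follow the docstring's own convention.

import numpy as np
import pandas as pd

x = np.array([[31, 27, 24], [31, 28, 31], [45, 29, 46],
              [21, 18, 48], [42, 36, 46], [32, 17, 40]])
df_melted = pd.DataFrame({
    'y': x.ravel(),
    'blocks': np.repeat(np.arange(x.shape[0]), x.shape[1]),
    'groups': np.tile(np.arange(x.shape[1]), x.shape[0]),
})
p_values = posthoc_durbin(df_melted, y_col='y', block_col='blocks',
                          group_col='groups', melted=True, p_adjust='holm')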
def complexity_entropy_shannon(signal):
"""
Computes the shannon entropy. Copied from the `pyEntropy <https://github.com/nikdon/pyEntropy>`_ repo by tjugo.
Parameters
----------
signal : list or array
List or array of values.
Returns
----------
shannon_entropy : float
The Shannon Entropy as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> shannon_entropy = nk.complexity_entropy_shannon(signal)
Notes
----------
*Details*
- **shannon entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content.
*Authors*
- tjugo (https://github.com/nikdon)
*Dependencies*
- numpy
*See Also*
- pyEntropy package: https://github.com/nikdon/pyEntropy
References
-----------
- None
"""
# Check if string
if not isinstance(signal, str):
signal = list(signal)
signal = np.array(signal)
# Create a frequency data
data_set = list(set(signal))
freq_list = []
for entry in data_set:
counter = 0.
for i in signal:
if i == entry:
counter += 1
freq_list.append(float(counter) / len(signal))
# Shannon entropy
shannon_entropy = 0.0
for freq in freq_list:
shannon_entropy += freq * np.log2(freq)
shannon_entropy = -shannon_entropy
return(shannon_entropy) | def function[complexity_entropy_shannon, parameter[signal]]:
constant[
Computes the shannon entropy. Copied from the `pyEntropy <https://github.com/nikdon/pyEntropy>`_ repo by tjugo.
Parameters
----------
signal : list or array
List or array of values.
Returns
----------
shannon_entropy : float
The Shannon Entropy as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> shannon_entropy = nk.complexity_entropy_shannon(signal)
Notes
----------
*Details*
- **shannon entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content.
*Authors*
- tjugo (https://github.com/nikdon)
*Dependencies*
- numpy
*See Also*
- pyEntropy package: https://github.com/nikdon/pyEntropy
References
-----------
- None
]
if <ast.UnaryOp object at 0x7da20cabeb00> begin[:]
variable[signal] assign[=] call[name[list], parameter[name[signal]]]
variable[signal] assign[=] call[name[np].array, parameter[name[signal]]]
variable[data_set] assign[=] call[name[list], parameter[call[name[set], parameter[name[signal]]]]]
variable[freq_list] assign[=] list[[]]
for taget[name[entry]] in starred[name[data_set]] begin[:]
variable[counter] assign[=] constant[0.0]
for taget[name[i]] in starred[name[signal]] begin[:]
if compare[name[i] equal[==] name[entry]] begin[:]
<ast.AugAssign object at 0x7da20cabe980>
call[name[freq_list].append, parameter[binary_operation[call[name[float], parameter[name[counter]]] / call[name[len], parameter[name[signal]]]]]]
variable[shannon_entropy] assign[=] constant[0.0]
for taget[name[freq]] in starred[name[freq_list]] begin[:]
<ast.AugAssign object at 0x7da20cabf640>
variable[shannon_entropy] assign[=] <ast.UnaryOp object at 0x7da18f09d0c0>
return[name[shannon_entropy]] | keyword[def] identifier[complexity_entropy_shannon] ( identifier[signal] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[signal] , identifier[str] ):
identifier[signal] = identifier[list] ( identifier[signal] )
identifier[signal] = identifier[np] . identifier[array] ( identifier[signal] )
identifier[data_set] = identifier[list] ( identifier[set] ( identifier[signal] ))
identifier[freq_list] =[]
keyword[for] identifier[entry] keyword[in] identifier[data_set] :
identifier[counter] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[signal] :
keyword[if] identifier[i] == identifier[entry] :
identifier[counter] += literal[int]
identifier[freq_list] . identifier[append] ( identifier[float] ( identifier[counter] )/ identifier[len] ( identifier[signal] ))
identifier[shannon_entropy] = literal[int]
keyword[for] identifier[freq] keyword[in] identifier[freq_list] :
identifier[shannon_entropy] += identifier[freq] * identifier[np] . identifier[log2] ( identifier[freq] )
identifier[shannon_entropy] =- identifier[shannon_entropy]
keyword[return] ( identifier[shannon_entropy] ) | def complexity_entropy_shannon(signal):
"""
Computes the shannon entropy. Copied from the `pyEntropy <https://github.com/nikdon/pyEntropy>`_ repo by tjugo.
Parameters
----------
signal : list or array
List or array of values.
Returns
----------
shannon_entropy : float
The Shannon Entropy as float value.
Example
----------
>>> import neurokit as nk
>>>
>>> signal = np.sin(np.log(np.random.sample(666)))
>>> shannon_entropy = nk.complexity_entropy_shannon(signal)
Notes
----------
*Details*
- **shannon entropy**: Entropy is a measure of unpredictability of the state, or equivalently, of its average information content.
*Authors*
- tjugo (https://github.com/nikdon)
*Dependencies*
- numpy
*See Also*
- pyEntropy package: https://github.com/nikdon/pyEntropy
References
-----------
- None
"""
# Check if string
if not isinstance(signal, str):
signal = list(signal) # depends on [control=['if'], data=[]]
signal = np.array(signal)
# Create a frequency data
data_set = list(set(signal))
freq_list = []
for entry in data_set:
counter = 0.0
for i in signal:
if i == entry:
counter += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
freq_list.append(float(counter) / len(signal)) # depends on [control=['for'], data=['entry']]
# Shannon entropy
shannon_entropy = 0.0
for freq in freq_list:
shannon_entropy += freq * np.log2(freq) # depends on [control=['for'], data=['freq']]
shannon_entropy = -shannon_entropy
return shannon_entropy |
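The loop above computes H = -sum(p_i * log2(p_i)) over the empirical frequencies of each distinct value; an equivalent vectorised sketch using numpy only (not part of the original module):

import numpy as np

def shannon_entropy_vectorised(signal):
    # Empirical probability of each distinct symbol, then Shannon entropy in bits.
    _, counts = np.unique(np.asarray(list(signal)), return_counts=True)
    p = counts / counts.sum()
    return float(-(p * np.log2(p)).sum())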
def readACTIONRECORD(self):
""" Read a SWFActionRecord """
action = None
actionCode = self.readUI8()
if actionCode != 0:
actionLength = self.readUI16() if actionCode >= 0x80 else 0
#print "0x%x"%actionCode, actionLength
action = SWFActionFactory.create(actionCode, actionLength)
action.parse(self)
return action | def function[readACTIONRECORD, parameter[self]]:
constant[ Read a SWFActionRecord ]
variable[action] assign[=] constant[None]
variable[actionCode] assign[=] call[name[self].readUI8, parameter[]]
if compare[name[actionCode] not_equal[!=] constant[0]] begin[:]
variable[actionLength] assign[=] <ast.IfExp object at 0x7da1b0dc1c30>
variable[action] assign[=] call[name[SWFActionFactory].create, parameter[name[actionCode], name[actionLength]]]
call[name[action].parse, parameter[name[self]]]
return[name[action]] | keyword[def] identifier[readACTIONRECORD] ( identifier[self] ):
literal[string]
identifier[action] = keyword[None]
identifier[actionCode] = identifier[self] . identifier[readUI8] ()
keyword[if] identifier[actionCode] != literal[int] :
identifier[actionLength] = identifier[self] . identifier[readUI16] () keyword[if] identifier[actionCode] >= literal[int] keyword[else] literal[int]
identifier[action] = identifier[SWFActionFactory] . identifier[create] ( identifier[actionCode] , identifier[actionLength] )
identifier[action] . identifier[parse] ( identifier[self] )
keyword[return] identifier[action] | def readACTIONRECORD(self):
""" Read a SWFActionRecord """
action = None
actionCode = self.readUI8()
if actionCode != 0:
actionLength = self.readUI16() if actionCode >= 128 else 0
#print "0x%x"%actionCode, actionLength
action = SWFActionFactory.create(actionCode, actionLength)
action.parse(self) # depends on [control=['if'], data=['actionCode']]
return action |
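readACTIONRECORD() returns None once it hits the terminating 0x00 action code, so a caller typically drains a DoAction block with a loop like the hedged sketch below (the helper name is made up).

def read_all_action_records(stream):
    actions = []
    while True:
        action = stream.readACTIONRECORD()
        if action is None:  # ActionEndFlag (code 0x00) reached
            break
        actions.append(action)
    return actions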
def _handle_table_style(self, end_token):
"""Handle style attributes for a table until ``end_token``."""
data = _TagOpenData()
data.context = _TagOpenData.CX_ATTR_READY
while True:
this = self._read()
can_exit = (not data.context & data.CX_QUOTED or
data.context & data.CX_NOTE_SPACE)
if this == end_token and can_exit:
if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
self._push_tag_buffer(data)
if this.isspace():
data.padding_buffer["first"] += this
return data.padding_buffer["first"]
elif this is self.END or this == end_token:
if self._context & contexts.TAG_ATTR:
if data.context & data.CX_QUOTED:
# Unclosed attribute quote: reset, don't die
data.context = data.CX_ATTR_VALUE
self._memoize_bad_route()
self._pop()
self._head = data.reset
continue
self._pop()
self._fail_route()
else:
self._handle_tag_data(data, this)
self._head += 1 | def function[_handle_table_style, parameter[self, end_token]]:
constant[Handle style attributes for a table until ``end_token``.]
variable[data] assign[=] call[name[_TagOpenData], parameter[]]
name[data].context assign[=] name[_TagOpenData].CX_ATTR_READY
while constant[True] begin[:]
variable[this] assign[=] call[name[self]._read, parameter[]]
variable[can_exit] assign[=] <ast.BoolOp object at 0x7da204346320>
if <ast.BoolOp object at 0x7da204344d00> begin[:]
if binary_operation[name[data].context <ast.BitAnd object at 0x7da2590d6b60> binary_operation[name[data].CX_ATTR_NAME <ast.BitOr object at 0x7da2590d6aa0> name[data].CX_ATTR_VALUE]] begin[:]
call[name[self]._push_tag_buffer, parameter[name[data]]]
if call[name[this].isspace, parameter[]] begin[:]
<ast.AugAssign object at 0x7da18eb55c90>
return[call[name[data].padding_buffer][constant[first]]]
<ast.AugAssign object at 0x7da18eb569e0> | keyword[def] identifier[_handle_table_style] ( identifier[self] , identifier[end_token] ):
literal[string]
identifier[data] = identifier[_TagOpenData] ()
identifier[data] . identifier[context] = identifier[_TagOpenData] . identifier[CX_ATTR_READY]
keyword[while] keyword[True] :
identifier[this] = identifier[self] . identifier[_read] ()
identifier[can_exit] =( keyword[not] identifier[data] . identifier[context] & identifier[data] . identifier[CX_QUOTED] keyword[or]
identifier[data] . identifier[context] & identifier[data] . identifier[CX_NOTE_SPACE] )
keyword[if] identifier[this] == identifier[end_token] keyword[and] identifier[can_exit] :
keyword[if] identifier[data] . identifier[context] &( identifier[data] . identifier[CX_ATTR_NAME] | identifier[data] . identifier[CX_ATTR_VALUE] ):
identifier[self] . identifier[_push_tag_buffer] ( identifier[data] )
keyword[if] identifier[this] . identifier[isspace] ():
identifier[data] . identifier[padding_buffer] [ literal[string] ]+= identifier[this]
keyword[return] identifier[data] . identifier[padding_buffer] [ literal[string] ]
keyword[elif] identifier[this] keyword[is] identifier[self] . identifier[END] keyword[or] identifier[this] == identifier[end_token] :
keyword[if] identifier[self] . identifier[_context] & identifier[contexts] . identifier[TAG_ATTR] :
keyword[if] identifier[data] . identifier[context] & identifier[data] . identifier[CX_QUOTED] :
identifier[data] . identifier[context] = identifier[data] . identifier[CX_ATTR_VALUE]
identifier[self] . identifier[_memoize_bad_route] ()
identifier[self] . identifier[_pop] ()
identifier[self] . identifier[_head] = identifier[data] . identifier[reset]
keyword[continue]
identifier[self] . identifier[_pop] ()
identifier[self] . identifier[_fail_route] ()
keyword[else] :
identifier[self] . identifier[_handle_tag_data] ( identifier[data] , identifier[this] )
identifier[self] . identifier[_head] += literal[int] | def _handle_table_style(self, end_token):
"""Handle style attributes for a table until ``end_token``."""
data = _TagOpenData()
data.context = _TagOpenData.CX_ATTR_READY
while True:
this = self._read()
can_exit = not data.context & data.CX_QUOTED or data.context & data.CX_NOTE_SPACE
if this == end_token and can_exit:
if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
self._push_tag_buffer(data) # depends on [control=['if'], data=[]]
if this.isspace():
data.padding_buffer['first'] += this # depends on [control=['if'], data=[]]
return data.padding_buffer['first'] # depends on [control=['if'], data=[]]
elif this is self.END or this == end_token:
if self._context & contexts.TAG_ATTR:
if data.context & data.CX_QUOTED:
# Unclosed attribute quote: reset, don't die
data.context = data.CX_ATTR_VALUE
self._memoize_bad_route()
self._pop()
self._head = data.reset
continue # depends on [control=['if'], data=[]]
self._pop() # depends on [control=['if'], data=[]]
self._fail_route() # depends on [control=['if'], data=[]]
else:
self._handle_tag_data(data, this)
self._head += 1 # depends on [control=['while'], data=[]] |
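The contexts/_TagOpenData names suggest this is the wikitable branch of the mwparserfromhell tokenizer; assuming that, the attribute handling above is exercised indirectly by parsing table markup such as the sketch below.

import mwparserfromhell

wikicode = mwparserfromhell.parse('{| class="wikitable" style="color:red"\n|-\n| cell\n|}')
table = wikicode.filter_tags()[0]
print(table.attributes)  # class/style attributes collected while tokenizing the table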
def add_options(cls, manager):
"""Register plug-in specific options."""
kw = {}
if flake8.__version__ >= '3.0.0':
kw['parse_from_config'] = True
manager.add_option(
"--known-modules",
action='store',
default="",
help=(
"User defined mapping between a project name and a list of"
" provided modules. For example: ``--known-modules=project:"
"[Project],extra-project:[extras,utilities]``."
),
**kw
) | def function[add_options, parameter[cls, manager]]:
constant[Register plug-in specific options.]
variable[kw] assign[=] dictionary[[], []]
if compare[name[flake8].__version__ greater_or_equal[>=] constant[3.0.0]] begin[:]
call[name[kw]][constant[parse_from_config]] assign[=] constant[True]
call[name[manager].add_option, parameter[constant[--known-modules]]] | keyword[def] identifier[add_options] ( identifier[cls] , identifier[manager] ):
literal[string]
identifier[kw] ={}
keyword[if] identifier[flake8] . identifier[__version__] >= literal[string] :
identifier[kw] [ literal[string] ]= keyword[True]
identifier[manager] . identifier[add_option] (
literal[string] ,
identifier[action] = literal[string] ,
identifier[default] = literal[string] ,
identifier[help] =(
literal[string]
literal[string]
literal[string]
),
** identifier[kw]
) | def add_options(cls, manager):
"""Register plug-in specific options."""
kw = {}
if flake8.__version__ >= '3.0.0':
kw['parse_from_config'] = True # depends on [control=['if'], data=[]]
manager.add_option('--known-modules', action='store', default='', help='User defined mapping between a project name and a list of provided modules. For example: ``--known-modules=project:[Project],extra-project:[extras,utilities]``.', **kw) |
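The --known-modules value uses a small name:[mod1,mod2] mini-language; the plug-in's own parser is not part of this row, but here is a hedged sketch of one way the documented format could be decoded.

def parse_known_modules(value):
    # "project:[Project],extra-project:[extras,utilities]" -> dict of lists
    mapping = {}
    if not value:
        return mapping
    for chunk in value.split('],'):
        name, _, mods = chunk.partition(':[')
        mapping[name.strip()] = [m for m in mods.rstrip(']').split(',') if m]
    return mapping

parse_known_modules('project:[Project],extra-project:[extras,utilities]')
# {'project': ['Project'], 'extra-project': ['extras', 'utilities']}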
def while_statement(self):
"""
while_statement: 'while' logical_or_expr compound
"""
self._process(Nature.WHILE)
condition = self.logical_or_expr()
compound = self.compound()
return WhileStatement(condition=condition, compound=compound) | def function[while_statement, parameter[self]]:
constant[
while_statement: 'while' logical_or_expr compound
]
call[name[self]._process, parameter[name[Nature].WHILE]]
variable[condition] assign[=] call[name[self].logical_or_expr, parameter[]]
variable[compound] assign[=] call[name[self].compound, parameter[]]
return[call[name[WhileStatement], parameter[]]] | keyword[def] identifier[while_statement] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_process] ( identifier[Nature] . identifier[WHILE] )
identifier[condition] = identifier[self] . identifier[logical_or_expr] ()
identifier[compound] = identifier[self] . identifier[compound] ()
keyword[return] identifier[WhileStatement] ( identifier[condition] = identifier[condition] , identifier[compound] = identifier[compound] ) | def while_statement(self):
"""
while_statement: 'while' local_or_expr compound
"""
self._process(Nature.WHILE)
condition = self.logical_or_expr()
compound = self.compound()
return WhileStatement(condition=condition, compound=compound) |
def _on_key_pressed(self, event):
"""
Override key press to select the current scope if the user wants
        to delete a folded scope (without selecting it).
"""
delete_request = event.key() in [QtCore.Qt.Key_Backspace,
QtCore.Qt.Key_Delete]
if event.text() or delete_request:
cursor = self.editor.textCursor()
if cursor.hasSelection():
# change selection to encompass the whole scope.
positions_to_check = cursor.selectionStart(), cursor.selectionEnd()
else:
positions_to_check = (cursor.position(), )
for pos in positions_to_check:
block = self.editor.document().findBlock(pos)
th = TextBlockHelper()
if th.is_fold_trigger(block) and th.is_collapsed(block):
self.toggle_fold_trigger(block)
if delete_request and cursor.hasSelection():
scope = FoldScope(self.find_parent_scope(block))
tc = TextHelper(self.editor).select_lines(*scope.get_range())
if tc.selectionStart() > cursor.selectionStart():
start = cursor.selectionStart()
else:
start = tc.selectionStart()
if tc.selectionEnd() < cursor.selectionEnd():
end = cursor.selectionEnd()
else:
end = tc.selectionEnd()
tc.setPosition(start)
tc.setPosition(end, tc.KeepAnchor)
self.editor.setTextCursor(tc) | def function[_on_key_pressed, parameter[self, event]]:
constant[
Override key press to select the current scope if the user wants
to deleted a folded scope (without selecting it).
]
variable[delete_request] assign[=] compare[call[name[event].key, parameter[]] in list[[<ast.Attribute object at 0x7da20c6c5f60>, <ast.Attribute object at 0x7da20c6c5e70>]]]
if <ast.BoolOp object at 0x7da20c6c4e50> begin[:]
variable[cursor] assign[=] call[name[self].editor.textCursor, parameter[]]
if call[name[cursor].hasSelection, parameter[]] begin[:]
variable[positions_to_check] assign[=] tuple[[<ast.Call object at 0x7da20c6c4dc0>, <ast.Call object at 0x7da20c6c5ff0>]]
for taget[name[pos]] in starred[name[positions_to_check]] begin[:]
variable[block] assign[=] call[call[name[self].editor.document, parameter[]].findBlock, parameter[name[pos]]]
variable[th] assign[=] call[name[TextBlockHelper], parameter[]]
if <ast.BoolOp object at 0x7da20c6c7a30> begin[:]
call[name[self].toggle_fold_trigger, parameter[name[block]]]
if <ast.BoolOp object at 0x7da20c6c54b0> begin[:]
variable[scope] assign[=] call[name[FoldScope], parameter[call[name[self].find_parent_scope, parameter[name[block]]]]]
variable[tc] assign[=] call[call[name[TextHelper], parameter[name[self].editor]].select_lines, parameter[<ast.Starred object at 0x7da20c6c4190>]]
if compare[call[name[tc].selectionStart, parameter[]] greater[>] call[name[cursor].selectionStart, parameter[]]] begin[:]
variable[start] assign[=] call[name[cursor].selectionStart, parameter[]]
if compare[call[name[tc].selectionEnd, parameter[]] less[<] call[name[cursor].selectionEnd, parameter[]]] begin[:]
variable[end] assign[=] call[name[cursor].selectionEnd, parameter[]]
call[name[tc].setPosition, parameter[name[start]]]
call[name[tc].setPosition, parameter[name[end], name[tc].KeepAnchor]]
call[name[self].editor.setTextCursor, parameter[name[tc]]] | keyword[def] identifier[_on_key_pressed] ( identifier[self] , identifier[event] ):
literal[string]
identifier[delete_request] = identifier[event] . identifier[key] () keyword[in] [ identifier[QtCore] . identifier[Qt] . identifier[Key_Backspace] ,
identifier[QtCore] . identifier[Qt] . identifier[Key_Delete] ]
keyword[if] identifier[event] . identifier[text] () keyword[or] identifier[delete_request] :
identifier[cursor] = identifier[self] . identifier[editor] . identifier[textCursor] ()
keyword[if] identifier[cursor] . identifier[hasSelection] ():
identifier[positions_to_check] = identifier[cursor] . identifier[selectionStart] (), identifier[cursor] . identifier[selectionEnd] ()
keyword[else] :
identifier[positions_to_check] =( identifier[cursor] . identifier[position] (),)
keyword[for] identifier[pos] keyword[in] identifier[positions_to_check] :
identifier[block] = identifier[self] . identifier[editor] . identifier[document] (). identifier[findBlock] ( identifier[pos] )
identifier[th] = identifier[TextBlockHelper] ()
keyword[if] identifier[th] . identifier[is_fold_trigger] ( identifier[block] ) keyword[and] identifier[th] . identifier[is_collapsed] ( identifier[block] ):
identifier[self] . identifier[toggle_fold_trigger] ( identifier[block] )
keyword[if] identifier[delete_request] keyword[and] identifier[cursor] . identifier[hasSelection] ():
identifier[scope] = identifier[FoldScope] ( identifier[self] . identifier[find_parent_scope] ( identifier[block] ))
identifier[tc] = identifier[TextHelper] ( identifier[self] . identifier[editor] ). identifier[select_lines] (* identifier[scope] . identifier[get_range] ())
keyword[if] identifier[tc] . identifier[selectionStart] ()> identifier[cursor] . identifier[selectionStart] ():
identifier[start] = identifier[cursor] . identifier[selectionStart] ()
keyword[else] :
identifier[start] = identifier[tc] . identifier[selectionStart] ()
keyword[if] identifier[tc] . identifier[selectionEnd] ()< identifier[cursor] . identifier[selectionEnd] ():
identifier[end] = identifier[cursor] . identifier[selectionEnd] ()
keyword[else] :
identifier[end] = identifier[tc] . identifier[selectionEnd] ()
identifier[tc] . identifier[setPosition] ( identifier[start] )
identifier[tc] . identifier[setPosition] ( identifier[end] , identifier[tc] . identifier[KeepAnchor] )
identifier[self] . identifier[editor] . identifier[setTextCursor] ( identifier[tc] ) | def _on_key_pressed(self, event):
"""
Override key press to select the current scope if the user wants
to deleted a folded scope (without selecting it).
"""
delete_request = event.key() in [QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete]
if event.text() or delete_request:
cursor = self.editor.textCursor()
if cursor.hasSelection():
# change selection to encompass the whole scope.
positions_to_check = (cursor.selectionStart(), cursor.selectionEnd()) # depends on [control=['if'], data=[]]
else:
positions_to_check = (cursor.position(),)
for pos in positions_to_check:
block = self.editor.document().findBlock(pos)
th = TextBlockHelper()
if th.is_fold_trigger(block) and th.is_collapsed(block):
self.toggle_fold_trigger(block)
if delete_request and cursor.hasSelection():
scope = FoldScope(self.find_parent_scope(block))
tc = TextHelper(self.editor).select_lines(*scope.get_range())
if tc.selectionStart() > cursor.selectionStart():
start = cursor.selectionStart() # depends on [control=['if'], data=[]]
else:
start = tc.selectionStart()
if tc.selectionEnd() < cursor.selectionEnd():
end = cursor.selectionEnd() # depends on [control=['if'], data=[]]
else:
end = tc.selectionEnd()
tc.setPosition(start)
tc.setPosition(end, tc.KeepAnchor)
self.editor.setTextCursor(tc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pos']] # depends on [control=['if'], data=[]] |
def file_exists(original_file):
"""
Check to make sure the original file exists
"""
if original_file.startswith("s3://"):
from filesystem import s3
return s3.file_exists(original_file)
else:
if not os.path.exists(original_file):
return False
if not os.path.isfile(original_file):
return False
return True | def function[file_exists, parameter[original_file]]:
constant[
Check to make sure the original file exists
]
if call[name[original_file].startswith, parameter[constant[s3://]]] begin[:]
from relative_module[filesystem] import module[s3]
return[call[name[s3].file_exists, parameter[name[original_file]]]]
return[constant[True]] | keyword[def] identifier[file_exists] ( identifier[original_file] ):
literal[string]
keyword[if] identifier[original_file] . identifier[startswith] ( literal[string] ):
keyword[from] identifier[filesystem] keyword[import] identifier[s3]
keyword[return] identifier[s3] . identifier[file_exists] ( identifier[original_file] )
keyword[else] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[original_file] ):
keyword[return] keyword[False]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[original_file] ):
keyword[return] keyword[False]
keyword[return] keyword[True] | def file_exists(original_file):
"""
Check to make sure the original file exists
"""
if original_file.startswith('s3://'):
from filesystem import s3
return s3.file_exists(original_file) # depends on [control=['if'], data=[]]
else:
if not os.path.exists(original_file):
return False # depends on [control=['if'], data=[]]
if not os.path.isfile(original_file):
return False # depends on [control=['if'], data=[]]
return True |
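A short usage sketch for file_exists; the paths below are placeholders, and the s3:// branch assumes the project's filesystem.s3 helper is importable.

# Placeholder paths, for illustration only (os is already imported for the
# function above):
print(file_exists("/etc/hosts"))           # True on most Unix systems
print(file_exists("/no/such/file.txt"))    # False
# An s3:// path would be delegated to the project's filesystem.s3 module:
# file_exists("s3://my-bucket/some/key.csv")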
def CreateAllStaticRAPIDFiles(in_drainage_line,
river_id,
length_id,
slope_id,
next_down_id,
rapid_output_folder,
kfac_celerity=1000.0/3600.0,
kfac_formula_type=3,
kfac_length_units="km",
lambda_k=0.35,
x_value=0.3,
nhdplus=False,
taudem_network_connectivity_tree_file=None,
file_geodatabase=None):
"""
To generate the static RAPID files (rapid_connect.csv, riv_bas_id.csv,
kfac.csv, k.csv, x.csv, comid_lat_lon_z.csv) with default values.
Parameters
----------
in_drainage_line: str
Path to the stream network (i.e. Drainage Line) shapefile.
river_id: str
The name of the field with the river ID
(Ex. 'HydroID', 'COMID', or 'LINKNO').
length_id: str
        The field name containing the length of the river segment
(Ex. 'LENGTHKM' or 'Length').
slope_id: str
        The field name containing the slope of the river segment
(Ex. 'Avg_Slope' or 'Slope').
next_down_id: str
The name of the field with the river ID of the next downstream river
segment (Ex. 'NextDownID' or 'DSLINKNO').
rapid_output_folder: str
The path to the folder where all of the RAPID output will be generated.
kfac_celerity: float, optional
The flow wave celerity for the watershed in meters per second.
1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown.
kfac_formula_type: int, optional
An integer representing the formula type to use when calculating kfac.
Default is 3.
kfac_length_units: str, optional
The units for the length_id field. Supported types are "m" for meters
and "km" for kilometers. Default is "km".
lambda_k: float, optional
The value for lambda given from RAPID after the calibration process.
Default is 0.35.
x_value: float, optional
Value for the muskingum X parameter [0-0.5]. Default is 0.3.
nhdplus: bool, optional
If True, the drainage line is from the NHDPlus dataset with the VAA
fields COMID, FROMNODE, TONODE, and DIVERGENCE. Default is False.
taudem_network_connectivity_tree_file: str, optional
If set, the connectivity file will be generated from the TauDEM
connectivity tree file.
file_geodatabase: str, optional
Path to the file geodatabase. If you use this option,
in_drainage_line is the name of the stream network feature class.
(WARNING: Not always stable with GDAL.)
Example::
from RAPIDpy.gis.workflow import CreateAllStaticRAPIDFiles
CreateAllStaticRAPIDFiles(
in_drainage_line="/path/to/drainage_line.shp",
river_id="HydroID",
length_id="LENGTHKM",
slope_id="SLOPE",
            next_down_id="NextDownID",
rapid_output_folder="/path/to/rapid/output",
)
"""
# RAPID connect file
rapid_connect_file = os.path.join(rapid_output_folder, 'rapid_connect.csv')
if nhdplus:
CreateNetworkConnectivityNHDPlus(in_drainage_line,
rapid_connect_file,
file_geodatabase)
elif taudem_network_connectivity_tree_file:
CreateNetworkConnectivityTauDEMTree(
taudem_network_connectivity_tree_file,
rapid_connect_file)
else:
CreateNetworkConnectivity(in_drainage_line,
river_id,
next_down_id,
rapid_connect_file,
file_geodatabase)
# river basin id file
riv_bas_id_file = os.path.join(rapid_output_folder, 'riv_bas_id.csv')
CreateSubsetFile(in_drainage_line,
river_id,
riv_bas_id_file,
file_geodatabase)
# kfac file
kfac_file = os.path.join(rapid_output_folder, 'kfac.csv')
CreateMuskingumKfacFile(in_drainage_line,
river_id,
length_id,
slope_id,
kfac_celerity,
kfac_formula_type,
rapid_connect_file,
kfac_file,
length_units=kfac_length_units,
file_geodatabase=file_geodatabase)
# k file
k_file = os.path.join(rapid_output_folder, 'k.csv')
CreateMuskingumKFile(lambda_k,
kfac_file,
k_file)
# x file
x_file = os.path.join(rapid_output_folder, 'x.csv')
CreateConstMuskingumXFile(x_value,
rapid_connect_file,
x_file)
# comid lat lon z file
comid_lat_lon_z_file = \
os.path.join(rapid_output_folder, 'comid_lat_lon_z.csv')
FlowlineToPoint(in_drainage_line,
river_id,
comid_lat_lon_z_file,
file_geodatabase) | def function[CreateAllStaticRAPIDFiles, parameter[in_drainage_line, river_id, length_id, slope_id, next_down_id, rapid_output_folder, kfac_celerity, kfac_formula_type, kfac_length_units, lambda_k, x_value, nhdplus, taudem_network_connectivity_tree_file, file_geodatabase]]:
constant[
To generate the static RAPID files (rapid_connect.csv, riv_bas_id.csv,
kfac.csv, k.csv, x.csv, comid_lat_lon_z.csv) with default values.
Parameters
----------
in_drainage_line: str
Path to the stream network (i.e. Drainage Line) shapefile.
river_id: str
The name of the field with the river ID
(Ex. 'HydroID', 'COMID', or 'LINKNO').
length_id: str
The field name containging the length of the river segment
(Ex. 'LENGTHKM' or 'Length').
slope_id: str
The field name containging the slope of the river segment
(Ex. 'Avg_Slope' or 'Slope').
next_down_id: str
The name of the field with the river ID of the next downstream river
segment (Ex. 'NextDownID' or 'DSLINKNO').
rapid_output_folder: str
The path to the folder where all of the RAPID output will be generated.
kfac_celerity: float, optional
The flow wave celerity for the watershed in meters per second.
1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown.
kfac_formula_type: int, optional
An integer representing the formula type to use when calculating kfac.
Default is 3.
kfac_length_units: str, optional
The units for the length_id field. Supported types are "m" for meters
and "km" for kilometers. Default is "km".
lambda_k: float, optional
The value for lambda given from RAPID after the calibration process.
Default is 0.35.
x_value: float, optional
Value for the muskingum X parameter [0-0.5]. Default is 0.3.
nhdplus: bool, optional
If True, the drainage line is from the NHDPlus dataset with the VAA
fields COMID, FROMNODE, TONODE, and DIVERGENCE. Default is False.
taudem_network_connectivity_tree_file: str, optional
If set, the connectivity file will be generated from the TauDEM
connectivity tree file.
file_geodatabase: str, optional
Path to the file geodatabase. If you use this option,
in_drainage_line is the name of the stream network feature class.
(WARNING: Not always stable with GDAL.)
Example::
from RAPIDpy.gis.workflow import CreateAllStaticRAPIDFiles
CreateAllStaticRAPIDFiles(
in_drainage_line="/path/to/drainage_line.shp",
river_id="HydroID",
length_id="LENGTHKM",
slope_id="SLOPE",
next_down_river_id="NextDownID",
rapid_output_folder="/path/to/rapid/output",
)
]
variable[rapid_connect_file] assign[=] call[name[os].path.join, parameter[name[rapid_output_folder], constant[rapid_connect.csv]]]
if name[nhdplus] begin[:]
call[name[CreateNetworkConnectivityNHDPlus], parameter[name[in_drainage_line], name[rapid_connect_file], name[file_geodatabase]]]
variable[riv_bas_id_file] assign[=] call[name[os].path.join, parameter[name[rapid_output_folder], constant[riv_bas_id.csv]]]
call[name[CreateSubsetFile], parameter[name[in_drainage_line], name[river_id], name[riv_bas_id_file], name[file_geodatabase]]]
variable[kfac_file] assign[=] call[name[os].path.join, parameter[name[rapid_output_folder], constant[kfac.csv]]]
call[name[CreateMuskingumKfacFile], parameter[name[in_drainage_line], name[river_id], name[length_id], name[slope_id], name[kfac_celerity], name[kfac_formula_type], name[rapid_connect_file], name[kfac_file]]]
variable[k_file] assign[=] call[name[os].path.join, parameter[name[rapid_output_folder], constant[k.csv]]]
call[name[CreateMuskingumKFile], parameter[name[lambda_k], name[kfac_file], name[k_file]]]
variable[x_file] assign[=] call[name[os].path.join, parameter[name[rapid_output_folder], constant[x.csv]]]
call[name[CreateConstMuskingumXFile], parameter[name[x_value], name[rapid_connect_file], name[x_file]]]
variable[comid_lat_lon_z_file] assign[=] call[name[os].path.join, parameter[name[rapid_output_folder], constant[comid_lat_lon_z.csv]]]
call[name[FlowlineToPoint], parameter[name[in_drainage_line], name[river_id], name[comid_lat_lon_z_file], name[file_geodatabase]]] | keyword[def] identifier[CreateAllStaticRAPIDFiles] ( identifier[in_drainage_line] ,
identifier[river_id] ,
identifier[length_id] ,
identifier[slope_id] ,
identifier[next_down_id] ,
identifier[rapid_output_folder] ,
identifier[kfac_celerity] = literal[int] / literal[int] ,
identifier[kfac_formula_type] = literal[int] ,
identifier[kfac_length_units] = literal[string] ,
identifier[lambda_k] = literal[int] ,
identifier[x_value] = literal[int] ,
identifier[nhdplus] = keyword[False] ,
identifier[taudem_network_connectivity_tree_file] = keyword[None] ,
identifier[file_geodatabase] = keyword[None] ):
literal[string]
identifier[rapid_connect_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rapid_output_folder] , literal[string] )
keyword[if] identifier[nhdplus] :
identifier[CreateNetworkConnectivityNHDPlus] ( identifier[in_drainage_line] ,
identifier[rapid_connect_file] ,
identifier[file_geodatabase] )
keyword[elif] identifier[taudem_network_connectivity_tree_file] :
identifier[CreateNetworkConnectivityTauDEMTree] (
identifier[taudem_network_connectivity_tree_file] ,
identifier[rapid_connect_file] )
keyword[else] :
identifier[CreateNetworkConnectivity] ( identifier[in_drainage_line] ,
identifier[river_id] ,
identifier[next_down_id] ,
identifier[rapid_connect_file] ,
identifier[file_geodatabase] )
identifier[riv_bas_id_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rapid_output_folder] , literal[string] )
identifier[CreateSubsetFile] ( identifier[in_drainage_line] ,
identifier[river_id] ,
identifier[riv_bas_id_file] ,
identifier[file_geodatabase] )
identifier[kfac_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rapid_output_folder] , literal[string] )
identifier[CreateMuskingumKfacFile] ( identifier[in_drainage_line] ,
identifier[river_id] ,
identifier[length_id] ,
identifier[slope_id] ,
identifier[kfac_celerity] ,
identifier[kfac_formula_type] ,
identifier[rapid_connect_file] ,
identifier[kfac_file] ,
identifier[length_units] = identifier[kfac_length_units] ,
identifier[file_geodatabase] = identifier[file_geodatabase] )
identifier[k_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rapid_output_folder] , literal[string] )
identifier[CreateMuskingumKFile] ( identifier[lambda_k] ,
identifier[kfac_file] ,
identifier[k_file] )
identifier[x_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rapid_output_folder] , literal[string] )
identifier[CreateConstMuskingumXFile] ( identifier[x_value] ,
identifier[rapid_connect_file] ,
identifier[x_file] )
identifier[comid_lat_lon_z_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[rapid_output_folder] , literal[string] )
identifier[FlowlineToPoint] ( identifier[in_drainage_line] ,
identifier[river_id] ,
identifier[comid_lat_lon_z_file] ,
identifier[file_geodatabase] ) | def CreateAllStaticRAPIDFiles(in_drainage_line, river_id, length_id, slope_id, next_down_id, rapid_output_folder, kfac_celerity=1000.0 / 3600.0, kfac_formula_type=3, kfac_length_units='km', lambda_k=0.35, x_value=0.3, nhdplus=False, taudem_network_connectivity_tree_file=None, file_geodatabase=None):
"""
To generate the static RAPID files (rapid_connect.csv, riv_bas_id.csv,
kfac.csv, k.csv, x.csv, comid_lat_lon_z.csv) with default values.
Parameters
----------
in_drainage_line: str
Path to the stream network (i.e. Drainage Line) shapefile.
river_id: str
The name of the field with the river ID
(Ex. 'HydroID', 'COMID', or 'LINKNO').
length_id: str
The field name containging the length of the river segment
(Ex. 'LENGTHKM' or 'Length').
slope_id: str
The field name containging the slope of the river segment
(Ex. 'Avg_Slope' or 'Slope').
next_down_id: str
The name of the field with the river ID of the next downstream river
segment (Ex. 'NextDownID' or 'DSLINKNO').
rapid_output_folder: str
The path to the folder where all of the RAPID output will be generated.
kfac_celerity: float, optional
The flow wave celerity for the watershed in meters per second.
1 km/hr or 1000.0/3600.0 m/s is a reasonable value if unknown.
kfac_formula_type: int, optional
An integer representing the formula type to use when calculating kfac.
Default is 3.
kfac_length_units: str, optional
The units for the length_id field. Supported types are "m" for meters
and "km" for kilometers. Default is "km".
lambda_k: float, optional
The value for lambda given from RAPID after the calibration process.
Default is 0.35.
x_value: float, optional
Value for the muskingum X parameter [0-0.5]. Default is 0.3.
nhdplus: bool, optional
If True, the drainage line is from the NHDPlus dataset with the VAA
fields COMID, FROMNODE, TONODE, and DIVERGENCE. Default is False.
taudem_network_connectivity_tree_file: str, optional
If set, the connectivity file will be generated from the TauDEM
connectivity tree file.
file_geodatabase: str, optional
Path to the file geodatabase. If you use this option,
in_drainage_line is the name of the stream network feature class.
(WARNING: Not always stable with GDAL.)
Example::
from RAPIDpy.gis.workflow import CreateAllStaticRAPIDFiles
CreateAllStaticRAPIDFiles(
in_drainage_line="/path/to/drainage_line.shp",
river_id="HydroID",
length_id="LENGTHKM",
slope_id="SLOPE",
next_down_river_id="NextDownID",
rapid_output_folder="/path/to/rapid/output",
)
"""
# RAPID connect file
rapid_connect_file = os.path.join(rapid_output_folder, 'rapid_connect.csv')
if nhdplus:
CreateNetworkConnectivityNHDPlus(in_drainage_line, rapid_connect_file, file_geodatabase) # depends on [control=['if'], data=[]]
elif taudem_network_connectivity_tree_file:
CreateNetworkConnectivityTauDEMTree(taudem_network_connectivity_tree_file, rapid_connect_file) # depends on [control=['if'], data=[]]
else:
CreateNetworkConnectivity(in_drainage_line, river_id, next_down_id, rapid_connect_file, file_geodatabase)
# river basin id file
riv_bas_id_file = os.path.join(rapid_output_folder, 'riv_bas_id.csv')
CreateSubsetFile(in_drainage_line, river_id, riv_bas_id_file, file_geodatabase)
# kfac file
kfac_file = os.path.join(rapid_output_folder, 'kfac.csv')
CreateMuskingumKfacFile(in_drainage_line, river_id, length_id, slope_id, kfac_celerity, kfac_formula_type, rapid_connect_file, kfac_file, length_units=kfac_length_units, file_geodatabase=file_geodatabase)
# k file
k_file = os.path.join(rapid_output_folder, 'k.csv')
CreateMuskingumKFile(lambda_k, kfac_file, k_file)
# x file
x_file = os.path.join(rapid_output_folder, 'x.csv')
CreateConstMuskingumXFile(x_value, rapid_connect_file, x_file)
# comid lat lon z file
comid_lat_lon_z_file = os.path.join(rapid_output_folder, 'comid_lat_lon_z.csv')
FlowlineToPoint(in_drainage_line, river_id, comid_lat_lon_z_file, file_geodatabase) |
def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
ret_code, msg, conn_info_map = InitConnect.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_init_connect(ret_code, msg, conn_info_map)
return ret_code, msg | def function[on_recv_rsp, parameter[self, rsp_pb]]:
constant[receive response callback function]
<ast.Tuple object at 0x7da1b26aead0> assign[=] call[name[InitConnect].unpack_rsp, parameter[name[rsp_pb]]]
if compare[name[self]._notify_obj is_not constant[None]] begin[:]
call[name[self]._notify_obj.on_async_init_connect, parameter[name[ret_code], name[msg], name[conn_info_map]]]
return[tuple[[<ast.Name object at 0x7da1b26ad450>, <ast.Name object at 0x7da1b26add20>]]] | keyword[def] identifier[on_recv_rsp] ( identifier[self] , identifier[rsp_pb] ):
literal[string]
identifier[ret_code] , identifier[msg] , identifier[conn_info_map] = identifier[InitConnect] . identifier[unpack_rsp] ( identifier[rsp_pb] )
keyword[if] identifier[self] . identifier[_notify_obj] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_notify_obj] . identifier[on_async_init_connect] ( identifier[ret_code] , identifier[msg] , identifier[conn_info_map] )
keyword[return] identifier[ret_code] , identifier[msg] | def on_recv_rsp(self, rsp_pb):
"""receive response callback function"""
(ret_code, msg, conn_info_map) = InitConnect.unpack_rsp(rsp_pb)
if self._notify_obj is not None:
self._notify_obj.on_async_init_connect(ret_code, msg, conn_info_map) # depends on [control=['if'], data=[]]
return (ret_code, msg) |
def _serialize_zebra_family_prefix(prefix):
"""
Serializes family and prefix in Zebra format.
"""
if ip.valid_ipv4(prefix):
family = socket.AF_INET # fixup
prefix_addr, prefix_num = prefix.split('/')
return family, struct.pack(
_ZEBRA_FAMILY_IPV4_PREFIX_FMT,
family,
addrconv.ipv4.text_to_bin(prefix_addr),
int(prefix_num))
elif ip.valid_ipv6(prefix):
family = socket.AF_INET6 # fixup
prefix_addr, prefix_num = prefix.split('/')
return family, struct.pack(
_ZEBRA_FAMILY_IPV6_PREFIX_FMT,
family,
addrconv.ipv6.text_to_bin(prefix_addr),
int(prefix_num))
raise ValueError('Invalid prefix: %s' % prefix) | def function[_serialize_zebra_family_prefix, parameter[prefix]]:
constant[
Serializes family and prefix in Zebra format.
]
if call[name[ip].valid_ipv4, parameter[name[prefix]]] begin[:]
variable[family] assign[=] name[socket].AF_INET
<ast.Tuple object at 0x7da1b1b0f5e0> assign[=] call[name[prefix].split, parameter[constant[/]]]
return[tuple[[<ast.Name object at 0x7da1b1b0dbd0>, <ast.Call object at 0x7da1b1b0d150>]]]
<ast.Raise object at 0x7da1b1c7f580> | keyword[def] identifier[_serialize_zebra_family_prefix] ( identifier[prefix] ):
literal[string]
keyword[if] identifier[ip] . identifier[valid_ipv4] ( identifier[prefix] ):
identifier[family] = identifier[socket] . identifier[AF_INET]
identifier[prefix_addr] , identifier[prefix_num] = identifier[prefix] . identifier[split] ( literal[string] )
keyword[return] identifier[family] , identifier[struct] . identifier[pack] (
identifier[_ZEBRA_FAMILY_IPV4_PREFIX_FMT] ,
identifier[family] ,
identifier[addrconv] . identifier[ipv4] . identifier[text_to_bin] ( identifier[prefix_addr] ),
identifier[int] ( identifier[prefix_num] ))
keyword[elif] identifier[ip] . identifier[valid_ipv6] ( identifier[prefix] ):
identifier[family] = identifier[socket] . identifier[AF_INET6]
identifier[prefix_addr] , identifier[prefix_num] = identifier[prefix] . identifier[split] ( literal[string] )
keyword[return] identifier[family] , identifier[struct] . identifier[pack] (
identifier[_ZEBRA_FAMILY_IPV6_PREFIX_FMT] ,
identifier[family] ,
identifier[addrconv] . identifier[ipv6] . identifier[text_to_bin] ( identifier[prefix_addr] ),
identifier[int] ( identifier[prefix_num] ))
keyword[raise] identifier[ValueError] ( literal[string] % identifier[prefix] ) | def _serialize_zebra_family_prefix(prefix):
"""
Serializes family and prefix in Zebra format.
"""
if ip.valid_ipv4(prefix):
family = socket.AF_INET # fixup
(prefix_addr, prefix_num) = prefix.split('/')
return (family, struct.pack(_ZEBRA_FAMILY_IPV4_PREFIX_FMT, family, addrconv.ipv4.text_to_bin(prefix_addr), int(prefix_num))) # depends on [control=['if'], data=[]]
elif ip.valid_ipv6(prefix):
family = socket.AF_INET6 # fixup
(prefix_addr, prefix_num) = prefix.split('/')
return (family, struct.pack(_ZEBRA_FAMILY_IPV6_PREFIX_FMT, family, addrconv.ipv6.text_to_bin(prefix_addr), int(prefix_num))) # depends on [control=['if'], data=[]]
raise ValueError('Invalid prefix: %s' % prefix) |
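A standalone sketch of the same pack-family-address-prefix idea using only the standard library; the '!B4sB' layout is an assumption for illustration and is not the module's actual _ZEBRA_FAMILY_IPV4_PREFIX_FMT.

import socket
import struct

# Illustration only: pack the address family, a 4-byte IPv4 address and the
# prefix length, roughly mirroring what the function above returns for IPv4.
prefix = "192.0.2.0/24"
prefix_addr, prefix_num = prefix.split("/")
packed = struct.pack("!B4sB",
                     socket.AF_INET,
                     socket.inet_aton(prefix_addr),
                     int(prefix_num))
print(packed.hex())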
def watch(self):
"""
Watches directory for changes
"""
wm = pyinotify.WatchManager()
self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)
wm.add_watch(self.directory, pyinotify.ALL_EVENTS)
try:
self.notifier.loop()
except (KeyboardInterrupt, AttributeError):
print_notification("Stopping")
finally:
self.notifier.stop()
self.terminate_processes() | def function[watch, parameter[self]]:
constant[
Watches directory for changes
]
variable[wm] assign[=] call[name[pyinotify].WatchManager, parameter[]]
name[self].notifier assign[=] call[name[pyinotify].Notifier, parameter[name[wm]]]
call[name[wm].add_watch, parameter[name[self].directory, name[pyinotify].ALL_EVENTS]]
<ast.Try object at 0x7da1b00a04c0> | keyword[def] identifier[watch] ( identifier[self] ):
literal[string]
identifier[wm] = identifier[pyinotify] . identifier[WatchManager] ()
identifier[self] . identifier[notifier] = identifier[pyinotify] . identifier[Notifier] ( identifier[wm] , identifier[default_proc_fun] = identifier[self] . identifier[callback] )
identifier[wm] . identifier[add_watch] ( identifier[self] . identifier[directory] , identifier[pyinotify] . identifier[ALL_EVENTS] )
keyword[try] :
identifier[self] . identifier[notifier] . identifier[loop] ()
keyword[except] ( identifier[KeyboardInterrupt] , identifier[AttributeError] ):
identifier[print_notification] ( literal[string] )
keyword[finally] :
identifier[self] . identifier[notifier] . identifier[stop] ()
identifier[self] . identifier[terminate_processes] () | def watch(self):
"""
Watches directory for changes
"""
wm = pyinotify.WatchManager()
self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)
wm.add_watch(self.directory, pyinotify.ALL_EVENTS)
try:
self.notifier.loop() # depends on [control=['try'], data=[]]
except (KeyboardInterrupt, AttributeError):
print_notification('Stopping') # depends on [control=['except'], data=[]]
finally:
self.notifier.stop()
self.terminate_processes() |
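A minimal, hedged sketch of the same pyinotify pattern outside the class; the watched path and the print-only handler are placeholders, and the blocking loop() call is left commented out.

import pyinotify

def on_event(event):
    # Placeholder handler; the class above dispatches to self.callback instead.
    print("inotify event:", event)

wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm, default_proc_fun=on_event)
wm.add_watch("/tmp", pyinotify.ALL_EVENTS)
# notifier.loop()    # blocks until interrupted, as in watch() above
# notifier.stop()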
def free(self, kind, name):
"""
Mark a node name as no longer in use.
It could thus be recycled to name a new node.
"""
try:
params = self._parse(name)
index = int(params['index'], 10)
self._free[kind].add(index)
assert index <= self._top[kind]
if index == self._top[kind]:
self._top[kind] -= 1
except ValueError:
# ignore failures in self._parse()
pass | def function[free, parameter[self, kind, name]]:
constant[
Mark a node name as no longer in use.
It could thus be recycled to name a new node.
]
<ast.Try object at 0x7da18c4ce3e0> | keyword[def] identifier[free] ( identifier[self] , identifier[kind] , identifier[name] ):
literal[string]
keyword[try] :
identifier[params] = identifier[self] . identifier[_parse] ( identifier[name] )
identifier[index] = identifier[int] ( identifier[params] [ literal[string] ], literal[int] )
identifier[self] . identifier[_free] [ identifier[kind] ]. identifier[add] ( identifier[index] )
keyword[assert] identifier[index] <= identifier[self] . identifier[_top] [ identifier[kind] ]
keyword[if] identifier[index] == identifier[self] . identifier[_top] [ identifier[kind] ]:
identifier[self] . identifier[_top] [ identifier[kind] ]-= literal[int]
keyword[except] identifier[ValueError] :
keyword[pass] | def free(self, kind, name):
"""
Mark a node name as no longer in use.
It could thus be recycled to name a new node.
"""
try:
params = self._parse(name)
index = int(params['index'], 10)
self._free[kind].add(index)
assert index <= self._top[kind]
if index == self._top[kind]:
self._top[kind] -= 1 # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except ValueError:
# ignore failures in self._parse()
pass # depends on [control=['except'], data=[]] |
def add_multiple(self, *users):
"""Add multiple users to the group at once.
Each given user must be a dictionary containing a nickname and either
an email, phone number, or user_id.
:param args users: the users to add
:return: a membership request
:rtype: :class:`MembershipRequest`
"""
guid = uuid.uuid4()
for i, user_ in enumerate(users):
user_['guid'] = '{}-{}'.format(guid, i)
payload = {'members': users}
url = utils.urljoin(self.url, 'add')
response = self.session.post(url, json=payload)
return MembershipRequest(self, *users, group_id=self.group_id,
**response.data) | def function[add_multiple, parameter[self]]:
constant[Add multiple users to the group at once.
Each given user must be a dictionary containing a nickname and either
an email, phone number, or user_id.
:param args users: the users to add
:return: a membership request
:rtype: :class:`MembershipRequest`
]
variable[guid] assign[=] call[name[uuid].uuid4, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b1106e60>, <ast.Name object at 0x7da1b11061a0>]]] in starred[call[name[enumerate], parameter[name[users]]]] begin[:]
call[name[user_]][constant[guid]] assign[=] call[constant[{}-{}].format, parameter[name[guid], name[i]]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da1b1104160>], [<ast.Name object at 0x7da1b1104640>]]
variable[url] assign[=] call[name[utils].urljoin, parameter[name[self].url, constant[add]]]
variable[response] assign[=] call[name[self].session.post, parameter[name[url]]]
return[call[name[MembershipRequest], parameter[name[self], <ast.Starred object at 0x7da1b11076d0>]]] | keyword[def] identifier[add_multiple] ( identifier[self] ,* identifier[users] ):
literal[string]
identifier[guid] = identifier[uuid] . identifier[uuid4] ()
keyword[for] identifier[i] , identifier[user_] keyword[in] identifier[enumerate] ( identifier[users] ):
identifier[user_] [ literal[string] ]= literal[string] . identifier[format] ( identifier[guid] , identifier[i] )
identifier[payload] ={ literal[string] : identifier[users] }
identifier[url] = identifier[utils] . identifier[urljoin] ( identifier[self] . identifier[url] , literal[string] )
identifier[response] = identifier[self] . identifier[session] . identifier[post] ( identifier[url] , identifier[json] = identifier[payload] )
keyword[return] identifier[MembershipRequest] ( identifier[self] ,* identifier[users] , identifier[group_id] = identifier[self] . identifier[group_id] ,
** identifier[response] . identifier[data] ) | def add_multiple(self, *users):
"""Add multiple users to the group at once.
Each given user must be a dictionary containing a nickname and either
an email, phone number, or user_id.
:param args users: the users to add
:return: a membership request
:rtype: :class:`MembershipRequest`
"""
guid = uuid.uuid4()
for (i, user_) in enumerate(users):
user_['guid'] = '{}-{}'.format(guid, i) # depends on [control=['for'], data=[]]
payload = {'members': users}
url = utils.urljoin(self.url, 'add')
response = self.session.post(url, json=payload)
return MembershipRequest(self, *users, group_id=self.group_id, **response.data) |
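A hedged usage sketch for add_multiple; `group` is assumed to be an already-constructed group object from this library, and the member dictionaries follow the shape described in the docstring.

# Illustrative only; `group` is assumed to expose the method defined above.
# members = group.add_multiple(
#     {"nickname": "Alice", "email": "alice@example.com"},
#     {"nickname": "Bob", "phone_number": "+15555550100"},
# )
# `members` would then be a MembershipRequest that can be polled for results.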
def impact_table_extractor(impact_report, component_metadata):
"""Extracting impact summary of the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
        all the data that the extractor needs
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
extra_args = component_metadata.extra_args
components_list = resolve_from_dictionary(
extra_args, 'components_list')
# TODO: Decide either to use it or not
if not impact_report.impact_function.debug_mode:
# only show experimental MMI Detail when in debug mode
components_list.pop('mmi_detail', None)
context['brand_logo'] = resource_url(
resources_path('img', 'logos', 'inasafe-logo-white.png'))
for key, component in list(components_list.items()):
context[key] = jinja2_output_as_string(
impact_report, component['key'])
context['inasafe_resources_base_dir'] = resources_path()
return context | def function[impact_table_extractor, parameter[impact_report, component_metadata]]:
constant[Extracting impact summary of the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
]
variable[context] assign[=] dictionary[[], []]
variable[extra_args] assign[=] name[component_metadata].extra_args
variable[components_list] assign[=] call[name[resolve_from_dictionary], parameter[name[extra_args], constant[components_list]]]
if <ast.UnaryOp object at 0x7da1b23444c0> begin[:]
call[name[components_list].pop, parameter[constant[mmi_detail], constant[None]]]
call[name[context]][constant[brand_logo]] assign[=] call[name[resource_url], parameter[call[name[resources_path], parameter[constant[img], constant[logos], constant[inasafe-logo-white.png]]]]]
for taget[tuple[[<ast.Name object at 0x7da2045643a0>, <ast.Name object at 0x7da204564b50>]]] in starred[call[name[list], parameter[call[name[components_list].items, parameter[]]]]] begin[:]
call[name[context]][name[key]] assign[=] call[name[jinja2_output_as_string], parameter[name[impact_report], call[name[component]][constant[key]]]]
call[name[context]][constant[inasafe_resources_base_dir]] assign[=] call[name[resources_path], parameter[]]
return[name[context]] | keyword[def] identifier[impact_table_extractor] ( identifier[impact_report] , identifier[component_metadata] ):
literal[string]
identifier[context] ={}
identifier[extra_args] = identifier[component_metadata] . identifier[extra_args]
identifier[components_list] = identifier[resolve_from_dictionary] (
identifier[extra_args] , literal[string] )
keyword[if] keyword[not] identifier[impact_report] . identifier[impact_function] . identifier[debug_mode] :
identifier[components_list] . identifier[pop] ( literal[string] , keyword[None] )
identifier[context] [ literal[string] ]= identifier[resource_url] (
identifier[resources_path] ( literal[string] , literal[string] , literal[string] ))
keyword[for] identifier[key] , identifier[component] keyword[in] identifier[list] ( identifier[components_list] . identifier[items] ()):
identifier[context] [ identifier[key] ]= identifier[jinja2_output_as_string] (
identifier[impact_report] , identifier[component] [ literal[string] ])
identifier[context] [ literal[string] ]= identifier[resources_path] ()
keyword[return] identifier[context] | def impact_table_extractor(impact_report, component_metadata):
"""Extracting impact summary of the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
context = {}
extra_args = component_metadata.extra_args
components_list = resolve_from_dictionary(extra_args, 'components_list')
# TODO: Decide either to use it or not
if not impact_report.impact_function.debug_mode:
# only show experimental MMI Detail when in debug mode
components_list.pop('mmi_detail', None) # depends on [control=['if'], data=[]]
context['brand_logo'] = resource_url(resources_path('img', 'logos', 'inasafe-logo-white.png'))
for (key, component) in list(components_list.items()):
context[key] = jinja2_output_as_string(impact_report, component['key']) # depends on [control=['for'], data=[]]
context['inasafe_resources_base_dir'] = resources_path()
return context |
def sort(self, field, direction="asc"):
"""
Adds sort criteria.
"""
if not isinstance(field, basestring):
raise ValueError("Field should be a string")
if direction not in ["asc", "desc"]:
raise ValueError("Sort direction should be `asc` or `desc`")
self.sorts.append({field: direction}) | def function[sort, parameter[self, field, direction]]:
constant[
Adds sort criteria.
]
if <ast.UnaryOp object at 0x7da204622aa0> begin[:]
<ast.Raise object at 0x7da204623a90>
if compare[name[direction] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da2046203d0>, <ast.Constant object at 0x7da204621960>]]] begin[:]
<ast.Raise object at 0x7da204623f40>
call[name[self].sorts.append, parameter[dictionary[[<ast.Name object at 0x7da204622e60>], [<ast.Name object at 0x7da204622110>]]]] | keyword[def] identifier[sort] ( identifier[self] , identifier[field] , identifier[direction] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[field] , identifier[basestring] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[direction] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[sorts] . identifier[append] ({ identifier[field] : identifier[direction] }) | def sort(self, field, direction='asc'):
"""
Adds sort criteria.
"""
if not isinstance(field, basestring):
raise ValueError('Field should be a string') # depends on [control=['if'], data=[]]
if direction not in ['asc', 'desc']:
raise ValueError('Sort direction should be `asc` or `desc`') # depends on [control=['if'], data=[]]
self.sorts.append({field: direction}) |
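A self-contained sketch exercising sort(); the SearchQuery host class is hypothetical and exists only to supply the `sorts` list, and the basestring shim covers Python 3 since the original targets Python 2.

try:
    basestring
except NameError:          # Python 3 shim; the original code targets Python 2
    basestring = str

# Hypothetical host object, only so the method above can be called:
class SearchQuery(object):
    def __init__(self):
        self.sorts = []
    sort = sort            # reuse the function defined above as a method

q = SearchQuery()
q.sort("created_at", "desc")
q.sort("name")             # direction defaults to "asc"
print(q.sorts)             # [{'created_at': 'desc'}, {'name': 'asc'}]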
def ias60(msg):
"""Indicated airspeed
Args:
        msg (String): 28-character hexadecimal message (BDS60) string
Returns:
int: indicated airspeed in knots
"""
d = hex2bin(data(msg))
if d[12] == '0':
return None
ias = bin2int(d[13:23]) # kts
return ias | def function[ias60, parameter[msg]]:
constant[Indicated airspeed
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
int: indicated airspeed in knots
]
variable[d] assign[=] call[name[hex2bin], parameter[call[name[data], parameter[name[msg]]]]]
if compare[call[name[d]][constant[12]] equal[==] constant[0]] begin[:]
return[constant[None]]
variable[ias] assign[=] call[name[bin2int], parameter[call[name[d]][<ast.Slice object at 0x7da207f99540>]]]
return[name[ias]] | keyword[def] identifier[ias60] ( identifier[msg] ):
literal[string]
identifier[d] = identifier[hex2bin] ( identifier[data] ( identifier[msg] ))
keyword[if] identifier[d] [ literal[int] ]== literal[string] :
keyword[return] keyword[None]
identifier[ias] = identifier[bin2int] ( identifier[d] [ literal[int] : literal[int] ])
keyword[return] identifier[ias] | def ias60(msg):
"""Indicated airspeed
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
int: indicated airspeed in knots
"""
d = hex2bin(data(msg))
if d[12] == '0':
return None # depends on [control=['if'], data=[]]
ias = bin2int(d[13:23]) # kts
return ias |
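A hedged, standalone illustration of the bit slicing done above; the 56-bit string is fabricated for the example (status bit set, the 10 IAS bits encoding 320 kt) and is not a real BDS6,0 frame.

# Fabricated 56-bit data field: 12 leading bits, status bit '1', then the
# 10-bit IAS value (0b0101000000 == 320), padded out to 56 bits.
example_bits = "0" * 12 + "1" + "0101000000" + "0" * 33
assert len(example_bits) == 56
ias_value = int(example_bits[13:23], 2)    # same slice the function above uses
print(ias_value)                            # 320 (knots, in this made-up example)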
def _parse_email(self, val):
"""
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {
'type': None,
'value': None
}
try:
ret['type'] = val[1]['type']
except (KeyError, ValueError, TypeError):
pass
ret['value'] = val[3].strip()
try:
self.vars['email'].append(ret)
except AttributeError:
self.vars['email'] = []
self.vars['email'].append(ret) | def function[_parse_email, parameter[self, val]]:
constant[
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da20c990880>, <ast.Constant object at 0x7da20c9904f0>], [<ast.Constant object at 0x7da20c991c30>, <ast.Constant object at 0x7da20c992e90>]]
<ast.Try object at 0x7da20c990f40>
call[name[ret]][constant[value]] assign[=] call[call[name[val]][constant[3]].strip, parameter[]]
<ast.Try object at 0x7da20c9934f0> | keyword[def] identifier[_parse_email] ( identifier[self] , identifier[val] ):
literal[string]
identifier[ret] ={
literal[string] : keyword[None] ,
literal[string] : keyword[None]
}
keyword[try] :
identifier[ret] [ literal[string] ]= identifier[val] [ literal[int] ][ literal[string] ]
keyword[except] ( identifier[KeyError] , identifier[ValueError] , identifier[TypeError] ):
keyword[pass]
identifier[ret] [ literal[string] ]= identifier[val] [ literal[int] ]. identifier[strip] ()
keyword[try] :
identifier[self] . identifier[vars] [ literal[string] ]. identifier[append] ( identifier[ret] )
keyword[except] identifier[AttributeError] :
identifier[self] . identifier[vars] [ literal[string] ]=[]
identifier[self] . identifier[vars] [ literal[string] ]. identifier[append] ( identifier[ret] ) | def _parse_email(self, val):
"""
The function for parsing the vcard email addresses.
Args:
val (:obj:`list`): The value to parse.
"""
ret = {'type': None, 'value': None}
try:
ret['type'] = val[1]['type'] # depends on [control=['try'], data=[]]
except (KeyError, ValueError, TypeError):
pass # depends on [control=['except'], data=[]]
ret['value'] = val[3].strip()
try:
self.vars['email'].append(ret) # depends on [control=['try'], data=[]]
except AttributeError:
self.vars['email'] = []
self.vars['email'].append(ret) # depends on [control=['except'], data=[]] |
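A hedged, self-contained sketch of the value shape the parser above expects; FakeVCard is a stand-in host whose vars dict starts with 'email' set to None (an assumed seed value) so the AttributeError fallback is exercised.

# Stand-in host object, for illustration only:
class FakeVCard(object):
    def __init__(self):
        self.vars = {"email": None}      # assumed seed so the fallback fires
    _parse_email = _parse_email          # reuse the function defined above

p = FakeVCard()
# vCard email entries arrive as [name, params, type, value]; only indexes 1
# and 3 are read by the parser above.
p._parse_email(["email", {"type": "work"}, "text", " ops@example.com "])
print(p.vars["email"])   # [{'type': 'work', 'value': 'ops@example.com'}]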
def draw_graph( g, fmt='svg', prg='dot', options={} ):
"""
Draw an RDF graph as an image
"""
# Convert RDF to Graphviz
buf = StringIO()
rdf2dot( g, buf, options )
gv_options = options.get('graphviz',[])
if fmt == 'png':
gv_options += [ '-Gdpi=220', '-Gsize=25,10!' ]
metadata = { "width": 5500, "height": 2200, "unconfined" : True }
#import codecs
#with codecs.open('/tmp/sparqlkernel-img.dot','w',encoding='utf-8') as f:
# f.write( buf.getvalue() )
# Now use Graphviz to generate the graph
image = run_dot( buf.getvalue(), fmt=fmt, options=gv_options, prg=prg )
#with open('/tmp/sparqlkernel-img.'+fmt,'w') as f:
# f.write( image )
# Return it
if fmt == 'png':
return { 'image/png' : base64.b64encode(image).decode('ascii') }, \
{ "image/png" : metadata }
elif fmt == 'svg':
return { 'image/svg+xml' : image.decode('utf-8').replace('<svg','<svg class="unconfined"',1) }, \
{ "unconfined" : True } | def function[draw_graph, parameter[g, fmt, prg, options]]:
constant[
Draw an RDF graph as an image
]
variable[buf] assign[=] call[name[StringIO], parameter[]]
call[name[rdf2dot], parameter[name[g], name[buf], name[options]]]
variable[gv_options] assign[=] call[name[options].get, parameter[constant[graphviz], list[[]]]]
if compare[name[fmt] equal[==] constant[png]] begin[:]
<ast.AugAssign object at 0x7da2045660e0>
variable[metadata] assign[=] dictionary[[<ast.Constant object at 0x7da204567e20>, <ast.Constant object at 0x7da204564ca0>, <ast.Constant object at 0x7da2045672b0>], [<ast.Constant object at 0x7da204564850>, <ast.Constant object at 0x7da204567c10>, <ast.Constant object at 0x7da204565420>]]
variable[image] assign[=] call[name[run_dot], parameter[call[name[buf].getvalue, parameter[]]]]
if compare[name[fmt] equal[==] constant[png]] begin[:]
return[tuple[[<ast.Dict object at 0x7da204566f80>, <ast.Dict object at 0x7da204564190>]]] | keyword[def] identifier[draw_graph] ( identifier[g] , identifier[fmt] = literal[string] , identifier[prg] = literal[string] , identifier[options] ={}):
literal[string]
identifier[buf] = identifier[StringIO] ()
identifier[rdf2dot] ( identifier[g] , identifier[buf] , identifier[options] )
identifier[gv_options] = identifier[options] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[fmt] == literal[string] :
identifier[gv_options] +=[ literal[string] , literal[string] ]
identifier[metadata] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : keyword[True] }
identifier[image] = identifier[run_dot] ( identifier[buf] . identifier[getvalue] (), identifier[fmt] = identifier[fmt] , identifier[options] = identifier[gv_options] , identifier[prg] = identifier[prg] )
keyword[if] identifier[fmt] == literal[string] :
keyword[return] { literal[string] : identifier[base64] . identifier[b64encode] ( identifier[image] ). identifier[decode] ( literal[string] )},{ literal[string] : identifier[metadata] }
keyword[elif] identifier[fmt] == literal[string] :
keyword[return] { literal[string] : identifier[image] . identifier[decode] ( literal[string] ). identifier[replace] ( literal[string] , literal[string] , literal[int] )},{ literal[string] : keyword[True] } | def draw_graph(g, fmt='svg', prg='dot', options={}):
"""
Draw an RDF graph as an image
"""
# Convert RDF to Graphviz
buf = StringIO()
rdf2dot(g, buf, options)
gv_options = options.get('graphviz', [])
if fmt == 'png':
gv_options += ['-Gdpi=220', '-Gsize=25,10!']
metadata = {'width': 5500, 'height': 2200, 'unconfined': True} # depends on [control=['if'], data=[]]
#import codecs
#with codecs.open('/tmp/sparqlkernel-img.dot','w',encoding='utf-8') as f:
# f.write( buf.getvalue() )
# Now use Graphviz to generate the graph
image = run_dot(buf.getvalue(), fmt=fmt, options=gv_options, prg=prg)
#with open('/tmp/sparqlkernel-img.'+fmt,'w') as f:
# f.write( image )
# Return it
if fmt == 'png':
return ({'image/png': base64.b64encode(image).decode('ascii')}, {'image/png': metadata}) # depends on [control=['if'], data=[]]
elif fmt == 'svg':
return ({'image/svg+xml': image.decode('utf-8').replace('<svg', '<svg class="unconfined"', 1)}, {'unconfined': True}) # depends on [control=['if'], data=[]] |
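A hedged usage sketch; it assumes rdflib is available and that rdf2dot/run_dot from this module behave as used above, so the call is left commented out.

# import rdflib
# g = rdflib.Graph()
# g.parse(data="<http://example.org/a> <http://example.org/b> <http://example.org/c> .",
#         format="turtle")
# bundle, metadata = draw_graph(g, fmt="svg")
# bundle would map 'image/svg+xml' to the rendered (unconfined) SVG markup.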
def get_access_logs(config):
"""
Parse config for access_log directives
:return: iterator over ('path', 'format name') tuple of found directives
"""
access_log = Literal("access_log") + ZeroOrMore(parameter) + semicolon
access_log.ignore(pythonStyleComment)
for directive in access_log.searchString(config).asList():
path = directive[1]
if path == 'off' or path.startswith('syslog:'):
# nothing to process here
continue
format_name = 'combined'
if len(directive) > 2 and '=' not in directive[2]:
format_name = directive[2]
yield path, format_name | def function[get_access_logs, parameter[config]]:
constant[
Parse config for access_log directives
:return: iterator over ('path', 'format name') tuple of found directives
]
variable[access_log] assign[=] binary_operation[binary_operation[call[name[Literal], parameter[constant[access_log]]] + call[name[ZeroOrMore], parameter[name[parameter]]]] + name[semicolon]]
call[name[access_log].ignore, parameter[name[pythonStyleComment]]]
for taget[name[directive]] in starred[call[call[name[access_log].searchString, parameter[name[config]]].asList, parameter[]]] begin[:]
variable[path] assign[=] call[name[directive]][constant[1]]
if <ast.BoolOp object at 0x7da1b184d1b0> begin[:]
continue
variable[format_name] assign[=] constant[combined]
if <ast.BoolOp object at 0x7da1b184dd80> begin[:]
variable[format_name] assign[=] call[name[directive]][constant[2]]
<ast.Yield object at 0x7da1b184d9f0> | keyword[def] identifier[get_access_logs] ( identifier[config] ):
literal[string]
identifier[access_log] = identifier[Literal] ( literal[string] )+ identifier[ZeroOrMore] ( identifier[parameter] )+ identifier[semicolon]
identifier[access_log] . identifier[ignore] ( identifier[pythonStyleComment] )
keyword[for] identifier[directive] keyword[in] identifier[access_log] . identifier[searchString] ( identifier[config] ). identifier[asList] ():
identifier[path] = identifier[directive] [ literal[int] ]
keyword[if] identifier[path] == literal[string] keyword[or] identifier[path] . identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[format_name] = literal[string]
keyword[if] identifier[len] ( identifier[directive] )> literal[int] keyword[and] literal[string] keyword[not] keyword[in] identifier[directive] [ literal[int] ]:
identifier[format_name] = identifier[directive] [ literal[int] ]
keyword[yield] identifier[path] , identifier[format_name] | def get_access_logs(config):
"""
Parse config for access_log directives
:return: iterator over ('path', 'format name') tuple of found directives
"""
access_log = Literal('access_log') + ZeroOrMore(parameter) + semicolon
access_log.ignore(pythonStyleComment)
for directive in access_log.searchString(config).asList():
path = directive[1]
if path == 'off' or path.startswith('syslog:'):
# nothing to process here
continue # depends on [control=['if'], data=[]]
format_name = 'combined'
if len(directive) > 2 and '=' not in directive[2]:
format_name = directive[2] # depends on [control=['if'], data=[]]
yield (path, format_name) # depends on [control=['for'], data=['directive']] |
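A hedged usage sketch; the pyparsing helpers `parameter` and `semicolon` are defined elsewhere in this module, so only the intended input and output are illustrated here.

# sample_config = """
# access_log /var/log/nginx/access.log main;
# access_log off;
# access_log /var/log/nginx/static.log;
# """
# for path, fmt in get_access_logs(sample_config):
#     print(path, fmt)
# expected: ('/var/log/nginx/access.log', 'main') and
#           ('/var/log/nginx/static.log', 'combined'); the 'off' entry is skipped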
def addworkdays(self, date, offset):
"""
Add work days to a given date, ignoring holidays.
Note:
By definition, a zero offset causes the function to return the
initial date, even it is not a work date. An offset of 1
represents the next work date, regardless of date being a work
date or not.
Args:
date (date, datetime or str): Date to be incremented.
offset (integer): Number of work days to add. Positive values move
the date forward and negative values move the date back.
Returns:
datetime: New incremented date.
"""
date = parsefun(date)
if offset == 0:
return date
if offset > 0:
direction = 1
idx_offset = Calendar._idx_offsetnext
idx_next = Calendar._idx_nextworkday
idx_offset_other = Calendar._idx_offsetprev
idx_next_other = Calendar._idx_prevworkday
else:
direction = -1
idx_offset = Calendar._idx_offsetprev
idx_next = Calendar._idx_prevworkday
idx_offset_other = Calendar._idx_offsetnext
idx_next_other = Calendar._idx_nextworkday
# adjust date to first work day before/after so counting always
# starts from a workday
weekdaymap = self.weekdaymap # speed up
datewk = date.weekday()
if not weekdaymap[datewk].isworkday:
date += datetime.timedelta(days=\
weekdaymap[datewk][idx_offset_other])
datewk = weekdaymap[datewk][idx_next_other]
nw, nd = divmod(abs(offset), len(self.workdays))
ndays = nw * 7
while nd > 0:
ndays += abs(weekdaymap[datewk][idx_offset])
datewk = weekdaymap[datewk][idx_next]
nd -= 1
date += datetime.timedelta(days=ndays*direction)
return date | def function[addworkdays, parameter[self, date, offset]]:
constant[
Add work days to a given date, ignoring holidays.
Note:
By definition, a zero offset causes the function to return the
initial date, even it is not a work date. An offset of 1
represents the next work date, regardless of date being a work
date or not.
Args:
date (date, datetime or str): Date to be incremented.
offset (integer): Number of work days to add. Positive values move
the date forward and negative values move the date back.
Returns:
datetime: New incremented date.
]
variable[date] assign[=] call[name[parsefun], parameter[name[date]]]
if compare[name[offset] equal[==] constant[0]] begin[:]
return[name[date]]
if compare[name[offset] greater[>] constant[0]] begin[:]
variable[direction] assign[=] constant[1]
variable[idx_offset] assign[=] name[Calendar]._idx_offsetnext
variable[idx_next] assign[=] name[Calendar]._idx_nextworkday
variable[idx_offset_other] assign[=] name[Calendar]._idx_offsetprev
variable[idx_next_other] assign[=] name[Calendar]._idx_prevworkday
variable[weekdaymap] assign[=] name[self].weekdaymap
variable[datewk] assign[=] call[name[date].weekday, parameter[]]
if <ast.UnaryOp object at 0x7da2054a4ee0> begin[:]
<ast.AugAssign object at 0x7da2054a7d90>
variable[datewk] assign[=] call[call[name[weekdaymap]][name[datewk]]][name[idx_next_other]]
<ast.Tuple object at 0x7da2054a4040> assign[=] call[name[divmod], parameter[call[name[abs], parameter[name[offset]]], call[name[len], parameter[name[self].workdays]]]]
variable[ndays] assign[=] binary_operation[name[nw] * constant[7]]
while compare[name[nd] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da2054a4400>
variable[datewk] assign[=] call[call[name[weekdaymap]][name[datewk]]][name[idx_next]]
<ast.AugAssign object at 0x7da2054a5720>
<ast.AugAssign object at 0x7da2054a4850>
return[name[date]] | keyword[def] identifier[addworkdays] ( identifier[self] , identifier[date] , identifier[offset] ):
literal[string]
identifier[date] = identifier[parsefun] ( identifier[date] )
keyword[if] identifier[offset] == literal[int] :
keyword[return] identifier[date]
keyword[if] identifier[offset] > literal[int] :
identifier[direction] = literal[int]
identifier[idx_offset] = identifier[Calendar] . identifier[_idx_offsetnext]
identifier[idx_next] = identifier[Calendar] . identifier[_idx_nextworkday]
identifier[idx_offset_other] = identifier[Calendar] . identifier[_idx_offsetprev]
identifier[idx_next_other] = identifier[Calendar] . identifier[_idx_prevworkday]
keyword[else] :
identifier[direction] =- literal[int]
identifier[idx_offset] = identifier[Calendar] . identifier[_idx_offsetprev]
identifier[idx_next] = identifier[Calendar] . identifier[_idx_prevworkday]
identifier[idx_offset_other] = identifier[Calendar] . identifier[_idx_offsetnext]
identifier[idx_next_other] = identifier[Calendar] . identifier[_idx_nextworkday]
identifier[weekdaymap] = identifier[self] . identifier[weekdaymap]
identifier[datewk] = identifier[date] . identifier[weekday] ()
keyword[if] keyword[not] identifier[weekdaymap] [ identifier[datewk] ]. identifier[isworkday] :
identifier[date] += identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[weekdaymap] [ identifier[datewk] ][ identifier[idx_offset_other] ])
identifier[datewk] = identifier[weekdaymap] [ identifier[datewk] ][ identifier[idx_next_other] ]
identifier[nw] , identifier[nd] = identifier[divmod] ( identifier[abs] ( identifier[offset] ), identifier[len] ( identifier[self] . identifier[workdays] ))
identifier[ndays] = identifier[nw] * literal[int]
keyword[while] identifier[nd] > literal[int] :
identifier[ndays] += identifier[abs] ( identifier[weekdaymap] [ identifier[datewk] ][ identifier[idx_offset] ])
identifier[datewk] = identifier[weekdaymap] [ identifier[datewk] ][ identifier[idx_next] ]
identifier[nd] -= literal[int]
identifier[date] += identifier[datetime] . identifier[timedelta] ( identifier[days] = identifier[ndays] * identifier[direction] )
keyword[return] identifier[date] | def addworkdays(self, date, offset):
"""
Add work days to a given date, ignoring holidays.
Note:
By definition, a zero offset causes the function to return the
initial date, even if it is not a work date. An offset of 1
represents the next work date, regardless of date being a work
date or not.
Args:
date (date, datetime or str): Date to be incremented.
offset (integer): Number of work days to add. Positive values move
the date forward and negative values move the date back.
Returns:
datetime: New incremented date.
"""
date = parsefun(date)
if offset == 0:
return date # depends on [control=['if'], data=[]]
if offset > 0:
direction = 1
idx_offset = Calendar._idx_offsetnext
idx_next = Calendar._idx_nextworkday
idx_offset_other = Calendar._idx_offsetprev
idx_next_other = Calendar._idx_prevworkday # depends on [control=['if'], data=[]]
else:
direction = -1
idx_offset = Calendar._idx_offsetprev
idx_next = Calendar._idx_prevworkday
idx_offset_other = Calendar._idx_offsetnext
idx_next_other = Calendar._idx_nextworkday # adjust date to first work day before/after so counting always
# starts from a workday
weekdaymap = self.weekdaymap # speed up
datewk = date.weekday()
if not weekdaymap[datewk].isworkday:
date += datetime.timedelta(days=weekdaymap[datewk][idx_offset_other])
datewk = weekdaymap[datewk][idx_next_other] # depends on [control=['if'], data=[]]
(nw, nd) = divmod(abs(offset), len(self.workdays))
ndays = nw * 7
while nd > 0:
ndays += abs(weekdaymap[datewk][idx_offset])
datewk = weekdaymap[datewk][idx_next]
nd -= 1 # depends on [control=['while'], data=['nd']]
date += datetime.timedelta(days=ndays * direction)
return date |
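For reference, a minimal standalone sketch of the work-day arithmetic described in the addworkdays docstring above, assuming a fixed Monday-to-Friday work week; the function name and the hard-coded week are illustration-only and this is not the Calendar class from this row.

import datetime

def add_workdays_simple(date, offset):
    # Step one calendar day at a time, counting only Mon-Fri as work days.
    step = 1 if offset > 0 else -1
    remaining = abs(offset)
    while remaining > 0:
        date += datetime.timedelta(days=step)
        if date.weekday() < 5:  # Monday=0 .. Friday=4
            remaining -= 1
    return date

# Friday 2024-01-05 plus one work day lands on Monday 2024-01-08.
print(add_workdays_simple(datetime.date(2024, 1, 5), 1))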
def dumpDictHdf5(RV,o):
""" Dump a dictionary where each page is a list or an array """
for key in list(RV.keys()):
o.create_dataset(name=key,data=SP.array(RV[key]),chunks=True,compression='gzip') | def function[dumpDictHdf5, parameter[RV, o]]:
constant[ Dump a dictionary where each page is a list or an array ]
for taget[name[key]] in starred[call[name[list], parameter[call[name[RV].keys, parameter[]]]]] begin[:]
call[name[o].create_dataset, parameter[]] | keyword[def] identifier[dumpDictHdf5] ( identifier[RV] , identifier[o] ):
literal[string]
keyword[for] identifier[key] keyword[in] identifier[list] ( identifier[RV] . identifier[keys] ()):
identifier[o] . identifier[create_dataset] ( identifier[name] = identifier[key] , identifier[data] = identifier[SP] . identifier[array] ( identifier[RV] [ identifier[key] ]), identifier[chunks] = keyword[True] , identifier[compression] = literal[string] ) | def dumpDictHdf5(RV, o):
""" Dump a dictionary where each page is a list or an array """
for key in list(RV.keys()):
o.create_dataset(name=key, data=SP.array(RV[key]), chunks=True, compression='gzip') # depends on [control=['for'], data=['key']] |
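A hedged usage sketch for dumpDictHdf5: it writes each dictionary entry as a gzip-compressed HDF5 dataset. The snippet below reproduces that behaviour directly with h5py, with numpy standing in for the SP alias used above; the dictionary contents and file name are made up for the example.

import h5py
import numpy as np

RV = {'beta': [0.1, 0.2, 0.3], 'pv': [1e-3, 5e-2]}
with h5py.File('results.h5', 'w') as o:
    # One compressed dataset per key, as in the row above.
    for key in list(RV.keys()):
        o.create_dataset(name=key, data=np.array(RV[key]),
                         chunks=True, compression='gzip')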
def get_headers(self, instant):
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
if self.body_producer is None:
data = self.data
if data is None:
data = b""
headers["x-amz-content-sha256"] = hashlib.sha256(data).hexdigest()
else:
data = None
headers["x-amz-content-sha256"] = b"UNSIGNED-PAYLOAD"
for key, value in self.metadata.iteritems():
headers["x-amz-meta-" + key] = value
for key, value in self.amz_headers.iteritems():
headers["x-amz-" + key] = value
# Before we check if the content type is set, let's see if we can set
# it by guessing the mimetype.
self.set_content_type()
if self.content_type is not None:
headers["Content-Type"] = self.content_type
if self.creds is not None:
headers["Authorization"] = self.sign(
headers,
data,
s3_url_context(self.endpoint, self.bucket, self.object_name),
instant,
method=self.action)
return headers | def function[get_headers, parameter[self, instant]]:
constant[
Build the list of headers needed in order to perform S3 operations.
]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da18bc712d0>], [<ast.Call object at 0x7da18bc72b00>]]
if compare[name[self].body_producer is constant[None]] begin[:]
variable[data] assign[=] name[self].data
if compare[name[data] is constant[None]] begin[:]
variable[data] assign[=] constant[b'']
call[name[headers]][constant[x-amz-content-sha256]] assign[=] call[call[name[hashlib].sha256, parameter[name[data]]].hexdigest, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18bc73460>, <ast.Name object at 0x7da18bc73d30>]]] in starred[call[name[self].metadata.iteritems, parameter[]]] begin[:]
call[name[headers]][binary_operation[constant[x-amz-meta-] + name[key]]] assign[=] name[value]
for taget[tuple[[<ast.Name object at 0x7da18bc72380>, <ast.Name object at 0x7da18bc73340>]]] in starred[call[name[self].amz_headers.iteritems, parameter[]]] begin[:]
call[name[headers]][binary_operation[constant[x-amz-] + name[key]]] assign[=] name[value]
call[name[self].set_content_type, parameter[]]
if compare[name[self].content_type is_not constant[None]] begin[:]
call[name[headers]][constant[Content-Type]] assign[=] name[self].content_type
if compare[name[self].creds is_not constant[None]] begin[:]
call[name[headers]][constant[Authorization]] assign[=] call[name[self].sign, parameter[name[headers], name[data], call[name[s3_url_context], parameter[name[self].endpoint, name[self].bucket, name[self].object_name]], name[instant]]]
return[name[headers]] | keyword[def] identifier[get_headers] ( identifier[self] , identifier[instant] ):
literal[string]
identifier[headers] ={ literal[string] : identifier[_auth_v4] . identifier[makeAMZDate] ( identifier[instant] )}
keyword[if] identifier[self] . identifier[body_producer] keyword[is] keyword[None] :
identifier[data] = identifier[self] . identifier[data]
keyword[if] identifier[data] keyword[is] keyword[None] :
identifier[data] = literal[string]
identifier[headers] [ literal[string] ]= identifier[hashlib] . identifier[sha256] ( identifier[data] ). identifier[hexdigest] ()
keyword[else] :
identifier[data] = keyword[None]
identifier[headers] [ literal[string] ]= literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[metadata] . identifier[iteritems] ():
identifier[headers] [ literal[string] + identifier[key] ]= identifier[value]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[amz_headers] . identifier[iteritems] ():
identifier[headers] [ literal[string] + identifier[key] ]= identifier[value]
identifier[self] . identifier[set_content_type] ()
keyword[if] identifier[self] . identifier[content_type] keyword[is] keyword[not] keyword[None] :
identifier[headers] [ literal[string] ]= identifier[self] . identifier[content_type]
keyword[if] identifier[self] . identifier[creds] keyword[is] keyword[not] keyword[None] :
identifier[headers] [ literal[string] ]= identifier[self] . identifier[sign] (
identifier[headers] ,
identifier[data] ,
identifier[s3_url_context] ( identifier[self] . identifier[endpoint] , identifier[self] . identifier[bucket] , identifier[self] . identifier[object_name] ),
identifier[instant] ,
identifier[method] = identifier[self] . identifier[action] )
keyword[return] identifier[headers] | def get_headers(self, instant):
"""
Build the list of headers needed in order to perform S3 operations.
"""
headers = {'x-amz-date': _auth_v4.makeAMZDate(instant)}
if self.body_producer is None:
data = self.data
if data is None:
data = b'' # depends on [control=['if'], data=['data']]
headers['x-amz-content-sha256'] = hashlib.sha256(data).hexdigest() # depends on [control=['if'], data=[]]
else:
data = None
headers['x-amz-content-sha256'] = b'UNSIGNED-PAYLOAD'
for (key, value) in self.metadata.iteritems():
headers['x-amz-meta-' + key] = value # depends on [control=['for'], data=[]]
for (key, value) in self.amz_headers.iteritems():
headers['x-amz-' + key] = value # depends on [control=['for'], data=[]]
# Before we check if the content type is set, let's see if we can set
# it by guessing the mimetype.
self.set_content_type()
if self.content_type is not None:
headers['Content-Type'] = self.content_type # depends on [control=['if'], data=[]]
if self.creds is not None:
headers['Authorization'] = self.sign(headers, data, s3_url_context(self.endpoint, self.bucket, self.object_name), instant, method=self.action) # depends on [control=['if'], data=[]]
return headers |
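The get_headers row builds an AWS Signature V4 request: it stamps the request with x-amz-date, records a payload hash (or UNSIGNED-PAYLOAD when the body is streamed), copies metadata and amz headers, and finally signs. As a small hedged aside, SigV4 dates use the compact ISO-8601 form shown below; whether this matches _auth_v4.makeAMZDate exactly is an assumption.

from datetime import datetime, timezone

# Compact ISO-8601 timestamp of the kind SigV4 expects in x-amz-date.
instant = datetime(2024, 1, 5, 12, 30, 0, tzinfo=timezone.utc)
print(instant.strftime('%Y%m%dT%H%M%SZ'))  # 20240105T123000Z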
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(
b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
)
return authstr | def function[_basic_auth_str, parameter[username, password]]:
constant[Returns a Basic Auth string.]
variable[authstr] assign[=] binary_operation[constant[Basic ] + call[name[to_native_string], parameter[call[call[name[b64encode], parameter[call[binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20e962470>, <ast.Name object at 0x7da20e9600a0>]]].encode, parameter[constant[latin1]]]]].strip, parameter[]]]]]
return[name[authstr]] | keyword[def] identifier[_basic_auth_str] ( identifier[username] , identifier[password] ):
literal[string]
identifier[authstr] = literal[string] + identifier[to_native_string] (
identifier[b64encode] (( literal[string] %( identifier[username] , identifier[password] )). identifier[encode] ( literal[string] )). identifier[strip] ()
)
keyword[return] identifier[authstr] | def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + to_native_string(b64encode(('%s:%s' % (username, password)).encode('latin1')).strip())
return authstr |
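_basic_auth_str above just base64-encodes "username:password" and prefixes it with "Basic ". A quick standard-library check of the value it builds, using the familiar RFC 7617 example credentials (to_native_string is left out here since it only handles the bytes/str conversion):

from base64 import b64encode

user, password = 'Aladdin', 'open sesame'
token = b64encode(('%s:%s' % (user, password)).encode('latin1')).strip()
print(b'Basic ' + token)  # b'Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=='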
def update(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a user to a team.'''
user = get_object_or_404(User, pk=pk)
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT) | def function[update, parameter[self, request, pk, parent_lookup_seedteam, parent_lookup_seedteam__organization]]:
constant[Add a user to a team.]
variable[user] assign[=] call[name[get_object_or_404], parameter[name[User]]]
variable[team] assign[=] call[name[self].check_team_permissions, parameter[name[request], name[parent_lookup_seedteam], name[parent_lookup_seedteam__organization]]]
call[name[team].users.add, parameter[name[user]]]
return[call[name[Response], parameter[]]] | keyword[def] identifier[update] (
identifier[self] , identifier[request] , identifier[pk] = keyword[None] , identifier[parent_lookup_seedteam] = keyword[None] ,
identifier[parent_lookup_seedteam__organization] = keyword[None] ):
literal[string]
identifier[user] = identifier[get_object_or_404] ( identifier[User] , identifier[pk] = identifier[pk] )
identifier[team] = identifier[self] . identifier[check_team_permissions] (
identifier[request] , identifier[parent_lookup_seedteam] ,
identifier[parent_lookup_seedteam__organization] )
identifier[team] . identifier[users] . identifier[add] ( identifier[user] )
keyword[return] identifier[Response] ( identifier[status] = identifier[status] . identifier[HTTP_204_NO_CONTENT] ) | def update(self, request, pk=None, parent_lookup_seedteam=None, parent_lookup_seedteam__organization=None):
"""Add a user to a team."""
user = get_object_or_404(User, pk=pk)
team = self.check_team_permissions(request, parent_lookup_seedteam, parent_lookup_seedteam__organization)
team.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT) |
def restore_geometry_on_layout_change(self, value):
"""
Setter for **self.__restore_geometry_on_layout_change** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format(
"restore_geometry_on_layout_change", value)
self.__restore_geometry_on_layout_change = value | def function[restore_geometry_on_layout_change, parameter[self, value]]:
constant[
Setter for **self.__restore_geometry_on_layout_change** attribute.
:param value: Attribute value.
:type value: bool
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[bool]]]
name[self].__restore_geometry_on_layout_change assign[=] name[value] | keyword[def] identifier[restore_geometry_on_layout_change] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[bool] , literal[string] . identifier[format] (
literal[string] , identifier[value] )
identifier[self] . identifier[__restore_geometry_on_layout_change] = identifier[value] | def restore_geometry_on_layout_change(self, value):
"""
Setter for **self.__restore_geometry_on_layout_change** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format('restore_geometry_on_layout_change', value) # depends on [control=['if'], data=['value']]
self.__restore_geometry_on_layout_change = value |
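The setter above validates its value with a type assertion before storing it. Below is a small standalone illustration of the same guarded-setter pattern on a hypothetical class; the names are invented for the example and this is not the original widget code.

class Example(object):
    def __init__(self):
        self.__flag = True

    @property
    def flag(self):
        return self.__flag

    @flag.setter
    def flag(self, value):
        # Accept only real booleans, mirroring the assert in the row above.
        if value is not None:
            assert type(value) is bool, "'flag' attribute: '{0}' type is not 'bool'!".format(value)
        self.__flag = value

e = Example()
e.flag = False    # accepted
# e.flag = "no"   # would raise AssertionError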
def fit(self, Z, classes=None):
"""Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
self._classes_ = np.unique(classes)
return self._spark_fit(SparkSGDClassifier, Z) | def function[fit, parameter[self, Z, classes]]:
constant[Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
]
call[name[check_rdd], parameter[name[Z], dictionary[[<ast.Constant object at 0x7da20c6c69b0>], [<ast.Tuple object at 0x7da20c6c4bb0>]]]]
name[self]._classes_ assign[=] call[name[np].unique, parameter[name[classes]]]
return[call[name[self]._spark_fit, parameter[name[SparkSGDClassifier], name[Z]]]] | keyword[def] identifier[fit] ( identifier[self] , identifier[Z] , identifier[classes] = keyword[None] ):
literal[string]
identifier[check_rdd] ( identifier[Z] ,{ literal[string] :( identifier[sp] . identifier[spmatrix] , identifier[np] . identifier[ndarray] )})
identifier[self] . identifier[_classes_] = identifier[np] . identifier[unique] ( identifier[classes] )
keyword[return] identifier[self] . identifier[_spark_fit] ( identifier[SparkSGDClassifier] , identifier[Z] ) | def fit(self, Z, classes=None):
"""Fit the model according to the given training data.
Parameters
----------
Z : DictRDD containing (X, y) pairs
X - Training vector
y - Target labels
classes : iterable
The set of available classes
Returns
-------
self : object
Returns self.
"""
check_rdd(Z, {'X': (sp.spmatrix, np.ndarray)})
self._classes_ = np.unique(classes)
return self._spark_fit(SparkSGDClassifier, Z) |
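One small detail worth noting from the fit row: np.unique deduplicates and sorts the supplied class labels before they are stored on the estimator, as the short check below shows. The Spark-side fitting itself is delegated to the library and is not sketched here.

import numpy as np

print(np.unique([2, 0, 1, 1, 2]))  # [0 1 2]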