code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
---|---|---|---|
def render_applicationNavigation(self, ctx, data):
    """
    For authenticated users, add primary application navigation to the
    given tag. For unauthenticated users, remove the given tag from the
    output.

    @see L{xmantissa.webnav.applicationNavigation}
    """
    if self.username is None:
        # Unauthenticated: emit nothing in place of the tag.
        return ''
    privapp = self._getViewerPrivateApplication()
    navigation = privapp.getPageComponents().navigation
    return applicationNavigation(ctx, privapp, navigation)
constant[
For authenticated users, add primary application navigation to the
given tag. For unauthenticated users, remove the given tag from the
output.
@see L{xmantissa.webnav.applicationNavigation}
]
if compare[name[self].username is constant[None]] begin[:]
return[constant[]]
variable[translator] assign[=] call[name[self]._getViewerPrivateApplication, parameter[]]
return[call[name[applicationNavigation], parameter[name[ctx], name[translator], call[name[translator].getPageComponents, parameter[]].navigation]]] | keyword[def] identifier[render_applicationNavigation] ( identifier[self] , identifier[ctx] , identifier[data] ):
literal[string]
keyword[if] identifier[self] . identifier[username] keyword[is] keyword[None] :
keyword[return] literal[string]
identifier[translator] = identifier[self] . identifier[_getViewerPrivateApplication] ()
keyword[return] identifier[applicationNavigation] (
identifier[ctx] ,
identifier[translator] ,
identifier[translator] . identifier[getPageComponents] (). identifier[navigation] ) | def render_applicationNavigation(self, ctx, data):
"""
For authenticated users, add primary application navigation to the
given tag. For unauthenticated users, remove the given tag from the
output.
@see L{xmantissa.webnav.applicationNavigation}
"""
if self.username is None:
return '' # depends on [control=['if'], data=[]]
translator = self._getViewerPrivateApplication()
return applicationNavigation(ctx, translator, translator.getPageComponents().navigation) |
def register_module(module=None):
    """
    Registers given module or caller introspected module in the candidates modules for tracing.

    :param module: Module to register.
    :type module: ModuleType
    :return: Definition success.
    :rtype: bool
    """
    global REGISTERED_MODULES

    if module is None:
        # Note: inspect.getmodule() can return the wrong module if it has been imported with different relatives paths.
        caller_frame = inspect.currentframe().f_back
        module = sys.modules.get(caller_frame.f_globals["__name__"])

    REGISTERED_MODULES.add(module)
    return True
constant[
Registers given module or caller introspected module in the candidates modules for tracing.
:param module: Module to register.
:type module: ModuleType
:return: Definition success.
:rtype: bool
]
<ast.Global object at 0x7da18c4ceb90>
if compare[name[module] is constant[None]] begin[:]
variable[module] assign[=] call[name[sys].modules.get, parameter[call[call[name[inspect].currentframe, parameter[]].f_back.f_globals][constant[__name__]]]]
call[name[REGISTERED_MODULES].add, parameter[name[module]]]
return[constant[True]] | keyword[def] identifier[register_module] ( identifier[module] = keyword[None] ):
literal[string]
keyword[global] identifier[REGISTERED_MODULES]
keyword[if] identifier[module] keyword[is] keyword[None] :
identifier[module] = identifier[sys] . identifier[modules] . identifier[get] ( identifier[inspect] . identifier[currentframe] (). identifier[f_back] . identifier[f_globals] [ literal[string] ])
identifier[REGISTERED_MODULES] . identifier[add] ( identifier[module] )
keyword[return] keyword[True] | def register_module(module=None):
"""
Registers given module or caller introspected module in the candidates modules for tracing.
:param module: Module to register.
:type module: ModuleType
:return: Definition success.
:rtype: bool
"""
global REGISTERED_MODULES
if module is None:
# Note: inspect.getmodule() can return the wrong module if it has been imported with different relatives paths.
module = sys.modules.get(inspect.currentframe().f_back.f_globals['__name__']) # depends on [control=['if'], data=['module']]
REGISTERED_MODULES.add(module)
return True |
def make_table(headers, lines, prefix=None):
    """
    Generates an ASCII table according to the given headers and lines

    :param headers: List of table headers (N-tuple)
    :param lines: List of table lines (N-tuples)
    :param prefix: Optional prefix for each line
    :return: The ASCII representation of the table
    :raise ValueError: Different number of columns between headers and
    lines, or a line is not iterable
    """
    # Normalize the prefix
    prefix = str(prefix or "")
    # Widest entry seen so far for each column (seeded with header widths)
    lengths = [len(title) for title in headers]
    # Store the number of columns (0-based index of the last column)
    nb_columns = len(lengths) - 1
    # Stringify every cell up front, tracking per-column maximum widths
    str_lines = []
    for idx, line in enumerate(lines):
        str_line = []
        str_lines.append(str_line)
        column = -1
        try:
            for column, entry in enumerate(line):
                str_entry = str(entry)
                str_line.append(str_entry)
                if len(str_entry) > lengths[column]:
                    lengths[column] = len(str_entry)
        except IndexError:
            # Line has more columns than the headers
            raise ValueError(
                "Different sizes for header and lines "
                "(line {0})".format(idx + 1)
            )
        except (TypeError, AttributeError):
            # Line is not iterable at all
            # FIX: the message was previously passed as ("...%s", name),
            # i.e. two separate exception args, and was never formatted.
            raise ValueError(
                "Invalid type of line: {0}".format(type(line).__name__)
            )
        else:
            if column != nb_columns:
                # Line has fewer columns than the headers
                raise ValueError(
                    "Different sizes for header and lines "
                    "(line {0})".format(idx + 1)
                )
    # Prepare the head (centered text)
    format_str = "{0}|".format(prefix)
    for column, length in enumerate(lengths):
        format_str += " {%d:^%d} |" % (column, length)
    head_str = format_str.format(*headers)
    # Prepare the separator, according the length of the headers string
    separator = "{0}{1}".format(prefix, "-" * (len(head_str) - len(prefix)))
    idx = head_str.find("|")
    while idx != -1:
        # Put a '+' in the separator under each column delimiter
        separator = "+".join((separator[:idx], separator[idx + 1 :]))
        idx = head_str.find("|", idx + 1)
    # Prepare the output: top rule, header row, then a '=' rule
    output = [separator, head_str, separator.replace("-", "=")]
    # Compute the lines (left-aligned cells)
    format_str = format_str.replace("^", "<")
    for line in str_lines:
        output.append(format_str.format(*line))
        output.append(separator)
    # Force the last end of line
    output.append("")
    # Join'em
    return "\n".join(output)
constant[
Generates an ASCII table according to the given headers and lines
:param headers: List of table headers (N-tuple)
:param lines: List of table lines (N-tuples)
:param prefix: Optional prefix for each line
:return: The ASCII representation of the table
:raise ValueError: Different number of columns between headers and
lines
]
variable[prefix] assign[=] call[name[str], parameter[<ast.BoolOp object at 0x7da18f09ed40>]]
variable[lengths] assign[=] <ast.ListComp object at 0x7da18f09e7d0>
variable[nb_columns] assign[=] binary_operation[call[name[len], parameter[name[lengths]]] - constant[1]]
variable[str_lines] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f09f280>, <ast.Name object at 0x7da18f09d1e0>]]] in starred[call[name[enumerate], parameter[name[lines]]]] begin[:]
variable[str_line] assign[=] list[[]]
call[name[str_lines].append, parameter[name[str_line]]]
variable[column] assign[=] <ast.UnaryOp object at 0x7da18f09cca0>
<ast.Try object at 0x7da18f09db10>
variable[format_str] assign[=] call[constant[{0}|].format, parameter[name[prefix]]]
for taget[tuple[[<ast.Name object at 0x7da18f09fa30>, <ast.Name object at 0x7da18f09fd30>]]] in starred[call[name[enumerate], parameter[name[lengths]]]] begin[:]
<ast.AugAssign object at 0x7da18f09ca60>
variable[head_str] assign[=] call[name[format_str].format, parameter[<ast.Starred object at 0x7da18f09fbb0>]]
variable[separator] assign[=] call[constant[{0}{1}].format, parameter[name[prefix], binary_operation[constant[-] * binary_operation[call[name[len], parameter[name[head_str]]] - call[name[len], parameter[name[prefix]]]]]]]
variable[idx] assign[=] call[name[head_str].find, parameter[constant[|]]]
while compare[name[idx] not_equal[!=] <ast.UnaryOp object at 0x7da18f09ca00>] begin[:]
variable[separator] assign[=] call[constant[+].join, parameter[tuple[[<ast.Subscript object at 0x7da18f09d570>, <ast.Subscript object at 0x7da18f09fdc0>]]]]
variable[idx] assign[=] call[name[head_str].find, parameter[constant[|], binary_operation[name[idx] + constant[1]]]]
variable[output] assign[=] list[[<ast.Name object at 0x7da18f09f4c0>, <ast.Name object at 0x7da18f09c340>, <ast.Call object at 0x7da18f09c370>]]
variable[format_str] assign[=] call[name[format_str].replace, parameter[constant[^], constant[<]]]
for taget[name[line]] in starred[name[str_lines]] begin[:]
call[name[output].append, parameter[call[name[format_str].format, parameter[<ast.Starred object at 0x7da18f09f340>]]]]
call[name[output].append, parameter[name[separator]]]
call[name[output].append, parameter[constant[]]]
return[call[constant[
].join, parameter[name[output]]]] | keyword[def] identifier[make_table] ( identifier[headers] , identifier[lines] , identifier[prefix] = keyword[None] ):
literal[string]
identifier[prefix] = identifier[str] ( identifier[prefix] keyword[or] literal[string] )
identifier[lengths] =[ identifier[len] ( identifier[title] ) keyword[for] identifier[title] keyword[in] identifier[headers] ]
identifier[nb_columns] = identifier[len] ( identifier[lengths] )- literal[int]
identifier[str_lines] =[]
keyword[for] identifier[idx] , identifier[line] keyword[in] identifier[enumerate] ( identifier[lines] ):
identifier[str_line] =[]
identifier[str_lines] . identifier[append] ( identifier[str_line] )
identifier[column] =- literal[int]
keyword[try] :
keyword[for] identifier[column] , identifier[entry] keyword[in] identifier[enumerate] ( identifier[line] ):
identifier[str_entry] = identifier[str] ( identifier[entry] )
identifier[str_line] . identifier[append] ( identifier[str_entry] )
keyword[if] identifier[len] ( identifier[str_entry] )> identifier[lengths] [ identifier[column] ]:
identifier[lengths] [ identifier[column] ]= identifier[len] ( identifier[str_entry] )
keyword[except] identifier[IndexError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[idx] + literal[int] )
)
keyword[except] ( identifier[TypeError] , identifier[AttributeError] ):
keyword[raise] identifier[ValueError] (
literal[string] , identifier[type] ( identifier[line] ). identifier[__name__]
)
keyword[else] :
keyword[if] identifier[column] != identifier[nb_columns] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[idx] + literal[int] )
)
identifier[format_str] = literal[string] . identifier[format] ( identifier[prefix] )
keyword[for] identifier[column] , identifier[length] keyword[in] identifier[enumerate] ( identifier[lengths] ):
identifier[format_str] += literal[string] %( identifier[column] , identifier[length] )
identifier[head_str] = identifier[format_str] . identifier[format] (* identifier[headers] )
identifier[separator] = literal[string] . identifier[format] ( identifier[prefix] , literal[string] *( identifier[len] ( identifier[head_str] )- identifier[len] ( identifier[prefix] )))
identifier[idx] = identifier[head_str] . identifier[find] ( literal[string] )
keyword[while] identifier[idx] !=- literal[int] :
identifier[separator] = literal[string] . identifier[join] (( identifier[separator] [: identifier[idx] ], identifier[separator] [ identifier[idx] + literal[int] :]))
identifier[idx] = identifier[head_str] . identifier[find] ( literal[string] , identifier[idx] + literal[int] )
identifier[output] =[ identifier[separator] , identifier[head_str] , identifier[separator] . identifier[replace] ( literal[string] , literal[string] )]
identifier[format_str] = identifier[format_str] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[line] keyword[in] identifier[str_lines] :
identifier[output] . identifier[append] ( identifier[format_str] . identifier[format] (* identifier[line] ))
identifier[output] . identifier[append] ( identifier[separator] )
identifier[output] . identifier[append] ( literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[output] ) | def make_table(headers, lines, prefix=None):
"""
Generates an ASCII table according to the given headers and lines
:param headers: List of table headers (N-tuple)
:param lines: List of table lines (N-tuples)
:param prefix: Optional prefix for each line
:return: The ASCII representation of the table
:raise ValueError: Different number of columns between headers and
lines
"""
# Normalize the prefix
prefix = str(prefix or '')
# Maximum lengths
lengths = [len(title) for title in headers]
# Store the number of columns (0-based)
nb_columns = len(lengths) - 1
# Lines
str_lines = []
for (idx, line) in enumerate(lines):
# Recompute lengths
str_line = []
str_lines.append(str_line)
column = -1
try:
for (column, entry) in enumerate(line):
str_entry = str(entry)
str_line.append(str_entry)
if len(str_entry) > lengths[column]:
lengths[column] = len(str_entry) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
# Line too small/big
raise ValueError('Different sizes for header and lines (line {0})'.format(idx + 1)) # depends on [control=['except'], data=[]]
except (TypeError, AttributeError):
# Invalid type of line
raise ValueError('Invalid type of line: %s', type(line).__name__) # depends on [control=['except'], data=[]]
else:
if column != nb_columns:
# Check if all lines have the same number of columns
raise ValueError('Different sizes for header and lines (line {0})'.format(idx + 1)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Prepare the head (centered text)
format_str = '{0}|'.format(prefix)
for (column, length) in enumerate(lengths):
format_str += ' {%d:^%d} |' % (column, length) # depends on [control=['for'], data=[]]
head_str = format_str.format(*headers)
# Prepare the separator, according the length of the headers string
separator = '{0}{1}'.format(prefix, '-' * (len(head_str) - len(prefix)))
idx = head_str.find('|')
while idx != -1:
separator = '+'.join((separator[:idx], separator[idx + 1:]))
idx = head_str.find('|', idx + 1) # depends on [control=['while'], data=['idx']]
# Prepare the output
output = [separator, head_str, separator.replace('-', '=')]
# Compute the lines
format_str = format_str.replace('^', '<')
for line in str_lines:
output.append(format_str.format(*line))
output.append(separator) # depends on [control=['for'], data=['line']]
# Force the last end of line
output.append('')
# Join'em
return '\n'.join(output) |
def get_equivalents(term_id: str) -> Mapping[str, list]:
    """Get equivalents given ns:id

    Args:
        term_id (str): term id

    Returns:
        Mapping[str, list]: e.g.
            {"equivalents": [{"term_id": "HGNC:5", "namespace": "HGNC", "primary": False}, ...],
             "errors": [<error message strings>]}

    Note: the previous annotation (List[Mapping[...]]) was wrong -- every
    return path yields a dict with "equivalents" and "errors" keys.
    """
    try:
        errors = []
        # Resolve the given id to its canonical term record(s) first
        terms = get_terms(term_id)
        if len(terms) == 0:
            return {"equivalents": [], "errors": errors}
        elif len(terms) > 1:
            # Ambiguous input id -- refuse to guess
            errors.append(
                f'Too many primary IDs returned. Given term_id: {term_id} matches these term_ids: {[term["id"] for term in terms]}'
            )
            return {"equivalents": [], "errors": errors}
        else:
            term_id = terms[0]["id"]
        term_id_key = bel.db.arangodb.arango_id_to_key(term_id)
        equivalents = []
        # Breadth-first traversal of the equivalence graph, up to 5 hops out
        query = f"""
    FOR vertex, edge IN 1..5
        ANY 'equivalence_nodes/{term_id_key}' equivalence_edges
        OPTIONS {{bfs: true, uniqueVertices : 'global'}}
        RETURN DISTINCT {{
            term_id: vertex.name,
            namespace: vertex.namespace,
            primary: vertex.primary
        }}
    """
        cursor = belns_db.aql.execute(query, count=True, batch_size=20)
        for doc in cursor:
            # Skip vertices with no name (malformed nodes)
            if doc.get("term_id", False):
                equivalents.append(doc)
        # The queried term itself belongs to its own equivalence set
        equivalents.append(
            {"term_id": term_id, "namespace": term_id.split(":")[0], "primary": True}
        )
        return {"equivalents": equivalents, "errors": errors}
    except Exception as e:
        # Best-effort API: report the failure instead of propagating it
        log.error(f"Problem getting term equivalents for {term_id} msg: {e}")
        return {"equivalents": [], "errors": [f"Unexpected error {e}"]}
constant[Get equivalents given ns:id
Args:
term_id (str): term id
Returns:
List[Mapping[str, Union[str, bool]]]: e.g. [{'term_id': 'HGNC:5', 'namespace': 'HGNC'}, 'primary': False]
]
<ast.Try object at 0x7da1b19cd330> | keyword[def] identifier[get_equivalents] ( identifier[term_id] : identifier[str] )-> identifier[List] [ identifier[Mapping] [ identifier[str] , identifier[Union] [ identifier[str] , identifier[bool] ]]]:
literal[string]
keyword[try] :
identifier[errors] =[]
identifier[terms] = identifier[get_terms] ( identifier[term_id] )
keyword[if] identifier[len] ( identifier[terms] )== literal[int] :
keyword[return] { literal[string] :[], literal[string] : identifier[errors] }
keyword[elif] identifier[len] ( identifier[terms] )> literal[int] :
identifier[errors] . identifier[append] (
literal[string]
)
keyword[return] { literal[string] :[], literal[string] : identifier[errors] }
keyword[else] :
identifier[term_id] = identifier[terms] [ literal[int] ][ literal[string] ]
identifier[term_id_key] = identifier[bel] . identifier[db] . identifier[arangodb] . identifier[arango_id_to_key] ( identifier[term_id] )
identifier[equivalents] =[]
identifier[query] = literal[string]
identifier[cursor] = identifier[belns_db] . identifier[aql] . identifier[execute] ( identifier[query] , identifier[count] = keyword[True] , identifier[batch_size] = literal[int] )
keyword[for] identifier[doc] keyword[in] identifier[cursor] :
keyword[if] identifier[doc] . identifier[get] ( literal[string] , keyword[False] ):
identifier[equivalents] . identifier[append] ( identifier[doc] )
identifier[equivalents] . identifier[append] (
{ literal[string] : identifier[term_id] , literal[string] : identifier[term_id] . identifier[split] ( literal[string] )[ literal[int] ], literal[string] : keyword[True] }
)
keyword[return] { literal[string] : identifier[equivalents] , literal[string] : identifier[errors] }
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[log] . identifier[error] ( literal[string] )
keyword[return] { literal[string] :[], literal[string] :[ literal[string] ]} | def get_equivalents(term_id: str) -> List[Mapping[str, Union[str, bool]]]:
"""Get equivalents given ns:id
Args:
term_id (str): term id
Returns:
List[Mapping[str, Union[str, bool]]]: e.g. [{'term_id': 'HGNC:5', 'namespace': 'HGNC'}, 'primary': False]
"""
try:
errors = []
terms = get_terms(term_id)
if len(terms) == 0:
return {'equivalents': [], 'errors': errors} # depends on [control=['if'], data=[]]
elif len(terms) > 1:
errors.append(f"Too many primary IDs returned. Given term_id: {term_id} matches these term_ids: {[term['id'] for term in terms]}")
return {'equivalents': [], 'errors': errors} # depends on [control=['if'], data=[]]
else:
term_id = terms[0]['id']
term_id_key = bel.db.arangodb.arango_id_to_key(term_id)
equivalents = []
query = f"\n FOR vertex, edge IN 1..5\n ANY 'equivalence_nodes/{term_id_key}' equivalence_edges\n OPTIONS {{bfs: true, uniqueVertices : 'global'}}\n RETURN DISTINCT {{\n term_id: vertex.name,\n namespace: vertex.namespace,\n primary: vertex.primary\n }}\n "
cursor = belns_db.aql.execute(query, count=True, batch_size=20)
for doc in cursor:
if doc.get('term_id', False):
equivalents.append(doc) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['doc']]
equivalents.append({'term_id': term_id, 'namespace': term_id.split(':')[0], 'primary': True})
return {'equivalents': equivalents, 'errors': errors} # depends on [control=['try'], data=[]]
except Exception as e:
log.error(f'Problem getting term equivalents for {term_id} msg: {e}')
return {'equivalents': [], 'errors': [f'Unexpected error {e}']} # depends on [control=['except'], data=['e']] |
def dropdb():
    """Drop database tables"""
    manager.db.engine.echo = True
    # Destructive operation: require explicit confirmation first.
    if not prompt_bool("Are you sure you want to lose all your data"):
        return
    manager.db.drop_all()
    _, alembic_version = alembic_table_metadata()
    alembic_version.drop()
    manager.db.session.commit()
constant[Drop database tables]
name[manager].db.engine.echo assign[=] constant[True]
if call[name[prompt_bool], parameter[constant[Are you sure you want to lose all your data]]] begin[:]
call[name[manager].db.drop_all, parameter[]]
<ast.Tuple object at 0x7da20c991630> assign[=] call[name[alembic_table_metadata], parameter[]]
call[name[alembic_version].drop, parameter[]]
call[name[manager].db.session.commit, parameter[]] | keyword[def] identifier[dropdb] ():
literal[string]
identifier[manager] . identifier[db] . identifier[engine] . identifier[echo] = keyword[True]
keyword[if] identifier[prompt_bool] ( literal[string] ):
identifier[manager] . identifier[db] . identifier[drop_all] ()
identifier[metadata] , identifier[alembic_version] = identifier[alembic_table_metadata] ()
identifier[alembic_version] . identifier[drop] ()
identifier[manager] . identifier[db] . identifier[session] . identifier[commit] () | def dropdb():
"""Drop database tables"""
manager.db.engine.echo = True
if prompt_bool('Are you sure you want to lose all your data'):
manager.db.drop_all()
(metadata, alembic_version) = alembic_table_metadata()
alembic_version.drop()
manager.db.session.commit() # depends on [control=['if'], data=[]] |
def code(ctx, show_hidden, query, single):
    """
    Generate codes.

    Generate codes from credentials stored on your YubiKey.

    Provide a query string to match one or more specific credentials.
    Touch and HOTP credentials require a single match to be triggered.
    """
    # Ensure the OATH application is unlocked before talking to it.
    ensure_validated(ctx)
    controller = ctx.obj['controller']
    # Calculate all stored credentials in one pass; hidden credentials are
    # filtered out unless explicitly requested.
    creds = [(cr, c)
             for (cr, c) in controller.calculate_all()
             if show_hidden or not cr.is_hidden
             ]
    creds = _search(creds, query)
    if len(creds) == 1:
        # Exactly one match: safe to trigger even touch/HOTP credentials.
        cred, code = creds[0]
        if cred.touch:
            prompt_for_touch()
        try:
            if cred.oath_type == OATH_TYPE.HOTP:
                # HOTP might require touch, we don't know.
                # Assume yes after 500ms.
                hotp_touch_timer = Timer(0.500, prompt_for_touch)
                hotp_touch_timer.start()
                creds = [(cred, controller.calculate(cred))]
                hotp_touch_timer.cancel()
            elif code is None:
                # Calculation was deferred (e.g. touch credential): do it now.
                creds = [(cred, controller.calculate(cred))]
        except APDUError as e:
            if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
                ctx.fail('Touch credential timed out!')
    elif single:
        # A single code was requested but the query matched several
        # credentials: report the ambiguity and abort.
        _error_multiple_hits(ctx, [cr for cr, c in creds])
    if single:
        click.echo(creds[0][1].value)
    else:
        # Tabular output: one row per credential, sorted by key.
        creds.sort()
        outputs = [
            (
                cr.printable_key,
                # Placeholder text for credentials that were not calculated.
                c.value if c
                else '[Touch Credential]' if cr.touch
                else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP
                else ''
            ) for (cr, c) in creds
        ]
        # Column widths: left-align names, right-align codes.
        longest_name = max(len(n) for (n, c) in outputs) if outputs else 0
        longest_code = max(len(c) for (n, c) in outputs) if outputs else 0
        format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code)
        for name, result in outputs:
            click.echo(format_str.format(name, result))
constant[
Generate codes.
Generate codes from credentials stored on your YubiKey.
Provide a query string to match one or more specific credentials.
Touch and HOTP credentials require a single match to be triggered.
]
call[name[ensure_validated], parameter[name[ctx]]]
variable[controller] assign[=] call[name[ctx].obj][constant[controller]]
variable[creds] assign[=] <ast.ListComp object at 0x7da2044c1b40>
variable[creds] assign[=] call[name[_search], parameter[name[creds], name[query]]]
if compare[call[name[len], parameter[name[creds]]] equal[==] constant[1]] begin[:]
<ast.Tuple object at 0x7da2044c1780> assign[=] call[name[creds]][constant[0]]
if name[cred].touch begin[:]
call[name[prompt_for_touch], parameter[]]
<ast.Try object at 0x7da2044c1030>
if name[single] begin[:]
call[name[click].echo, parameter[call[call[name[creds]][constant[0]]][constant[1]].value]] | keyword[def] identifier[code] ( identifier[ctx] , identifier[show_hidden] , identifier[query] , identifier[single] ):
literal[string]
identifier[ensure_validated] ( identifier[ctx] )
identifier[controller] = identifier[ctx] . identifier[obj] [ literal[string] ]
identifier[creds] =[( identifier[cr] , identifier[c] )
keyword[for] ( identifier[cr] , identifier[c] ) keyword[in] identifier[controller] . identifier[calculate_all] ()
keyword[if] identifier[show_hidden] keyword[or] keyword[not] identifier[cr] . identifier[is_hidden]
]
identifier[creds] = identifier[_search] ( identifier[creds] , identifier[query] )
keyword[if] identifier[len] ( identifier[creds] )== literal[int] :
identifier[cred] , identifier[code] = identifier[creds] [ literal[int] ]
keyword[if] identifier[cred] . identifier[touch] :
identifier[prompt_for_touch] ()
keyword[try] :
keyword[if] identifier[cred] . identifier[oath_type] == identifier[OATH_TYPE] . identifier[HOTP] :
identifier[hotp_touch_timer] = identifier[Timer] ( literal[int] , identifier[prompt_for_touch] )
identifier[hotp_touch_timer] . identifier[start] ()
identifier[creds] =[( identifier[cred] , identifier[controller] . identifier[calculate] ( identifier[cred] ))]
identifier[hotp_touch_timer] . identifier[cancel] ()
keyword[elif] identifier[code] keyword[is] keyword[None] :
identifier[creds] =[( identifier[cred] , identifier[controller] . identifier[calculate] ( identifier[cred] ))]
keyword[except] identifier[APDUError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[sw] == identifier[SW] . identifier[SECURITY_CONDITION_NOT_SATISFIED] :
identifier[ctx] . identifier[fail] ( literal[string] )
keyword[elif] identifier[single] :
identifier[_error_multiple_hits] ( identifier[ctx] ,[ identifier[cr] keyword[for] identifier[cr] , identifier[c] keyword[in] identifier[creds] ])
keyword[if] identifier[single] :
identifier[click] . identifier[echo] ( identifier[creds] [ literal[int] ][ literal[int] ]. identifier[value] )
keyword[else] :
identifier[creds] . identifier[sort] ()
identifier[outputs] =[
(
identifier[cr] . identifier[printable_key] ,
identifier[c] . identifier[value] keyword[if] identifier[c]
keyword[else] literal[string] keyword[if] identifier[cr] . identifier[touch]
keyword[else] literal[string] keyword[if] identifier[cr] . identifier[oath_type] == identifier[OATH_TYPE] . identifier[HOTP]
keyword[else] literal[string]
) keyword[for] ( identifier[cr] , identifier[c] ) keyword[in] identifier[creds]
]
identifier[longest_name] = identifier[max] ( identifier[len] ( identifier[n] ) keyword[for] ( identifier[n] , identifier[c] ) keyword[in] identifier[outputs] ) keyword[if] identifier[outputs] keyword[else] literal[int]
identifier[longest_code] = identifier[max] ( identifier[len] ( identifier[c] ) keyword[for] ( identifier[n] , identifier[c] ) keyword[in] identifier[outputs] ) keyword[if] identifier[outputs] keyword[else] literal[int]
identifier[format_str] = literal[string] %( identifier[longest_name] , identifier[longest_code] )
keyword[for] identifier[name] , identifier[result] keyword[in] identifier[outputs] :
identifier[click] . identifier[echo] ( identifier[format_str] . identifier[format] ( identifier[name] , identifier[result] )) | def code(ctx, show_hidden, query, single):
"""
Generate codes.
Generate codes from credentials stored on your YubiKey.
Provide a query string to match one or more specific credentials.
Touch and HOTP credentials require a single match to be triggered.
"""
ensure_validated(ctx)
controller = ctx.obj['controller']
creds = [(cr, c) for (cr, c) in controller.calculate_all() if show_hidden or not cr.is_hidden]
creds = _search(creds, query)
if len(creds) == 1:
(cred, code) = creds[0]
if cred.touch:
prompt_for_touch() # depends on [control=['if'], data=[]]
try:
if cred.oath_type == OATH_TYPE.HOTP:
# HOTP might require touch, we don't know.
# Assume yes after 500ms.
hotp_touch_timer = Timer(0.5, prompt_for_touch)
hotp_touch_timer.start()
creds = [(cred, controller.calculate(cred))]
hotp_touch_timer.cancel() # depends on [control=['if'], data=[]]
elif code is None:
creds = [(cred, controller.calculate(cred))] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except APDUError as e:
if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
ctx.fail('Touch credential timed out!') # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
elif single:
_error_multiple_hits(ctx, [cr for (cr, c) in creds]) # depends on [control=['if'], data=[]]
if single:
click.echo(creds[0][1].value) # depends on [control=['if'], data=[]]
else:
creds.sort()
outputs = [(cr.printable_key, c.value if c else '[Touch Credential]' if cr.touch else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP else '') for (cr, c) in creds]
longest_name = max((len(n) for (n, c) in outputs)) if outputs else 0
longest_code = max((len(c) for (n, c) in outputs)) if outputs else 0
format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code)
for (name, result) in outputs:
click.echo(format_str.format(name, result)) # depends on [control=['for'], data=[]] |
def hook_point(self, hook_name):
    """Generic function to call modules methods if such method is avalaible

    :param hook_name: function name to call
    :type hook_name: str
    :return: None
    """
    # Delegate to the owning daemon, passing ourselves as the handle.
    daemon = self.my_daemon
    daemon.hook_point(hook_name=hook_name, handle=self)
constant[Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
]
call[name[self].my_daemon.hook_point, parameter[]] | keyword[def] identifier[hook_point] ( identifier[self] , identifier[hook_name] ):
literal[string]
identifier[self] . identifier[my_daemon] . identifier[hook_point] ( identifier[hook_name] = identifier[hook_name] , identifier[handle] = identifier[self] ) | def hook_point(self, hook_name):
"""Generic function to call modules methods if such method is avalaible
:param hook_name: function name to call
:type hook_name: str
:return:None
"""
self.my_daemon.hook_point(hook_name=hook_name, handle=self) |
def delete(self):
    """Delete this file (and, locally, its metadata and dead symlinks).

    Returns:
        bool: False if the local file does not exist; True otherwise.
    """
    if not self.remote:
        # Local mode: operate directly on the project's filesystem.
        if not os.path.exists(self.projectpath + self.basedir + '/' + self.filename):
            return False
        else:
            os.unlink(self.projectpath + self.basedir + '/' + self.filename)
        #Remove metadata
        metafile = self.projectpath + self.basedir + '/' + self.metafilename()
        if os.path.exists(metafile):
            os.unlink(metafile)
        #also remove any .*.INPUTTEMPLATE.* links that pointed to this file: simply remove all dead links
        for linkf,realf in clam.common.util.globsymlinks(self.projectpath + self.basedir + '/.*.INPUTTEMPLATE.*'):
            if not os.path.exists(realf):
                os.unlink(linkf)
        return True
    else:
        # Remote mode: issue an HTTP DELETE against the server-side path.
        # NOTE(review): the response status is not checked -- presumably
        # best-effort; confirm whether failures should be reported.
        if self.client:
            requestparams = self.client.initrequest()
        else:
            requestparams = {}
        requests.delete( self.projectpath + self.basedir + '/' + self.filename, **requestparams)
        return True
constant[Delete this file]
if <ast.UnaryOp object at 0x7da18dc06a40> begin[:]
if <ast.UnaryOp object at 0x7da18dc05e70> begin[:]
return[constant[False]]
variable[metafile] assign[=] binary_operation[binary_operation[binary_operation[name[self].projectpath + name[self].basedir] + constant[/]] + call[name[self].metafilename, parameter[]]]
if call[name[os].path.exists, parameter[name[metafile]]] begin[:]
call[name[os].unlink, parameter[name[metafile]]]
for taget[tuple[[<ast.Name object at 0x7da18bc73d90>, <ast.Name object at 0x7da18bc723e0>]]] in starred[call[name[clam].common.util.globsymlinks, parameter[binary_operation[binary_operation[name[self].projectpath + name[self].basedir] + constant[/.*.INPUTTEMPLATE.*]]]]] begin[:]
if <ast.UnaryOp object at 0x7da18bc70c40> begin[:]
call[name[os].unlink, parameter[name[linkf]]]
return[constant[True]] | keyword[def] identifier[delete] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[remote] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[projectpath] + identifier[self] . identifier[basedir] + literal[string] + identifier[self] . identifier[filename] ):
keyword[return] keyword[False]
keyword[else] :
identifier[os] . identifier[unlink] ( identifier[self] . identifier[projectpath] + identifier[self] . identifier[basedir] + literal[string] + identifier[self] . identifier[filename] )
identifier[metafile] = identifier[self] . identifier[projectpath] + identifier[self] . identifier[basedir] + literal[string] + identifier[self] . identifier[metafilename] ()
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[metafile] ):
identifier[os] . identifier[unlink] ( identifier[metafile] )
keyword[for] identifier[linkf] , identifier[realf] keyword[in] identifier[clam] . identifier[common] . identifier[util] . identifier[globsymlinks] ( identifier[self] . identifier[projectpath] + identifier[self] . identifier[basedir] + literal[string] ):
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[realf] ):
identifier[os] . identifier[unlink] ( identifier[linkf] )
keyword[return] keyword[True]
keyword[else] :
keyword[if] identifier[self] . identifier[client] :
identifier[requestparams] = identifier[self] . identifier[client] . identifier[initrequest] ()
keyword[else] :
identifier[requestparams] ={}
identifier[requests] . identifier[delete] ( identifier[self] . identifier[projectpath] + identifier[self] . identifier[basedir] + literal[string] + identifier[self] . identifier[filename] ,** identifier[requestparams] )
keyword[return] keyword[True] | def delete(self):
"""Delete this file"""
if not self.remote:
if not os.path.exists(self.projectpath + self.basedir + '/' + self.filename):
return False # depends on [control=['if'], data=[]]
else:
os.unlink(self.projectpath + self.basedir + '/' + self.filename)
#Remove metadata
metafile = self.projectpath + self.basedir + '/' + self.metafilename()
if os.path.exists(metafile):
os.unlink(metafile) # depends on [control=['if'], data=[]]
#also remove any .*.INPUTTEMPLATE.* links that pointed to this file: simply remove all dead links
for (linkf, realf) in clam.common.util.globsymlinks(self.projectpath + self.basedir + '/.*.INPUTTEMPLATE.*'):
if not os.path.exists(realf):
os.unlink(linkf) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return True # depends on [control=['if'], data=[]]
else:
if self.client:
requestparams = self.client.initrequest() # depends on [control=['if'], data=[]]
else:
requestparams = {}
requests.delete(self.projectpath + self.basedir + '/' + self.filename, **requestparams)
return True |
def consume_token(self, tokens, index, tokens_len):
    """Consume a token.

    Once the run of consumed tokens has ended, merge them into a single
    comment token and return a tuple of (index of the merged token,
    new token count, updated token list). Returns None while consumption
    is still in progress.
    """
    # The run ends when the current token starts on a later line than the
    # one being consumed, or when we have reached the very last token.
    if tokens[index].line > self.line:
        merge_end = index
    elif index == tokens_len - 1:
        merge_end = index + 1
    else:
        return None

    # Paste the consumed tokens' text together and replace the whole run
    # with one Comment token anchored at the first consumed token.
    first = tokens[self.begin]
    merged_text = "".join(tok.content for tok in tokens[self.begin:merge_end])
    merged = [Token(type=TokenType.Comment,
                    content=merged_text,
                    line=first.line,
                    col=first.col)]
    tokens = _replace_token_range(tokens,
                                  self.begin,
                                  merge_end,
                                  merged)
    return (self.begin, len(tokens), tokens)
constant[Consume a token.
Returns a tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together.
]
variable[finished] assign[=] constant[False]
if compare[call[name[tokens]][name[index]].line greater[>] name[self].line] begin[:]
variable[finished] assign[=] constant[True]
variable[end] assign[=] name[index]
if name[finished] begin[:]
variable[pasted_together_contents] assign[=] constant[]
for taget[name[i]] in starred[call[name[range], parameter[name[self].begin, name[end]]]] begin[:]
<ast.AugAssign object at 0x7da204622f50>
variable[replacement] assign[=] list[[<ast.Call object at 0x7da2046207c0>]]
variable[tokens] assign[=] call[name[_replace_token_range], parameter[name[tokens], name[self].begin, name[end], name[replacement]]]
return[tuple[[<ast.Attribute object at 0x7da204623d60>, <ast.Call object at 0x7da204621f60>, <ast.Name object at 0x7da2046217b0>]]] | keyword[def] identifier[consume_token] ( identifier[self] , identifier[tokens] , identifier[index] , identifier[tokens_len] ):
literal[string]
identifier[finished] = keyword[False]
keyword[if] identifier[tokens] [ identifier[index] ]. identifier[line] > identifier[self] . identifier[line] :
identifier[finished] = keyword[True]
identifier[end] = identifier[index]
keyword[elif] identifier[index] == identifier[tokens_len] - literal[int] :
identifier[finished] = keyword[True]
identifier[end] = identifier[index] + literal[int]
keyword[if] identifier[finished] :
identifier[pasted_together_contents] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[begin] , identifier[end] ):
identifier[pasted_together_contents] += identifier[tokens] [ identifier[i] ]. identifier[content]
identifier[replacement] =[ identifier[Token] ( identifier[type] = identifier[TokenType] . identifier[Comment] ,
identifier[content] = identifier[pasted_together_contents] ,
identifier[line] = identifier[tokens] [ identifier[self] . identifier[begin] ]. identifier[line] ,
identifier[col] = identifier[tokens] [ identifier[self] . identifier[begin] ]. identifier[col] )]
identifier[tokens] = identifier[_replace_token_range] ( identifier[tokens] ,
identifier[self] . identifier[begin] ,
identifier[end] ,
identifier[replacement] )
keyword[return] ( identifier[self] . identifier[begin] , identifier[len] ( identifier[tokens] ), identifier[tokens] ) | def consume_token(self, tokens, index, tokens_len):
"""Consume a token.
Returns a tuple of (tokens, tokens_len, index) when consumption is
completed and tokens have been merged together.
"""
finished = False
if tokens[index].line > self.line:
finished = True
end = index # depends on [control=['if'], data=[]]
elif index == tokens_len - 1:
finished = True
end = index + 1 # depends on [control=['if'], data=['index']]
if finished:
pasted_together_contents = ''
for i in range(self.begin, end):
pasted_together_contents += tokens[i].content # depends on [control=['for'], data=['i']]
replacement = [Token(type=TokenType.Comment, content=pasted_together_contents, line=tokens[self.begin].line, col=tokens[self.begin].col)]
tokens = _replace_token_range(tokens, self.begin, end, replacement)
return (self.begin, len(tokens), tokens) # depends on [control=['if'], data=[]] |
def delete_index(self, attr):
    """Remove the index on the named attribute, if one exists.

    Can be used to drop and rebuild an index, or to convert a
    non-unique index to a unique index, or vice versa.

    @param attr: name of an indexed attribute
    @type attr: string
    """
    if attr in self._indexes:
        self._indexes.pop(attr)
        # Recompute the unique-index cache from the remaining indexes.
        self._uniqueIndexes = [index_obj
                               for index_obj in self._indexes.values()
                               if index_obj.is_unique]
    return self
constant[Deletes an index from the Table. Can be used to drop and rebuild an index,
or to convert a non-unique index to a unique index, or vice versa.
@param attr: name of an indexed attribute
@type attr: string
]
if compare[name[attr] in name[self]._indexes] begin[:]
<ast.Delete object at 0x7da2054a7160>
name[self]._uniqueIndexes assign[=] <ast.ListComp object at 0x7da1b24ae890>
return[name[self]] | keyword[def] identifier[delete_index] ( identifier[self] , identifier[attr] ):
literal[string]
keyword[if] identifier[attr] keyword[in] identifier[self] . identifier[_indexes] :
keyword[del] identifier[self] . identifier[_indexes] [ identifier[attr] ]
identifier[self] . identifier[_uniqueIndexes] =[ identifier[ind] keyword[for] identifier[ind] keyword[in] identifier[self] . identifier[_indexes] . identifier[values] () keyword[if] identifier[ind] . identifier[is_unique] ]
keyword[return] identifier[self] | def delete_index(self, attr):
"""Deletes an index from the Table. Can be used to drop and rebuild an index,
or to convert a non-unique index to a unique index, or vice versa.
@param attr: name of an indexed attribute
@type attr: string
"""
if attr in self._indexes:
del self._indexes[attr]
self._uniqueIndexes = [ind for ind in self._indexes.values() if ind.is_unique] # depends on [control=['if'], data=['attr']]
return self |
def page_elements(self):
    """Find multiple PageElement using element locator

    :returns: list of page element objects
    :rtype: list of toolium.pageelements.PageElement
    """
    # Rebuild the cached list when it is empty, or when web element
    # caching ('save_web_element') is disabled in the configuration.
    if not self._page_elements or not self.config.getboolean_optional('Driver', 'save_web_element'):
        self._page_elements = []
        for position, found_element in enumerate(self.web_elements):
            # One PageElement per found web element, sharing the original
            # locator and remembering its position within the results.
            child = self.page_element_class(self.locator[0], self.locator[1],
                                            parent=self.parent, order=position)
            child.reset_object(self.driver_wrapper)
            child._web_element = found_element
            self._page_elements.append(child)
    return self._page_elements
constant[Find multiple PageElement using element locator
:returns: list of page element objects
:rtype: list of toolium.pageelements.PageElement
]
if <ast.BoolOp object at 0x7da1b1d4e7a0> begin[:]
name[self]._page_elements assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b1d4fe20>, <ast.Name object at 0x7da1b1d4fcd0>]]] in starred[call[name[enumerate], parameter[name[self].web_elements]]] begin[:]
variable[page_element] assign[=] call[name[self].page_element_class, parameter[call[name[self].locator][constant[0]], call[name[self].locator][constant[1]]]]
call[name[page_element].reset_object, parameter[name[self].driver_wrapper]]
name[page_element]._web_element assign[=] name[web_element]
call[name[self]._page_elements.append, parameter[name[page_element]]]
return[name[self]._page_elements] | keyword[def] identifier[page_elements] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_page_elements] keyword[or] keyword[not] identifier[self] . identifier[config] . identifier[getboolean_optional] ( literal[string] , literal[string] ):
identifier[self] . identifier[_page_elements] =[]
keyword[for] identifier[order] , identifier[web_element] keyword[in] identifier[enumerate] ( identifier[self] . identifier[web_elements] ):
identifier[page_element] = identifier[self] . identifier[page_element_class] ( identifier[self] . identifier[locator] [ literal[int] ], identifier[self] . identifier[locator] [ literal[int] ], identifier[parent] = identifier[self] . identifier[parent] ,
identifier[order] = identifier[order] )
identifier[page_element] . identifier[reset_object] ( identifier[self] . identifier[driver_wrapper] )
identifier[page_element] . identifier[_web_element] = identifier[web_element]
identifier[self] . identifier[_page_elements] . identifier[append] ( identifier[page_element] )
keyword[return] identifier[self] . identifier[_page_elements] | def page_elements(self):
"""Find multiple PageElement using element locator
:returns: list of page element objects
:rtype: list of toolium.pageelements.PageElement
"""
if not self._page_elements or not self.config.getboolean_optional('Driver', 'save_web_element'):
self._page_elements = []
for (order, web_element) in enumerate(self.web_elements):
# Create multiple PageElement with original locator and order
page_element = self.page_element_class(self.locator[0], self.locator[1], parent=self.parent, order=order)
page_element.reset_object(self.driver_wrapper)
page_element._web_element = web_element
self._page_elements.append(page_element) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
return self._page_elements |
def add_user(self, name, password=None, read_only=None, **kwargs):
    """Create user `name` with password `password`.

    Adds a new user with permissions for this :class:`Database`.

    .. note:: Will change the password if user `name` already exists.

    :Parameters:
      - `name`: the name of the user to create
      - `password` (optional): the password of the user to create. Can not
        be used with the ``userSource`` argument.
      - `read_only` (optional): if ``True`` the user will be read only
      - `**kwargs` (optional): optional fields for the user document
        (e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See
        `<http://docs.mongodb.org/manual/reference/privilege-documents>`_
        for more information.

    .. note:: The use of optional keyword arguments like ``userSource``,
       ``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0

    .. versionchanged:: 2.5
       Added kwargs support for optional fields introduced in MongoDB 2.4

    .. versionchanged:: 2.2
       Added support for read only users
    """
    if not isinstance(name, string_type):
        raise TypeError("name must be an instance of %s"
                        % (string_type.__name__,))
    if password is not None:
        if not isinstance(password, string_type):
            raise TypeError("password must be an instance of %s"
                            % (string_type.__name__,))
        if not password:
            raise ValueError("password can't be empty")
    if read_only is not None:
        read_only = common.validate_boolean('read_only', read_only)
        if 'roles' in kwargs:
            raise ConfigurationError("Can not use read_only and roles together")
    try:
        user_info = self.command("usersInfo", name)
        # Create the user when usersInfo found nothing; update otherwise.
        self._create_or_update_user(
            (not user_info["users"]), name, password, read_only, **kwargs)
    except OperationFailure as exc:
        if exc.code in common.COMMAND_NOT_FOUND_CODES:
            # MongoDB >= 2.5.3 manages users through commands; older
            # servers need the legacy system.users write path instead.
            self._legacy_add_user(name, password, read_only, **kwargs)
        elif exc.code == 13:
            # Unauthorized. Attempt to create the user anyway, relying on
            # the localhost exception.
            self._create_or_update_user(
                True, name, password, read_only, **kwargs)
        else:
            raise
constant[Create user `name` with password `password`.
Add a new user with permissions for this :class:`Database`.
.. note:: Will change the password if user `name` already exists.
:Parameters:
- `name`: the name of the user to create
- `password` (optional): the password of the user to create. Can not
be used with the ``userSource`` argument.
- `read_only` (optional): if ``True`` the user will be read only
- `**kwargs` (optional): optional fields for the user document
(e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See
`<http://docs.mongodb.org/manual/reference/privilege-documents>`_
for more information.
.. note:: The use of optional keyword arguments like ``userSource``,
``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0
.. versionchanged:: 2.5
Added kwargs support for optional fields introduced in MongoDB 2.4
.. versionchanged:: 2.2
Added support for read only users
]
if <ast.UnaryOp object at 0x7da20c7ca410> begin[:]
<ast.Raise object at 0x7da20c7c9390>
if compare[name[password] is_not constant[None]] begin[:]
if <ast.UnaryOp object at 0x7da20c7c9ab0> begin[:]
<ast.Raise object at 0x7da20c7c9090>
if compare[call[name[len], parameter[name[password]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20c7cb670>
if compare[name[read_only] is_not constant[None]] begin[:]
variable[read_only] assign[=] call[name[common].validate_boolean, parameter[constant[read_only], name[read_only]]]
if compare[constant[roles] in name[kwargs]] begin[:]
<ast.Raise object at 0x7da20c7cb460>
<ast.Try object at 0x7da20c7c9bd0> | keyword[def] identifier[add_user] ( identifier[self] , identifier[name] , identifier[password] = keyword[None] , identifier[read_only] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[name] , identifier[string_type] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] %( identifier[string_type] . identifier[__name__] ,))
keyword[if] identifier[password] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[password] , identifier[string_type] ):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] %( identifier[string_type] . identifier[__name__] ,))
keyword[if] identifier[len] ( identifier[password] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[read_only] keyword[is] keyword[not] keyword[None] :
identifier[read_only] = identifier[common] . identifier[validate_boolean] ( literal[string] , identifier[read_only] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
keyword[raise] identifier[ConfigurationError] ( literal[string]
literal[string] )
keyword[try] :
identifier[uinfo] = identifier[self] . identifier[command] ( literal[string] , identifier[name] )
identifier[self] . identifier[_create_or_update_user] (
( keyword[not] identifier[uinfo] [ literal[string] ]), identifier[name] , identifier[password] , identifier[read_only] ,** identifier[kwargs] )
keyword[except] identifier[OperationFailure] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[code] keyword[in] identifier[common] . identifier[COMMAND_NOT_FOUND_CODES] :
identifier[self] . identifier[_legacy_add_user] ( identifier[name] , identifier[password] , identifier[read_only] ,** identifier[kwargs] )
keyword[return]
keyword[elif] identifier[exc] . identifier[code] == literal[int] :
identifier[self] . identifier[_create_or_update_user] (
keyword[True] , identifier[name] , identifier[password] , identifier[read_only] ,** identifier[kwargs] )
keyword[else] :
keyword[raise] | def add_user(self, name, password=None, read_only=None, **kwargs):
"""Create user `name` with password `password`.
Add a new user with permissions for this :class:`Database`.
.. note:: Will change the password if user `name` already exists.
:Parameters:
- `name`: the name of the user to create
- `password` (optional): the password of the user to create. Can not
be used with the ``userSource`` argument.
- `read_only` (optional): if ``True`` the user will be read only
- `**kwargs` (optional): optional fields for the user document
(e.g. ``userSource``, ``otherDBRoles``, or ``roles``). See
`<http://docs.mongodb.org/manual/reference/privilege-documents>`_
for more information.
.. note:: The use of optional keyword arguments like ``userSource``,
``otherDBRoles``, or ``roles`` requires MongoDB >= 2.4.0
.. versionchanged:: 2.5
Added kwargs support for optional fields introduced in MongoDB 2.4
.. versionchanged:: 2.2
Added support for read only users
"""
if not isinstance(name, string_type):
raise TypeError('name must be an instance of %s' % (string_type.__name__,)) # depends on [control=['if'], data=[]]
if password is not None:
if not isinstance(password, string_type):
raise TypeError('password must be an instance of %s' % (string_type.__name__,)) # depends on [control=['if'], data=[]]
if len(password) == 0:
raise ValueError("password can't be empty") # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['password']]
if read_only is not None:
read_only = common.validate_boolean('read_only', read_only)
if 'roles' in kwargs:
raise ConfigurationError('Can not use read_only and roles together') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['read_only']]
try:
uinfo = self.command('usersInfo', name)
# Create the user if not found in uinfo, otherwise update one.
self._create_or_update_user(not uinfo['users'], name, password, read_only, **kwargs) # depends on [control=['try'], data=[]]
except OperationFailure as exc:
# MongoDB >= 2.5.3 requires the use of commands to manage
# users.
if exc.code in common.COMMAND_NOT_FOUND_CODES:
self._legacy_add_user(name, password, read_only, **kwargs)
return # depends on [control=['if'], data=[]]
# Unauthorized. Attempt to create the user in case of
# localhost exception.
elif exc.code == 13:
self._create_or_update_user(True, name, password, read_only, **kwargs) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=['exc']] |
def update_stampfile_hook(self, dependencies):  # suppress(no-self-use)
    """Loop over all dependencies and store hash for each of them."""
    # Only dependencies that still exist on disk get a recorded hash.
    current_hashes = dict()
    for dependency in dependencies:
        if os.path.exists(dependency):
            current_hashes[dependency] = _sha1_for_file(dependency)
    encoded = json.dumps(current_hashes).encode("utf-8")
    with open(self._stamp_file_hashes_path, "wb") as stamp_hashes_file:
        stamp_hashes_file.write(encoded)
constant[Loop over all dependencies and store hash for each of them.]
variable[hashes] assign[=] <ast.DictComp object at 0x7da2054a50f0>
with call[name[open], parameter[name[self]._stamp_file_hashes_path, constant[wb]]] begin[:]
call[name[hashes_file].write, parameter[call[call[name[json].dumps, parameter[name[hashes]]].encode, parameter[constant[utf-8]]]]] | keyword[def] identifier[update_stampfile_hook] ( identifier[self] , identifier[dependencies] ):
literal[string]
identifier[hashes] ={ identifier[d] : identifier[_sha1_for_file] ( identifier[d] ) keyword[for] identifier[d] keyword[in] identifier[dependencies]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[d] )}
keyword[with] identifier[open] ( identifier[self] . identifier[_stamp_file_hashes_path] , literal[string] ) keyword[as] identifier[hashes_file] :
identifier[hashes_file] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[hashes] ). identifier[encode] ( literal[string] )) | def update_stampfile_hook(self, dependencies): # suppress(no-self-use)
'Loop over all dependencies and store hash for each of them.'
hashes = {d: _sha1_for_file(d) for d in dependencies if os.path.exists(d)}
with open(self._stamp_file_hashes_path, 'wb') as hashes_file:
hashes_file.write(json.dumps(hashes).encode('utf-8')) # depends on [control=['with'], data=['hashes_file']] |
def nguHanhNapAm(diaChi, thienCan, xuatBanMenh=False):
    """Sử dụng Ngũ Hành nạp âm để tính Hành của năm.
    Args:
        diaChi (integer): Số thứ tự của địa chi (Tý=1, Sửu=2,...)
        thienCan (integer): Số thứ tự của thiên can (Giáp=1, Ất=2,...)
    Returns:
        Trả về chữ viết tắt Hành của năm (K, T, H, O, M)
    """
    # English summary: look up the "nạp âm" element of a year from its
    # earthly branch (diaChi, rows 1-12) and heavenly stem (thienCan,
    # columns 1-10). Returns the element's one-letter code (K/T/H/O/M),
    # or the full destiny-name string when xuatBanMenh is True.
    # NOTE: the docstring above doubles as the runtime error message
    # (``Exception(nguHanhNapAm.__doc__)``), so it is kept verbatim.
    banMenh = {
        "K1": "HẢI TRUNG KIM",
        "T1": "GIÁNG HẠ THỦY",
        "H1": "TÍCH LỊCH HỎA",
        "O1": "BÍCH THƯỢNG THỔ",
        "M1": "TANG ÐỐ MỘC",
        "T2": "ÐẠI KHÊ THỦY",
        "H2": "LƯ TRUNG HỎA",
        "O2": "THÀNH ÐẦU THỔ",
        "M2": "TÒNG BÁ MỘC",
        "K2": "KIM BẠCH KIM",
        "H3": "PHÚ ÐĂNG HỎA",
        "O3": "SA TRUNG THỔ",
        "M3": "ÐẠI LÂM MỘC",
        "K3": "BẠCH LẠP KIM",
        "T3": "TRƯỜNG LƯU THỦY",
        "K4": "SA TRUNG KIM",
        "T4": "THIÊN HÀ THỦY",
        "H4": "THIÊN THƯỢNG HỎA",
        "O4": "LỘ BÀN THỔ",
        "M4": "DƯƠNG LIỄU MỘC",
        "T5": "TRUYỀN TRUNG THỦY",
        "H5": "SƠN HẠ HỎA",
        "O5": "ÐẠI TRẠCH THỔ",
        "M5": "THẠCH LỰU MỘC",
        "K5": "KIẾM PHONG KIM",
        "H6": "SƠN ÐẦU HỎA",
        "O6": "ỐC THƯỢNG THỔ",
        "M6": "BÌNH ÐỊA MỘC",
        "K6": "XOA XUYẾN KIM",
        "T6": "ÐẠI HẢI THỦY"}
    # Row 0 / column 0 hold header labels; valid (branch, stem) pairs map
    # to a short key like "K1"; impossible combinations hold False.
    matranNapAm = [
        [0, "G", "Ất", "Bính", "Đinh", "Mậu", "Kỷ", "Canh", "Tân", "N", "Q"],
        [1, "K1", False, "T1", False, "H1", False, "O1", False, "M1", False],
        [2, False, "K1", False, "T1", False, "H1", False, "O1", False, "M1"],
        [3, "T2", False, "H2", False, "O2", False, "M2", False, "K2", False],
        [4, False, "T2", False, "H2", False, "O2", False, "M2", False, "K2"],
        [5, "H3", False, "O3", False, "M3", False, "K3", False, "T3", False],
        [6, False, "H3", False, "O3", False, "M3", False, "K3", False, "T3"],
        [7, "K4", False, "T4", False, "H4", False, "O4", False, "M4", False],
        [8, False, "K4", False, "T4", False, "H4", False, "O4", False, "M4"],
        [9, "T5", False, "H5", False, "O5", False, "M5", False, "K5", False],
        [10, False, "T5", False, "H5", False, "O5", False, "M5", False, "K5"],
        [11, "H6", False, "O6", False, "M6", False, "K6", False, "T6", False],
        [12, False, "H6", False, "O6", False, "M6", False, "K6", False, "T6"]
    ]
    # Was a bare ``except:`` that silently converted *every* error —
    # including unrelated programming mistakes — into the generic usage
    # Exception. Catch only the failures the lookup can legitimately
    # produce: out-of-range or non-integer indices.
    try:
        nh = matranNapAm[diaChi][thienCan]
    except (IndexError, TypeError):
        raise Exception(nguHanhNapAm.__doc__)
    if nh is False:
        # Impossible stem/branch combination (a False cell in the matrix).
        raise Exception(nguHanhNapAm.__doc__)
    if nh[0] in ["K", "M", "T", "H", "O"]:
        if xuatBanMenh is True:
            return banMenh[nh]
        else:
            return nh[0]
constant[Sử dụng Ngũ Hành nạp âm để tính Hành của năm.
Args:
diaChi (integer): Số thứ tự của địa chi (Tý=1, Sửu=2,...)
thienCan (integer): Số thứ tự của thiên can (Giáp=1, Ất=2,...)
Returns:
Trả về chữ viết tắt Hành của năm (K, T, H, O, M)
]
variable[banMenh] assign[=] dictionary[[<ast.Constant object at 0x7da204961cc0>, <ast.Constant object at 0x7da204963d00>, <ast.Constant object at 0x7da204960e50>, <ast.Constant object at 0x7da204963940>, <ast.Constant object at 0x7da204963850>, <ast.Constant object at 0x7da204960a30>, <ast.Constant object at 0x7da204961ba0>, <ast.Constant object at 0x7da204961e70>, <ast.Constant object at 0x7da2049629b0>, <ast.Constant object at 0x7da2049619f0>, <ast.Constant object at 0x7da204963e50>, <ast.Constant object at 0x7da2049631f0>, <ast.Constant object at 0x7da204963ca0>, <ast.Constant object at 0x7da204962500>, <ast.Constant object at 0x7da204960df0>, <ast.Constant object at 0x7da204963df0>, <ast.Constant object at 0x7da2049628f0>, <ast.Constant object at 0x7da204963c40>, <ast.Constant object at 0x7da2049609a0>, <ast.Constant object at 0x7da2049632e0>, <ast.Constant object at 0x7da204963ee0>, <ast.Constant object at 0x7da204961630>, <ast.Constant object at 0x7da204960190>, <ast.Constant object at 0x7da204961db0>, <ast.Constant object at 0x7da204961450>, <ast.Constant object at 0x7da204960820>, <ast.Constant object at 0x7da204961d50>, <ast.Constant object at 0x7da204962350>, <ast.Constant object at 0x7da204962cb0>, <ast.Constant object at 0x7da204962860>], [<ast.Constant object at 0x7da204962b30>, <ast.Constant object at 0x7da204962380>, <ast.Constant object at 0x7da2049607c0>, <ast.Constant object at 0x7da204963af0>, <ast.Constant object at 0x7da204960760>, <ast.Constant object at 0x7da204963d30>, <ast.Constant object at 0x7da204963f40>, <ast.Constant object at 0x7da2049616c0>, <ast.Constant object at 0x7da204961a50>, <ast.Constant object at 0x7da2049600d0>, <ast.Constant object at 0x7da204961d80>, <ast.Constant object at 0x7da204962e60>, <ast.Constant object at 0x7da2049623e0>, <ast.Constant object at 0x7da204963340>, <ast.Constant object at 0x7da204960bb0>, <ast.Constant object at 0x7da204961570>, <ast.Constant object at 0x7da204960310>, <ast.Constant object at 
0x7da204960cd0>, <ast.Constant object at 0x7da204962e00>, <ast.Constant object at 0x7da204960040>, <ast.Constant object at 0x7da2049605b0>, <ast.Constant object at 0x7da204960b50>, <ast.Constant object at 0x7da204961c00>, <ast.Constant object at 0x7da204961150>, <ast.Constant object at 0x7da204962a70>, <ast.Constant object at 0x7da204961360>, <ast.Constant object at 0x7da2049606d0>, <ast.Constant object at 0x7da204962d70>, <ast.Constant object at 0x7da204960ee0>, <ast.Constant object at 0x7da204961120>]]
variable[matranNapAm] assign[=] list[[<ast.List object at 0x7da204960460>, <ast.List object at 0x7da2049638e0>, <ast.List object at 0x7da204962c50>, <ast.List object at 0x7da204960100>, <ast.List object at 0x7da2049614b0>, <ast.List object at 0x7da20c6c6980>, <ast.List object at 0x7da20c6c6860>, <ast.List object at 0x7da20c6c7640>, <ast.List object at 0x7da20c6c56f0>, <ast.List object at 0x7da20c6c7eb0>, <ast.List object at 0x7da20c6c5d50>, <ast.List object at 0x7da20c6c7cd0>, <ast.List object at 0x7da20c6c7400>]]
<ast.Try object at 0x7da20c6c6c50> | keyword[def] identifier[nguHanhNapAm] ( identifier[diaChi] , identifier[thienCan] , identifier[xuatBanMenh] = keyword[False] ):
literal[string]
identifier[banMenh] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] }
identifier[matranNapAm] =[
[ literal[int] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ],
[ literal[int] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] ],
[ literal[int] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] ],
[ literal[int] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] ],
[ literal[int] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] ],
[ literal[int] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] ],
[ literal[int] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] ],
[ literal[int] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] ],
[ literal[int] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] ],
[ literal[int] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] ],
[ literal[int] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] ],
[ literal[int] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] ],
[ literal[int] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] , keyword[False] , literal[string] ]
]
keyword[try] :
identifier[nh] = identifier[matranNapAm] [ identifier[diaChi] ][ identifier[thienCan] ]
keyword[if] identifier[nh] [ literal[int] ] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[xuatBanMenh] keyword[is] keyword[True] :
keyword[return] identifier[banMenh] [ identifier[nh] ]
keyword[else] :
keyword[return] identifier[nh] [ literal[int] ]
keyword[except] :
keyword[raise] identifier[Exception] ( identifier[nguHanhNapAm] . identifier[__doc__] ) | def nguHanhNapAm(diaChi, thienCan, xuatBanMenh=False):
"""Sử dụng Ngũ Hành nạp âm để tính Hành của năm.
Args:
diaChi (integer): Số thứ tự của địa chi (Tý=1, Sửu=2,...)
thienCan (integer): Số thứ tự của thiên can (Giáp=1, Ất=2,...)
Returns:
Trả về chữ viết tắt Hành của năm (K, T, H, O, M)
"""
banMenh = {'K1': 'HẢI TRUNG KIM', 'T1': 'GIÁNG HẠ THỦY', 'H1': 'TÍCH LỊCH HỎA', 'O1': 'BÍCH THƯỢNG THỔ', 'M1': 'TANG ÐỐ MỘC', 'T2': 'ÐẠI KHÊ THỦY', 'H2': 'LƯ TRUNG HỎA', 'O2': 'THÀNH ÐẦU THỔ', 'M2': 'TÒNG BÁ MỘC', 'K2': 'KIM BẠCH KIM', 'H3': 'PHÚ ÐĂNG HỎA', 'O3': 'SA TRUNG THỔ', 'M3': 'ÐẠI LÂM MỘC', 'K3': 'BẠCH LẠP KIM', 'T3': 'TRƯỜNG LƯU THỦY', 'K4': 'SA TRUNG KIM', 'T4': 'THIÊN HÀ THỦY', 'H4': 'THIÊN THƯỢNG HỎA', 'O4': 'LỘ BÀN THỔ', 'M4': 'DƯƠNG LIỄU MỘC', 'T5': 'TRUYỀN TRUNG THỦY', 'H5': 'SƠN HẠ HỎA', 'O5': 'ÐẠI TRẠCH THỔ', 'M5': 'THẠCH LỰU MỘC', 'K5': 'KIẾM PHONG KIM', 'H6': 'SƠN ÐẦU HỎA', 'O6': 'ỐC THƯỢNG THỔ', 'M6': 'BÌNH ÐỊA MỘC', 'K6': 'XOA XUYẾN KIM', 'T6': 'ÐẠI HẢI THỦY'}
matranNapAm = [[0, 'G', 'Ất', 'Bính', 'Đinh', 'Mậu', 'Kỷ', 'Canh', 'Tân', 'N', 'Q'], [1, 'K1', False, 'T1', False, 'H1', False, 'O1', False, 'M1', False], [2, False, 'K1', False, 'T1', False, 'H1', False, 'O1', False, 'M1'], [3, 'T2', False, 'H2', False, 'O2', False, 'M2', False, 'K2', False], [4, False, 'T2', False, 'H2', False, 'O2', False, 'M2', False, 'K2'], [5, 'H3', False, 'O3', False, 'M3', False, 'K3', False, 'T3', False], [6, False, 'H3', False, 'O3', False, 'M3', False, 'K3', False, 'T3'], [7, 'K4', False, 'T4', False, 'H4', False, 'O4', False, 'M4', False], [8, False, 'K4', False, 'T4', False, 'H4', False, 'O4', False, 'M4'], [9, 'T5', False, 'H5', False, 'O5', False, 'M5', False, 'K5', False], [10, False, 'T5', False, 'H5', False, 'O5', False, 'M5', False, 'K5'], [11, 'H6', False, 'O6', False, 'M6', False, 'K6', False, 'T6', False], [12, False, 'H6', False, 'O6', False, 'M6', False, 'K6', False, 'T6']]
try:
nh = matranNapAm[diaChi][thienCan]
if nh[0] in ['K', 'M', 'T', 'H', 'O']:
if xuatBanMenh is True:
return banMenh[nh] # depends on [control=['if'], data=[]]
else:
return nh[0] # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
raise Exception(nguHanhNapAm.__doc__) # depends on [control=['except'], data=[]] |
def set_default(self, name, value):
    """Update the default of the flag registered under ``name``.

    If the flag still holds its default (it was neither supplied on the
    command line nor assigned via ``FLAGS.name = value``), its current
    value follows the new default.

    Args:
      name: str, name of the flag whose default is replaced.
      value: the replacement default value.

    Raises:
      UnrecognizedFlagError: Raised when there is no registered flag named name.
      IllegalFlagValueError: Raised when value is not valid.
    """
    registered = self._flags()
    flag = registered.get(name)
    if flag is None:
        # Unknown flag: delegate to the unknown-flag hook rather than failing here.
        self._set_unknown_flag(name, value)
        return
    flag._set_default(value)  # pylint: disable=protected-access
    self._assert_validators(flag.validators)
constant[Changes the default value of the named flag object.
The flag's current value is also updated if the flag is currently using
the default value, i.e. not specified in the command line, and not set
by FLAGS.name = value.
Args:
name: str, the name of the flag to modify.
value: The new default value.
Raises:
UnrecognizedFlagError: Raised when there is no registered flag named name.
IllegalFlagValueError: Raised when value is not valid.
]
variable[fl] assign[=] call[name[self]._flags, parameter[]]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[fl]] begin[:]
call[name[self]._set_unknown_flag, parameter[name[name], name[value]]]
return[None]
call[call[name[fl]][name[name]]._set_default, parameter[name[value]]]
call[name[self]._assert_validators, parameter[call[name[fl]][name[name]].validators]] | keyword[def] identifier[set_default] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
identifier[fl] = identifier[self] . identifier[_flags] ()
keyword[if] identifier[name] keyword[not] keyword[in] identifier[fl] :
identifier[self] . identifier[_set_unknown_flag] ( identifier[name] , identifier[value] )
keyword[return]
identifier[fl] [ identifier[name] ]. identifier[_set_default] ( identifier[value] )
identifier[self] . identifier[_assert_validators] ( identifier[fl] [ identifier[name] ]. identifier[validators] ) | def set_default(self, name, value):
"""Changes the default value of the named flag object.
The flag's current value is also updated if the flag is currently using
the default value, i.e. not specified in the command line, and not set
by FLAGS.name = value.
Args:
name: str, the name of the flag to modify.
value: The new default value.
Raises:
UnrecognizedFlagError: Raised when there is no registered flag named name.
IllegalFlagValueError: Raised when value is not valid.
"""
fl = self._flags()
if name not in fl:
self._set_unknown_flag(name, value)
return # depends on [control=['if'], data=['name']]
fl[name]._set_default(value) # pylint: disable=protected-access
self._assert_validators(fl[name].validators) |
def get_product_value(self, value_name, wanted_type=None):
    '''
    Look up ``value_name`` under the product key of the registry.

    Args:
        value_name (str): Registry value name, optionally prefixed with a
            subkey path (``subkey\\name``).
        wanted_type (str):
            Restrict the result to a registry type family; supported
            values are ``str`` ``int`` ``list`` ``bytes``. A type
            mismatch yields ``None``.

    Returns:
        value: The registry value, or ``None`` when the product handle is
        not open, the value does not exist, or its type does not match
        ``wanted_type``.
    '''
    if not self.__reg_products_handle:
        return None
    subkey, leaf_name = os.path.split(value_name)
    try:
        if not subkey:
            # The value lives directly under the product key.
            item_value, item_type = \
                win32api.RegQueryValueEx(self.__reg_products_handle, value_name)  # pylint: disable=no-member
        else:
            # Open the intermediate subkey, query the leaf, then release the handle.
            subkey_handle = win32api.RegOpenKeyEx(  # pylint: disable=no-member
                self.__reg_products_handle,
                subkey,
                0,
                win32con.KEY_READ | self.__reg_32bit_access)
            item_value, item_type = self.__reg_query_value(subkey_handle, leaf_name)
            win32api.RegCloseKey(subkey_handle)  # pylint: disable=no-member
    except pywintypes.error as exc:  # pylint: disable=no-member
        if exc.winerror != winerror.ERROR_FILE_NOT_FOUND:
            raise
        # Value (or its subkey) simply is not there.
        return None
    if wanted_type and item_type not in self.__reg_types[wanted_type]:
        return None
    return item_value
constant[
For the product section of the registry return the name value.
Args:
value_name (str): Registry value name.
wanted_type (str):
The type of value wanted if the type does not match
None is return. wanted_type support values are
``str`` ``int`` ``list`` ``bytes``.
Returns:
value: Value requested or ``None`` if not found.
]
if <ast.UnaryOp object at 0x7da1b2194040> begin[:]
return[constant[None]]
<ast.Tuple object at 0x7da1b2194fa0> assign[=] call[name[os].path.split, parameter[name[value_name]]]
<ast.Try object at 0x7da1b2194160>
if <ast.BoolOp object at 0x7da1b2194460> begin[:]
variable[item_value] assign[=] constant[None]
return[name[item_value]] | keyword[def] identifier[get_product_value] ( identifier[self] , identifier[value_name] , identifier[wanted_type] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[__reg_products_handle] :
keyword[return] keyword[None]
identifier[subkey] , identifier[search_value_name] = identifier[os] . identifier[path] . identifier[split] ( identifier[value_name] )
keyword[try] :
keyword[if] identifier[subkey] :
identifier[handle] = identifier[win32api] . identifier[RegOpenKeyEx] (
identifier[self] . identifier[__reg_products_handle] ,
identifier[subkey] ,
literal[int] ,
identifier[win32con] . identifier[KEY_READ] | identifier[self] . identifier[__reg_32bit_access] )
identifier[item_value] , identifier[item_type] = identifier[self] . identifier[__reg_query_value] ( identifier[handle] , identifier[search_value_name] )
identifier[win32api] . identifier[RegCloseKey] ( identifier[handle] )
keyword[else] :
identifier[item_value] , identifier[item_type] = identifier[win32api] . identifier[RegQueryValueEx] ( identifier[self] . identifier[__reg_products_handle] , identifier[value_name] )
keyword[except] identifier[pywintypes] . identifier[error] keyword[as] identifier[exc] :
keyword[if] identifier[exc] . identifier[winerror] == identifier[winerror] . identifier[ERROR_FILE_NOT_FOUND] :
keyword[return] keyword[None]
keyword[raise]
keyword[if] identifier[wanted_type] keyword[and] identifier[item_type] keyword[not] keyword[in] identifier[self] . identifier[__reg_types] [ identifier[wanted_type] ]:
identifier[item_value] = keyword[None]
keyword[return] identifier[item_value] | def get_product_value(self, value_name, wanted_type=None):
"""
For the product section of the registry return the name value.
Args:
value_name (str): Registry value name.
wanted_type (str):
The type of value wanted if the type does not match
None is return. wanted_type support values are
``str`` ``int`` ``list`` ``bytes``.
Returns:
value: Value requested or ``None`` if not found.
"""
if not self.__reg_products_handle:
return None # depends on [control=['if'], data=[]]
(subkey, search_value_name) = os.path.split(value_name)
try:
if subkey: # pylint: disable=no-member
handle = win32api.RegOpenKeyEx(self.__reg_products_handle, subkey, 0, win32con.KEY_READ | self.__reg_32bit_access)
(item_value, item_type) = self.__reg_query_value(handle, search_value_name)
win32api.RegCloseKey(handle) # pylint: disable=no-member # depends on [control=['if'], data=[]]
else:
(item_value, item_type) = win32api.RegQueryValueEx(self.__reg_products_handle, value_name) # pylint: disable=no-member # depends on [control=['try'], data=[]]
except pywintypes.error as exc: # pylint: disable=no-member
if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
# Not Found
return None # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=['exc']]
if wanted_type and item_type not in self.__reg_types[wanted_type]:
item_value = None # depends on [control=['if'], data=[]]
return item_value |
def bar(*args, **kwargs):
    """
    Creates a bar plot, with white outlines and a fill color that defaults to
    the first teal-ish green in ColorBrewer's Set2. Optionally accepts
    grid='y' or grid='x' to draw a white grid over the bars,
    to show the scale. Almost like "erasing" some of the plot,
    but it adds more information!
    Can also add an annotation of the height of the barplots directly onto
    the bars with the `annotate` parameter, which can either be True,
    which will annotate the values, or a list of strings, which will annotate
    with the supplied strings.
    Can support stacked bars with the value of each stack shown on the stack
    (Added by Salil Banerjee)
    @param ax: matplotlib.axes instance
    @param left: Vector of values of where to put the left side of the bar
    @param height: Vector of values of the bar heights
    @param kwargs: Besides xticklabels, which is a prettyplotlib-specific
    argument, any additional arguments to matplotlib.bar(): http://matplotlib
    .org/api/axes_api.html#matplotlib.axes.Axes.bar is accepted.
    """
    # Split an optional axes instance out of args/kwargs; the remaining
    # positional args are forwarded to ax.bar() below.
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)
    kwargs.setdefault('color', set2[0])
    kwargs.setdefault('edgecolor', 'white')
    # Half the bar width, used to center the xticks under the bars.
    # 0.4 matches half of matplotlib's default bar width of 0.8.
    middle = 0.4 if 'width' not in kwargs else kwargs['width']/2.0
    # Check if data contains stacks
    stacked = kwargs.pop('stacked',False)
    # Check if stack text should be included
    stack_text = kwargs.pop('stack_text',False)
    # Get legend if available
    legend = kwargs.pop('legend',False)
    left = args[0]
    # In stacked mode this is a 2-D array: one row per stack layer.
    height = np.array(args[1])
    # Label each individual bar, if xticklabels is provided
    # (sic: the local name is misspelled "xtickabels" but used consistently)
    xtickabels = kwargs.pop('xticklabels', None)
    # left+0.4 is the center of the bar
    xticks = np.array(left) + middle
    # Whether or not to annotate each bar with the height value
    annotate = kwargs.pop('annotate', False)
    show_ticks = kwargs.pop('show_ticks', False)
    # If no grid specified, don't draw one.
    grid = kwargs.pop('grid', None)
    cmap = kwargs.pop('cmap', False)
    # A colormap overrides the flat Set2 fill: outline bars in near-black
    # and (for unstacked data) map each bar's height onto the colormap.
    if cmap:
        kwargs['edgecolor'] = almost_black
        if not stacked:
            kwargs['color'] = getcolors(cmap, height, 0)
    # Check if stacked and plot data accordingly
    color = kwargs.get('color', None)
    if stacked:
        # height is (num_stacks, num_data); draw one bar layer per stack,
        # each resting on the running per-column total kept in `bottom`.
        num_stacks, num_data = height.shape
        bottom = np.zeros(num_data)
        for i in np.arange(num_stacks):
            lst = list(args)
            lst[1] = height[i]
            args = tuple(lst)
            # make sure number of user specified colors equals to the stacks
            if not color or len(color) != num_stacks:
                if cmap:
                    kwargs['color'] = getcolors(cmap, height[i], i)
                else:
                    kwargs['color'] = set2[i]
            else:
                kwargs['color'] = color[i]
            kwargs['bottom'] = bottom
            rectangles = ax.bar(*args, **kwargs)
            bottom += height[i]
    else:
        rectangles = ax.bar(*args, **kwargs)
    # add legend
    # NOTE(review): collections.Iterable was removed in Python 3.10;
    # modern code should test against collections.abc.Iterable.
    if isinstance(legend, collections.Iterable):
        ax.legend(legend,loc='upper center',bbox_to_anchor=(0.5,1.11), ncol=5)
    # add whitespace padding on left
    xmin, xmax = ax.get_xlim()
    xmin -= 0.2
    if stacked:
        xmax = num_data
    ax.set_xlim(xmin, xmax)
    # If the user is only plotting one bar, make it an iterable
    if not isinstance(height, collections.Iterable):
        height = [height]
    # If there are negative counts, remove the bottom axes
    # and add a line at y=0
    if any(h < 0 for h in height.tolist()):
        axes_to_remove = ['top', 'right', 'bottom']
        # The bottom spine is removed, so draw an explicit baseline at y=0.
        ax.hlines(y=0, xmin=xmin, xmax=xmax,
                  linewidths=0.75)
    else:
        axes_to_remove = ['top', 'right']
    # Remove excess axes
    remove_chartjunk(ax, axes_to_remove, grid=grid, show_ticks=show_ticks)
    if stacked:
        # Keep the per-layer values for stack_text, and collapse `height`
        # to per-column totals (used by the annotations below).
        data = height
        height = height.sum(axis=0)
    # Add the xticklabels if they are there
    if xtickabels is not None:
        ax.set_xticks(xticks)
        ax.set_xticklabels(xtickabels)
    if annotate or isinstance(annotate, collections.Iterable):
        annotate_yrange_factor = 0.025
        ymin, ymax = ax.get_ylim()
        yrange = ymax - ymin
        # Reset ymax and ymin so there's enough room to see the annotation of
        # the top-most
        if ymax > 0:
            ymax += yrange * 0.1
        if ymin < 0:
            ymin -= yrange * 0.1
        ax.set_ylim(ymin, ymax)
        yrange = ymax - ymin
        # On a log-scaled y axis, compute the annotation offset in log space.
        if kwargs.get('log') == True:
            offset_ = np.log(yrange) * annotate_yrange_factor
        else:
            offset_ = yrange * annotate_yrange_factor
        # annotate may be a list of custom strings, or True to print heights.
        # NOTE(review): np.float_ was removed in NumPy 2.0 (use np.float64).
        if isinstance(annotate, collections.Iterable):
            annotations = map(str, annotate)
        else:
            annotations = ['%.3f' % h if type(h) is np.float_ else str(h)
                           for h in height]
        for x, h, annotation in zip(xticks, height, annotations):
            # Adjust the offset to account for negative bars
            offset = offset_ if h >= 0 else -1 * offset_
            verticalalignment = 'bottom' if h >= 0 else 'top'
            # Finally, add the text to the axes
            ax.annotate(annotation, (x, h + offset),
                        verticalalignment=verticalalignment,
                        horizontalalignment='center',
                        color=almost_black)
    # Text for each block of stack
    # This was partially inspired by the following article by Tableau software
    # http://www.tableausoftware.com/about/blog/2014/1/new-whitepaper-survey-data-less-ugly-more-understandable-27812
    if stack_text:
        bottom = np.zeros(num_data)
        max_h = max(height)
        for i in np.arange(num_stacks):
            for x, d, b in zip(xticks, data[i], bottom):
                # Only label a segment when it is tall enough (>4% of the
                # tallest bar) for the text to fit inside it.
                if (d*100.0/max_h) > 4.0:
                    ax.text(x,b+d/2.0,d, ha='center', va='center', color=almost_black)
            bottom += data[i]
    return rectangles
constant[
Creates a bar plot, with white outlines and a fill color that defaults to
the first teal-ish green in ColorBrewer's Set2. Optionally accepts
grid='y' or grid='x' to draw a white grid over the bars,
to show the scale. Almost like "erasing" some of the plot,
but it adds more information!
Can also add an annotation of the height of the barplots directly onto
the bars with the `annotate` parameter, which can either be True,
which will annotate the values, or a list of strings, which will annotate
with the supplied strings.
Can support stacked bars with the value of each stack shown on the stack
(Added by Salil Banerjee)
@param ax: matplotlib.axes instance
@param left: Vector of values of where to put the left side of the bar
@param height: Vector of values of the bar heights
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument, any additional arguments to matplotlib.bar(): http://matplotlib
.org/api/axes_api.html#matplotlib.axes.Axes.bar is accepted.
]
<ast.Tuple object at 0x7da2047eb850> assign[=] call[name[maybe_get_ax], parameter[<ast.Starred object at 0x7da2047eb160>]]
call[name[kwargs].setdefault, parameter[constant[color], call[name[set2]][constant[0]]]]
call[name[kwargs].setdefault, parameter[constant[edgecolor], constant[white]]]
variable[middle] assign[=] <ast.IfExp object at 0x7da2047e8880>
variable[stacked] assign[=] call[name[kwargs].pop, parameter[constant[stacked], constant[False]]]
variable[stack_text] assign[=] call[name[kwargs].pop, parameter[constant[stack_text], constant[False]]]
variable[legend] assign[=] call[name[kwargs].pop, parameter[constant[legend], constant[False]]]
variable[left] assign[=] call[name[args]][constant[0]]
variable[height] assign[=] call[name[np].array, parameter[call[name[args]][constant[1]]]]
variable[xtickabels] assign[=] call[name[kwargs].pop, parameter[constant[xticklabels], constant[None]]]
variable[xticks] assign[=] binary_operation[call[name[np].array, parameter[name[left]]] + name[middle]]
variable[annotate] assign[=] call[name[kwargs].pop, parameter[constant[annotate], constant[False]]]
variable[show_ticks] assign[=] call[name[kwargs].pop, parameter[constant[show_ticks], constant[False]]]
variable[grid] assign[=] call[name[kwargs].pop, parameter[constant[grid], constant[None]]]
variable[cmap] assign[=] call[name[kwargs].pop, parameter[constant[cmap], constant[False]]]
if name[cmap] begin[:]
call[name[kwargs]][constant[edgecolor]] assign[=] name[almost_black]
if <ast.UnaryOp object at 0x7da20e954460> begin[:]
call[name[kwargs]][constant[color]] assign[=] call[name[getcolors], parameter[name[cmap], name[height], constant[0]]]
variable[color] assign[=] call[name[kwargs].get, parameter[constant[color], constant[None]]]
if name[stacked] begin[:]
<ast.Tuple object at 0x7da20e954df0> assign[=] name[height].shape
variable[bottom] assign[=] call[name[np].zeros, parameter[name[num_data]]]
for taget[name[i]] in starred[call[name[np].arange, parameter[name[num_stacks]]]] begin[:]
variable[lst] assign[=] call[name[list], parameter[name[args]]]
call[name[lst]][constant[1]] assign[=] call[name[height]][name[i]]
variable[args] assign[=] call[name[tuple], parameter[name[lst]]]
if <ast.BoolOp object at 0x7da20e955270> begin[:]
if name[cmap] begin[:]
call[name[kwargs]][constant[color]] assign[=] call[name[getcolors], parameter[name[cmap], call[name[height]][name[i]], name[i]]]
call[name[kwargs]][constant[bottom]] assign[=] name[bottom]
variable[rectangles] assign[=] call[name[ax].bar, parameter[<ast.Starred object at 0x7da1b060a4a0>]]
<ast.AugAssign object at 0x7da1b06085b0>
if call[name[isinstance], parameter[name[legend], name[collections].Iterable]] begin[:]
call[name[ax].legend, parameter[name[legend]]]
<ast.Tuple object at 0x7da1b060a3e0> assign[=] call[name[ax].get_xlim, parameter[]]
<ast.AugAssign object at 0x7da1b060ad10>
if name[stacked] begin[:]
variable[xmax] assign[=] name[num_data]
call[name[ax].set_xlim, parameter[name[xmin], name[xmax]]]
if <ast.UnaryOp object at 0x7da1b0608b80> begin[:]
variable[height] assign[=] list[[<ast.Name object at 0x7da1b060b370>]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b060a950>]] begin[:]
variable[axes_to_remove] assign[=] list[[<ast.Constant object at 0x7da1b060a6e0>, <ast.Constant object at 0x7da1b0608670>, <ast.Constant object at 0x7da1b06087f0>]]
call[name[ax].hlines, parameter[]]
call[name[remove_chartjunk], parameter[name[ax], name[axes_to_remove]]]
if name[stacked] begin[:]
variable[data] assign[=] name[height]
variable[height] assign[=] call[name[height].sum, parameter[]]
if compare[name[xtickabels] is_not constant[None]] begin[:]
call[name[ax].set_xticks, parameter[name[xticks]]]
call[name[ax].set_xticklabels, parameter[name[xtickabels]]]
if <ast.BoolOp object at 0x7da20c6aad10> begin[:]
variable[annotate_yrange_factor] assign[=] constant[0.025]
<ast.Tuple object at 0x7da20c6a9c00> assign[=] call[name[ax].get_ylim, parameter[]]
variable[yrange] assign[=] binary_operation[name[ymax] - name[ymin]]
if compare[name[ymax] greater[>] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c6abf70>
if compare[name[ymin] less[<] constant[0]] begin[:]
<ast.AugAssign object at 0x7da20c6a9ea0>
call[name[ax].set_ylim, parameter[name[ymin], name[ymax]]]
variable[yrange] assign[=] binary_operation[name[ymax] - name[ymin]]
if compare[call[name[kwargs].get, parameter[constant[log]]] equal[==] constant[True]] begin[:]
variable[offset_] assign[=] binary_operation[call[name[np].log, parameter[name[yrange]]] * name[annotate_yrange_factor]]
if call[name[isinstance], parameter[name[annotate], name[collections].Iterable]] begin[:]
variable[annotations] assign[=] call[name[map], parameter[name[str], name[annotate]]]
for taget[tuple[[<ast.Name object at 0x7da1b2345ff0>, <ast.Name object at 0x7da1b2347490>, <ast.Name object at 0x7da1b23477f0>]]] in starred[call[name[zip], parameter[name[xticks], name[height], name[annotations]]]] begin[:]
variable[offset] assign[=] <ast.IfExp object at 0x7da1b2347820>
variable[verticalalignment] assign[=] <ast.IfExp object at 0x7da1b2346c50>
call[name[ax].annotate, parameter[name[annotation], tuple[[<ast.Name object at 0x7da18f7232b0>, <ast.BinOp object at 0x7da18f7232e0>]]]]
if name[stack_text] begin[:]
variable[bottom] assign[=] call[name[np].zeros, parameter[name[num_data]]]
variable[max_h] assign[=] call[name[max], parameter[name[height]]]
for taget[name[i]] in starred[call[name[np].arange, parameter[name[num_stacks]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f723070>, <ast.Name object at 0x7da18f720e20>, <ast.Name object at 0x7da18f7205b0>]]] in starred[call[name[zip], parameter[name[xticks], call[name[data]][name[i]], name[bottom]]]] begin[:]
if compare[binary_operation[binary_operation[name[d] * constant[100.0]] / name[max_h]] greater[>] constant[4.0]] begin[:]
call[name[ax].text, parameter[name[x], binary_operation[name[b] + binary_operation[name[d] / constant[2.0]]], name[d]]]
<ast.AugAssign object at 0x7da1b0616740>
return[name[rectangles]] | keyword[def] identifier[bar] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[ax] , identifier[args] , identifier[kwargs] = identifier[maybe_get_ax] (* identifier[args] ,** identifier[kwargs] )
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[set2] [ literal[int] ])
identifier[kwargs] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[middle] = literal[int] keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] keyword[else] identifier[kwargs] [ literal[string] ]/ literal[int]
identifier[stacked] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[stack_text] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[legend] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[left] = identifier[args] [ literal[int] ]
identifier[height] = identifier[np] . identifier[array] ( identifier[args] [ literal[int] ])
identifier[xtickabels] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[xticks] = identifier[np] . identifier[array] ( identifier[left] )+ identifier[middle]
identifier[annotate] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[show_ticks] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
identifier[grid] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[cmap] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
keyword[if] identifier[cmap] :
identifier[kwargs] [ literal[string] ]= identifier[almost_black]
keyword[if] keyword[not] identifier[stacked] :
identifier[kwargs] [ literal[string] ]= identifier[getcolors] ( identifier[cmap] , identifier[height] , literal[int] )
identifier[color] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[stacked] :
identifier[num_stacks] , identifier[num_data] = identifier[height] . identifier[shape]
identifier[bottom] = identifier[np] . identifier[zeros] ( identifier[num_data] )
keyword[for] identifier[i] keyword[in] identifier[np] . identifier[arange] ( identifier[num_stacks] ):
identifier[lst] = identifier[list] ( identifier[args] )
identifier[lst] [ literal[int] ]= identifier[height] [ identifier[i] ]
identifier[args] = identifier[tuple] ( identifier[lst] )
keyword[if] keyword[not] identifier[color] keyword[or] identifier[len] ( identifier[color] )!= identifier[num_stacks] :
keyword[if] identifier[cmap] :
identifier[kwargs] [ literal[string] ]= identifier[getcolors] ( identifier[cmap] , identifier[height] [ identifier[i] ], identifier[i] )
keyword[else] :
identifier[kwargs] [ literal[string] ]= identifier[set2] [ identifier[i] ]
keyword[else] :
identifier[kwargs] [ literal[string] ]= identifier[color] [ identifier[i] ]
identifier[kwargs] [ literal[string] ]= identifier[bottom]
identifier[rectangles] = identifier[ax] . identifier[bar] (* identifier[args] ,** identifier[kwargs] )
identifier[bottom] += identifier[height] [ identifier[i] ]
keyword[else] :
identifier[rectangles] = identifier[ax] . identifier[bar] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[isinstance] ( identifier[legend] , identifier[collections] . identifier[Iterable] ):
identifier[ax] . identifier[legend] ( identifier[legend] , identifier[loc] = literal[string] , identifier[bbox_to_anchor] =( literal[int] , literal[int] ), identifier[ncol] = literal[int] )
identifier[xmin] , identifier[xmax] = identifier[ax] . identifier[get_xlim] ()
identifier[xmin] -= literal[int]
keyword[if] identifier[stacked] :
identifier[xmax] = identifier[num_data]
identifier[ax] . identifier[set_xlim] ( identifier[xmin] , identifier[xmax] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[height] , identifier[collections] . identifier[Iterable] ):
identifier[height] =[ identifier[height] ]
keyword[if] identifier[any] ( identifier[h] < literal[int] keyword[for] identifier[h] keyword[in] identifier[height] . identifier[tolist] ()):
identifier[axes_to_remove] =[ literal[string] , literal[string] , literal[string] ]
identifier[ax] . identifier[hlines] ( identifier[y] = literal[int] , identifier[xmin] = identifier[xmin] , identifier[xmax] = identifier[xmax] ,
identifier[linewidths] = literal[int] )
keyword[else] :
identifier[axes_to_remove] =[ literal[string] , literal[string] ]
identifier[remove_chartjunk] ( identifier[ax] , identifier[axes_to_remove] , identifier[grid] = identifier[grid] , identifier[show_ticks] = identifier[show_ticks] )
keyword[if] identifier[stacked] :
identifier[data] = identifier[height]
identifier[height] = identifier[height] . identifier[sum] ( identifier[axis] = literal[int] )
keyword[if] identifier[xtickabels] keyword[is] keyword[not] keyword[None] :
identifier[ax] . identifier[set_xticks] ( identifier[xticks] )
identifier[ax] . identifier[set_xticklabels] ( identifier[xtickabels] )
keyword[if] identifier[annotate] keyword[or] identifier[isinstance] ( identifier[annotate] , identifier[collections] . identifier[Iterable] ):
identifier[annotate_yrange_factor] = literal[int]
identifier[ymin] , identifier[ymax] = identifier[ax] . identifier[get_ylim] ()
identifier[yrange] = identifier[ymax] - identifier[ymin]
keyword[if] identifier[ymax] > literal[int] :
identifier[ymax] += identifier[yrange] * literal[int]
keyword[if] identifier[ymin] < literal[int] :
identifier[ymin] -= identifier[yrange] * literal[int]
identifier[ax] . identifier[set_ylim] ( identifier[ymin] , identifier[ymax] )
identifier[yrange] = identifier[ymax] - identifier[ymin]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] )== keyword[True] :
identifier[offset_] = identifier[np] . identifier[log] ( identifier[yrange] )* identifier[annotate_yrange_factor]
keyword[else] :
identifier[offset_] = identifier[yrange] * identifier[annotate_yrange_factor]
keyword[if] identifier[isinstance] ( identifier[annotate] , identifier[collections] . identifier[Iterable] ):
identifier[annotations] = identifier[map] ( identifier[str] , identifier[annotate] )
keyword[else] :
identifier[annotations] =[ literal[string] % identifier[h] keyword[if] identifier[type] ( identifier[h] ) keyword[is] identifier[np] . identifier[float_] keyword[else] identifier[str] ( identifier[h] )
keyword[for] identifier[h] keyword[in] identifier[height] ]
keyword[for] identifier[x] , identifier[h] , identifier[annotation] keyword[in] identifier[zip] ( identifier[xticks] , identifier[height] , identifier[annotations] ):
identifier[offset] = identifier[offset_] keyword[if] identifier[h] >= literal[int] keyword[else] - literal[int] * identifier[offset_]
identifier[verticalalignment] = literal[string] keyword[if] identifier[h] >= literal[int] keyword[else] literal[string]
identifier[ax] . identifier[annotate] ( identifier[annotation] ,( identifier[x] , identifier[h] + identifier[offset] ),
identifier[verticalalignment] = identifier[verticalalignment] ,
identifier[horizontalalignment] = literal[string] ,
identifier[color] = identifier[almost_black] )
keyword[if] identifier[stack_text] :
identifier[bottom] = identifier[np] . identifier[zeros] ( identifier[num_data] )
identifier[max_h] = identifier[max] ( identifier[height] )
keyword[for] identifier[i] keyword[in] identifier[np] . identifier[arange] ( identifier[num_stacks] ):
keyword[for] identifier[x] , identifier[d] , identifier[b] keyword[in] identifier[zip] ( identifier[xticks] , identifier[data] [ identifier[i] ], identifier[bottom] ):
keyword[if] ( identifier[d] * literal[int] / identifier[max_h] )> literal[int] :
identifier[ax] . identifier[text] ( identifier[x] , identifier[b] + identifier[d] / literal[int] , identifier[d] , identifier[ha] = literal[string] , identifier[va] = literal[string] , identifier[color] = identifier[almost_black] )
identifier[bottom] += identifier[data] [ identifier[i] ]
keyword[return] identifier[rectangles] | def bar(*args, **kwargs):
"""
Creates a bar plot, with white outlines and a fill color that defaults to
the first teal-ish green in ColorBrewer's Set2. Optionally accepts
grid='y' or grid='x' to draw a white grid over the bars,
to show the scale. Almost like "erasing" some of the plot,
but it adds more information!
Can also add an annotation of the height of the barplots directly onto
the bars with the `annotate` parameter, which can either be True,
which will annotate the values, or a list of strings, which will annotate
with the supplied strings.
Can support stacked bars with the value of each stack shown on the stack
(Added by Salil Banerjee)
@param ax: matplotlib.axes instance
@param left: Vector of values of where to put the left side of the bar
@param height: Vector of values of the bar heights
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument, any additional arguments to matplotlib.bar(): http://matplotlib
.org/api/axes_api.html#matplotlib.axes.Axes.bar is accepted.
"""
(ax, args, kwargs) = maybe_get_ax(*args, **kwargs)
kwargs.setdefault('color', set2[0])
kwargs.setdefault('edgecolor', 'white')
middle = 0.4 if 'width' not in kwargs else kwargs['width'] / 2.0
# Check if data contains stacks
stacked = kwargs.pop('stacked', False)
# Check if stack text should be included
stack_text = kwargs.pop('stack_text', False)
# Get legend if available
legend = kwargs.pop('legend', False)
left = args[0]
height = np.array(args[1])
# Label each individual bar, if xticklabels is provided
xtickabels = kwargs.pop('xticklabels', None)
# left+0.4 is the center of the bar
xticks = np.array(left) + middle
# Whether or not to annotate each bar with the height value
annotate = kwargs.pop('annotate', False)
show_ticks = kwargs.pop('show_ticks', False)
# If no grid specified, don't draw one.
grid = kwargs.pop('grid', None)
cmap = kwargs.pop('cmap', False)
if cmap:
kwargs['edgecolor'] = almost_black
if not stacked:
kwargs['color'] = getcolors(cmap, height, 0) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Check if stacked and plot data accordingly
color = kwargs.get('color', None)
if stacked:
(num_stacks, num_data) = height.shape
bottom = np.zeros(num_data)
for i in np.arange(num_stacks):
lst = list(args)
lst[1] = height[i]
args = tuple(lst) # make sure number of user specified colors equals to the stacks
if not color or len(color) != num_stacks:
if cmap:
kwargs['color'] = getcolors(cmap, height[i], i) # depends on [control=['if'], data=[]]
else:
kwargs['color'] = set2[i] # depends on [control=['if'], data=[]]
else:
kwargs['color'] = color[i]
kwargs['bottom'] = bottom
rectangles = ax.bar(*args, **kwargs)
bottom += height[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
else:
rectangles = ax.bar(*args, **kwargs)
# add legend
if isinstance(legend, collections.Iterable):
ax.legend(legend, loc='upper center', bbox_to_anchor=(0.5, 1.11), ncol=5) # depends on [control=['if'], data=[]]
# add whitespace padding on left
(xmin, xmax) = ax.get_xlim()
xmin -= 0.2
if stacked:
xmax = num_data # depends on [control=['if'], data=[]]
ax.set_xlim(xmin, xmax)
# If the user is only plotting one bar, make it an iterable
if not isinstance(height, collections.Iterable):
height = [height] # depends on [control=['if'], data=[]]
# If there are negative counts, remove the bottom axes
# and add a line at y=0
if any((h < 0 for h in height.tolist())):
axes_to_remove = ['top', 'right', 'bottom']
ax.hlines(y=0, xmin=xmin, xmax=xmax, linewidths=0.75) # depends on [control=['if'], data=[]]
else:
axes_to_remove = ['top', 'right']
# Remove excess axes
remove_chartjunk(ax, axes_to_remove, grid=grid, show_ticks=show_ticks)
if stacked:
data = height
height = height.sum(axis=0) # depends on [control=['if'], data=[]]
# Add the xticklabels if they are there
if xtickabels is not None:
ax.set_xticks(xticks)
ax.set_xticklabels(xtickabels) # depends on [control=['if'], data=['xtickabels']]
if annotate or isinstance(annotate, collections.Iterable):
annotate_yrange_factor = 0.025
(ymin, ymax) = ax.get_ylim()
yrange = ymax - ymin
# Reset ymax and ymin so there's enough room to see the annotation of
# the top-most
if ymax > 0:
ymax += yrange * 0.1 # depends on [control=['if'], data=['ymax']]
if ymin < 0:
ymin -= yrange * 0.1 # depends on [control=['if'], data=['ymin']]
ax.set_ylim(ymin, ymax)
yrange = ymax - ymin
if kwargs.get('log') == True:
offset_ = np.log(yrange) * annotate_yrange_factor # depends on [control=['if'], data=[]]
else:
offset_ = yrange * annotate_yrange_factor
if isinstance(annotate, collections.Iterable):
annotations = map(str, annotate) # depends on [control=['if'], data=[]]
else:
annotations = ['%.3f' % h if type(h) is np.float_ else str(h) for h in height]
for (x, h, annotation) in zip(xticks, height, annotations):
# Adjust the offset to account for negative bars
offset = offset_ if h >= 0 else -1 * offset_
verticalalignment = 'bottom' if h >= 0 else 'top'
# Finally, add the text to the axes
ax.annotate(annotation, (x, h + offset), verticalalignment=verticalalignment, horizontalalignment='center', color=almost_black) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Text for each block of stack
# This was partially inspired by the following article by Tableau software
# http://www.tableausoftware.com/about/blog/2014/1/new-whitepaper-survey-data-less-ugly-more-understandable-27812
if stack_text:
bottom = np.zeros(num_data)
max_h = max(height)
for i in np.arange(num_stacks):
for (x, d, b) in zip(xticks, data[i], bottom):
if d * 100.0 / max_h > 4.0:
ax.text(x, b + d / 2.0, d, ha='center', va='center', color=almost_black) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
bottom += data[i] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return rectangles |
def count_SMS(self, conditions=None):
    """
    Count all certified sms.

    Parameters
    ----------
    conditions : dict, optional
        Query filters appended to the request URL as ``&key=value`` pairs.
        The special key ``'ids'`` may map to an iterable of ids, which is
        joined with commas before being placed in the URL.

    Returns
    -------
    The parsed API response from the SMS count endpoint.
    """
    # Avoid a mutable default argument; an empty dict means "no filters".
    if conditions is None:
        conditions = {}
    url = self.SMS_COUNT_URL + "?"
    for key, value in conditions.items():
        # `is 'ids'` compared object identity and only worked by accident of
        # string interning; `==` is the correct equality test.
        if key == 'ids':
            value = ",".join(value)
        url += '&%s=%s' % (key, value)
    connection = Connection(self.token)
    # The original called set_url twice in a row; once is sufficient.
    connection.set_url(self.production, url)
    return connection.get_request()
constant[
Count all certified sms
]
variable[url] assign[=] binary_operation[name[self].SMS_COUNT_URL + constant[?]]
for taget[tuple[[<ast.Name object at 0x7da1b1152590>, <ast.Name object at 0x7da1b11508b0>]]] in starred[call[name[conditions].items, parameter[]]] begin[:]
if compare[name[key] is constant[ids]] begin[:]
variable[value] assign[=] call[constant[,].join, parameter[name[value]]]
<ast.AugAssign object at 0x7da1b11514b0>
variable[connection] assign[=] call[name[Connection], parameter[name[self].token]]
call[name[connection].set_url, parameter[name[self].production, name[url]]]
call[name[connection].set_url, parameter[name[self].production, name[url]]]
return[call[name[connection].get_request, parameter[]]] | keyword[def] identifier[count_SMS] ( identifier[self] , identifier[conditions] ={}):
literal[string]
identifier[url] = identifier[self] . identifier[SMS_COUNT_URL] + literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[conditions] . identifier[items] ():
keyword[if] identifier[key] keyword[is] literal[string] :
identifier[value] = literal[string] . identifier[join] ( identifier[value] )
identifier[url] += literal[string] %( identifier[key] , identifier[value] )
identifier[connection] = identifier[Connection] ( identifier[self] . identifier[token] )
identifier[connection] . identifier[set_url] ( identifier[self] . identifier[production] , identifier[url] )
identifier[connection] . identifier[set_url] ( identifier[self] . identifier[production] , identifier[url] )
keyword[return] identifier[connection] . identifier[get_request] () | def count_SMS(self, conditions={}):
"""
Count all certified sms
"""
url = self.SMS_COUNT_URL + '?'
for (key, value) in conditions.items():
if key is 'ids':
value = ','.join(value) # depends on [control=['if'], data=[]]
url += '&%s=%s' % (key, value) # depends on [control=['for'], data=[]]
connection = Connection(self.token)
connection.set_url(self.production, url)
connection.set_url(self.production, url)
return connection.get_request() |
def search(self, filters=None, start_index=0, limit=100):
    """
    Search for a list of notes that can be invested in.
    (similar to searching for notes in the Browse section on the site)

    Parameters
    ----------
    filters : lendingclub.filters.*, optional
        The filter to use to search for notes. If no filter is passed, a wildcard search
        will be performed.
    start_index : int, optional
        The result index to start on. By default only 100 records will be returned at a time, so use this
        to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
        (default is 0)
    limit : int, optional
        The number of results to return per request. (default is 100)

    Returns
    -------
    dict
        A dictionary object with the list of matching loans under the `loans` key,
        or ``False`` if the request was not successful.

    Raises
    ------
    AssertionError
        If `filters` is neither None nor a ``lendingclub.filters.Filter``.
    """
    # Explicit raise instead of a bare `assert` so the validation survives
    # running Python with -O (asserts are stripped there). The exception
    # type is unchanged for existing callers.
    if not (filters is None or isinstance(filters, Filter)):
        raise AssertionError('filter is not a lendingclub.filters.Filter')

    # Set filters; 'default' performs a wildcard search on the server side.
    filter_string = filters.search_string() if filters else 'default'
    payload = {
        'method': 'search',
        'filter': filter_string,
        'startindex': start_index,
        'pagesize': limit
    }

    # Make request
    response = self.session.post('/browse/browseNotesAj.action', data=payload)
    json_response = response.json()

    if self.session.json_success(json_response):
        results = json_response['searchresult']

        # Normalize results by converting loanGUID -> loan_id
        for loan in results['loans']:
            loan['loan_id'] = int(loan['loanGUID'])

        # Validate that fractions do indeed match the filters
        if filters is not None:
            filters.validate(results['loans'])

        return results

    return False
return False | def function[search, parameter[self, filters, start_index, limit]]:
constant[
Search for a list of notes that can be invested in.
(similar to searching for notes in the Browse section on the site)
Parameters
----------
filters : lendingclub.filters.*, optional
The filter to use to search for notes. If no filter is passed, a wildcard search
will be performed.
start_index : int, optional
The result index to start on. By default only 100 records will be returned at a time, so use this
to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
(default is 0)
limit : int, optional
The number of results to return per request. (default is 100)
Returns
-------
dict
A dictionary object with the list of matching loans under the `loans` key.
]
assert[<ast.BoolOp object at 0x7da2054a5f60>]
if name[filters] begin[:]
variable[filter_string] assign[=] call[name[filters].search_string, parameter[]]
variable[payload] assign[=] dictionary[[<ast.Constant object at 0x7da207f992d0>, <ast.Constant object at 0x7da207f9bfa0>, <ast.Constant object at 0x7da207f9b8e0>, <ast.Constant object at 0x7da207f9b400>], [<ast.Constant object at 0x7da207f98940>, <ast.Name object at 0x7da207f9bf10>, <ast.Name object at 0x7da207f993f0>, <ast.Name object at 0x7da207f9a9e0>]]
variable[response] assign[=] call[name[self].session.post, parameter[constant[/browse/browseNotesAj.action]]]
variable[json_response] assign[=] call[name[response].json, parameter[]]
if call[name[self].session.json_success, parameter[name[json_response]]] begin[:]
variable[results] assign[=] call[name[json_response]][constant[searchresult]]
for taget[name[loan]] in starred[call[name[results]][constant[loans]]] begin[:]
call[name[loan]][constant[loan_id]] assign[=] call[name[int], parameter[call[name[loan]][constant[loanGUID]]]]
if compare[name[filters] is_not constant[None]] begin[:]
call[name[filters].validate, parameter[call[name[results]][constant[loans]]]]
return[name[results]]
return[constant[False]] | keyword[def] identifier[search] ( identifier[self] , identifier[filters] = keyword[None] , identifier[start_index] = literal[int] , identifier[limit] = literal[int] ):
literal[string]
keyword[assert] identifier[filters] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[filters] , identifier[Filter] ), literal[string]
keyword[if] identifier[filters] :
identifier[filter_string] = identifier[filters] . identifier[search_string] ()
keyword[else] :
identifier[filter_string] = literal[string]
identifier[payload] ={
literal[string] : literal[string] ,
literal[string] : identifier[filter_string] ,
literal[string] : identifier[start_index] ,
literal[string] : identifier[limit]
}
identifier[response] = identifier[self] . identifier[session] . identifier[post] ( literal[string] , identifier[data] = identifier[payload] )
identifier[json_response] = identifier[response] . identifier[json] ()
keyword[if] identifier[self] . identifier[session] . identifier[json_success] ( identifier[json_response] ):
identifier[results] = identifier[json_response] [ literal[string] ]
keyword[for] identifier[loan] keyword[in] identifier[results] [ literal[string] ]:
identifier[loan] [ literal[string] ]= identifier[int] ( identifier[loan] [ literal[string] ])
keyword[if] identifier[filters] keyword[is] keyword[not] keyword[None] :
identifier[filters] . identifier[validate] ( identifier[results] [ literal[string] ])
keyword[return] identifier[results]
keyword[return] keyword[False] | def search(self, filters=None, start_index=0, limit=100):
"""
Search for a list of notes that can be invested in.
(similar to searching for notes in the Browse section on the site)
Parameters
----------
filters : lendingclub.filters.*, optional
The filter to use to search for notes. If no filter is passed, a wildcard search
will be performed.
start_index : int, optional
The result index to start on. By default only 100 records will be returned at a time, so use this
to start at a later index in the results. For example, to get results 200 - 300, set `start_index` to 200.
(default is 0)
limit : int, optional
The number of results to return per request. (default is 100)
Returns
-------
dict
A dictionary object with the list of matching loans under the `loans` key.
"""
assert filters is None or isinstance(filters, Filter), 'filter is not a lendingclub.filters.Filter'
# Set filters
if filters:
filter_string = filters.search_string() # depends on [control=['if'], data=[]]
else:
filter_string = 'default'
payload = {'method': 'search', 'filter': filter_string, 'startindex': start_index, 'pagesize': limit}
# Make request
response = self.session.post('/browse/browseNotesAj.action', data=payload)
json_response = response.json()
if self.session.json_success(json_response):
results = json_response['searchresult']
# Normalize results by converting loanGUID -> loan_id
for loan in results['loans']:
loan['loan_id'] = int(loan['loanGUID']) # depends on [control=['for'], data=['loan']]
# Validate that fractions do indeed match the filters
if filters is not None:
filters.validate(results['loans']) # depends on [control=['if'], data=['filters']]
return results # depends on [control=['if'], data=[]]
return False |
def create_license_helper(self, lic):
    """
    Handle a single license (no conjunction/disjunction).

    Extracted licenses are materialized directly; licenses found in the
    standard license map become URI references; otherwise the document's
    own extracted licenses are searched by identifier.

    Return the created node.
    """
    if isinstance(lic, document.ExtractedLicense):
        return self.create_extracted_license(lic)

    if lic.identifier.rstrip('+') in config.LICENSE_MAP:
        return URIRef(lic.url)

    # Fall back to the first extracted license with a matching identifier.
    for extracted in self.document.extracted_licenses:
        if extracted.identifier == lic.identifier:
            return self.create_extracted_license(extracted)

    raise InvalidDocumentError('Missing extracted license: {0}'.format(lic.identifier))
constant[
Handle single(no conjunction/disjunction) licenses.
Return the created node.
]
if call[name[isinstance], parameter[name[lic], name[document].ExtractedLicense]] begin[:]
return[call[name[self].create_extracted_license, parameter[name[lic]]]]
if compare[call[name[lic].identifier.rstrip, parameter[constant[+]]] in name[config].LICENSE_MAP] begin[:]
return[call[name[URIRef], parameter[name[lic].url]]] | keyword[def] identifier[create_license_helper] ( identifier[self] , identifier[lic] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[lic] , identifier[document] . identifier[ExtractedLicense] ):
keyword[return] identifier[self] . identifier[create_extracted_license] ( identifier[lic] )
keyword[if] identifier[lic] . identifier[identifier] . identifier[rstrip] ( literal[string] ) keyword[in] identifier[config] . identifier[LICENSE_MAP] :
keyword[return] identifier[URIRef] ( identifier[lic] . identifier[url] )
keyword[else] :
identifier[matches] =[ identifier[l] keyword[for] identifier[l] keyword[in] identifier[self] . identifier[document] . identifier[extracted_licenses] keyword[if] identifier[l] . identifier[identifier] == identifier[lic] . identifier[identifier] ]
keyword[if] identifier[len] ( identifier[matches] )!= literal[int] :
keyword[return] identifier[self] . identifier[create_extracted_license] ( identifier[matches] [ literal[int] ])
keyword[else] :
keyword[raise] identifier[InvalidDocumentError] ( literal[string] . identifier[format] ( identifier[lic] . identifier[identifier] )) | def create_license_helper(self, lic):
"""
Handle single(no conjunction/disjunction) licenses.
Return the created node.
"""
if isinstance(lic, document.ExtractedLicense):
return self.create_extracted_license(lic) # depends on [control=['if'], data=[]]
if lic.identifier.rstrip('+') in config.LICENSE_MAP:
return URIRef(lic.url) # depends on [control=['if'], data=[]]
else:
matches = [l for l in self.document.extracted_licenses if l.identifier == lic.identifier]
if len(matches) != 0:
return self.create_extracted_license(matches[0]) # depends on [control=['if'], data=[]]
else:
raise InvalidDocumentError('Missing extracted license: {0}'.format(lic.identifier)) |
def read_graph_from_string(txt):
    """Read a graph from a string, either in dot format, or our own
    compressed format.

    Returns:
        `pygraph.digraph`: Graph object.
    """
    if not txt.startswith('{'):
        return read_dot(txt)  # standard dot format

    def quoted(val):
        # Wrap string values in double quotes; pass others through untouched.
        # NOTE: Python 2 code — `basestring` covers both str and unicode.
        if isinstance(val, basestring):
            return '"' + val + '"'
        return val

    # our compacted format
    doc = literal_eval(txt)
    graph = digraph()

    for shared_attrs, entries in doc.get("nodes", []):
        base_attrs = [(key, quoted(val)) for key, val in shared_attrs]
        for entry in entries:
            if isinstance(entry, basestring):
                # Bare node name: only the shared attributes apply.
                graph.add_node(entry, attrs=base_attrs)
            else:
                # (name, label) pair: shared attributes plus a label.
                name, label = entry
                graph.add_node(name, attrs=base_attrs + [("label", quoted(label))])

    for shared_attrs, entries in doc.get("edges", []):
        base_attrs = [(key, quoted(val)) for key, val in shared_attrs]
        for entry in entries:
            if len(entry) == 3:
                # (from, to, label) triple.
                graph.add_edge(entry[:2], label=entry[-1], attrs=base_attrs)
            else:
                # Plain (from, to) pair with no label.
                graph.add_edge(entry, label='', attrs=base_attrs)

    return graph
return g | def function[read_graph_from_string, parameter[txt]]:
constant[Read a graph from a string, either in dot format, or our own
compressed format.
Returns:
`pygraph.digraph`: Graph object.
]
if <ast.UnaryOp object at 0x7da18f8112a0> begin[:]
return[call[name[read_dot], parameter[name[txt]]]]
def function[conv, parameter[value]]:
if call[name[isinstance], parameter[name[value], name[basestring]]] begin[:]
return[binary_operation[binary_operation[constant["] + name[value]] + constant["]]]
variable[doc] assign[=] call[name[literal_eval], parameter[name[txt]]]
variable[g] assign[=] call[name[digraph], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18f8125c0>, <ast.Name object at 0x7da18f812230>]]] in starred[call[name[doc].get, parameter[constant[nodes], list[[]]]]] begin[:]
variable[attrs] assign[=] <ast.ListComp object at 0x7da18f8125f0>
for taget[name[value]] in starred[name[values]] begin[:]
if call[name[isinstance], parameter[name[value], name[basestring]]] begin[:]
variable[node_name] assign[=] name[value]
variable[attrs_] assign[=] name[attrs]
call[name[g].add_node, parameter[name[node_name]]]
for taget[tuple[[<ast.Name object at 0x7da18f810e20>, <ast.Name object at 0x7da18f813d30>]]] in starred[call[name[doc].get, parameter[constant[edges], list[[]]]]] begin[:]
variable[attrs_] assign[=] <ast.ListComp object at 0x7da18f8109a0>
for taget[name[value]] in starred[name[values]] begin[:]
if compare[call[name[len], parameter[name[value]]] equal[==] constant[3]] begin[:]
variable[edge] assign[=] call[name[value]][<ast.Slice object at 0x7da2054a7970>]
variable[label] assign[=] call[name[value]][<ast.UnaryOp object at 0x7da2054a4820>]
call[name[g].add_edge, parameter[name[edge]]]
return[name[g]] | keyword[def] identifier[read_graph_from_string] ( identifier[txt] ):
literal[string]
keyword[if] keyword[not] identifier[txt] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[read_dot] ( identifier[txt] )
keyword[def] identifier[conv] ( identifier[value] ):
keyword[if] identifier[isinstance] ( identifier[value] , identifier[basestring] ):
keyword[return] literal[string] + identifier[value] + literal[string]
keyword[else] :
keyword[return] identifier[value]
identifier[doc] = identifier[literal_eval] ( identifier[txt] )
identifier[g] = identifier[digraph] ()
keyword[for] identifier[attrs] , identifier[values] keyword[in] identifier[doc] . identifier[get] ( literal[string] ,[]):
identifier[attrs] =[( identifier[k] , identifier[conv] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[attrs] ]
keyword[for] identifier[value] keyword[in] identifier[values] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[basestring] ):
identifier[node_name] = identifier[value]
identifier[attrs_] = identifier[attrs]
keyword[else] :
identifier[node_name] , identifier[label] = identifier[value]
identifier[attrs_] = identifier[attrs] +[( literal[string] , identifier[conv] ( identifier[label] ))]
identifier[g] . identifier[add_node] ( identifier[node_name] , identifier[attrs] = identifier[attrs_] )
keyword[for] identifier[attrs] , identifier[values] keyword[in] identifier[doc] . identifier[get] ( literal[string] ,[]):
identifier[attrs_] =[( identifier[k] , identifier[conv] ( identifier[v] )) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[attrs] ]
keyword[for] identifier[value] keyword[in] identifier[values] :
keyword[if] identifier[len] ( identifier[value] )== literal[int] :
identifier[edge] = identifier[value] [: literal[int] ]
identifier[label] = identifier[value] [- literal[int] ]
keyword[else] :
identifier[edge] = identifier[value]
identifier[label] = literal[string]
identifier[g] . identifier[add_edge] ( identifier[edge] , identifier[label] = identifier[label] , identifier[attrs] = identifier[attrs_] )
keyword[return] identifier[g] | def read_graph_from_string(txt):
"""Read a graph from a string, either in dot format, or our own
compressed format.
Returns:
`pygraph.digraph`: Graph object.
"""
if not txt.startswith('{'):
return read_dot(txt) # standard dot format # depends on [control=['if'], data=[]]
def conv(value):
if isinstance(value, basestring):
return '"' + value + '"' # depends on [control=['if'], data=[]]
else:
return value
# our compacted format
doc = literal_eval(txt)
g = digraph()
for (attrs, values) in doc.get('nodes', []):
attrs = [(k, conv(v)) for (k, v) in attrs]
for value in values:
if isinstance(value, basestring):
node_name = value
attrs_ = attrs # depends on [control=['if'], data=[]]
else:
(node_name, label) = value
attrs_ = attrs + [('label', conv(label))]
g.add_node(node_name, attrs=attrs_) # depends on [control=['for'], data=['value']] # depends on [control=['for'], data=[]]
for (attrs, values) in doc.get('edges', []):
attrs_ = [(k, conv(v)) for (k, v) in attrs]
for value in values:
if len(value) == 3:
edge = value[:2]
label = value[-1] # depends on [control=['if'], data=[]]
else:
edge = value
label = ''
g.add_edge(edge, label=label, attrs=attrs_) # depends on [control=['for'], data=['value']] # depends on [control=['for'], data=[]]
return g |
def configure_inline_support(shell, backend, user_ns=None):
    """Configure an IPython shell object for matplotlib use.

    Parameters
    ----------
    shell : InteractiveShell instance

    backend : matplotlib backend

    user_ns : dict
        A namespace where all configured variables will be placed.  If not given,
        the `user_ns` attribute of the shell object is used.
    """
    # If using our svg payload backend, register the post-execution
    # function that will pick up the results for display.  This can only be
    # done with access to the real shell object.

    # Note: if we can't load the inline backend, then there's no point
    # continuing (such as in terminal-only shells in environments without
    # zeromq available).
    try:
        from IPython.zmq.pylab.backend_inline import InlineBackend
    except ImportError:
        return
    
    # Default to the shell's own namespace when the caller gave none.
    user_ns = shell.user_ns if user_ns is None else user_ns

    # InlineBackend.instance() returns a singleton configured from the
    # shell's config; attach the shell so the backend can reach it.
    cfg = InlineBackend.instance(config=shell.config)
    cfg.shell = shell
    # Register the config object with the shell's configurables exactly once.
    if cfg not in shell.configurables:
        shell.configurables.append(cfg)

    if backend == backends['inline']:
        from IPython.zmq.pylab.backend_inline import flush_figures
        from matplotlib import pyplot
        # flush_figures runs after every cell execution to emit figures.
        shell.register_post_execute(flush_figures)
        # load inline_rc
        pyplot.rcParams.update(cfg.rc)
        # Add 'figsize' to pyplot and to the user's namespace
        user_ns['figsize'] = pyplot.figsize = figsize

    # Setup the default figure format
    fmt = cfg.figure_format
    select_figure_format(shell, fmt)

    # The old pastefig function has been replaced by display
    from IPython.core.display import display
    # Add display and getfigs to the user's namespace
    user_ns['display'] = display
    user_ns['getfigs'] = getfigs
constant[Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
user_ns : dict
A namespace where all configured variables will be placed. If not given,
the `user_ns` attribute of the shell object is used.
]
<ast.Try object at 0x7da20c990880>
variable[user_ns] assign[=] <ast.IfExp object at 0x7da18f813e20>
variable[cfg] assign[=] call[name[InlineBackend].instance, parameter[]]
name[cfg].shell assign[=] name[shell]
if compare[name[cfg] <ast.NotIn object at 0x7da2590d7190> name[shell].configurables] begin[:]
call[name[shell].configurables.append, parameter[name[cfg]]]
if compare[name[backend] equal[==] call[name[backends]][constant[inline]]] begin[:]
from relative_module[IPython.zmq.pylab.backend_inline] import module[flush_figures]
from relative_module[matplotlib] import module[pyplot]
call[name[shell].register_post_execute, parameter[name[flush_figures]]]
call[name[pyplot].rcParams.update, parameter[name[cfg].rc]]
call[name[user_ns]][constant[figsize]] assign[=] name[figsize]
variable[fmt] assign[=] name[cfg].figure_format
call[name[select_figure_format], parameter[name[shell], name[fmt]]]
from relative_module[IPython.core.display] import module[display]
call[name[user_ns]][constant[display]] assign[=] name[display]
call[name[user_ns]][constant[getfigs]] assign[=] name[getfigs] | keyword[def] identifier[configure_inline_support] ( identifier[shell] , identifier[backend] , identifier[user_ns] = keyword[None] ):
literal[string]
keyword[try] :
keyword[from] identifier[IPython] . identifier[zmq] . identifier[pylab] . identifier[backend_inline] keyword[import] identifier[InlineBackend]
keyword[except] identifier[ImportError] :
keyword[return]
identifier[user_ns] = identifier[shell] . identifier[user_ns] keyword[if] identifier[user_ns] keyword[is] keyword[None] keyword[else] identifier[user_ns]
identifier[cfg] = identifier[InlineBackend] . identifier[instance] ( identifier[config] = identifier[shell] . identifier[config] )
identifier[cfg] . identifier[shell] = identifier[shell]
keyword[if] identifier[cfg] keyword[not] keyword[in] identifier[shell] . identifier[configurables] :
identifier[shell] . identifier[configurables] . identifier[append] ( identifier[cfg] )
keyword[if] identifier[backend] == identifier[backends] [ literal[string] ]:
keyword[from] identifier[IPython] . identifier[zmq] . identifier[pylab] . identifier[backend_inline] keyword[import] identifier[flush_figures]
keyword[from] identifier[matplotlib] keyword[import] identifier[pyplot]
identifier[shell] . identifier[register_post_execute] ( identifier[flush_figures] )
identifier[pyplot] . identifier[rcParams] . identifier[update] ( identifier[cfg] . identifier[rc] )
identifier[user_ns] [ literal[string] ]= identifier[pyplot] . identifier[figsize] = identifier[figsize]
identifier[fmt] = identifier[cfg] . identifier[figure_format]
identifier[select_figure_format] ( identifier[shell] , identifier[fmt] )
keyword[from] identifier[IPython] . identifier[core] . identifier[display] keyword[import] identifier[display]
identifier[user_ns] [ literal[string] ]= identifier[display]
identifier[user_ns] [ literal[string] ]= identifier[getfigs] | def configure_inline_support(shell, backend, user_ns=None):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
user_ns : dict
A namespace where all configured variables will be placed. If not given,
the `user_ns` attribute of the shell object is used.
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from IPython.zmq.pylab.backend_inline import InlineBackend # depends on [control=['try'], data=[]]
except ImportError:
return # depends on [control=['except'], data=[]]
user_ns = shell.user_ns if user_ns is None else user_ns
cfg = InlineBackend.instance(config=shell.config)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg) # depends on [control=['if'], data=['cfg']]
if backend == backends['inline']:
from IPython.zmq.pylab.backend_inline import flush_figures
from matplotlib import pyplot
shell.register_post_execute(flush_figures)
# load inline_rc
pyplot.rcParams.update(cfg.rc)
# Add 'figsize' to pyplot and to the user's namespace
user_ns['figsize'] = pyplot.figsize = figsize # depends on [control=['if'], data=[]]
# Setup the default figure format
fmt = cfg.figure_format
select_figure_format(shell, fmt)
# The old pastefig function has been replaced by display
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs |
def format_back(
        number: FormatArg,
        light: Optional[bool] = False,
        extended: Optional[bool] = False) -> str:
    """ Return an escape code for a back color, by number.

        Thin convenience wrapper around `_format_code` that fixes
        `backcolor=True`, so callers get the same handling of the
        different code types (and the same validation) in one call.
    """
    return _format_code(number, backcolor=True, light=light, extended=extended)
constant[ Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
]
return[call[name[_format_code], parameter[name[number]]]] | keyword[def] identifier[format_back] (
identifier[number] : identifier[FormatArg] ,
identifier[light] : identifier[Optional] [ identifier[bool] ]= keyword[False] ,
identifier[extended] : identifier[Optional] [ identifier[bool] ]= keyword[False] )-> identifier[str] :
literal[string]
keyword[return] identifier[_format_code] (
identifier[number] ,
identifier[backcolor] = keyword[True] ,
identifier[light] = identifier[light] ,
identifier[extended] = identifier[extended]
) | def format_back(number: FormatArg, light: Optional[bool]=False, extended: Optional[bool]=False) -> str:
""" Return an escape code for a back color, by number.
This is a convenience method for handling the different code types
all in one shot.
It also handles some validation.
"""
return _format_code(number, backcolor=True, light=light, extended=extended) |
def roundrobin(*iterables):
    "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
    # Recipe credited to George Sakkis
    # NOTE: Python 2 only — relies on the bound `.next` method of each
    # iterator (Python 3 iterators expose `__next__` instead).
    # `pending` counts the iterators that are still live.
    pending = len(iterables)
    # Cycle endlessly over the bound next-methods of all iterators.
    nexts = itertools.cycle(iter(it).next for it in iterables)
    while pending:
        try:
            for next in nexts:
                yield next()
        except StopIteration:
            # The iterator that just raised is exhausted.  islice resumes
            # the cycle *after* the dead entry, so taking the next
            # `pending` items yields exactly the surviving next-methods;
            # cycling over those drops the dead iterator permanently.
            pending -= 1
            nexts = itertools.cycle(itertools.islice(nexts, pending))
constant[roundrobin('ABC', 'D', 'EF') --> A D E B F C]
variable[pending] assign[=] call[name[len], parameter[name[iterables]]]
variable[nexts] assign[=] call[name[itertools].cycle, parameter[<ast.GeneratorExp object at 0x7da1b2842290>]]
while name[pending] begin[:]
<ast.Try object at 0x7da20c6c6c50> | keyword[def] identifier[roundrobin] (* identifier[iterables] ):
literal[string]
identifier[pending] = identifier[len] ( identifier[iterables] )
identifier[nexts] = identifier[itertools] . identifier[cycle] ( identifier[iter] ( identifier[it] ). identifier[next] keyword[for] identifier[it] keyword[in] identifier[iterables] )
keyword[while] identifier[pending] :
keyword[try] :
keyword[for] identifier[next] keyword[in] identifier[nexts] :
keyword[yield] identifier[next] ()
keyword[except] identifier[StopIteration] :
identifier[pending] -= literal[int]
identifier[nexts] = identifier[itertools] . identifier[cycle] ( identifier[itertools] . identifier[islice] ( identifier[nexts] , identifier[pending] )) | def roundrobin(*iterables):
"""roundrobin('ABC', 'D', 'EF') --> A D E B F C"""
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = itertools.cycle((iter(it).next for it in iterables))
while pending:
try:
for next in nexts:
yield next() # depends on [control=['for'], data=['next']] # depends on [control=['try'], data=[]]
except StopIteration:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending)) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] |
def _handle_sigint(self, signum: int, frame: Any) -> None:
    """Signal handler for SIGINT: log the interrupt and request shutdown,
    letting the current task finish before stopping."""
    logger.warning("Catched SIGINT")
    self.shutdown()
constant[Shutdown after processing current task.]
call[name[logger].warning, parameter[constant[Catched SIGINT]]]
call[name[self].shutdown, parameter[]] | keyword[def] identifier[_handle_sigint] ( identifier[self] , identifier[signum] : identifier[int] , identifier[frame] : identifier[Any] )-> keyword[None] :
literal[string]
identifier[logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[shutdown] () | def _handle_sigint(self, signum: int, frame: Any) -> None:
"""Shutdown after processing current task."""
logger.warning('Catched SIGINT')
self.shutdown() |
def _encode_multipart(**kw):
    """Build a multipart/form-data body with a randomly generated boundary.

    File-like values (anything with a ``read`` method) are embedded as file
    parts; everything else is sent as a plain form field.  Returns the tuple
    ``(body, boundary)``.  NOTE: Python 2 code (``iteritems``, ``unicode``).
    """
    boundary = '----------%s' % hex(int(time.time() * 1000))
    parts = []
    for field, payload in kw.iteritems():
        parts.append('--%s' % boundary)
        if hasattr(payload, 'read'):
            # file-like object: read its full contents into the body.
            source_name = getattr(payload, 'name', '')
            body = payload.read()
            parts.append('Content-Disposition: form-data; name="%s"; filename="hidden"' % field)
            parts.append('Content-Length: %d' % len(body))
            # Content type is guessed from the original filename's extension.
            parts.append('Content-Type: %s\r\n' % _guess_content_type(source_name))
            parts.append(body)
        else:
            parts.append('Content-Disposition: form-data; name="%s"\r\n' % field)
            if isinstance(payload, unicode):
                parts.append(payload.encode('utf-8'))
            else:
                parts.append(payload)
    parts.append('--%s--\r\n' % boundary)
    return '\r\n'.join(parts), boundary
constant[ build a multipart/form-data body with randomly generated boundary ]
variable[boundary] assign[=] binary_operation[constant[----------%s] <ast.Mod object at 0x7da2590d6920> call[name[hex], parameter[call[name[int], parameter[binary_operation[call[name[time].time, parameter[]] * constant[1000]]]]]]]
variable[data] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6a91b0>, <ast.Name object at 0x7da20c6a9c30>]]] in starred[call[name[kw].iteritems, parameter[]]] begin[:]
call[name[data].append, parameter[binary_operation[constant[--%s] <ast.Mod object at 0x7da2590d6920> name[boundary]]]]
if call[name[hasattr], parameter[name[v], constant[read]]] begin[:]
variable[filename] assign[=] call[name[getattr], parameter[name[v], constant[name], constant[]]]
variable[content] assign[=] call[name[v].read, parameter[]]
call[name[data].append, parameter[binary_operation[constant[Content-Disposition: form-data; name="%s"; filename="hidden"] <ast.Mod object at 0x7da2590d6920> name[k]]]]
call[name[data].append, parameter[binary_operation[constant[Content-Length: %d] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[content]]]]]]
call[name[data].append, parameter[binary_operation[constant[Content-Type: %s
] <ast.Mod object at 0x7da2590d6920> call[name[_guess_content_type], parameter[name[filename]]]]]]
call[name[data].append, parameter[name[content]]]
call[name[data].append, parameter[binary_operation[constant[--%s--
] <ast.Mod object at 0x7da2590d6920> name[boundary]]]]
return[tuple[[<ast.Call object at 0x7da1b22979d0>, <ast.Name object at 0x7da1b1de0130>]]] | keyword[def] identifier[_encode_multipart] (** identifier[kw] ):
literal[string]
identifier[boundary] = literal[string] % identifier[hex] ( identifier[int] ( identifier[time] . identifier[time] ()* literal[int] ))
identifier[data] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[kw] . identifier[iteritems] ():
identifier[data] . identifier[append] ( literal[string] % identifier[boundary] )
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[filename] = identifier[getattr] ( identifier[v] , literal[string] , literal[string] )
identifier[content] = identifier[v] . identifier[read] ()
identifier[data] . identifier[append] ( literal[string] % identifier[k] )
identifier[data] . identifier[append] ( literal[string] % identifier[len] ( identifier[content] ))
identifier[data] . identifier[append] ( literal[string] % identifier[_guess_content_type] ( identifier[filename] ))
identifier[data] . identifier[append] ( identifier[content] )
keyword[else] :
identifier[data] . identifier[append] ( literal[string] % identifier[k] )
identifier[data] . identifier[append] ( identifier[v] . identifier[encode] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[v] , identifier[unicode] ) keyword[else] identifier[v] )
identifier[data] . identifier[append] ( literal[string] % identifier[boundary] )
keyword[return] literal[string] . identifier[join] ( identifier[data] ), identifier[boundary] | def _encode_multipart(**kw):
""" build a multipart/form-data body with randomly generated boundary """
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
for (k, v) in kw.iteritems():
data.append('--%s' % boundary)
if hasattr(v, 'read'):
# file-like object:
filename = getattr(v, 'name', '')
content = v.read()
data.append('Content-Disposition: form-data; name="%s"; filename="hidden"' % k)
data.append('Content-Length: %d' % len(content))
data.append('Content-Type: %s\r\n' % _guess_content_type(filename))
data.append(content) # depends on [control=['if'], data=[]]
else:
data.append('Content-Disposition: form-data; name="%s"\r\n' % k)
data.append(v.encode('utf-8') if isinstance(v, unicode) else v) # depends on [control=['for'], data=[]]
data.append('--%s--\r\n' % boundary)
return ('\r\n'.join(data), boundary) |
def add(self, variant, arch, image):
    """
    Assign an :class:`.Image` object to variant and arch.
    :param variant: compose variant UID
    :type variant: str
    :param arch: compose architecture
    :type arch: str
    :param image: image
    :type image: :class:`.Image`
    """
    # Only real binary arches are accepted; source content must be mapped
    # under the binary arch it belongs to.
    if arch not in productmd.common.RPM_ARCHES:
        raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
    if arch in ("src", "nosrc"):
        raise ValueError("Source arch is not allowed. Map source files under binary arches.")
    if self.header.version_tuple >= (1, 1):
        # Starting with metadata format 1.1 images can be truly identified
        # (subvariant is available), so reject any image whose UNIQUE
        # attributes collide with an already-registered image that has
        # different checksums.
        for arch_map in self.images.values():
            for registered_images in arch_map.values():
                for registered in registered_images:
                    if identify_image(registered) == identify_image(image) and registered.checksums != image.checksums:
                        raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
                                         "image {1}! This is forbidden.".format(image, registered))
    self.images.setdefault(variant, {}).setdefault(arch, set()).add(image)
constant[
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
]
if compare[name[arch] <ast.NotIn object at 0x7da2590d7190> name[productmd].common.RPM_ARCHES] begin[:]
<ast.Raise object at 0x7da18ede4730>
if compare[name[arch] in list[[<ast.Constant object at 0x7da18ede5750>, <ast.Constant object at 0x7da18ede6c20>]]] begin[:]
<ast.Raise object at 0x7da18ede7220>
if compare[name[self].header.version_tuple greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da18ede5780>, <ast.Constant object at 0x7da18ede6e00>]]] begin[:]
for taget[name[checkvar]] in starred[name[self].images] begin[:]
for taget[name[checkarch]] in starred[call[name[self].images][name[checkvar]]] begin[:]
for taget[name[curimg]] in starred[call[call[name[self].images][name[checkvar]]][name[checkarch]]] begin[:]
if <ast.BoolOp object at 0x7da18ede5bd0> begin[:]
<ast.Raise object at 0x7da18ede6a10>
call[call[call[name[self].images.setdefault, parameter[name[variant], dictionary[[], []]]].setdefault, parameter[name[arch], call[name[set], parameter[]]]].add, parameter[name[image]]] | keyword[def] identifier[add] ( identifier[self] , identifier[variant] , identifier[arch] , identifier[image] ):
literal[string]
keyword[if] identifier[arch] keyword[not] keyword[in] identifier[productmd] . identifier[common] . identifier[RPM_ARCHES] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[arch] )
keyword[if] identifier[arch] keyword[in] [ literal[string] , literal[string] ]:
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[header] . identifier[version_tuple] >=( literal[int] , literal[int] ):
keyword[for] identifier[checkvar] keyword[in] identifier[self] . identifier[images] :
keyword[for] identifier[checkarch] keyword[in] identifier[self] . identifier[images] [ identifier[checkvar] ]:
keyword[for] identifier[curimg] keyword[in] identifier[self] . identifier[images] [ identifier[checkvar] ][ identifier[checkarch] ]:
keyword[if] identifier[identify_image] ( identifier[curimg] )== identifier[identify_image] ( identifier[image] ) keyword[and] identifier[curimg] . identifier[checksums] != identifier[image] . identifier[checksums] :
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[image] , identifier[curimg] ))
identifier[self] . identifier[images] . identifier[setdefault] ( identifier[variant] ,{}). identifier[setdefault] ( identifier[arch] , identifier[set] ()). identifier[add] ( identifier[image] ) | def add(self, variant, arch, image):
"""
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
"""
if arch not in productmd.common.RPM_ARCHES:
raise ValueError('Arch not found in RPM_ARCHES: %s' % arch) # depends on [control=['if'], data=['arch']]
if arch in ['src', 'nosrc']:
raise ValueError('Source arch is not allowed. Map source files under binary arches.') # depends on [control=['if'], data=[]]
if self.header.version_tuple >= (1, 1):
# disallow adding a different image with same 'unique'
# attributes. can't do this pre-1.1 as we couldn't truly
# identify images before subvariant
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError('Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with image {1}! This is forbidden.'.format(image, curimg)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['curimg']] # depends on [control=['for'], data=['checkarch']] # depends on [control=['for'], data=['checkvar']] # depends on [control=['if'], data=[]]
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image) |
def build_circle_dict(self,
                      center_lat,
                      center_lng,
                      radius,
                      stroke_color='#FF0000',
                      stroke_opacity=0.8,
                      stroke_weight=2,
                      fill_color='#FF0000',
                      fill_opacity=0.3,
                      ):
    """ Build the parameter dictionary for the javascript Circle class.
    Sensible drawing defaults are applied when only the circle geometry
    is given; every visual parameter can still be overridden
    individually by the caller.
    Args:
        center_lat (float): The circle center latitude
        center_lng (float): The circle center longitude
        radius (float): The circle radius, in meters
        stroke_color (str): Border color in hexadecimal color notation
        stroke_opacity (float): Border opacity as a fraction; 0 means a
            fully transparent border
        stroke_weight (int): Border girth in pixels
        fill_color (str): Fill color in hexadecimal color notation
        fill_opacity (float): Fill opacity as a fraction
    """
    return {
        'stroke_color': stroke_color,
        'stroke_opacity': stroke_opacity,
        'stroke_weight': stroke_weight,
        'fill_color': fill_color,
        'fill_opacity': fill_opacity,
        'center': {'lat': center_lat, 'lng': center_lng},
        'radius': radius,
    }
constant[ Set a dictionary with the javascript class Circle parameters
This function sets a default drawing configuration if the user just
pass the rectangle bounds, but also allows to set each parameter
individually if the user wish so.
Args:
center_lat (float): The circle center latitude
center_lng (float): The circle center longitude
radius (float): The circle radius, in meters
stroke_color (str): Sets the color of the rectangle border using
hexadecimal color notation
stroke_opacity (float): Sets the opacity of the rectangle border
in percentage. If stroke_opacity = 0, the border is transparent
stroke_weight (int): Sets the stroke girth in pixels.
fill_color (str): Sets the color of the circle fill using
hexadecimal color notation
fill_opacity (float): Sets the opacity of the circle fill
]
variable[circle] assign[=] dictionary[[<ast.Constant object at 0x7da1b1ebaa40>, <ast.Constant object at 0x7da1b1ebaf50>, <ast.Constant object at 0x7da1b1ebbf10>, <ast.Constant object at 0x7da1b1ebba60>, <ast.Constant object at 0x7da1b1ebb550>, <ast.Constant object at 0x7da1b1ebbe20>, <ast.Constant object at 0x7da1b1ebb070>], [<ast.Name object at 0x7da1b1ebb610>, <ast.Name object at 0x7da1b1e985b0>, <ast.Name object at 0x7da1b1e986a0>, <ast.Name object at 0x7da1b1e987f0>, <ast.Name object at 0x7da1b1e99510>, <ast.Dict object at 0x7da1b1e98d00>, <ast.Name object at 0x7da1b1e98850>]]
return[name[circle]] | keyword[def] identifier[build_circle_dict] ( identifier[self] ,
identifier[center_lat] ,
identifier[center_lng] ,
identifier[radius] ,
identifier[stroke_color] = literal[string] ,
identifier[stroke_opacity] = literal[int] ,
identifier[stroke_weight] = literal[int] ,
identifier[fill_color] = literal[string] ,
identifier[fill_opacity] = literal[int] ,
):
literal[string]
identifier[circle] ={
literal[string] : identifier[stroke_color] ,
literal[string] : identifier[stroke_opacity] ,
literal[string] : identifier[stroke_weight] ,
literal[string] : identifier[fill_color] ,
literal[string] : identifier[fill_opacity] ,
literal[string] :{ literal[string] : identifier[center_lat] ,
literal[string] : identifier[center_lng] },
literal[string] : identifier[radius] ,
}
keyword[return] identifier[circle] | def build_circle_dict(self, center_lat, center_lng, radius, stroke_color='#FF0000', stroke_opacity=0.8, stroke_weight=2, fill_color='#FF0000', fill_opacity=0.3):
""" Set a dictionary with the javascript class Circle parameters
This function sets a default drawing configuration if the user just
pass the rectangle bounds, but also allows to set each parameter
individually if the user wish so.
Args:
center_lat (float): The circle center latitude
center_lng (float): The circle center longitude
radius (float): The circle radius, in meters
stroke_color (str): Sets the color of the rectangle border using
hexadecimal color notation
stroke_opacity (float): Sets the opacity of the rectangle border
in percentage. If stroke_opacity = 0, the border is transparent
stroke_weight (int): Sets the stroke girth in pixels.
fill_color (str): Sets the color of the circle fill using
hexadecimal color notation
fill_opacity (float): Sets the opacity of the circle fill
"""
circle = {'stroke_color': stroke_color, 'stroke_opacity': stroke_opacity, 'stroke_weight': stroke_weight, 'fill_color': fill_color, 'fill_opacity': fill_opacity, 'center': {'lat': center_lat, 'lng': center_lng}, 'radius': radius}
return circle |
def _filter(self, filename):
    """
    Return True when ``filename`` does not match the ``name_filter`` regex
    and should therefore be dropped from the list.
    When no filter is configured (``name_filter`` is None) nothing is
    filtered out.
    @param filename:
    @return:
    """
    if self.name_filter is None:
        return False
    return re.search(self.name_filter, filename) is None
constant[
return 'true' if filename doesn't match name_filter regex and should be filtered out of the list.
@param filename:
@return:
]
return[<ast.BoolOp object at 0x7da1b19d8340>] | keyword[def] identifier[_filter] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[return] identifier[self] . identifier[name_filter] keyword[is] keyword[not] keyword[None] keyword[and] identifier[re] . identifier[search] ( identifier[self] . identifier[name_filter] , identifier[filename] ) keyword[is] keyword[None] | def _filter(self, filename):
"""
return 'true' if filename doesn't match name_filter regex and should be filtered out of the list.
@param filename:
@return:
"""
return self.name_filter is not None and re.search(self.name_filter, filename) is None |
def close(self):
    """Close the policy instance and release its shared database connection.

    Idempotent in effect: the first call closes the connection and drops
    the reference; any later call (or a call on a never-opened policy)
    only logs a warning instead of failing.
    """
    self._logger.info("Closing")
    if self._conn is not None:
        self._conn.close()
        # Drop the reference so a repeated close() is detected below.
        self._conn = None
    else:
        # Fixed typo in the log message ("alredy" -> "already").
        self._logger.warning(
            "close() called, but connection policy was already closed")
constant[ Close the policy instance and its shared database connection. ]
call[name[self]._logger.info, parameter[constant[Closing]]]
if compare[name[self]._conn is_not constant[None]] begin[:]
call[name[self]._conn.close, parameter[]]
name[self]._conn assign[=] constant[None]
return[None] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
keyword[if] identifier[self] . identifier[_conn] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_conn] . identifier[close] ()
identifier[self] . identifier[_conn] = keyword[None]
keyword[else] :
identifier[self] . identifier[_logger] . identifier[warning] (
literal[string] )
keyword[return] | def close(self):
""" Close the policy instance and its shared database connection. """
self._logger.info('Closing')
if self._conn is not None:
self._conn.close()
self._conn = None # depends on [control=['if'], data=[]]
else:
self._logger.warning('close() called, but connection policy was alredy closed')
return |
def Compile(self, filter_implementation):
    """Compile the binary expression into a filter object."""
    op = self.operator.lower()
    # Map both textual and symbolic spellings onto the filter class name.
    method = {
        'and': 'AndFilter', '&&': 'AndFilter',
        'or': 'OrFilter', '||': 'OrFilter',
    }.get(op)
    if method is None:
        raise errors.ParseError(
            'Invalid binary operator {0:s}.'.format(op))
    compiled_args = [argument.Compile(filter_implementation) for argument in self.args]
    return filter_implementation.FILTERS[method](arguments=compiled_args)
constant[Compile the binary expression into a filter object.]
variable[operator] assign[=] call[name[self].operator.lower, parameter[]]
if compare[name[operator] in tuple[[<ast.Constant object at 0x7da18ede7970>, <ast.Constant object at 0x7da18ede7610>]]] begin[:]
variable[method] assign[=] constant[AndFilter]
variable[args] assign[=] <ast.ListComp object at 0x7da18ede4d60>
return[call[call[name[filter_implementation].FILTERS][name[method]], parameter[]]] | keyword[def] identifier[Compile] ( identifier[self] , identifier[filter_implementation] ):
literal[string]
identifier[operator] = identifier[self] . identifier[operator] . identifier[lower] ()
keyword[if] identifier[operator] keyword[in] ( literal[string] , literal[string] ):
identifier[method] = literal[string]
keyword[elif] identifier[operator] keyword[in] ( literal[string] , literal[string] ):
identifier[method] = literal[string]
keyword[else] :
keyword[raise] identifier[errors] . identifier[ParseError] (
literal[string] . identifier[format] ( identifier[operator] ))
identifier[args] =[ identifier[x] . identifier[Compile] ( identifier[filter_implementation] ) keyword[for] identifier[x] keyword[in] identifier[self] . identifier[args] ]
keyword[return] identifier[filter_implementation] . identifier[FILTERS] [ identifier[method] ]( identifier[arguments] = identifier[args] ) | def Compile(self, filter_implementation):
"""Compile the binary expression into a filter object."""
operator = self.operator.lower()
if operator in ('and', '&&'):
method = 'AndFilter' # depends on [control=['if'], data=[]]
elif operator in ('or', '||'):
method = 'OrFilter' # depends on [control=['if'], data=[]]
else:
raise errors.ParseError('Invalid binary operator {0:s}.'.format(operator))
args = [x.Compile(filter_implementation) for x in self.args]
return filter_implementation.FILTERS[method](arguments=args) |
def get(cls, uuid):
    """Retrieve one specific object from the server by its UUID (unique 16-character id). UUIDs
    can be found in the web browser's address bar while viewing analyses and other objects.
    Parameters
    ----------
    uuid : string
        UUID of the object to retrieve.
    Returns
    -------
    OneCodexBase | None
        The object with that UUID or None if no object could be found.
    Examples
    --------
    >>> api.Samples.get('xxxxxxxxxxxxxxxx')
    <Sample xxxxxxxxxxxxxxxx>
    """
    check_bind(cls)
    try:
        resource = cls._resource.fetch(uuid)
        if isinstance(resource, list):
            # TODO: Investigate why potion .fetch()
            # method is occassionally returning a list here...
            if len(resource) != 1:
                raise TypeError("Potion-Client error in fetching resource")
            resource = resource[0]
    except HTTPError as e:
        # A 404 means no object with this UUID exists.
        if e.response.status_code == 404:
            return None
        raise
    return cls(_resource=resource)
constant[Retrieve one specific object from the server by its UUID (unique 16-character id). UUIDs
can be found in the web browser's address bar while viewing analyses and other objects.
Parameters
----------
uuid : string
UUID of the object to retrieve.
Returns
-------
OneCodexBase | None
The object with that UUID or None if no object could be found.
Examples
--------
>>> api.Samples.get('xxxxxxxxxxxxxxxx')
<Sample xxxxxxxxxxxxxxxx>
]
call[name[check_bind], parameter[name[cls]]]
<ast.Try object at 0x7da20c992290>
return[call[name[cls], parameter[]]] | keyword[def] identifier[get] ( identifier[cls] , identifier[uuid] ):
literal[string]
identifier[check_bind] ( identifier[cls] )
keyword[try] :
identifier[resource] = identifier[cls] . identifier[_resource] . identifier[fetch] ( identifier[uuid] )
keyword[if] identifier[isinstance] ( identifier[resource] , identifier[list] ):
keyword[if] identifier[len] ( identifier[resource] )== literal[int] :
identifier[resource] = identifier[resource] [ literal[int] ]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[except] identifier[HTTPError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[response] . identifier[status_code] == literal[int] :
keyword[return] keyword[None]
keyword[else] :
keyword[raise] identifier[e]
keyword[return] identifier[cls] ( identifier[_resource] = identifier[resource] ) | def get(cls, uuid):
"""Retrieve one specific object from the server by its UUID (unique 16-character id). UUIDs
can be found in the web browser's address bar while viewing analyses and other objects.
Parameters
----------
uuid : string
UUID of the object to retrieve.
Returns
-------
OneCodexBase | None
The object with that UUID or None if no object could be found.
Examples
--------
>>> api.Samples.get('xxxxxxxxxxxxxxxx')
<Sample xxxxxxxxxxxxxxxx>
"""
check_bind(cls)
# we're just retrieving one object from its uuid
try:
resource = cls._resource.fetch(uuid)
if isinstance(resource, list):
# TODO: Investigate why potion .fetch()
# method is occassionally returning a list here...
if len(resource) == 1:
resource = resource[0] # depends on [control=['if'], data=[]]
else:
raise TypeError('Potion-Client error in fetching resource') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except HTTPError as e:
# 404 error means this doesn't exist
if e.response.status_code == 404:
return None # depends on [control=['if'], data=[]]
else:
raise e # depends on [control=['except'], data=['e']]
return cls(_resource=resource) |
def parse_lines(stream, separator=None):
    """
    Generator over a stream that yields ``(line, row)`` tuples, where
    ``row`` is the line split on ``separator`` (on whitespace when
    ``separator`` is None) with each segment run through
    ``interpret_segment``.
    :param stream:
    :param separator: (optional)
    :return: generator
    """
    # NOTE: ``unicode`` marks this as Python 2 code.
    if separator is not None:
        separator = unicode(separator)
    for raw_line in stream:
        stripped = raw_line.rstrip(u'\r\n')
        segments = [interpret_segment(segment) for segment in stripped.split(separator)]
        yield stripped, segments
constant[
Takes each line of a stream, creating a generator that yields
tuples of line, row - where row is the line split by separator
(or by whitespace if separator is None.
:param stream:
:param separator: (optional)
:return: generator
]
variable[separator] assign[=] <ast.IfExp object at 0x7da20c7cb4c0>
for taget[name[line]] in starred[name[stream]] begin[:]
variable[line] assign[=] call[name[line].rstrip, parameter[constant[
]]]
variable[row] assign[=] <ast.ListComp object at 0x7da20c7c9600>
<ast.Yield object at 0x7da18f723220> | keyword[def] identifier[parse_lines] ( identifier[stream] , identifier[separator] = keyword[None] ):
literal[string]
identifier[separator] = keyword[None] keyword[if] identifier[separator] keyword[is] keyword[None] keyword[else] identifier[unicode] ( identifier[separator] )
keyword[for] identifier[line] keyword[in] identifier[stream] :
identifier[line] = identifier[line] . identifier[rstrip] ( literal[string] )
identifier[row] =[ identifier[interpret_segment] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[line] . identifier[split] ( identifier[separator] )]
keyword[yield] identifier[line] , identifier[row] | def parse_lines(stream, separator=None):
"""
Takes each line of a stream, creating a generator that yields
tuples of line, row - where row is the line split by separator
(or by whitespace if separator is None.
:param stream:
:param separator: (optional)
:return: generator
"""
separator = None if separator is None else unicode(separator)
for line in stream:
line = line.rstrip(u'\r\n')
row = [interpret_segment(i) for i in line.split(separator)]
yield (line, row) # depends on [control=['for'], data=['line']] |
def exp2prob(expression_vector):
    """Convert an expression vector into a probability vector.

    Each entry is divided by the vector total so the result sums to 1.
    A zero-total input (e.g. all zeros, including an empty vector)
    yields an all-zero vector of the same length rather than dividing
    by zero.

    Attribute:
        expression_vector (list): List of expression values.

    Returns:
        numpy.ndarray: Normalized vector of the same length.
    """
    v = np.asarray(expression_vector)
    # Compute the total once instead of twice (once for the check, once
    # for the division).
    total = np.sum(v)
    if total == 0:
        return np.zeros(len(expression_vector))
    return v / total
constant[Convert an expression vector into a probability vector.
Attribute:
expression_vector (list): List of expression values.
]
variable[v] assign[=] call[name[np].asarray, parameter[name[expression_vector]]]
if compare[call[name[np].sum, parameter[name[v]]] equal[==] constant[0]] begin[:]
return[call[name[np].zeros, parameter[call[name[len], parameter[name[expression_vector]]]]]] | keyword[def] identifier[exp2prob] ( identifier[expression_vector] ):
literal[string]
identifier[v] = identifier[np] . identifier[asarray] ( identifier[expression_vector] )
keyword[if] identifier[np] . identifier[sum] ( identifier[v] )== literal[int] :
keyword[return] identifier[np] . identifier[zeros] ( identifier[len] ( identifier[expression_vector] ))
keyword[else] :
keyword[return] identifier[v] / identifier[np] . identifier[sum] ( identifier[v] ) | def exp2prob(expression_vector):
"""Convert an expression vector into a probability vector.
Attribute:
expression_vector (list): List of expression values.
"""
v = np.asarray(expression_vector)
if np.sum(v) == 0:
return np.zeros(len(expression_vector)) # depends on [control=['if'], data=[]]
else:
return v / np.sum(v) |
def load_class(self, classname, namespace=None):
    """
    Loads a class looking for it in each module registered. It's possible to load a class from
    specific namespace using **namespace** parameter or using classname as "namespace:classname".
    :param classname: Class name you want to load.
    :type classname: str
    :param namespace: Specific namespace where to look for class.
    :type namespace: str
    :return: Class object
    :rtype: type
    """
    # "namespace:classname" shorthand: split once and recurse with an
    # explicit namespace.
    if namespace is None and ':' in classname:
        prefix, remainder = classname.split(':', 1)
        return self.load_class(remainder, prefix)
    if namespace:
        if namespace not in self._namespaces:
            raise NoRegisteredError("Namespace '{0}' is not registered on loader.".format(namespace))
        registered = self._namespaces[namespace]
        try:
            # A namespace may be registered either as a dotted module
            # path or as an already-imported module object.
            if isinstance(registered, str):
                module = importlib.import_module(registered)
            else:
                module = registered
            return import_class(classname, module.__name__)
        except (AttributeError, ImportError):
            raise ImportError("Class '{0}' could not be loaded from namespace '{1}'.".format(classname,
                                                                                             namespace))
    return super(LoaderNamespace, self).load_class(classname)
constant[
Loads a class looking for it in each module registered. It's possible to load a class from
specific namespace using **namespace** parameter or using classname as "namespace:classname".
:param classname: Class name you want to load.
:type classname: str
:param namespace: Specific namespace where to look for class.
:type namespace: str
:return: Class object
:rtype: type
]
if <ast.BoolOp object at 0x7da1b1434a60> begin[:]
<ast.Tuple object at 0x7da1b1434bb0> assign[=] call[name[classname].split, parameter[constant[:], constant[1]]]
return[call[name[self].load_class, parameter[name[classname], name[namespace]]]]
if name[namespace] begin[:]
if compare[name[namespace] <ast.NotIn object at 0x7da2590d7190> name[self]._namespaces] begin[:]
<ast.Raise object at 0x7da1b1437ac0>
<ast.Try object at 0x7da20e956fe0>
return[call[call[name[super], parameter[name[LoaderNamespace], name[self]]].load_class, parameter[name[classname]]]] | keyword[def] identifier[load_class] ( identifier[self] , identifier[classname] , identifier[namespace] = keyword[None] ):
literal[string]
keyword[if] identifier[namespace] keyword[is] keyword[None] keyword[and] literal[string] keyword[in] identifier[classname] :
identifier[namespace] , identifier[classname] = identifier[classname] . identifier[split] ( literal[string] , literal[int] )
keyword[return] identifier[self] . identifier[load_class] ( identifier[classname] , identifier[namespace] )
keyword[if] identifier[namespace] :
keyword[if] identifier[namespace] keyword[not] keyword[in] identifier[self] . identifier[_namespaces] :
keyword[raise] identifier[NoRegisteredError] ( literal[string] . identifier[format] ( identifier[namespace] ))
keyword[try] :
identifier[module] = identifier[importlib] . identifier[import_module] ( identifier[self] . identifier[_namespaces] [ identifier[namespace] ]) keyword[if] identifier[isinstance] ( identifier[self] . identifier[_namespaces] [ identifier[namespace] ], identifier[str] ) keyword[else] identifier[self] . identifier[_namespaces] [ identifier[namespace] ]
keyword[return] identifier[import_class] ( identifier[classname] , identifier[module] . identifier[__name__] )
keyword[except] ( identifier[AttributeError] , identifier[ImportError] ):
keyword[raise] identifier[ImportError] ( literal[string] . identifier[format] ( identifier[classname] ,
identifier[namespace] ))
keyword[return] identifier[super] ( identifier[LoaderNamespace] , identifier[self] ). identifier[load_class] ( identifier[classname] ) | def load_class(self, classname, namespace=None):
"""
Loads a class looking for it in each module registered. It's possible to load a class from
specific namespace using **namespace** parameter or using classname as "namespace:classname".
:param classname: Class name you want to load.
:type classname: str
:param namespace: Specific namespace where to look for class.
:type namespace: str
:return: Class object
:rtype: type
"""
if namespace is None and ':' in classname:
(namespace, classname) = classname.split(':', 1)
return self.load_class(classname, namespace) # depends on [control=['if'], data=[]]
if namespace:
if namespace not in self._namespaces:
raise NoRegisteredError("Namespace '{0}' is not registered on loader.".format(namespace)) # depends on [control=['if'], data=['namespace']]
try:
module = importlib.import_module(self._namespaces[namespace]) if isinstance(self._namespaces[namespace], str) else self._namespaces[namespace]
return import_class(classname, module.__name__) # depends on [control=['try'], data=[]]
except (AttributeError, ImportError):
raise ImportError("Class '{0}' could not be loaded from namespace '{1}'.".format(classname, namespace)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return super(LoaderNamespace, self).load_class(classname) |
def load_data_and_model(items_as_dict, **context):
    """
    Loads every file in a dictionary {key: filename}.
    The extension (*pkl* or *keras*) determines how each file is
    loaded. A value that is not a string is assumed to be already
    loaded and is passed through unchanged.
    """
    loaded = {}
    for key, value in items_as_dict.items():
        if not isinstance(value, str):
            loaded[key] = value
            continue
        extension = os.path.splitext(value)[-1]
        if extension == ".pkl":
            with open(value, "rb") as handle:
                try:
                    payload = pickle.load(handle)
                except ImportError as exc:
                    # Models whose defining module is unavailable are
                    # skipped; anything else is a hard failure.
                    if '.model.' in value:
                        continue
                    raise ImportError("Unable to load '{0}' due to {1}".format(value, exc))
            loaded[key] = payload
        elif extension == ".keras":
            import keras.models
            loaded[key] = keras.models.load_model(value, custom_objects=context)
        else:
            loaded[key] = value
    return loaded
constant[
Loads every file in a dictionary {key: filename}.
The extension is either *pkl* and *onnx* and determines
how it it loaded. If the value is not a string,
the function assumes it was already loaded.
]
variable[res] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18bc70ee0>, <ast.Name object at 0x7da18bc73d60>]]] in starred[call[name[items_as_dict].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[str]]] begin[:]
if compare[call[call[name[os].path.splitext, parameter[name[v]]]][<ast.UnaryOp object at 0x7da1b1d4b040>] equal[==] constant[.pkl]] begin[:]
with call[name[open], parameter[name[v], constant[rb]]] begin[:]
<ast.Try object at 0x7da1b1d49900>
call[name[res]][name[k]] assign[=] name[bin]
return[name[res]] | keyword[def] identifier[load_data_and_model] ( identifier[items_as_dict] ,** identifier[context] ):
literal[string]
identifier[res] ={}
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[items_as_dict] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[str] ):
keyword[if] identifier[os] . identifier[path] . identifier[splitext] ( identifier[v] )[- literal[int] ]== literal[string] :
keyword[with] identifier[open] ( identifier[v] , literal[string] ) keyword[as] identifier[f] :
keyword[try] :
identifier[bin] = identifier[pickle] . identifier[load] ( identifier[f] )
keyword[except] identifier[ImportError] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[v] :
keyword[continue]
keyword[else] :
keyword[raise] identifier[ImportError] ( literal[string] . identifier[format] ( identifier[v] , identifier[e] ))
identifier[res] [ identifier[k] ]= identifier[bin]
keyword[elif] identifier[os] . identifier[path] . identifier[splitext] ( identifier[v] )[- literal[int] ]== literal[string] :
keyword[import] identifier[keras] . identifier[models]
identifier[res] [ identifier[k] ]= identifier[keras] . identifier[models] . identifier[load_model] ( identifier[v] , identifier[custom_objects] = identifier[context] )
keyword[else] :
identifier[res] [ identifier[k] ]= identifier[v]
keyword[else] :
identifier[res] [ identifier[k] ]= identifier[v]
keyword[return] identifier[res] | def load_data_and_model(items_as_dict, **context):
"""
Loads every file in a dictionary {key: filename}.
The extension is either *pkl* and *onnx* and determines
how it it loaded. If the value is not a string,
the function assumes it was already loaded.
"""
res = {}
for (k, v) in items_as_dict.items():
if isinstance(v, str):
if os.path.splitext(v)[-1] == '.pkl':
with open(v, 'rb') as f:
try:
bin = pickle.load(f) # depends on [control=['try'], data=[]]
except ImportError as e:
if '.model.' in v:
continue # depends on [control=['if'], data=[]]
else:
raise ImportError("Unable to load '{0}' due to {1}".format(v, e)) # depends on [control=['except'], data=['e']]
res[k] = bin # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
elif os.path.splitext(v)[-1] == '.keras':
import keras.models
res[k] = keras.models.load_model(v, custom_objects=context) # depends on [control=['if'], data=[]]
else:
res[k] = v # depends on [control=['if'], data=[]]
else:
res[k] = v # depends on [control=['for'], data=[]]
return res |
def load_config(self):
"""
Load configuration for the service
Args:
config_file: Configuration file path
"""
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle)
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError(
'Configuration is required! Missing: %s' % self.config_file
) | def function[load_config, parameter[self]]:
constant[
Load configuration for the service
Args:
config_file: Configuration file path
]
call[name[logger].debug, parameter[constant[loading config file: %s], name[self].config_file]]
if call[name[os].path.exists, parameter[name[self].config_file]] begin[:]
with call[name[open], parameter[name[self].config_file]] begin[:]
return[call[name[json].load, parameter[name[file_handle]]]]
call[name[logger].error, parameter[constant[unable to load configuration for service]]]
<ast.Raise object at 0x7da2054a6590> | keyword[def] identifier[load_config] ( identifier[self] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[config_file] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[config_file] ):
keyword[with] identifier[open] ( identifier[self] . identifier[config_file] ) keyword[as] identifier[file_handle] :
keyword[return] identifier[json] . identifier[load] ( identifier[file_handle] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] identifier[EventifyConfigError] (
literal[string] % identifier[self] . identifier[config_file]
) | def load_config(self):
"""
Load configuration for the service
Args:
config_file: Configuration file path
"""
logger.debug('loading config file: %s', self.config_file)
if os.path.exists(self.config_file):
with open(self.config_file) as file_handle:
return json.load(file_handle) # depends on [control=['with'], data=['file_handle']] # depends on [control=['if'], data=[]]
else:
logger.error('configuration file is required for eventify')
logger.error('unable to load configuration for service')
raise EventifyConfigError('Configuration is required! Missing: %s' % self.config_file) |
def _modifyInternal(self, *, sort=None, purge=False, done=None):
"""Creates a whole new database from existing one, based on given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
:done: patterns looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments.
"""
sortAll, sortLevels = sort is not None and sort or ([], {})
doneAll, doneLevels = done is not None and done or ([], {})
def _mark(v, i):
if done is None:
return v[:4]
def _mark_(index, regexp, du):
if du is None:
return v[:4]
if index is None:
for v_ in v[:3]:
if regexp is None or re.match(regexp, str(v_)):
return v[:3] + [du]
return v[:4]
if regexp is None or re.match(regexp, str(v[index])):
return v[:3] + [du]
try:
for doneLevel in doneLevels[i]:
result = _mark_(*doneLevel)
if result is not None:
return result
except KeyError:
pass
for doneAll_ in doneAll:
result = _mark_(*doneAll_)
if result is None:
return v[:4]
return result
def _modify(submodel, i):
_new = list()
for v in submodel:
if purge:
if not v[3]:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)])
else:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)])
levels = sortLevels.get(i) or sortLevels.get(str(i))
for index, reverse in levels or sortAll:
_new = sorted(_new, key=lambda e: e[index], reverse=reverse)
return _new
return _modify(self.data, 1) | def function[_modifyInternal, parameter[self]]:
constant[Creates a whole new database from existing one, based on given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
:done: patterns looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments.
]
<ast.Tuple object at 0x7da18f00cf10> assign[=] <ast.BoolOp object at 0x7da18f00d480>
<ast.Tuple object at 0x7da18f00cd60> assign[=] <ast.BoolOp object at 0x7da18f00f340>
def function[_mark, parameter[v, i]]:
if compare[name[done] is constant[None]] begin[:]
return[call[name[v]][<ast.Slice object at 0x7da18f00e470>]]
def function[_mark_, parameter[index, regexp, du]]:
if compare[name[du] is constant[None]] begin[:]
return[call[name[v]][<ast.Slice object at 0x7da18f00ead0>]]
if compare[name[index] is constant[None]] begin[:]
for taget[name[v_]] in starred[call[name[v]][<ast.Slice object at 0x7da18f00c5e0>]] begin[:]
if <ast.BoolOp object at 0x7da18f00c3d0> begin[:]
return[binary_operation[call[name[v]][<ast.Slice object at 0x7da18f00c1f0>] + list[[<ast.Name object at 0x7da18f00fc40>]]]]
return[call[name[v]][<ast.Slice object at 0x7da18f00c910>]]
if <ast.BoolOp object at 0x7da18f00fa90> begin[:]
return[binary_operation[call[name[v]][<ast.Slice object at 0x7da18f00fd90>] + list[[<ast.Name object at 0x7da18f00e8f0>]]]]
<ast.Try object at 0x7da18f00eaa0>
for taget[name[doneAll_]] in starred[name[doneAll]] begin[:]
variable[result] assign[=] call[name[_mark_], parameter[<ast.Starred object at 0x7da18f00f4c0>]]
if compare[name[result] is constant[None]] begin[:]
return[call[name[v]][<ast.Slice object at 0x7da18f00ce50>]]
return[name[result]]
def function[_modify, parameter[submodel, i]]:
variable[_new] assign[=] call[name[list], parameter[]]
for taget[name[v]] in starred[name[submodel]] begin[:]
if name[purge] begin[:]
if <ast.UnaryOp object at 0x7da18f00f2e0> begin[:]
call[name[_new].append, parameter[binary_operation[call[name[_mark], parameter[name[v], name[i]]] + list[[<ast.Call object at 0x7da18f00d420>]]]]]
variable[levels] assign[=] <ast.BoolOp object at 0x7da18f00cfd0>
for taget[tuple[[<ast.Name object at 0x7da18f00ceb0>, <ast.Name object at 0x7da18f00f6d0>]]] in starred[<ast.BoolOp object at 0x7da18f00e290>] begin[:]
variable[_new] assign[=] call[name[sorted], parameter[name[_new]]]
return[name[_new]]
return[call[name[_modify], parameter[name[self].data, constant[1]]]] | keyword[def] identifier[_modifyInternal] ( identifier[self] ,*, identifier[sort] = keyword[None] , identifier[purge] = keyword[False] , identifier[done] = keyword[None] ):
literal[string]
identifier[sortAll] , identifier[sortLevels] = identifier[sort] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sort] keyword[or] ([],{})
identifier[doneAll] , identifier[doneLevels] = identifier[done] keyword[is] keyword[not] keyword[None] keyword[and] identifier[done] keyword[or] ([],{})
keyword[def] identifier[_mark] ( identifier[v] , identifier[i] ):
keyword[if] identifier[done] keyword[is] keyword[None] :
keyword[return] identifier[v] [: literal[int] ]
keyword[def] identifier[_mark_] ( identifier[index] , identifier[regexp] , identifier[du] ):
keyword[if] identifier[du] keyword[is] keyword[None] :
keyword[return] identifier[v] [: literal[int] ]
keyword[if] identifier[index] keyword[is] keyword[None] :
keyword[for] identifier[v_] keyword[in] identifier[v] [: literal[int] ]:
keyword[if] identifier[regexp] keyword[is] keyword[None] keyword[or] identifier[re] . identifier[match] ( identifier[regexp] , identifier[str] ( identifier[v_] )):
keyword[return] identifier[v] [: literal[int] ]+[ identifier[du] ]
keyword[return] identifier[v] [: literal[int] ]
keyword[if] identifier[regexp] keyword[is] keyword[None] keyword[or] identifier[re] . identifier[match] ( identifier[regexp] , identifier[str] ( identifier[v] [ identifier[index] ])):
keyword[return] identifier[v] [: literal[int] ]+[ identifier[du] ]
keyword[try] :
keyword[for] identifier[doneLevel] keyword[in] identifier[doneLevels] [ identifier[i] ]:
identifier[result] = identifier[_mark_] (* identifier[doneLevel] )
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[result]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[for] identifier[doneAll_] keyword[in] identifier[doneAll] :
identifier[result] = identifier[_mark_] (* identifier[doneAll_] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[return] identifier[v] [: literal[int] ]
keyword[return] identifier[result]
keyword[def] identifier[_modify] ( identifier[submodel] , identifier[i] ):
identifier[_new] = identifier[list] ()
keyword[for] identifier[v] keyword[in] identifier[submodel] :
keyword[if] identifier[purge] :
keyword[if] keyword[not] identifier[v] [ literal[int] ]:
identifier[_new] . identifier[append] ( identifier[_mark] ( identifier[v] , identifier[i] )+[ identifier[_modify] ( identifier[v] [ literal[int] ], identifier[i] + literal[int] )])
keyword[else] :
identifier[_new] . identifier[append] ( identifier[_mark] ( identifier[v] , identifier[i] )+[ identifier[_modify] ( identifier[v] [ literal[int] ], identifier[i] + literal[int] )])
identifier[levels] = identifier[sortLevels] . identifier[get] ( identifier[i] ) keyword[or] identifier[sortLevels] . identifier[get] ( identifier[str] ( identifier[i] ))
keyword[for] identifier[index] , identifier[reverse] keyword[in] identifier[levels] keyword[or] identifier[sortAll] :
identifier[_new] = identifier[sorted] ( identifier[_new] , identifier[key] = keyword[lambda] identifier[e] : identifier[e] [ identifier[index] ], identifier[reverse] = identifier[reverse] )
keyword[return] identifier[_new]
keyword[return] identifier[_modify] ( identifier[self] . identifier[data] , literal[int] ) | def _modifyInternal(self, *, sort=None, purge=False, done=None):
"""Creates a whole new database from existing one, based on given
modifiers.
:sort: pattern should look like this:
([(<index>, True|False)], {<level_index>: [(<index>, True|False)]}),
where True|False indicate whether to reverse or not,
<index> are one of Model.indexes and <level_index> indicate
a number of level to sort.
Of course, the lists above may contain multiple items.
:done: patterns looks similar to :sort:, except that it has additional
<regexp> values and that True|False means to mark as done|undone.
@note: Should not be used directly. It was defined here, because
:save: decorator needs undecorated version of Model.modify.
:sort: Pattern on which to sort the database.
:purge: Whether to purge done items.
:done: Pattern on which to mark items as done/undone.
:returns: New database, modified according to supplied arguments.
"""
(sortAll, sortLevels) = sort is not None and sort or ([], {})
(doneAll, doneLevels) = done is not None and done or ([], {})
def _mark(v, i):
if done is None:
return v[:4] # depends on [control=['if'], data=[]]
def _mark_(index, regexp, du):
if du is None:
return v[:4] # depends on [control=['if'], data=[]]
if index is None:
for v_ in v[:3]:
if regexp is None or re.match(regexp, str(v_)):
return v[:3] + [du] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v_']]
return v[:4] # depends on [control=['if'], data=[]]
if regexp is None or re.match(regexp, str(v[index])):
return v[:3] + [du] # depends on [control=['if'], data=[]]
try:
for doneLevel in doneLevels[i]:
result = _mark_(*doneLevel) # depends on [control=['for'], data=['doneLevel']]
if result is not None:
return result # depends on [control=['if'], data=['result']] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
for doneAll_ in doneAll:
result = _mark_(*doneAll_) # depends on [control=['for'], data=['doneAll_']]
if result is None:
return v[:4] # depends on [control=['if'], data=[]]
return result
def _modify(submodel, i):
_new = list()
for v in submodel:
if purge:
if not v[3]:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
_new.append(_mark(v, i) + [_modify(v[4], i + 1)]) # depends on [control=['for'], data=['v']]
levels = sortLevels.get(i) or sortLevels.get(str(i))
for (index, reverse) in levels or sortAll:
_new = sorted(_new, key=lambda e: e[index], reverse=reverse) # depends on [control=['for'], data=[]]
return _new
return _modify(self.data, 1) |
def t_EQUAL(self, t):
r"\="
t.endlexpos = t.lexpos + len(t.value)
return t | def function[t_EQUAL, parameter[self, t]]:
constant[\=]
name[t].endlexpos assign[=] binary_operation[name[t].lexpos + call[name[len], parameter[name[t].value]]]
return[name[t]] | keyword[def] identifier[t_EQUAL] ( identifier[self] , identifier[t] ):
literal[string]
identifier[t] . identifier[endlexpos] = identifier[t] . identifier[lexpos] + identifier[len] ( identifier[t] . identifier[value] )
keyword[return] identifier[t] | def t_EQUAL(self, t):
"""\\="""
t.endlexpos = t.lexpos + len(t.value)
return t |
def _set_zero(self, i, j, a, b, r, s, t):
"""Let A[i, j] be zero based on Bezout's identity
[ii ij]
[ji jj] is a (k,k) minor of original 3x3 matrix.
"""
L = np.eye(3, dtype='intc')
L[i, i] = s
L[i, j] = t
L[j, i] = -b // r
L[j, j] = a // r
self._L.append(L.copy())
self._A = np.dot(L, self._A) | def function[_set_zero, parameter[self, i, j, a, b, r, s, t]]:
constant[Let A[i, j] be zero based on Bezout's identity
[ii ij]
[ji jj] is a (k,k) minor of original 3x3 matrix.
]
variable[L] assign[=] call[name[np].eye, parameter[constant[3]]]
call[name[L]][tuple[[<ast.Name object at 0x7da18fe92860>, <ast.Name object at 0x7da18fe92bc0>]]] assign[=] name[s]
call[name[L]][tuple[[<ast.Name object at 0x7da18fe90af0>, <ast.Name object at 0x7da18fe92680>]]] assign[=] name[t]
call[name[L]][tuple[[<ast.Name object at 0x7da18fe93d60>, <ast.Name object at 0x7da18fe92c80>]]] assign[=] binary_operation[<ast.UnaryOp object at 0x7da18fe91210> <ast.FloorDiv object at 0x7da2590d6bc0> name[r]]
call[name[L]][tuple[[<ast.Name object at 0x7da18fe93a60>, <ast.Name object at 0x7da18fe919c0>]]] assign[=] binary_operation[name[a] <ast.FloorDiv object at 0x7da2590d6bc0> name[r]]
call[name[self]._L.append, parameter[call[name[L].copy, parameter[]]]]
name[self]._A assign[=] call[name[np].dot, parameter[name[L], name[self]._A]] | keyword[def] identifier[_set_zero] ( identifier[self] , identifier[i] , identifier[j] , identifier[a] , identifier[b] , identifier[r] , identifier[s] , identifier[t] ):
literal[string]
identifier[L] = identifier[np] . identifier[eye] ( literal[int] , identifier[dtype] = literal[string] )
identifier[L] [ identifier[i] , identifier[i] ]= identifier[s]
identifier[L] [ identifier[i] , identifier[j] ]= identifier[t]
identifier[L] [ identifier[j] , identifier[i] ]=- identifier[b] // identifier[r]
identifier[L] [ identifier[j] , identifier[j] ]= identifier[a] // identifier[r]
identifier[self] . identifier[_L] . identifier[append] ( identifier[L] . identifier[copy] ())
identifier[self] . identifier[_A] = identifier[np] . identifier[dot] ( identifier[L] , identifier[self] . identifier[_A] ) | def _set_zero(self, i, j, a, b, r, s, t):
"""Let A[i, j] be zero based on Bezout's identity
[ii ij]
[ji jj] is a (k,k) minor of original 3x3 matrix.
"""
L = np.eye(3, dtype='intc')
L[i, i] = s
L[i, j] = t
L[j, i] = -b // r
L[j, j] = a // r
self._L.append(L.copy())
self._A = np.dot(L, self._A) |
def _get_content_type(url):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
req = Urllib2HeadRequest(url, headers={'Host': netloc})
resp = urlopen(req)
try:
if hasattr(resp, 'code') and resp.code != 200 and scheme not in ('ftp', 'ftps'):
## FIXME: doesn't handle redirects
return ''
return resp.info().get('content-type', '')
finally:
resp.close() | def function[_get_content_type, parameter[url]]:
constant[Get the Content-Type of the given url, using a HEAD request]
<ast.Tuple object at 0x7da207f9af20> assign[=] call[name[urlparse].urlsplit, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da207f98910> begin[:]
return[constant[]]
variable[req] assign[=] call[name[Urllib2HeadRequest], parameter[name[url]]]
variable[resp] assign[=] call[name[urlopen], parameter[name[req]]]
<ast.Try object at 0x7da207f9a860> | keyword[def] identifier[_get_content_type] ( identifier[url] ):
literal[string]
identifier[scheme] , identifier[netloc] , identifier[path] , identifier[query] , identifier[fragment] = identifier[urlparse] . identifier[urlsplit] ( identifier[url] )
keyword[if] keyword[not] identifier[scheme] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[return] literal[string]
identifier[req] = identifier[Urllib2HeadRequest] ( identifier[url] , identifier[headers] ={ literal[string] : identifier[netloc] })
identifier[resp] = identifier[urlopen] ( identifier[req] )
keyword[try] :
keyword[if] identifier[hasattr] ( identifier[resp] , literal[string] ) keyword[and] identifier[resp] . identifier[code] != literal[int] keyword[and] identifier[scheme] keyword[not] keyword[in] ( literal[string] , literal[string] ):
keyword[return] literal[string]
keyword[return] identifier[resp] . identifier[info] (). identifier[get] ( literal[string] , literal[string] )
keyword[finally] :
identifier[resp] . identifier[close] () | def _get_content_type(url):
"""Get the Content-Type of the given url, using a HEAD request"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return '' # depends on [control=['if'], data=[]]
req = Urllib2HeadRequest(url, headers={'Host': netloc})
resp = urlopen(req)
try:
if hasattr(resp, 'code') and resp.code != 200 and (scheme not in ('ftp', 'ftps')):
## FIXME: doesn't handle redirects
return '' # depends on [control=['if'], data=[]]
return resp.info().get('content-type', '') # depends on [control=['try'], data=[]]
finally:
resp.close() |
def find(self, item, logical=False):
"""Find files using :attr:`finders` registry. The ``item`` parameter
can be an instance of :class:`~gears.asset_attributes.AssetAttributes`
class, a path to the asset or a logical path to the asset. If ``item``
is a logical path, `logical` parameter must be set to ``True``.
Returns a tuple with :class:`~gears.asset_attributes.AssetAttributes`
instance for found file path as first item, and absolute path to this
file as second item.
If nothing is found, :class:`gears.exceptions.FileNotFound` exception
is rased.
"""
if isinstance(item, AssetAttributes):
for path in item.search_paths:
try:
return self.find(path, logical)
except FileNotFound:
continue
raise FileNotFound(item.path)
if logical:
asset_attributes = AssetAttributes(self, item)
suffixes = self.suffixes.find(asset_attributes.mimetype)
if not suffixes:
return self.find(item)
path = asset_attributes.path_without_suffix
for suffix in suffixes:
try:
return self.find(path + suffix)
except FileNotFound:
continue
else:
for finder in self.finders:
try:
absolute_path = finder.find(item)
except FileNotFound:
continue
return AssetAttributes(self, item), absolute_path
raise FileNotFound(item) | def function[find, parameter[self, item, logical]]:
constant[Find files using :attr:`finders` registry. The ``item`` parameter
can be an instance of :class:`~gears.asset_attributes.AssetAttributes`
class, a path to the asset or a logical path to the asset. If ``item``
is a logical path, `logical` parameter must be set to ``True``.
Returns a tuple with :class:`~gears.asset_attributes.AssetAttributes`
instance for found file path as first item, and absolute path to this
file as second item.
If nothing is found, :class:`gears.exceptions.FileNotFound` exception
is rased.
]
if call[name[isinstance], parameter[name[item], name[AssetAttributes]]] begin[:]
for taget[name[path]] in starred[name[item].search_paths] begin[:]
<ast.Try object at 0x7da1b0241cc0>
<ast.Raise object at 0x7da1b0241750>
if name[logical] begin[:]
variable[asset_attributes] assign[=] call[name[AssetAttributes], parameter[name[self], name[item]]]
variable[suffixes] assign[=] call[name[self].suffixes.find, parameter[name[asset_attributes].mimetype]]
if <ast.UnaryOp object at 0x7da1b02430a0> begin[:]
return[call[name[self].find, parameter[name[item]]]]
variable[path] assign[=] name[asset_attributes].path_without_suffix
for taget[name[suffix]] in starred[name[suffixes]] begin[:]
<ast.Try object at 0x7da1b0243490>
<ast.Raise object at 0x7da1b0243160> | keyword[def] identifier[find] ( identifier[self] , identifier[item] , identifier[logical] = keyword[False] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[AssetAttributes] ):
keyword[for] identifier[path] keyword[in] identifier[item] . identifier[search_paths] :
keyword[try] :
keyword[return] identifier[self] . identifier[find] ( identifier[path] , identifier[logical] )
keyword[except] identifier[FileNotFound] :
keyword[continue]
keyword[raise] identifier[FileNotFound] ( identifier[item] . identifier[path] )
keyword[if] identifier[logical] :
identifier[asset_attributes] = identifier[AssetAttributes] ( identifier[self] , identifier[item] )
identifier[suffixes] = identifier[self] . identifier[suffixes] . identifier[find] ( identifier[asset_attributes] . identifier[mimetype] )
keyword[if] keyword[not] identifier[suffixes] :
keyword[return] identifier[self] . identifier[find] ( identifier[item] )
identifier[path] = identifier[asset_attributes] . identifier[path_without_suffix]
keyword[for] identifier[suffix] keyword[in] identifier[suffixes] :
keyword[try] :
keyword[return] identifier[self] . identifier[find] ( identifier[path] + identifier[suffix] )
keyword[except] identifier[FileNotFound] :
keyword[continue]
keyword[else] :
keyword[for] identifier[finder] keyword[in] identifier[self] . identifier[finders] :
keyword[try] :
identifier[absolute_path] = identifier[finder] . identifier[find] ( identifier[item] )
keyword[except] identifier[FileNotFound] :
keyword[continue]
keyword[return] identifier[AssetAttributes] ( identifier[self] , identifier[item] ), identifier[absolute_path]
keyword[raise] identifier[FileNotFound] ( identifier[item] ) | def find(self, item, logical=False):
"""Find files using :attr:`finders` registry. The ``item`` parameter
can be an instance of :class:`~gears.asset_attributes.AssetAttributes`
class, a path to the asset or a logical path to the asset. If ``item``
is a logical path, `logical` parameter must be set to ``True``.
Returns a tuple with :class:`~gears.asset_attributes.AssetAttributes`
instance for found file path as first item, and absolute path to this
file as second item.
If nothing is found, :class:`gears.exceptions.FileNotFound` exception
is rased.
"""
if isinstance(item, AssetAttributes):
for path in item.search_paths:
try:
return self.find(path, logical) # depends on [control=['try'], data=[]]
except FileNotFound:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['path']]
raise FileNotFound(item.path) # depends on [control=['if'], data=[]]
if logical:
asset_attributes = AssetAttributes(self, item)
suffixes = self.suffixes.find(asset_attributes.mimetype)
if not suffixes:
return self.find(item) # depends on [control=['if'], data=[]]
path = asset_attributes.path_without_suffix
for suffix in suffixes:
try:
return self.find(path + suffix) # depends on [control=['try'], data=[]]
except FileNotFound:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['suffix']] # depends on [control=['if'], data=[]]
else:
for finder in self.finders:
try:
absolute_path = finder.find(item) # depends on [control=['try'], data=[]]
except FileNotFound:
continue # depends on [control=['except'], data=[]]
return (AssetAttributes(self, item), absolute_path) # depends on [control=['for'], data=['finder']]
raise FileNotFound(item) |
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.focus_changed.connect(self.main.plugin_focus_changed)
self.main.add_dockwidget(self)
# self.main.console.set_historylog(self)
self.main.console.shell.refresh.connect(self.refresh_plugin) | def function[register_plugin, parameter[self]]:
constant[Register plugin in Spyder's main window]
call[name[self].focus_changed.connect, parameter[name[self].main.plugin_focus_changed]]
call[name[self].main.add_dockwidget, parameter[name[self]]]
call[name[self].main.console.shell.refresh.connect, parameter[name[self].refresh_plugin]] | keyword[def] identifier[register_plugin] ( identifier[self] ):
literal[string]
identifier[self] . identifier[focus_changed] . identifier[connect] ( identifier[self] . identifier[main] . identifier[plugin_focus_changed] )
identifier[self] . identifier[main] . identifier[add_dockwidget] ( identifier[self] )
identifier[self] . identifier[main] . identifier[console] . identifier[shell] . identifier[refresh] . identifier[connect] ( identifier[self] . identifier[refresh_plugin] ) | def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.focus_changed.connect(self.main.plugin_focus_changed)
self.main.add_dockwidget(self) # self.main.console.set_historylog(self)
self.main.console.shell.refresh.connect(self.refresh_plugin) |
def _get_base_model(self):
"""
:return: base model from Keras based on user-supplied model name
"""
if self.model_name == 'inception_v3':
return InceptionV3(weights='imagenet', include_top=False)
elif self.model_name == 'xception':
return Xception(weights='imagenet', include_top=False)
elif self.model_name == 'vgg16':
return VGG16(weights='imagenet', include_top=False)
elif self.model_name == 'vgg19':
return VGG19(weights='imagenet', include_top=False)
elif self.model_name == 'resnet50':
return ResNet50(weights='imagenet', include_top=False)
else:
raise ValueError('Cannot find base model %s' % self.model_name) | def function[_get_base_model, parameter[self]]:
constant[
:return: base model from Keras based on user-supplied model name
]
if compare[name[self].model_name equal[==] constant[inception_v3]] begin[:]
return[call[name[InceptionV3], parameter[]]] | keyword[def] identifier[_get_base_model] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[model_name] == literal[string] :
keyword[return] identifier[InceptionV3] ( identifier[weights] = literal[string] , identifier[include_top] = keyword[False] )
keyword[elif] identifier[self] . identifier[model_name] == literal[string] :
keyword[return] identifier[Xception] ( identifier[weights] = literal[string] , identifier[include_top] = keyword[False] )
keyword[elif] identifier[self] . identifier[model_name] == literal[string] :
keyword[return] identifier[VGG16] ( identifier[weights] = literal[string] , identifier[include_top] = keyword[False] )
keyword[elif] identifier[self] . identifier[model_name] == literal[string] :
keyword[return] identifier[VGG19] ( identifier[weights] = literal[string] , identifier[include_top] = keyword[False] )
keyword[elif] identifier[self] . identifier[model_name] == literal[string] :
keyword[return] identifier[ResNet50] ( identifier[weights] = literal[string] , identifier[include_top] = keyword[False] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[model_name] ) | def _get_base_model(self):
"""
:return: base model from Keras based on user-supplied model name
"""
if self.model_name == 'inception_v3':
return InceptionV3(weights='imagenet', include_top=False) # depends on [control=['if'], data=[]]
elif self.model_name == 'xception':
return Xception(weights='imagenet', include_top=False) # depends on [control=['if'], data=[]]
elif self.model_name == 'vgg16':
return VGG16(weights='imagenet', include_top=False) # depends on [control=['if'], data=[]]
elif self.model_name == 'vgg19':
return VGG19(weights='imagenet', include_top=False) # depends on [control=['if'], data=[]]
elif self.model_name == 'resnet50':
return ResNet50(weights='imagenet', include_top=False) # depends on [control=['if'], data=[]]
else:
raise ValueError('Cannot find base model %s' % self.model_name) |
def binary(self):
"""
Get the object this function belongs to.
:return: The object this function belongs to.
"""
return self._project.loader.find_object_containing(self.addr, membership_check=False) | def function[binary, parameter[self]]:
constant[
Get the object this function belongs to.
:return: The object this function belongs to.
]
return[call[name[self]._project.loader.find_object_containing, parameter[name[self].addr]]] | keyword[def] identifier[binary] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[_project] . identifier[loader] . identifier[find_object_containing] ( identifier[self] . identifier[addr] , identifier[membership_check] = keyword[False] ) | def binary(self):
"""
Get the object this function belongs to.
:return: The object this function belongs to.
"""
return self._project.loader.find_object_containing(self.addr, membership_check=False) |
def get_course(self, courseid):
"""
:param courseid: the course id of the course
:raise InvalidNameException, CourseNotFoundException, CourseUnreadableException
:return: an object representing the course, of the type given in the constructor
"""
if not id_checker(courseid):
raise InvalidNameException("Course with invalid name: " + courseid)
if self._cache_update_needed(courseid):
self._update_cache(courseid)
return self._cache[courseid][0] | def function[get_course, parameter[self, courseid]]:
constant[
:param courseid: the course id of the course
:raise InvalidNameException, CourseNotFoundException, CourseUnreadableException
:return: an object representing the course, of the type given in the constructor
]
if <ast.UnaryOp object at 0x7da1b2344f70> begin[:]
<ast.Raise object at 0x7da1b23478b0>
if call[name[self]._cache_update_needed, parameter[name[courseid]]] begin[:]
call[name[self]._update_cache, parameter[name[courseid]]]
return[call[call[name[self]._cache][name[courseid]]][constant[0]]] | keyword[def] identifier[get_course] ( identifier[self] , identifier[courseid] ):
literal[string]
keyword[if] keyword[not] identifier[id_checker] ( identifier[courseid] ):
keyword[raise] identifier[InvalidNameException] ( literal[string] + identifier[courseid] )
keyword[if] identifier[self] . identifier[_cache_update_needed] ( identifier[courseid] ):
identifier[self] . identifier[_update_cache] ( identifier[courseid] )
keyword[return] identifier[self] . identifier[_cache] [ identifier[courseid] ][ literal[int] ] | def get_course(self, courseid):
"""
:param courseid: the course id of the course
:raise InvalidNameException, CourseNotFoundException, CourseUnreadableException
:return: an object representing the course, of the type given in the constructor
"""
if not id_checker(courseid):
raise InvalidNameException('Course with invalid name: ' + courseid) # depends on [control=['if'], data=[]]
if self._cache_update_needed(courseid):
self._update_cache(courseid) # depends on [control=['if'], data=[]]
return self._cache[courseid][0] |
def parse_nexus(self):
    """Extract newick strings (and the translation dict) from NEXUS input.

    If ``self.data`` does not look like a NEXUS file (first line is not
    ``#NEXUS``), it is left untouched.
    """
    header = self.data[0].strip().upper()
    if header != "#NEXUS":
        return
    parsed = NexusParser(self.data)
    self.data = parsed.newicks
    self.tdict = parsed.tdict
constant[get newick data from NEXUS]
if compare[call[call[call[name[self].data][constant[0]].strip, parameter[]].upper, parameter[]] equal[==] constant[#NEXUS]] begin[:]
variable[nex] assign[=] call[name[NexusParser], parameter[name[self].data]]
name[self].data assign[=] name[nex].newicks
name[self].tdict assign[=] name[nex].tdict | keyword[def] identifier[parse_nexus] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[data] [ literal[int] ]. identifier[strip] (). identifier[upper] ()== literal[string] :
identifier[nex] = identifier[NexusParser] ( identifier[self] . identifier[data] )
identifier[self] . identifier[data] = identifier[nex] . identifier[newicks]
identifier[self] . identifier[tdict] = identifier[nex] . identifier[tdict] | def parse_nexus(self):
"""get newick data from NEXUS"""
if self.data[0].strip().upper() == '#NEXUS':
nex = NexusParser(self.data)
self.data = nex.newicks
self.tdict = nex.tdict # depends on [control=['if'], data=[]] |
def get_consumer_offsets_metadata(
    kafka_client,
    group,
    topics,
    raise_on_error=True,
):
    """Collect per-partition consumer offsets together with topic watermarks.

    Refreshes the client metadata, then fetches the group's current offsets
    and the high/low watermarks for every requested topic.

    :param kafka_client: KafkaToolClient instance
    :param group: group id
    :param topics: list of topics
    :param raise_on_error: if False the method ignores missing topics and
        missing partitions. It still may fail on the request send.
    :returns: dict <topic>: [ConsumerPartitionOffsets]
    """
    # Refresh client metadata without passing the topic list, so a missing
    # topic is never auto-created.  If Kafka is momentarily unavailable,
    # retry the metadata load once.
    try:
        kafka_client.load_metadata_for_topics()
    except KafkaUnavailableError:
        kafka_client.load_metadata_for_topics()

    offsets_by_topic = get_current_consumer_offsets(
        kafka_client, group, topics, raise_on_error
    )
    marks_by_topic = get_topics_watermarks(
        kafka_client, topics, raise_on_error
    )

    metadata = {}
    for topic_name, partition_offsets in six.iteritems(offsets_by_topic):
        topic_marks = marks_by_topic[topic_name]
        metadata[topic_name] = [
            ConsumerPartitionOffsets(
                topic=topic_name,
                partition=partition_id,
                current=partition_offsets[partition_id],
                highmark=topic_marks[partition_id].highmark,
                lowmark=topic_marks[partition_id].lowmark,
            )
            for partition_id in partition_offsets
        ]
    return metadata
constant[This method:
* refreshes metadata for the kafka client
* fetches group offsets
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param group: group id
:param topics: list of topics
:param raise_on_error: if False the method ignores missing topics and
missing partitions. It still may fail on the request send.
:returns: dict <topic>: [ConsumerPartitionOffsets]
]
<ast.Try object at 0x7da1b07acdf0>
variable[group_offsets] assign[=] call[name[get_current_consumer_offsets], parameter[name[kafka_client], name[group], name[topics], name[raise_on_error]]]
variable[watermarks] assign[=] call[name[get_topics_watermarks], parameter[name[kafka_client], name[topics], name[raise_on_error]]]
variable[result] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b07ae440>, <ast.Name object at 0x7da1b07ac580>]]] in starred[call[name[six].iteritems, parameter[name[group_offsets]]]] begin[:]
call[name[result]][name[topic]] assign[=] <ast.ListComp object at 0x7da1b07aded0>
return[name[result]] | keyword[def] identifier[get_consumer_offsets_metadata] (
identifier[kafka_client] ,
identifier[group] ,
identifier[topics] ,
identifier[raise_on_error] = keyword[True] ,
):
literal[string]
keyword[try] :
identifier[kafka_client] . identifier[load_metadata_for_topics] ()
keyword[except] identifier[KafkaUnavailableError] :
identifier[kafka_client] . identifier[load_metadata_for_topics] ()
identifier[group_offsets] = identifier[get_current_consumer_offsets] (
identifier[kafka_client] , identifier[group] , identifier[topics] , identifier[raise_on_error]
)
identifier[watermarks] = identifier[get_topics_watermarks] (
identifier[kafka_client] , identifier[topics] , identifier[raise_on_error]
)
identifier[result] ={}
keyword[for] identifier[topic] , identifier[partitions] keyword[in] identifier[six] . identifier[iteritems] ( identifier[group_offsets] ):
identifier[result] [ identifier[topic] ]=[
identifier[ConsumerPartitionOffsets] (
identifier[topic] = identifier[topic] ,
identifier[partition] = identifier[partition] ,
identifier[current] = identifier[group_offsets] [ identifier[topic] ][ identifier[partition] ],
identifier[highmark] = identifier[watermarks] [ identifier[topic] ][ identifier[partition] ]. identifier[highmark] ,
identifier[lowmark] = identifier[watermarks] [ identifier[topic] ][ identifier[partition] ]. identifier[lowmark] ,
) keyword[for] identifier[partition] keyword[in] identifier[partitions]
]
keyword[return] identifier[result] | def get_consumer_offsets_metadata(kafka_client, group, topics, raise_on_error=True):
"""This method:
* refreshes metadata for the kafka client
* fetches group offsets
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param group: group id
:param topics: list of topics
:param raise_on_error: if False the method ignores missing topics and
missing partitions. It still may fail on the request send.
:returns: dict <topic>: [ConsumerPartitionOffsets]
"""
# Refresh client metadata. We do not use the topic list, because we
# don't want to accidentally create the topic if it does not exist.
# If Kafka is unavailable, let's retry loading client metadata
try:
kafka_client.load_metadata_for_topics() # depends on [control=['try'], data=[]]
except KafkaUnavailableError:
kafka_client.load_metadata_for_topics() # depends on [control=['except'], data=[]]
group_offsets = get_current_consumer_offsets(kafka_client, group, topics, raise_on_error)
watermarks = get_topics_watermarks(kafka_client, topics, raise_on_error)
result = {}
for (topic, partitions) in six.iteritems(group_offsets):
result[topic] = [ConsumerPartitionOffsets(topic=topic, partition=partition, current=group_offsets[topic][partition], highmark=watermarks[topic][partition].highmark, lowmark=watermarks[topic][partition].lowmark) for partition in partitions] # depends on [control=['for'], data=[]]
return result |
def load_session_from_file(self, username: str, filename: Optional[str] = None) -> None:
    """Load a :class:`requests.Session` for *username* and store it internally.

    When *filename* is omitted, the default session path for the user is used.

    :raises FileNotFoundError: If the file does not exist.
    """
    path = get_default_session_filename(username) if filename is None else filename
    with open(path, 'rb') as session_file:
        self.context.load_session_from_file(username, session_file)
        self.context.log("Loaded session from %s." % path)
constant[Internally stores :class:`requests.Session` object loaded from file.
If filename is None, the file with the default session path is loaded.
:raises FileNotFoundError: If the file does not exist.
]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] call[name[get_default_session_filename], parameter[name[username]]]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
call[name[self].context.load_session_from_file, parameter[name[username], name[sessionfile]]]
call[name[self].context.log, parameter[binary_operation[constant[Loaded session from %s.] <ast.Mod object at 0x7da2590d6920> name[filename]]]] | keyword[def] identifier[load_session_from_file] ( identifier[self] , identifier[username] : identifier[str] , identifier[filename] : identifier[Optional] [ identifier[str] ]= keyword[None] )-> keyword[None] :
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[get_default_session_filename] ( identifier[username] )
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[sessionfile] :
identifier[self] . identifier[context] . identifier[load_session_from_file] ( identifier[username] , identifier[sessionfile] )
identifier[self] . identifier[context] . identifier[log] ( literal[string] % identifier[filename] ) | def load_session_from_file(self, username: str, filename: Optional[str]=None) -> None:
"""Internally stores :class:`requests.Session` object loaded from file.
If filename is None, the file with the default session path is loaded.
:raises FileNotFoundError: If the file does not exist.
"""
if filename is None:
filename = get_default_session_filename(username) # depends on [control=['if'], data=['filename']]
with open(filename, 'rb') as sessionfile:
self.context.load_session_from_file(username, sessionfile)
self.context.log('Loaded session from %s.' % filename) # depends on [control=['with'], data=['sessionfile']] |
def validator_for(schema, default=_LATEST_VERSION):
    """Find the validator class suited to validating *schema*.

    The :validator:`$schema` property that should be present in the given
    schema is used to look up the matching validator class.

    Arguments:
        schema (collections.Mapping or bool):
            the schema to inspect
        default:
            value returned when the appropriate validator class cannot
            be determined.  When unprovided, the latest supported draft
            is returned instead.
    """
    # Boolean schemas carry no $schema key; True/False are the only bool
    # instances, so this is equivalent to two identity checks.
    if isinstance(schema, bool) or u"$schema" not in schema:
        return default
    schema_uri = schema[u"$schema"]
    if schema_uri not in meta_schemas:
        warn(
            (
                "The metaschema specified by $schema was not found. "
                "Using the latest draft to validate, but this will raise "
                "an error in the future."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
    return meta_schemas.get(schema_uri, _LATEST_VERSION)
constant[
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft.
]
if <ast.BoolOp object at 0x7da20c7c84c0> begin[:]
return[name[default]]
if compare[call[name[schema]][constant[$schema]] <ast.NotIn object at 0x7da2590d7190> name[meta_schemas]] begin[:]
call[name[warn], parameter[constant[The metaschema specified by $schema was not found. Using the latest draft to validate, but this will raise an error in the future.], name[DeprecationWarning]]]
return[call[name[meta_schemas].get, parameter[call[name[schema]][constant[$schema]], name[_LATEST_VERSION]]]] | keyword[def] identifier[validator_for] ( identifier[schema] , identifier[default] = identifier[_LATEST_VERSION] ):
literal[string]
keyword[if] identifier[schema] keyword[is] keyword[True] keyword[or] identifier[schema] keyword[is] keyword[False] keyword[or] literal[string] keyword[not] keyword[in] identifier[schema] :
keyword[return] identifier[default]
keyword[if] identifier[schema] [ literal[string] ] keyword[not] keyword[in] identifier[meta_schemas] :
identifier[warn] (
(
literal[string]
literal[string]
literal[string]
),
identifier[DeprecationWarning] ,
identifier[stacklevel] = literal[int] ,
)
keyword[return] identifier[meta_schemas] . identifier[get] ( identifier[schema] [ literal[string] ], identifier[_LATEST_VERSION] ) | def validator_for(schema, default=_LATEST_VERSION):
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :validator:`$schema` property that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class cannot be
determined.
If unprovided, the default is to return
the latest supported draft.
"""
if schema is True or schema is False or u'$schema' not in schema:
return default # depends on [control=['if'], data=[]]
if schema[u'$schema'] not in meta_schemas:
warn('The metaschema specified by $schema was not found. Using the latest draft to validate, but this will raise an error in the future.', DeprecationWarning, stacklevel=2) # depends on [control=['if'], data=[]]
return meta_schemas.get(schema[u'$schema'], _LATEST_VERSION) |
def get_context_data(self, **kwargs):
    """
    Inject the current category into the template context.
    """
    data = super(BaseCategoryDetail, self).get_context_data(**kwargs)
    data['category'] = self.category
    return data
constant[
Add the current category in context.
]
variable[context] assign[=] call[call[name[super], parameter[name[BaseCategoryDetail], name[self]]].get_context_data, parameter[]]
call[name[context]][constant[category]] assign[=] name[self].category
return[name[context]] | keyword[def] identifier[get_context_data] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[super] ( identifier[BaseCategoryDetail] , identifier[self] ). identifier[get_context_data] (** identifier[kwargs] )
identifier[context] [ literal[string] ]= identifier[self] . identifier[category]
keyword[return] identifier[context] | def get_context_data(self, **kwargs):
"""
Add the current category in context.
"""
context = super(BaseCategoryDetail, self).get_context_data(**kwargs)
context['category'] = self.category
return context |
def add(self, varname, result, pointer=None):
    """Store the python-typed *result* under *varname*, remembering an
    optional Ftype pointer used later to clean this object up.

    :arg result: a python-typed representation of the result.
    :arg pointer: an Ftype instance holding pointer information for
        deallocating the c-pointer, or None when no cleanup is needed.
    """
    self.result[varname] = result
    setattr(self, varname, result)
    if pointer is None:
        return
    self._finalizers[varname] = pointer
constant[Adds the specified python-typed result and an optional Ftype pointer
to use when cleaning up this object.
:arg result: a python-typed representation of the result.
:arg pointer: an instance of Ftype with pointer information for deallocating
the c-pointer.
]
call[name[self].result][name[varname]] assign[=] name[result]
call[name[setattr], parameter[name[self], name[varname], name[result]]]
if compare[name[pointer] is_not constant[None]] begin[:]
call[name[self]._finalizers][name[varname]] assign[=] name[pointer] | keyword[def] identifier[add] ( identifier[self] , identifier[varname] , identifier[result] , identifier[pointer] = keyword[None] ):
literal[string]
identifier[self] . identifier[result] [ identifier[varname] ]= identifier[result]
identifier[setattr] ( identifier[self] , identifier[varname] , identifier[result] )
keyword[if] identifier[pointer] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_finalizers] [ identifier[varname] ]= identifier[pointer] | def add(self, varname, result, pointer=None):
"""Adds the specified python-typed result and an optional Ftype pointer
to use when cleaning up this object.
:arg result: a python-typed representation of the result.
:arg pointer: an instance of Ftype with pointer information for deallocating
the c-pointer.
"""
self.result[varname] = result
setattr(self, varname, result)
if pointer is not None:
self._finalizers[varname] = pointer # depends on [control=['if'], data=['pointer']] |
def quit(self):
    """
    Quit the player, blocking until the process has died
    """
    # Nothing to do if the process handle was already released.
    if self._process is None:
        logger.debug('Quit was called after self._process had already been released')
        return
    try:
        logger.debug('Quitting OMXPlayer')
        # Signal the whole process group so child processes die too.
        pgid = os.getpgid(self._process.pid)
        os.killpg(pgid, signal.SIGTERM)
        logger.debug('SIGTERM Sent to pid: %s' % pgid)
        # Block until the monitor thread observes the process exit.
        self._process_monitor.join()
    except OSError:
        logger.error('Could not find the process to kill')
    self._process = None
constant[
Quit the player, blocking until the process has died
]
if compare[name[self]._process is constant[None]] begin[:]
call[name[logger].debug, parameter[constant[Quit was called after self._process had already been released]]]
return[None]
<ast.Try object at 0x7da20e9567d0>
name[self]._process assign[=] constant[None] | keyword[def] identifier[quit] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_process] keyword[is] keyword[None] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return]
keyword[try] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[process_group_id] = identifier[os] . identifier[getpgid] ( identifier[self] . identifier[_process] . identifier[pid] )
identifier[os] . identifier[killpg] ( identifier[process_group_id] , identifier[signal] . identifier[SIGTERM] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[process_group_id] )
identifier[self] . identifier[_process_monitor] . identifier[join] ()
keyword[except] identifier[OSError] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[self] . identifier[_process] = keyword[None] | def quit(self):
"""
Quit the player, blocking until the process has died
"""
if self._process is None:
logger.debug('Quit was called after self._process had already been released')
return # depends on [control=['if'], data=[]]
try:
logger.debug('Quitting OMXPlayer')
process_group_id = os.getpgid(self._process.pid)
os.killpg(process_group_id, signal.SIGTERM)
logger.debug('SIGTERM Sent to pid: %s' % process_group_id)
self._process_monitor.join() # depends on [control=['try'], data=[]]
except OSError:
logger.error('Could not find the process to kill') # depends on [control=['except'], data=[]]
self._process = None |
def cert(name,
         aliases=None,
         email=None,
         webroot=None,
         test_cert=False,
         renew=None,
         keysize=None,
         server=None,
         owner='root',
         group='root',
         mode='0640',
         certname=None,
         preferred_challenges=None,
         tls_sni_01_port=None,
         tls_sni_01_address=None,
         http_01_port=None,
         http_01_address=None,
         dns_plugin=None,
         dns_plugin_credentials=None):
    '''
    Obtain/renew a certificate from an ACME CA, probably Let's Encrypt.
    :param name: Common Name of the certificate (DNS name of certificate)
    :param aliases: subjectAltNames (Additional DNS names on certificate)
    :param email: e-mail address for interaction with ACME provider
    :param webroot: True or a full path to webroot. Otherwise use standalone mode
    :param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server')
    :param renew: True/'force' to force a renewal, or a window of renewal before expiry in days
    :param keysize: RSA key bits
    :param server: API endpoint to talk to
    :param owner: owner of the private key file
    :param group: group of the private key file
    :param mode: mode of the private key file
    :param certname: Name of the certificate to save
    :param preferred_challenges: A sorted, comma delimited list of the preferred
                                 challenge to use during authorization with the
                                 most preferred challenge listed first.
    :param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects
                            the port Certbot listens on. A conforming ACME server
                            will still attempt to connect on port 443.
    :param tls_sni_01_address: The address the server listens to during tls-sni-01
                               challenge.
    :param http_01_port: Port used in the http-01 challenge. This only affects
                         the port Certbot listens on. A conforming ACME server
                         will still attempt to connect on port 80.
    :param http_01_address: The address the server listens to during http-01 challenge.
    :param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare')
    :param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin
    '''
    if __opts__['test']:
        # Dry-run mode: report what would happen without touching anything.
        ret = {
            'name': name,
            'changes': {},
            'result': None
        }
        window = None
        try:
            # ``renew`` may be a renewal window in days (int or numeric
            # string), or True/'force'/None; only a number yields a window.
            window = int(renew)
        except (TypeError, ValueError):
            # Non-numeric values (None, True, 'force') are expected here;
            # anything else should not be silently swallowed.
            pass
        comment = 'Certificate {0} '.format(name)
        if not __salt__['acme.has'](name):
            comment += 'would have been obtained'
        elif __salt__['acme.needs_renewal'](name, window):
            comment += 'would have been renewed'
        else:
            comment += 'would not have been touched'
            ret['result'] = True
        ret['comment'] = comment
        return ret

    # Capture the pre-existing certificate info (if any) for change reporting.
    if not __salt__['acme.has'](name):
        old = None
    else:
        old = __salt__['acme.info'](name)

    res = __salt__['acme.cert'](
        name,
        aliases=aliases,
        email=email,
        webroot=webroot,
        certname=certname,
        test_cert=test_cert,
        renew=renew,
        keysize=keysize,
        server=server,
        owner=owner,
        group=group,
        mode=mode,
        preferred_challenges=preferred_challenges,
        tls_sni_01_port=tls_sni_01_port,
        tls_sni_01_address=tls_sni_01_address,
        http_01_port=http_01_port,
        http_01_address=http_01_address,
        dns_plugin=dns_plugin,
        dns_plugin_credentials=dns_plugin_credentials,
    )

    ret = {
        'name': name,
        # acme.cert returns result=None for "nothing to do"; only an
        # explicit False counts as failure here.
        'result': res['result'] is not False,
        'comment': res['comment']
    }

    if res['result'] is None:
        ret['changes'] = {}
    else:
        if not __salt__['acme.has'](name):
            new = None
        else:
            new = __salt__['acme.info'](name)
        ret['changes'] = {
            'old': old,
            'new': new
        }

    return ret
constant[
Obtain/renew a certificate from an ACME CA, probably Let's Encrypt.
:param name: Common Name of the certificate (DNS name of certificate)
:param aliases: subjectAltNames (Additional DNS names on certificate)
:param email: e-mail address for interaction with ACME provider
:param webroot: True or a full path to webroot. Otherwise use standalone mode
:param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server')
:param renew: True/'force' to force a renewal, or a window of renewal before expiry in days
:param keysize: RSA key bits
:param server: API endpoint to talk to
:param owner: owner of the private key file
:param group: group of the private key file
:param mode: mode of the private key file
:param certname: Name of the certificate to save
:param preferred_challenges: A sorted, comma delimited list of the preferred
challenge to use during authorization with the
most preferred challenge listed first.
:param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects
the port Certbot listens on. A conforming ACME server
will still attempt to connect on port 443.
:param tls_sni_01_address: The address the server listens to during tls-sni-01
challenge.
:param http_01_port: Port used in the http-01 challenge. This only affects
the port Certbot listens on. A conforming ACME server
will still attempt to connect on port 80.
:param https_01_address: The address the server listens to during http-01 challenge.
:param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare')
:param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin
]
if call[name[__opts__]][constant[test]] begin[:]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da204567eb0>, <ast.Constant object at 0x7da204566dd0>, <ast.Constant object at 0x7da204565960>], [<ast.Name object at 0x7da204567070>, <ast.Dict object at 0x7da204566860>, <ast.Constant object at 0x7da204564a30>]]
variable[window] assign[=] constant[None]
<ast.Try object at 0x7da204565930>
variable[comment] assign[=] call[constant[Certificate {0} ].format, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da204566fe0> begin[:]
<ast.AugAssign object at 0x7da204567ee0>
call[name[ret]][constant[comment]] assign[=] name[comment]
return[name[ret]]
if <ast.UnaryOp object at 0x7da204620730> begin[:]
variable[old] assign[=] constant[None]
variable[res] assign[=] call[call[name[__salt__]][constant[acme.cert]], parameter[name[name]]]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b208a440>, <ast.Constant object at 0x7da1b208b3d0>, <ast.Constant object at 0x7da1b208ae30>], [<ast.Name object at 0x7da1b20888e0>, <ast.Compare object at 0x7da1b208abf0>, <ast.Subscript object at 0x7da1b208a380>]]
if compare[call[name[res]][constant[result]] is constant[None]] begin[:]
call[name[ret]][constant[changes]] assign[=] dictionary[[], []]
return[name[ret]] | keyword[def] identifier[cert] ( identifier[name] ,
identifier[aliases] = keyword[None] ,
identifier[email] = keyword[None] ,
identifier[webroot] = keyword[None] ,
identifier[test_cert] = keyword[False] ,
identifier[renew] = keyword[None] ,
identifier[keysize] = keyword[None] ,
identifier[server] = keyword[None] ,
identifier[owner] = literal[string] ,
identifier[group] = literal[string] ,
identifier[mode] = literal[string] ,
identifier[certname] = keyword[None] ,
identifier[preferred_challenges] = keyword[None] ,
identifier[tls_sni_01_port] = keyword[None] ,
identifier[tls_sni_01_address] = keyword[None] ,
identifier[http_01_port] = keyword[None] ,
identifier[http_01_address] = keyword[None] ,
identifier[dns_plugin] = keyword[None] ,
identifier[dns_plugin_credentials] = keyword[None] ):
literal[string]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] ={
literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[None]
}
identifier[window] = keyword[None]
keyword[try] :
identifier[window] = identifier[int] ( identifier[renew] )
keyword[except] identifier[Exception] :
keyword[pass]
identifier[comment] = literal[string] . identifier[format] ( identifier[name] )
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[name] ):
identifier[comment] += literal[string]
keyword[elif] identifier[__salt__] [ literal[string] ]( identifier[name] , identifier[window] ):
identifier[comment] += literal[string]
keyword[else] :
identifier[comment] += literal[string]
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= identifier[comment]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[name] ):
identifier[old] = keyword[None]
keyword[else] :
identifier[old] = identifier[__salt__] [ literal[string] ]( identifier[name] )
identifier[res] = identifier[__salt__] [ literal[string] ](
identifier[name] ,
identifier[aliases] = identifier[aliases] ,
identifier[email] = identifier[email] ,
identifier[webroot] = identifier[webroot] ,
identifier[certname] = identifier[certname] ,
identifier[test_cert] = identifier[test_cert] ,
identifier[renew] = identifier[renew] ,
identifier[keysize] = identifier[keysize] ,
identifier[server] = identifier[server] ,
identifier[owner] = identifier[owner] ,
identifier[group] = identifier[group] ,
identifier[mode] = identifier[mode] ,
identifier[preferred_challenges] = identifier[preferred_challenges] ,
identifier[tls_sni_01_port] = identifier[tls_sni_01_port] ,
identifier[tls_sni_01_address] = identifier[tls_sni_01_address] ,
identifier[http_01_port] = identifier[http_01_port] ,
identifier[http_01_address] = identifier[http_01_address] ,
identifier[dns_plugin] = identifier[dns_plugin] ,
identifier[dns_plugin_credentials] = identifier[dns_plugin_credentials] ,
)
identifier[ret] ={
literal[string] : identifier[name] ,
literal[string] : identifier[res] [ literal[string] ] keyword[is] keyword[not] keyword[False] ,
literal[string] : identifier[res] [ literal[string] ]
}
keyword[if] identifier[res] [ literal[string] ] keyword[is] keyword[None] :
identifier[ret] [ literal[string] ]={}
keyword[else] :
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[name] ):
identifier[new] = keyword[None]
keyword[else] :
identifier[new] = identifier[__salt__] [ literal[string] ]( identifier[name] )
identifier[ret] [ literal[string] ]={
literal[string] : identifier[old] ,
literal[string] : identifier[new]
}
keyword[return] identifier[ret] | def cert(name, aliases=None, email=None, webroot=None, test_cert=False, renew=None, keysize=None, server=None, owner='root', group='root', mode='0640', certname=None, preferred_challenges=None, tls_sni_01_port=None, tls_sni_01_address=None, http_01_port=None, http_01_address=None, dns_plugin=None, dns_plugin_credentials=None):
"""
Obtain/renew a certificate from an ACME CA, probably Let's Encrypt.
:param name: Common Name of the certificate (DNS name of certificate)
:param aliases: subjectAltNames (Additional DNS names on certificate)
:param email: e-mail address for interaction with ACME provider
:param webroot: True or a full path to webroot. Otherwise use standalone mode
:param test_cert: Request a certificate from the Happy Hacker Fake CA (mutually exclusive with 'server')
:param renew: True/'force' to force a renewal, or a window of renewal before expiry in days
:param keysize: RSA key bits
:param server: API endpoint to talk to
:param owner: owner of the private key file
:param group: group of the private key file
:param mode: mode of the private key file
:param certname: Name of the certificate to save
:param preferred_challenges: A sorted, comma delimited list of the preferred
challenge to use during authorization with the
most preferred challenge listed first.
:param tls_sni_01_port: Port used during tls-sni-01 challenge. This only affects
the port Certbot listens on. A conforming ACME server
will still attempt to connect on port 443.
:param tls_sni_01_address: The address the server listens to during tls-sni-01
challenge.
:param http_01_port: Port used in the http-01 challenge. This only affects
the port Certbot listens on. A conforming ACME server
will still attempt to connect on port 80.
:param https_01_address: The address the server listens to during http-01 challenge.
:param dns_plugin: Name of a DNS plugin to use (currently only 'cloudflare')
:param dns_plugin_credentials: Path to the credentials file if required by the specified DNS plugin
"""
if __opts__['test']:
ret = {'name': name, 'changes': {}, 'result': None}
window = None
try:
window = int(renew) # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
comment = 'Certificate {0} '.format(name)
if not __salt__['acme.has'](name):
comment += 'would have been obtained' # depends on [control=['if'], data=[]]
elif __salt__['acme.needs_renewal'](name, window):
comment += 'would have been renewed' # depends on [control=['if'], data=[]]
else:
comment += 'would not have been touched'
ret['result'] = True
ret['comment'] = comment
return ret # depends on [control=['if'], data=[]]
if not __salt__['acme.has'](name):
old = None # depends on [control=['if'], data=[]]
else:
old = __salt__['acme.info'](name)
res = __salt__['acme.cert'](name, aliases=aliases, email=email, webroot=webroot, certname=certname, test_cert=test_cert, renew=renew, keysize=keysize, server=server, owner=owner, group=group, mode=mode, preferred_challenges=preferred_challenges, tls_sni_01_port=tls_sni_01_port, tls_sni_01_address=tls_sni_01_address, http_01_port=http_01_port, http_01_address=http_01_address, dns_plugin=dns_plugin, dns_plugin_credentials=dns_plugin_credentials)
ret = {'name': name, 'result': res['result'] is not False, 'comment': res['comment']}
if res['result'] is None:
ret['changes'] = {} # depends on [control=['if'], data=[]]
else:
if not __salt__['acme.has'](name):
new = None # depends on [control=['if'], data=[]]
else:
new = __salt__['acme.info'](name)
ret['changes'] = {'old': old, 'new': new}
return ret |
def check_rst(code, ignore):
    """Yield errors in nested RST code."""
    # The code is not on disk, so report it under a placeholder filename.
    yield from check(code, filename='<string>', ignore=ignore)
constant[Yield errors in nested RST code.]
variable[filename] assign[=] constant[<string>]
for taget[name[result]] in starred[call[name[check], parameter[name[code]]]] begin[:]
<ast.Yield object at 0x7da1b08b09a0> | keyword[def] identifier[check_rst] ( identifier[code] , identifier[ignore] ):
literal[string]
identifier[filename] = literal[string]
keyword[for] identifier[result] keyword[in] identifier[check] ( identifier[code] ,
identifier[filename] = identifier[filename] ,
identifier[ignore] = identifier[ignore] ):
keyword[yield] identifier[result] | def check_rst(code, ignore):
"""Yield errors in nested RST code."""
filename = '<string>'
for result in check(code, filename=filename, ignore=ignore):
yield result # depends on [control=['for'], data=['result']] |
def supplement_filesystem(old_size, user_cap=False):
    """Return new size accounting for the metadata."""
    if not user_cap:
        # No user capacity adjustment requested: size is unchanged.
        return int(old_size)
    if old_size <= _GiB_to_Byte(1.5):
        # Small filesystems get a fixed 3 GiB allocation.
        return int(_GiB_to_Byte(3))
    # Larger filesystems get a flat 1.5 GiB of metadata overhead.
    return int(old_size + _GiB_to_Byte(1.5))
constant[Return new size accounting for the metadata.]
variable[new_size] assign[=] name[old_size]
if name[user_cap] begin[:]
if compare[name[old_size] less_or_equal[<=] call[name[_GiB_to_Byte], parameter[constant[1.5]]]] begin[:]
variable[new_size] assign[=] call[name[_GiB_to_Byte], parameter[constant[3]]]
return[call[name[int], parameter[name[new_size]]]] | keyword[def] identifier[supplement_filesystem] ( identifier[old_size] , identifier[user_cap] = keyword[False] ):
literal[string]
identifier[new_size] = identifier[old_size]
keyword[if] identifier[user_cap] :
keyword[if] identifier[old_size] <= identifier[_GiB_to_Byte] ( literal[int] ):
identifier[new_size] = identifier[_GiB_to_Byte] ( literal[int] )
keyword[else] :
identifier[new_size] += identifier[_GiB_to_Byte] ( literal[int] )
keyword[return] identifier[int] ( identifier[new_size] ) | def supplement_filesystem(old_size, user_cap=False):
"""Return new size accounting for the metadata."""
new_size = old_size
if user_cap:
if old_size <= _GiB_to_Byte(1.5):
new_size = _GiB_to_Byte(3) # depends on [control=['if'], data=[]]
else:
new_size += _GiB_to_Byte(1.5) # depends on [control=['if'], data=[]]
return int(new_size) |
def get_body(self):
    '''Get the response Body

    :returns Body: A Body object containing the response.
    '''
    body = self._body
    if body is None:
        # Lazily dispatch the request the first time the body is needed,
        # then cache the result for subsequent calls.
        response = self._dispatcher._dispatch(self.request)
        body = self._create_body(response)
        self._body = body
    return body
return self._body | def function[get_body, parameter[self]]:
constant[Get the response Body
:returns Body: A Body object containing the response.
]
if compare[name[self]._body is constant[None]] begin[:]
variable[resp] assign[=] call[name[self]._dispatcher._dispatch, parameter[name[self].request]]
name[self]._body assign[=] call[name[self]._create_body, parameter[name[resp]]]
return[name[self]._body] | keyword[def] identifier[get_body] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_body] keyword[is] keyword[None] :
identifier[resp] = identifier[self] . identifier[_dispatcher] . identifier[_dispatch] ( identifier[self] . identifier[request] )
identifier[self] . identifier[_body] = identifier[self] . identifier[_create_body] ( identifier[resp] )
keyword[return] identifier[self] . identifier[_body] | def get_body(self):
"""Get the response Body
:returns Body: A Body object containing the response.
"""
if self._body is None:
resp = self._dispatcher._dispatch(self.request)
self._body = self._create_body(resp) # depends on [control=['if'], data=[]]
return self._body |
def is_all_field_none(self):
    """
    :rtype: bool
    """
    # True only when none of the tracked fields is populated.
    return all(
        field is None
        for field in (self._tab,
                      self._payment,
                      self._request_reference_split_the_bill)
    )
return True | def function[is_all_field_none, parameter[self]]:
constant[
:rtype: bool
]
if compare[name[self]._tab is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._payment is_not constant[None]] begin[:]
return[constant[False]]
if compare[name[self]._request_reference_split_the_bill is_not constant[None]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_all_field_none] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_tab] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_payment] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[_request_reference_split_the_bill] keyword[is] keyword[not] keyword[None] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_all_field_none(self):
"""
:rtype: bool
"""
if self._tab is not None:
return False # depends on [control=['if'], data=[]]
if self._payment is not None:
return False # depends on [control=['if'], data=[]]
if self._request_reference_split_the_bill is not None:
return False # depends on [control=['if'], data=[]]
return True |
def read_ascii_catalog(filename, format_, unit=None):
    """
    Read an ASCII catalog file using Astropy.

    This routine is used by pymoctool to load coordinates from a
    catalog file in order to generate a MOC representation.

    :param filename: path of the catalog file to read.
    :param format_: Astropy ``ascii`` reader format name.
    :param unit: optional (lon/RA, lat/Dec) unit pair; defaults to
        (hour, degree) for RA/Dec columns and (degree, degree) for
        Lon/Lat columns.
    :return: a ``SkyCoord`` holding the catalog coordinates
        (ICRS for RA/Dec, galactic for Lon/Lat).
    :raises ValueError: if neither RA/Dec nor Lon/Lat columns are found.
    """
    catalog = ascii.read(filename, format=format_)
    columns = catalog.columns
    if 'RA' in columns and 'Dec' in columns:
        if unit is None:
            unit = (hour, degree)
        coords = SkyCoord(catalog['RA'],
                          catalog['Dec'],
                          unit=unit,
                          frame='icrs')
    elif 'Lat' in columns and 'Lon' in columns:
        if unit is None:
            unit = (degree, degree)
        coords = SkyCoord(catalog['Lon'],
                          catalog['Lat'],
                          unit=unit,
                          frame='galactic')
    else:
        # ValueError is a subclass of Exception, so callers that caught
        # the previous bare Exception still work.
        raise ValueError('columns RA,Dec or Lon,Lat not found')
    return coords
constant[
Read an ASCII catalog file using Astropy.
This routine is used by pymoctool to load coordinates from a
catalog file in order to generate a MOC representation.
]
variable[catalog] assign[=] call[name[ascii].read, parameter[name[filename]]]
variable[columns] assign[=] name[catalog].columns
if <ast.BoolOp object at 0x7da1b0a2cb20> begin[:]
if compare[name[unit] is constant[None]] begin[:]
variable[unit] assign[=] tuple[[<ast.Name object at 0x7da1b0a2ce20>, <ast.Name object at 0x7da1b0a2c5b0>]]
variable[coords] assign[=] call[name[SkyCoord], parameter[call[name[catalog]][constant[RA]], call[name[catalog]][constant[Dec]]]]
return[name[coords]] | keyword[def] identifier[read_ascii_catalog] ( identifier[filename] , identifier[format_] , identifier[unit] = keyword[None] ):
literal[string]
identifier[catalog] = identifier[ascii] . identifier[read] ( identifier[filename] , identifier[format] = identifier[format_] )
identifier[columns] = identifier[catalog] . identifier[columns]
keyword[if] literal[string] keyword[in] identifier[columns] keyword[and] literal[string] keyword[in] identifier[columns] :
keyword[if] identifier[unit] keyword[is] keyword[None] :
identifier[unit] =( identifier[hour] , identifier[degree] )
identifier[coords] = identifier[SkyCoord] ( identifier[catalog] [ literal[string] ],
identifier[catalog] [ literal[string] ],
identifier[unit] = identifier[unit] ,
identifier[frame] = literal[string] )
keyword[elif] literal[string] keyword[in] identifier[columns] keyword[and] literal[string] keyword[in] identifier[columns] :
keyword[if] identifier[unit] keyword[is] keyword[None] :
identifier[unit] =( identifier[degree] , identifier[degree] )
identifier[coords] = identifier[SkyCoord] ( identifier[catalog] [ literal[string] ],
identifier[catalog] [ literal[string] ],
identifier[unit] = identifier[unit] ,
identifier[frame] = literal[string] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[coords] | def read_ascii_catalog(filename, format_, unit=None):
"""
Read an ASCII catalog file using Astropy.
This routine is used by pymoctool to load coordinates from a
catalog file in order to generate a MOC representation.
"""
catalog = ascii.read(filename, format=format_)
columns = catalog.columns
if 'RA' in columns and 'Dec' in columns:
if unit is None:
unit = (hour, degree) # depends on [control=['if'], data=['unit']]
coords = SkyCoord(catalog['RA'], catalog['Dec'], unit=unit, frame='icrs') # depends on [control=['if'], data=[]]
elif 'Lat' in columns and 'Lon' in columns:
if unit is None:
unit = (degree, degree) # depends on [control=['if'], data=['unit']]
coords = SkyCoord(catalog['Lon'], catalog['Lat'], unit=unit, frame='galactic') # depends on [control=['if'], data=[]]
else:
raise Exception('columns RA,Dec or Lon,Lat not found')
return coords |
def choose_language(self, lang, request):
    """Deal with the multiple corner case of choosing the language."""
    # ``lang`` may be None or an empty string; fall back to the request.
    lang = lang or get_language_from_request(request)
    # Reject languages that are not configured for pages.
    valid_codes = [code for code, _name in settings.PAGE_LANGUAGES]
    if lang not in valid_codes:
        raise Http404
    # Serve CMS pages in this language and make gettext follow suit.
    if lang and translation.check_for_language(lang):
        translation.activate(lang)
    return lang
constant[Deal with the multiple corner case of choosing the language.]
if <ast.UnaryOp object at 0x7da18dc9a5c0> begin[:]
variable[lang] assign[=] call[name[get_language_from_request], parameter[name[request]]]
if compare[name[lang] <ast.NotIn object at 0x7da2590d7190> <ast.ListComp object at 0x7da18dc9ab30>] begin[:]
<ast.Raise object at 0x7da18dc9bbb0>
if <ast.BoolOp object at 0x7da18f00c550> begin[:]
call[name[translation].activate, parameter[name[lang]]]
return[name[lang]] | keyword[def] identifier[choose_language] ( identifier[self] , identifier[lang] , identifier[request] ):
literal[string]
keyword[if] keyword[not] identifier[lang] :
identifier[lang] = identifier[get_language_from_request] ( identifier[request] )
keyword[if] identifier[lang] keyword[not] keyword[in] [ identifier[key] keyword[for] ( identifier[key] , identifier[value] ) keyword[in] identifier[settings] . identifier[PAGE_LANGUAGES] ]:
keyword[raise] identifier[Http404]
keyword[if] identifier[lang] keyword[and] identifier[translation] . identifier[check_for_language] ( identifier[lang] ):
identifier[translation] . identifier[activate] ( identifier[lang] )
keyword[return] identifier[lang] | def choose_language(self, lang, request):
"""Deal with the multiple corner case of choosing the language."""
# Can be an empty string or None
if not lang:
lang = get_language_from_request(request) # depends on [control=['if'], data=[]]
# Raise a 404 if the language is not in not in the list
if lang not in [key for (key, value) in settings.PAGE_LANGUAGES]:
raise Http404 # depends on [control=['if'], data=[]]
# We're going to serve CMS pages in language lang;
# make django gettext use that language too
if lang and translation.check_for_language(lang):
translation.activate(lang) # depends on [control=['if'], data=[]]
return lang |
def production_url(path, original):
    """
    For a production environment (DEBUG=False), replaces original path
    created by Django's {% static %} template tag with relevant path from
    our mapping.
    """
    mapping = _get_mapping()
    if not mapping:
        # No mapping available: fall back to development behaviour.
        return dev_url(original)
    try:
        hashed = mapping[path]
    except KeyError:
        return original
    return original.replace(path, hashed)
constant[
For a production environment (DEBUG=False), replaces original path
created by Django's {% static %} template tag with relevant path from
our mapping.
]
variable[mapping] assign[=] call[name[_get_mapping], parameter[]]
if name[mapping] begin[:]
if compare[name[path] in name[mapping]] begin[:]
return[call[name[original].replace, parameter[name[path], call[name[mapping]][name[path]]]]]
return[name[original]] | keyword[def] identifier[production_url] ( identifier[path] , identifier[original] ):
literal[string]
identifier[mapping] = identifier[_get_mapping] ()
keyword[if] identifier[mapping] :
keyword[if] identifier[path] keyword[in] identifier[mapping] :
keyword[return] identifier[original] . identifier[replace] ( identifier[path] , identifier[mapping] [ identifier[path] ])
keyword[return] identifier[original]
keyword[else] :
keyword[return] identifier[dev_url] ( identifier[original] ) | def production_url(path, original):
"""
For a production environment (DEBUG=False), replaces original path
created by Django's {% static %} template tag with relevant path from
our mapping.
"""
mapping = _get_mapping()
if mapping:
if path in mapping:
return original.replace(path, mapping[path]) # depends on [control=['if'], data=['path', 'mapping']]
return original # depends on [control=['if'], data=[]]
else:
return dev_url(original) |
def SetHasherNames(self, hasher_names_string):
    """Sets the hashers that should be enabled.

    Args:
      hasher_names_string (str): comma separated names of hashers to enable.
    """
    names = hashers_manager.HashersManager.GetHasherNamesFromString(
        hasher_names_string)
    logger.debug('Got hasher names: {0:s}'.format(', '.join(names)))
    self._hashers = hashers_manager.HashersManager.GetHashers(names)
    self._hasher_names_string = hasher_names_string
constant[Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable.
]
variable[hasher_names] assign[=] call[name[hashers_manager].HashersManager.GetHasherNamesFromString, parameter[name[hasher_names_string]]]
variable[debug_hasher_names] assign[=] call[constant[, ].join, parameter[name[hasher_names]]]
call[name[logger].debug, parameter[call[constant[Got hasher names: {0:s}].format, parameter[name[debug_hasher_names]]]]]
name[self]._hashers assign[=] call[name[hashers_manager].HashersManager.GetHashers, parameter[name[hasher_names]]]
name[self]._hasher_names_string assign[=] name[hasher_names_string] | keyword[def] identifier[SetHasherNames] ( identifier[self] , identifier[hasher_names_string] ):
literal[string]
identifier[hasher_names] = identifier[hashers_manager] . identifier[HashersManager] . identifier[GetHasherNamesFromString] (
identifier[hasher_names_string] )
identifier[debug_hasher_names] = literal[string] . identifier[join] ( identifier[hasher_names] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[debug_hasher_names] ))
identifier[self] . identifier[_hashers] = identifier[hashers_manager] . identifier[HashersManager] . identifier[GetHashers] ( identifier[hasher_names] )
identifier[self] . identifier[_hasher_names_string] = identifier[hasher_names_string] | def SetHasherNames(self, hasher_names_string):
"""Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable.
"""
hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(hasher_names_string)
debug_hasher_names = ', '.join(hasher_names)
logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))
self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
self._hasher_names_string = hasher_names_string |
def ConsultarCaracteres(self, sep="||"):
    "Return the list of issuer/receiver characters (code, description)"
    auth = {
        'token': self.Token, 'sign': self.Sign,
        'cuit': self.Cuit, }
    ret = self.client.consultarCaracteresParticipante(auth=auth)['respuesta']
    self.__analizar_errores(ret)
    # Bovine and porcine character lists come back separately; merge them.
    array = ret.get('caracter', []) + ret.get('caracterPorcino', [])
    if sep is None:
        return {it['codigo']: it['descripcion'] for it in array}
    # Build the "sep code sep description sep" template once, then fill it.
    template = "%s %%s %s %%s %s" % (sep, sep, sep)
    return [template % (it['codigo'], it['descripcion']) for it in array]
(it['codigo'], it['descripcion']) for it in array] | def function[ConsultarCaracteres, parameter[self, sep]]:
constant[Retorna listado de caracteres emisor/receptor (código, descripción)]
variable[ret] assign[=] call[call[name[self].client.consultarCaracteresParticipante, parameter[]]][constant[respuesta]]
call[name[self].__analizar_errores, parameter[name[ret]]]
variable[array] assign[=] binary_operation[call[name[ret].get, parameter[constant[caracter], list[[]]]] + call[name[ret].get, parameter[constant[caracterPorcino], list[[]]]]]
if compare[name[sep] is constant[None]] begin[:]
return[call[name[dict], parameter[<ast.ListComp object at 0x7da1b208a800>]]] | keyword[def] identifier[ConsultarCaracteres] ( identifier[self] , identifier[sep] = literal[string] ):
literal[string]
identifier[ret] = identifier[self] . identifier[client] . identifier[consultarCaracteresParticipante] (
identifier[auth] ={
literal[string] : identifier[self] . identifier[Token] , literal[string] : identifier[self] . identifier[Sign] ,
literal[string] : identifier[self] . identifier[Cuit] ,},
)[ literal[string] ]
identifier[self] . identifier[__analizar_errores] ( identifier[ret] )
identifier[array] = identifier[ret] . identifier[get] ( literal[string] ,[])+ identifier[ret] . identifier[get] ( literal[string] ,[])
keyword[if] identifier[sep] keyword[is] keyword[None] :
keyword[return] identifier[dict] ([( identifier[it] [ literal[string] ], identifier[it] [ literal[string] ]) keyword[for] identifier[it] keyword[in] identifier[array] ])
keyword[else] :
keyword[return] [( literal[string] %( identifier[sep] , identifier[sep] , identifier[sep] ))%
( identifier[it] [ literal[string] ], identifier[it] [ literal[string] ]) keyword[for] identifier[it] keyword[in] identifier[array] ] | def ConsultarCaracteres(self, sep='||'):
"""Retorna listado de caracteres emisor/receptor (código, descripción)"""
ret = self.client.consultarCaracteresParticipante(auth={'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit})['respuesta']
self.__analizar_errores(ret)
array = ret.get('caracter', []) + ret.get('caracterPorcino', [])
if sep is None:
return dict([(it['codigo'], it['descripcion']) for it in array]) # depends on [control=['if'], data=[]]
else:
return ['%s %%s %s %%s %s' % (sep, sep, sep) % (it['codigo'], it['descripcion']) for it in array] |
def delete_os_dummy_rtr_nwk(self, rtr_id, net_id, subnet_id):
    """Delete the dummy interface to the router. """
    # Detach the subnet from the router first; bail out on failure.
    result = self.os_helper.delete_intf_router(None, None, rtr_id,
                                               {subnet_id})
    if not result:
        return result
    return self.os_helper.delete_network_all_subnets(net_id)
return self.os_helper.delete_network_all_subnets(net_id) | def function[delete_os_dummy_rtr_nwk, parameter[self, rtr_id, net_id, subnet_id]]:
constant[Delete the dummy interface to the router. ]
variable[subnet_lst] assign[=] call[name[set], parameter[]]
call[name[subnet_lst].add, parameter[name[subnet_id]]]
variable[ret] assign[=] call[name[self].os_helper.delete_intf_router, parameter[constant[None], constant[None], name[rtr_id], name[subnet_lst]]]
if <ast.UnaryOp object at 0x7da2041dbf40> begin[:]
return[name[ret]]
return[call[name[self].os_helper.delete_network_all_subnets, parameter[name[net_id]]]] | keyword[def] identifier[delete_os_dummy_rtr_nwk] ( identifier[self] , identifier[rtr_id] , identifier[net_id] , identifier[subnet_id] ):
literal[string]
identifier[subnet_lst] = identifier[set] ()
identifier[subnet_lst] . identifier[add] ( identifier[subnet_id] )
identifier[ret] = identifier[self] . identifier[os_helper] . identifier[delete_intf_router] ( keyword[None] , keyword[None] , identifier[rtr_id] , identifier[subnet_lst] )
keyword[if] keyword[not] identifier[ret] :
keyword[return] identifier[ret]
keyword[return] identifier[self] . identifier[os_helper] . identifier[delete_network_all_subnets] ( identifier[net_id] ) | def delete_os_dummy_rtr_nwk(self, rtr_id, net_id, subnet_id):
"""Delete the dummy interface to the router. """
subnet_lst = set()
subnet_lst.add(subnet_id)
ret = self.os_helper.delete_intf_router(None, None, rtr_id, subnet_lst)
if not ret:
return ret # depends on [control=['if'], data=[]]
return self.os_helper.delete_network_all_subnets(net_id) |
def clean(self):
    """
    Make sure that the scope is less or equal to the scope allowed on the
    grant!
    """
    data = self.cleaned_data
    want_scope = data.get('scope') or 0
    grant = data.get('grant')
    has_scope = grant.scope if grant else 0
    # Only check if we've actually got a scope in the data
    # (read: All fields have been cleaned).
    # NOTE: use ``!=`` rather than ``is not`` -- identity comparison with an
    # int literal only works because CPython caches small ints, and emits a
    # SyntaxWarning on Python >= 3.8.
    if want_scope != 0 and not scope.check(want_scope, has_scope):
        raise OAuthValidationError({'error': 'invalid_scope'})
    return data
return data | def function[clean, parameter[self]]:
constant[
Make sure that the scope is less or equal to the scope allowed on the
grant!
]
variable[data] assign[=] name[self].cleaned_data
variable[want_scope] assign[=] <ast.BoolOp object at 0x7da18dc07cd0>
variable[grant] assign[=] call[name[data].get, parameter[constant[grant]]]
variable[has_scope] assign[=] <ast.IfExp object at 0x7da18dc07b20>
if <ast.BoolOp object at 0x7da18dc05090> begin[:]
<ast.Raise object at 0x7da18dc065c0>
return[name[data]] | keyword[def] identifier[clean] ( identifier[self] ):
literal[string]
identifier[data] = identifier[self] . identifier[cleaned_data]
identifier[want_scope] = identifier[data] . identifier[get] ( literal[string] ) keyword[or] literal[int]
identifier[grant] = identifier[data] . identifier[get] ( literal[string] )
identifier[has_scope] = identifier[grant] . identifier[scope] keyword[if] identifier[grant] keyword[else] literal[int]
keyword[if] identifier[want_scope] keyword[is] keyword[not] literal[int] keyword[and] keyword[not] identifier[scope] . identifier[check] ( identifier[want_scope] , identifier[has_scope] ):
keyword[raise] identifier[OAuthValidationError] ({ literal[string] : literal[string] })
keyword[return] identifier[data] | def clean(self):
"""
Make sure that the scope is less or equal to the scope allowed on the
grant!
"""
data = self.cleaned_data
want_scope = data.get('scope') or 0
grant = data.get('grant')
has_scope = grant.scope if grant else 0
# Only check if we've actually got a scope in the data
# (read: All fields have been cleaned)
if want_scope is not 0 and (not scope.check(want_scope, has_scope)):
raise OAuthValidationError({'error': 'invalid_scope'}) # depends on [control=['if'], data=[]]
return data |
def resume_follow(self, index, body=None, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html>`_

    :arg index: The name of the follow index to resume following.
    :arg body: The name of the leader index and other optional ccr related
        parameters
    """
    # An empty/missing index would produce a malformed URL; reject it early.
    if index in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'index'.")
    endpoint = _make_path(index, "_ccr", "resume_follow")
    return self.transport.perform_request(
        "POST", endpoint, params=params, body=body
    )
) | def function[resume_follow, parameter[self, index, body, params]]:
constant[
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html>`_
:arg index: The name of the follow index to resume following.
:arg body: The name of the leader index and other optional ccr related
parameters
]
if compare[name[index] in name[SKIP_IN_PATH]] begin[:]
<ast.Raise object at 0x7da1b21e0880>
return[call[name[self].transport.perform_request, parameter[constant[POST], call[name[_make_path], parameter[name[index], constant[_ccr], constant[resume_follow]]]]]] | keyword[def] identifier[resume_follow] ( identifier[self] , identifier[index] , identifier[body] = keyword[None] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[index] keyword[in] identifier[SKIP_IN_PATH] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , identifier[_make_path] ( identifier[index] , literal[string] , literal[string] ), identifier[params] = identifier[params] , identifier[body] = identifier[body]
) | def resume_follow(self, index, body=None, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-post-resume-follow.html>`_
:arg index: The name of the follow index to resume following.
:arg body: The name of the leader index and other optional ccr related
parameters
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.") # depends on [control=['if'], data=[]]
return self.transport.perform_request('POST', _make_path(index, '_ccr', 'resume_follow'), params=params, body=body) |
def parse(self, root):
    """Returns a dictionary of values extracted from the root of the
    specified XML file. It is assumed that the file is an input/output
    file to be converted into plaintext. As such the file should only
    specify a single version number."""
    # Use the first element in the versions list since there should only
    # be one for an input/output file.
    version = _get_xml_version(root)[0]
    entries = self.versions[version].entries
    # Each entry is either a line or a group; both expose a parse() method
    # that returns the extracted values for the matching child tag.
    return {
        child.tag: entries[child.tag].parse(child)
        for child in root
        if child.tag in entries
    }
return result | def function[parse, parameter[self, root]]:
constant[Returns a dictionary of values extracted from the root of the
specified XML file. It is assumed that the file is an input/output
file to be converted into plaintext. As such the file should only
specify a single version number.]
variable[v] assign[=] call[call[name[_get_xml_version], parameter[name[root]]]][constant[0]]
variable[result] assign[=] dictionary[[], []]
for taget[name[child]] in starred[name[root]] begin[:]
if compare[name[child].tag in call[name[self].versions][name[v]].entries] begin[:]
variable[entry] assign[=] call[call[name[self].versions][name[v]].entries][name[child].tag]
call[name[result]][name[child].tag] assign[=] call[name[entry].parse, parameter[name[child]]]
return[name[result]] | keyword[def] identifier[parse] ( identifier[self] , identifier[root] ):
literal[string]
identifier[v] = identifier[_get_xml_version] ( identifier[root] )[ literal[int] ]
identifier[result] ={}
keyword[for] identifier[child] keyword[in] identifier[root] :
keyword[if] identifier[child] . identifier[tag] keyword[in] identifier[self] . identifier[versions] [ identifier[v] ]. identifier[entries] :
identifier[entry] = identifier[self] . identifier[versions] [ identifier[v] ]. identifier[entries] [ identifier[child] . identifier[tag] ]
identifier[result] [ identifier[child] . identifier[tag] ]= identifier[entry] . identifier[parse] ( identifier[child] )
keyword[return] identifier[result] | def parse(self, root):
"""Returns a dictionary of values extracted from the root of the
specified XML file. It is assumed that the file is an input/output
file to be converted into plaintext. As such the file should only
specify a single version number."""
#Use the first element in the versions list since there should only be one.
v = _get_xml_version(root)[0]
result = {}
for child in root:
if child.tag in self.versions[v].entries:
entry = self.versions[v].entries[child.tag]
#Entry can be either a line or a group. Both objects have a parse
#method that returns a list of values. In the line's case, the
#list is the values from that line. For the group, it is a list
#of dictionaries, a dictionary for each tag name.
result[child.tag] = entry.parse(child) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']]
return result |
def toStringProto(self, inObject, proto):
    """
    Wraps the object in a list, and then defers to ``amp.AmpList``.
    """
    wrapped = [inObject]
    return amp.AmpList.toStringProto(self, wrapped, proto)
return amp.AmpList.toStringProto(self, [inObject], proto) | def function[toStringProto, parameter[self, inObject, proto]]:
constant[
Wraps the object in a list, and then defers to ``amp.AmpList``.
]
return[call[name[amp].AmpList.toStringProto, parameter[name[self], list[[<ast.Name object at 0x7da18fe92e30>]], name[proto]]]] | keyword[def] identifier[toStringProto] ( identifier[self] , identifier[inObject] , identifier[proto] ):
literal[string]
keyword[return] identifier[amp] . identifier[AmpList] . identifier[toStringProto] ( identifier[self] ,[ identifier[inObject] ], identifier[proto] ) | def toStringProto(self, inObject, proto):
"""
Wraps the object in a list, and then defers to ``amp.AmpList``.
"""
return amp.AmpList.toStringProto(self, [inObject], proto) |
def p_assignment_expr(self, p):
    """
    assignment_expr \
    : conditional_expr
    | left_hand_side_expr assignment_operator assignment_expr
    """
    if len(p) > 2:
        # left_hand_side_expr assignment_operator assignment_expr
        p[0] = ast.Assign(left=p[1], op=p[2], right=p[3])
    else:
        # Single conditional_expr: pass the parsed value through unchanged.
        p[0] = p[1]
p[0] = ast.Assign(left=p[1], op=p[2], right=p[3]) | def function[p_assignment_expr, parameter[self, p]]:
constant[
assignment_expr : conditional_expr
| left_hand_side_expr assignment_operator assignment_expr
]
if compare[call[name[len], parameter[name[p]]] equal[==] constant[2]] begin[:]
call[name[p]][constant[0]] assign[=] call[name[p]][constant[1]] | keyword[def] identifier[p_assignment_expr] ( identifier[self] , identifier[p] ):
literal[string]
keyword[if] identifier[len] ( identifier[p] )== literal[int] :
identifier[p] [ literal[int] ]= identifier[p] [ literal[int] ]
keyword[else] :
identifier[p] [ literal[int] ]= identifier[ast] . identifier[Assign] ( identifier[left] = identifier[p] [ literal[int] ], identifier[op] = identifier[p] [ literal[int] ], identifier[right] = identifier[p] [ literal[int] ]) | def p_assignment_expr(self, p):
"""
assignment_expr : conditional_expr
| left_hand_side_expr assignment_operator assignment_expr
"""
if len(p) == 2:
p[0] = p[1] # depends on [control=['if'], data=[]]
else:
p[0] = ast.Assign(left=p[1], op=p[2], right=p[3]) |
def from_edf(fname):
    """
    DataFrame constructor to open XBT EDF ASCII format.

    Parameters
    ----------
    fname : str or path-like
        Path to the EDF file (``_read_file`` handles opening, including
        compressed variants).

    Returns
    -------
    pandas.DataFrame
        Cast data indexed by depth, with ``lon``, ``lat``, ``name``,
        ``header`` and ``serial`` attached via ``_metadata``.

    Raises
    ------
    ValueError
        If the ``// Data`` section marker is not found in the file.

    Examples
    --------
    >>> from pathlib import Path
    >>> import ctd
    >>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
    >>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
    >>> ax = cast['temperature'].plot_cast()

    """
    f = _read_file(fname)
    # Defaults guard against headers that omit these fields; previously a
    # file without a "Serial Number"/"Latitude"/"Longitude" line raised
    # NameError when building the metadata dict below.
    serial, lat, lon = None, None, None
    skiprows = None
    header, names = [], []
    for k, line in enumerate(f.readlines()):
        line = line.strip()
        if line.startswith("Serial Number"):
            serial = line.strip().split(":")[1].strip()
        elif line.startswith("Latitude"):
            try:
                hemisphere = line[-1]
                lat = line.strip(hemisphere).split(":")[1].strip()
                lat = np.float_(lat.split())
                # Degrees + decimal minutes; south latitudes are negative.
                if hemisphere == "S":
                    lat = -(lat[0] + lat[1] / 60.0)
                elif hemisphere == "N":
                    lat = lat[0] + lat[1] / 60.0
            except (IndexError, ValueError):
                lat = None
        elif line.startswith("Longitude"):
            try:
                hemisphere = line[-1]
                lon = line.strip(hemisphere).split(":")[1].strip()
                lon = np.float_(lon.split())
                # Degrees + decimal minutes; west longitudes are negative.
                if hemisphere == "W":
                    lon = -(lon[0] + lon[1] / 60.0)
                elif hemisphere == "E":
                    lon = lon[0] + lon[1] / 60.0
            except (IndexError, ValueError):
                lon = None
        else:
            header.append(line)
            if line.startswith("Field"):
                # "Field N : name (unit)" -> first token of the value part
                # becomes the column name.
                col, unit = [l.strip().lower() for l in line.split(":")]
                names.append(unit.split()[0])
        if line == "// Data":
            skiprows = k + 1
            break
    if skiprows is None:
        # Fail loudly instead of silently parsing header lines as data
        # (previously this surfaced as an obscure NameError).
        raise ValueError(
            "Could not find the '// Data' marker in {0!r}.".format(fname)
        )
    f.seek(0)
    df = pd.read_csv(
        f,
        header=None,
        index_col=None,
        names=names,
        skiprows=skiprows,
        delim_whitespace=True,
    )
    f.close()
    df.set_index("depth", drop=True, inplace=True)
    df.index.name = "Depth [m]"
    name = _basename(fname)[1]
    metadata = {
        "lon": lon,
        "lat": lat,
        "name": str(name),
        "header": "\n".join(header),
        "serial": serial,
    }
    setattr(df, "_metadata", metadata)
    return df
constant[
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
]
variable[f] assign[=] call[name[_read_file], parameter[name[fname]]]
<ast.Tuple object at 0x7da1b06736d0> assign[=] tuple[[<ast.List object at 0x7da1b0673730>, <ast.List object at 0x7da1b0673910>]]
for taget[tuple[[<ast.Name object at 0x7da1b0405870>, <ast.Name object at 0x7da1b04053f0>]]] in starred[call[name[enumerate], parameter[call[name[f].readlines, parameter[]]]]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if call[name[line].startswith, parameter[constant[Serial Number]]] begin[:]
variable[serial] assign[=] call[call[call[call[name[line].strip, parameter[]].split, parameter[constant[:]]]][constant[1]].strip, parameter[]]
if compare[name[line] equal[==] constant[// Data]] begin[:]
variable[skiprows] assign[=] binary_operation[name[k] + constant[1]]
break
call[name[f].seek, parameter[constant[0]]]
variable[df] assign[=] call[name[pd].read_csv, parameter[name[f]]]
call[name[f].close, parameter[]]
call[name[df].set_index, parameter[constant[depth]]]
name[df].index.name assign[=] constant[Depth [m]]
variable[name] assign[=] call[call[name[_basename], parameter[name[fname]]]][constant[1]]
variable[metadata] assign[=] dictionary[[<ast.Constant object at 0x7da1b04efd30>, <ast.Constant object at 0x7da1b04eeec0>, <ast.Constant object at 0x7da1b04edd80>, <ast.Constant object at 0x7da1b04efa90>, <ast.Constant object at 0x7da1b04edde0>], [<ast.Name object at 0x7da1b04edf90>, <ast.Name object at 0x7da1b04efe20>, <ast.Call object at 0x7da1b04eed40>, <ast.Call object at 0x7da1b04efac0>, <ast.Name object at 0x7da1b04ec610>]]
call[name[setattr], parameter[name[df], constant[_metadata], name[metadata]]]
return[name[df]] | keyword[def] identifier[from_edf] ( identifier[fname] ):
literal[string]
identifier[f] = identifier[_read_file] ( identifier[fname] )
identifier[header] , identifier[names] =[],[]
keyword[for] identifier[k] , identifier[line] keyword[in] identifier[enumerate] ( identifier[f] . identifier[readlines] ()):
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[serial] = identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[try] :
identifier[hemisphere] = identifier[line] [- literal[int] ]
identifier[lat] = identifier[line] . identifier[strip] ( identifier[hemisphere] ). identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
identifier[lat] = identifier[np] . identifier[float_] ( identifier[lat] . identifier[split] ())
keyword[if] identifier[hemisphere] == literal[string] :
identifier[lat] =-( identifier[lat] [ literal[int] ]+ identifier[lat] [ literal[int] ]/ literal[int] )
keyword[elif] identifier[hemisphere] == literal[string] :
identifier[lat] = identifier[lat] [ literal[int] ]+ identifier[lat] [ literal[int] ]/ literal[int]
keyword[except] ( identifier[IndexError] , identifier[ValueError] ):
identifier[lat] = keyword[None]
keyword[elif] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[try] :
identifier[hemisphere] = identifier[line] [- literal[int] ]
identifier[lon] = identifier[line] . identifier[strip] ( identifier[hemisphere] ). identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
identifier[lon] = identifier[np] . identifier[float_] ( identifier[lon] . identifier[split] ())
keyword[if] identifier[hemisphere] == literal[string] :
identifier[lon] =-( identifier[lon] [ literal[int] ]+ identifier[lon] [ literal[int] ]/ literal[int] )
keyword[elif] identifier[hemisphere] == literal[string] :
identifier[lon] = identifier[lon] [ literal[int] ]+ identifier[lon] [ literal[int] ]/ literal[int]
keyword[except] ( identifier[IndexError] , identifier[ValueError] ):
identifier[lon] = keyword[None]
keyword[else] :
identifier[header] . identifier[append] ( identifier[line] )
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[col] , identifier[unit] =[ identifier[l] . identifier[strip] (). identifier[lower] () keyword[for] identifier[l] keyword[in] identifier[line] . identifier[split] ( literal[string] )]
identifier[names] . identifier[append] ( identifier[unit] . identifier[split] ()[ literal[int] ])
keyword[if] identifier[line] == literal[string] :
identifier[skiprows] = identifier[k] + literal[int]
keyword[break]
identifier[f] . identifier[seek] ( literal[int] )
identifier[df] = identifier[pd] . identifier[read_csv] (
identifier[f] ,
identifier[header] = keyword[None] ,
identifier[index_col] = keyword[None] ,
identifier[names] = identifier[names] ,
identifier[skiprows] = identifier[skiprows] ,
identifier[delim_whitespace] = keyword[True] ,
)
identifier[f] . identifier[close] ()
identifier[df] . identifier[set_index] ( literal[string] , identifier[drop] = keyword[True] , identifier[inplace] = keyword[True] )
identifier[df] . identifier[index] . identifier[name] = literal[string]
identifier[name] = identifier[_basename] ( identifier[fname] )[ literal[int] ]
identifier[metadata] ={
literal[string] : identifier[lon] ,
literal[string] : identifier[lat] ,
literal[string] : identifier[str] ( identifier[name] ),
literal[string] : literal[string] . identifier[join] ( identifier[header] ),
literal[string] : identifier[serial] ,
}
identifier[setattr] ( identifier[df] , literal[string] , identifier[metadata] )
keyword[return] identifier[df] | def from_edf(fname):
"""
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
"""
f = _read_file(fname)
(header, names) = ([], [])
for (k, line) in enumerate(f.readlines()):
line = line.strip()
if line.startswith('Serial Number'):
serial = line.strip().split(':')[1].strip() # depends on [control=['if'], data=[]]
elif line.startswith('Latitude'):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(':')[1].strip()
lat = np.float_(lat.split())
if hemisphere == 'S':
lat = -(lat[0] + lat[1] / 60.0) # depends on [control=['if'], data=[]]
elif hemisphere == 'N':
lat = lat[0] + lat[1] / 60.0 # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (IndexError, ValueError):
lat = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif line.startswith('Longitude'):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(':')[1].strip()
lon = np.float_(lon.split())
if hemisphere == 'W':
lon = -(lon[0] + lon[1] / 60.0) # depends on [control=['if'], data=[]]
elif hemisphere == 'E':
lon = lon[0] + lon[1] / 60.0 # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (IndexError, ValueError):
lon = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
header.append(line)
if line.startswith('Field'):
(col, unit) = [l.strip().lower() for l in line.split(':')]
names.append(unit.split()[0]) # depends on [control=['if'], data=[]]
if line == '// Data':
skiprows = k + 1
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
f.seek(0)
df = pd.read_csv(f, header=None, index_col=None, names=names, skiprows=skiprows, delim_whitespace=True)
f.close()
df.set_index('depth', drop=True, inplace=True)
df.index.name = 'Depth [m]'
name = _basename(fname)[1]
metadata = {'lon': lon, 'lat': lat, 'name': str(name), 'header': '\n'.join(header), 'serial': serial}
setattr(df, '_metadata', metadata)
return df |
def raise_204(instance):
    """Abort the current request with a 204 (No Content) response code.

    The response body is cleared out, as a 204 must carry no content.

    :param instance: Resource instance (used to access the response)
    :type instance: :class:`webob.resource.Resource`
    :raises: :class:`webob.exceptions.ResponseException` of status 204
    """
    response = instance.response
    response.status = 204
    response.body = ''
    response.body_raw = None
    raise ResponseException(response)
constant[Abort the current request with a 204 (No Content) response code. Clears
out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 204
]
name[instance].response.status assign[=] constant[204]
name[instance].response.body assign[=] constant[]
name[instance].response.body_raw assign[=] constant[None]
<ast.Raise object at 0x7da1b229a680> | keyword[def] identifier[raise_204] ( identifier[instance] ):
literal[string]
identifier[instance] . identifier[response] . identifier[status] = literal[int]
identifier[instance] . identifier[response] . identifier[body] = literal[string]
identifier[instance] . identifier[response] . identifier[body_raw] = keyword[None]
keyword[raise] identifier[ResponseException] ( identifier[instance] . identifier[response] ) | def raise_204(instance):
"""Abort the current request with a 204 (No Content) response code. Clears
out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 204
"""
instance.response.status = 204
instance.response.body = ''
instance.response.body_raw = None
raise ResponseException(instance.response) |
def supported_tasks(self, lang=None):
    """Languages that are covered by a specific task.

    Args:
        lang (string): Language code name.
    """
    if not lang:
        # No language given: list task names across all task collections.
        return [
            coll.name.split()[0]
            for coll in self.collections()
            if Downloader.TASK_PREFIX in coll.id
        ]
    packages = self.get_collection(lang=lang).packages
    return [pkg.id.split('.')[0] for pkg in packages]
constant[Languages that are covered by a specific task.
Args:
lang (string): Language code name.
]
if name[lang] begin[:]
variable[collection] assign[=] call[name[self].get_collection, parameter[]]
return[<ast.ListComp object at 0x7da20c6ab7c0>] | keyword[def] identifier[supported_tasks] ( identifier[self] , identifier[lang] = keyword[None] ):
literal[string]
keyword[if] identifier[lang] :
identifier[collection] = identifier[self] . identifier[get_collection] ( identifier[lang] = identifier[lang] )
keyword[return] [ identifier[x] . identifier[id] . identifier[split] ( literal[string] )[ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[collection] . identifier[packages] ]
keyword[else] :
keyword[return] [ identifier[x] . identifier[name] . identifier[split] ()[ literal[int] ] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[collections] () keyword[if] identifier[Downloader] . identifier[TASK_PREFIX] keyword[in] identifier[x] . identifier[id] ] | def supported_tasks(self, lang=None):
"""Languages that are covered by a specific task.
Args:
lang (string): Language code name.
"""
if lang:
collection = self.get_collection(lang=lang)
return [x.id.split('.')[0] for x in collection.packages] # depends on [control=['if'], data=[]]
else:
return [x.name.split()[0] for x in self.collections() if Downloader.TASK_PREFIX in x.id] |
def cv(data, channels=None):
    """
    Calculate the Coeff. of Variation of the events in an FCSData object.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data where N is the number of events and D is
        the number of parameters (aka channels).
    channels : int or str or list of int or list of str, optional
        Channels on which to calculate the statistic. If None, use all
        channels.

    Returns
    -------
    float or numpy array
        The Coefficient of Variation of the events in the specified
        channels of `data`.

    Notes
    -----
    The Coefficient of Variation (CV) of a dataset is defined as the
    standard deviation divided by the mean of such dataset.

    """
    # Restrict to the requested channels, if any.
    selected = data if channels is None else data[:, channels]
    # CV = population standard deviation / mean, per channel.
    return np.std(selected, axis=0) / np.mean(selected, axis=0)
constant[
Calculate the Coeff. of Variation of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Coefficient of Variation of the events in the specified
channels of `data`.
Notes
-----
The Coefficient of Variation (CV) of a dataset is defined as the
standard deviation divided by the mean of such dataset.
]
if compare[name[channels] is constant[None]] begin[:]
variable[data_stats] assign[=] name[data]
return[binary_operation[call[name[np].std, parameter[name[data_stats]]] / call[name[np].mean, parameter[name[data_stats]]]]] | keyword[def] identifier[cv] ( identifier[data] , identifier[channels] = keyword[None] ):
literal[string]
keyword[if] identifier[channels] keyword[is] keyword[None] :
identifier[data_stats] = identifier[data]
keyword[else] :
identifier[data_stats] = identifier[data] [:, identifier[channels] ]
keyword[return] identifier[np] . identifier[std] ( identifier[data_stats] , identifier[axis] = literal[int] )/ identifier[np] . identifier[mean] ( identifier[data_stats] , identifier[axis] = literal[int] ) | def cv(data, channels=None):
"""
Calculate the Coeff. of Variation of the events in an FCSData object.
Parameters
----------
data : FCSData or numpy array
NxD flow cytometry data where N is the number of events and D is
the number of parameters (aka channels).
channels : int or str or list of int or list of str, optional
Channels on which to calculate the statistic. If None, use all
channels.
Returns
-------
float or numpy array
The Coefficient of Variation of the events in the specified
channels of `data`.
Notes
-----
The Coefficient of Variation (CV) of a dataset is defined as the
standard deviation divided by the mean of such dataset.
"""
# Slice data to take statistics from
if channels is None:
data_stats = data # depends on [control=['if'], data=[]]
else:
data_stats = data[:, channels]
# Calculate and return statistic
return np.std(data_stats, axis=0) / np.mean(data_stats, axis=0) |
def has_files(self):
    """stub"""
    # The key can be missing entirely: file record types don't seem to be
    # implemented correctly for raw edx Question objects, so guard for it.
    my_map = self.my_osid_object._my_map
    return 'fileIds' in my_map and bool(my_map['fileIds'])
constant[stub]
if compare[constant[fileIds] <ast.NotIn object at 0x7da2590d7190> name[self].my_osid_object._my_map] begin[:]
return[constant[False]]
return[call[name[bool], parameter[call[name[self].my_osid_object._my_map][constant[fileIds]]]]] | keyword[def] identifier[has_files] ( identifier[self] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[my_osid_object] . identifier[_my_map] :
keyword[return] keyword[False]
keyword[return] identifier[bool] ( identifier[self] . identifier[my_osid_object] . identifier[_my_map] [ literal[string] ]) | def has_files(self):
"""stub"""
# I had to add the following check because file record types
# don't seem to be implemented
# correctly for raw edx Question objects
if 'fileIds' not in self.my_osid_object._my_map:
return False # depends on [control=['if'], data=[]]
return bool(self.my_osid_object._my_map['fileIds']) |
def _extract_query(self, redirect_url):
"""Extract query parameters from a url.
Parameters
redirect_url (str)
The full URL that the Lyft server redirected to after
the user authorized your app.
Returns
(dict)
A dictionary of query parameters.
"""
qs = urlparse(redirect_url)
# redirect_urls return data after query identifier (?)
qs = qs.query
query_params = parse_qs(qs)
query_params = {qp: query_params[qp][0] for qp in query_params}
return query_params | def function[_extract_query, parameter[self, redirect_url]]:
constant[Extract query parameters from a url.
Parameters
redirect_url (str)
The full URL that the Lyft server redirected to after
the user authorized your app.
Returns
(dict)
A dictionary of query parameters.
]
variable[qs] assign[=] call[name[urlparse], parameter[name[redirect_url]]]
variable[qs] assign[=] name[qs].query
variable[query_params] assign[=] call[name[parse_qs], parameter[name[qs]]]
variable[query_params] assign[=] <ast.DictComp object at 0x7da18f721e70>
return[name[query_params]] | keyword[def] identifier[_extract_query] ( identifier[self] , identifier[redirect_url] ):
literal[string]
identifier[qs] = identifier[urlparse] ( identifier[redirect_url] )
identifier[qs] = identifier[qs] . identifier[query]
identifier[query_params] = identifier[parse_qs] ( identifier[qs] )
identifier[query_params] ={ identifier[qp] : identifier[query_params] [ identifier[qp] ][ literal[int] ] keyword[for] identifier[qp] keyword[in] identifier[query_params] }
keyword[return] identifier[query_params] | def _extract_query(self, redirect_url):
"""Extract query parameters from a url.
Parameters
redirect_url (str)
The full URL that the Lyft server redirected to after
the user authorized your app.
Returns
(dict)
A dictionary of query parameters.
"""
qs = urlparse(redirect_url)
# redirect_urls return data after query identifier (?)
qs = qs.query
query_params = parse_qs(qs)
query_params = {qp: query_params[qp][0] for qp in query_params}
return query_params |
def stream_by_id(self, dst_id):
    """
    Return the :class:`Stream` that should be used to communicate with
    `dst_id`. If a specific route for `dst_id` is not known, a reference to
    the parent context's stream is returned.
    """
    stream = self._stream_by_id.get(dst_id)
    if not stream:
        # No (truthy) route recorded for dst_id: fall back to the route
        # toward the parent context.
        stream = self._stream_by_id.get(mitogen.parent_id)
    return stream
constant[
Return the :class:`Stream` that should be used to communicate with
`dst_id`. If a specific route for `dst_id` is not known, a reference to
the parent context's stream is returned.
]
return[<ast.BoolOp object at 0x7da1b1d529e0>] | keyword[def] identifier[stream_by_id] ( identifier[self] , identifier[dst_id] ):
literal[string]
keyword[return] (
identifier[self] . identifier[_stream_by_id] . identifier[get] ( identifier[dst_id] ) keyword[or]
identifier[self] . identifier[_stream_by_id] . identifier[get] ( identifier[mitogen] . identifier[parent_id] )
) | def stream_by_id(self, dst_id):
"""
Return the :class:`Stream` that should be used to communicate with
`dst_id`. If a specific route for `dst_id` is not known, a reference to
the parent context's stream is returned.
"""
return self._stream_by_id.get(dst_id) or self._stream_by_id.get(mitogen.parent_id) |
def make_call_keywords(stack_builders, count):
    """
    Make the keywords entry for an ast.Call node.
    """
    keywords = []
    for _ in range(count):
        value = make_expr(stack_builders)
        name_instr = stack_builders.pop()
        # Each keyword value must be preceded by a LOAD_CONST of its name.
        if not isinstance(name_instr, instrs.LOAD_CONST):
            raise DecompilationError(
                "Expected a LOAD_CONST, but got %r" % name_instr
            )
        if not isinstance(name_instr.arg, str):
            raise DecompilationError(
                "Expected LOAD_CONST of a str, but got %r." % name_instr,
            )
        # We walk the stack backwards, so prepend to restore source order.
        keywords.insert(0, ast.keyword(arg=name_instr.arg, value=value))
    return keywords
constant[
Make the keywords entry for an ast.Call node.
]
variable[out] assign[=] list[[]]
for taget[name[_]] in starred[call[name[range], parameter[name[count]]]] begin[:]
variable[value] assign[=] call[name[make_expr], parameter[name[stack_builders]]]
variable[load_kwname] assign[=] call[name[stack_builders].pop, parameter[]]
if <ast.UnaryOp object at 0x7da1b05b5db0> begin[:]
<ast.Raise object at 0x7da1b05b4580>
if <ast.UnaryOp object at 0x7da1b05b49a0> begin[:]
<ast.Raise object at 0x7da1b05b6dd0>
call[name[out].append, parameter[call[name[ast].keyword, parameter[]]]]
call[name[out].reverse, parameter[]]
return[name[out]] | keyword[def] identifier[make_call_keywords] ( identifier[stack_builders] , identifier[count] ):
literal[string]
identifier[out] =[]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[count] ):
identifier[value] = identifier[make_expr] ( identifier[stack_builders] )
identifier[load_kwname] = identifier[stack_builders] . identifier[pop] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[load_kwname] , identifier[instrs] . identifier[LOAD_CONST] ):
keyword[raise] identifier[DecompilationError] (
literal[string] % identifier[load_kwname]
)
keyword[if] keyword[not] identifier[isinstance] ( identifier[load_kwname] . identifier[arg] , identifier[str] ):
keyword[raise] identifier[DecompilationError] (
literal[string] % identifier[load_kwname] ,
)
identifier[out] . identifier[append] ( identifier[ast] . identifier[keyword] ( identifier[arg] = identifier[load_kwname] . identifier[arg] , identifier[value] = identifier[value] ))
identifier[out] . identifier[reverse] ()
keyword[return] identifier[out] | def make_call_keywords(stack_builders, count):
"""
Make the keywords entry for an ast.Call node.
"""
out = []
for _ in range(count):
value = make_expr(stack_builders)
load_kwname = stack_builders.pop()
if not isinstance(load_kwname, instrs.LOAD_CONST):
raise DecompilationError('Expected a LOAD_CONST, but got %r' % load_kwname) # depends on [control=['if'], data=[]]
if not isinstance(load_kwname.arg, str):
raise DecompilationError('Expected LOAD_CONST of a str, but got %r.' % load_kwname) # depends on [control=['if'], data=[]]
out.append(ast.keyword(arg=load_kwname.arg, value=value)) # depends on [control=['for'], data=[]]
out.reverse()
return out |
def FDMT_params(f_min, f_max, maxDT, inttime):
    """
    Summarize DM grid and other parameters.
    """
    # Dispersion span across the band; 4.1488e-3 is the dispersion
    # constant in these units -- TODO confirm units against callers.
    dispersion_span = 1 / f_min ** 2 - 1 / f_max ** 2
    maxDM = inttime * maxDT / (4.1488e-3 * dispersion_span)
    logger.info('Freqs from {0}-{1}, MaxDT {2}, Int time {3} => maxDM {4}'
                .format(f_min, f_max, maxDT, inttime, maxDM))
constant[
Summarize DM grid and other parameters.
]
variable[maxDM] assign[=] binary_operation[binary_operation[name[inttime] * name[maxDT]] / binary_operation[constant[0.0041488] * binary_operation[binary_operation[constant[1] / binary_operation[name[f_min] ** constant[2]]] - binary_operation[constant[1] / binary_operation[name[f_max] ** constant[2]]]]]]
call[name[logger].info, parameter[call[constant[Freqs from {0}-{1}, MaxDT {2}, Int time {3} => maxDM {4}].format, parameter[name[f_min], name[f_max], name[maxDT], name[inttime], name[maxDM]]]]] | keyword[def] identifier[FDMT_params] ( identifier[f_min] , identifier[f_max] , identifier[maxDT] , identifier[inttime] ):
literal[string]
identifier[maxDM] = identifier[inttime] * identifier[maxDT] /( literal[int] *( literal[int] / identifier[f_min] ** literal[int] - literal[int] / identifier[f_max] ** literal[int] ))
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[f_min] , identifier[f_max] , identifier[maxDT] , identifier[inttime] , identifier[maxDM] )) | def FDMT_params(f_min, f_max, maxDT, inttime):
"""
Summarize DM grid and other parameters.
"""
maxDM = inttime * maxDT / (0.0041488 * (1 / f_min ** 2 - 1 / f_max ** 2))
logger.info('Freqs from {0}-{1}, MaxDT {2}, Int time {3} => maxDM {4}'.format(f_min, f_max, maxDT, inttime, maxDM)) |
def get_filtered_graph(self, relations=None, prefix=None):
    """
    Returns a networkx graph for the whole ontology, for a subset of relations

    Only implemented for eager methods.

    Implementation notes: currently this is not cached

    Arguments
    ---------
    - relations : list
        list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all.
    - prefix : String
        if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO

    Return
    ------
    nx.MultiDiGraph
        A networkx MultiDiGraph object representing the filtered ontology
    """
    # Populate the synonym and obsolete caches before snapshotting the graph.
    self.all_synonyms()
    self.all_obsoletes()
    source_graph = self.get_graph()
    if prefix is not None:
        wanted = [node for node in source_graph.nodes()
                  if node.startswith(prefix + ":")]
        source_graph = source_graph.subgraph(wanted)
    if relations is None:
        logger.info("No filtering on " + str(self))
        return source_graph
    logger.info("Filtering {} for {}".format(self, relations))
    filtered = nx.MultiDiGraph()
    # TODO: copy full metadata
    logger.info("copying nodes")
    for node, attrs in source_graph.nodes(data=True):
        filtered.add_node(node, **attrs)
    logger.info("copying edges")
    kept = 0
    for subj, obj, attrs in source_graph.edges(data=True):
        # Keep only edges whose predicate is in the requested relation set.
        if attrs['pred'] in relations:
            kept += 1
            filtered.add_edge(subj, obj, **attrs)
    logger.info("Filtered edges: {}".format(kept))
    return filtered
constant[
Returns a networkx graph for the whole ontology, for a subset of relations
Only implemented for eager methods.
Implementation notes: currently this is not cached
Arguments
---------
- relations : list
list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all.
- prefix : String
if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO
Return
------
nx.MultiDiGraph
A networkx MultiDiGraph object representing the filtered ontology
]
call[name[self].all_synonyms, parameter[]]
call[name[self].all_obsoletes, parameter[]]
variable[srcg] assign[=] call[name[self].get_graph, parameter[]]
if compare[name[prefix] is_not constant[None]] begin[:]
variable[srcg] assign[=] call[name[srcg].subgraph, parameter[<ast.ListComp object at 0x7da1b08bb940>]]
if compare[name[relations] is constant[None]] begin[:]
call[name[logger].info, parameter[binary_operation[constant[No filtering on ] + call[name[str], parameter[name[self]]]]]]
return[name[srcg]]
call[name[logger].info, parameter[call[constant[Filtering {} for {}].format, parameter[name[self], name[relations]]]]]
variable[g] assign[=] call[name[nx].MultiDiGraph, parameter[]]
call[name[logger].info, parameter[constant[copying nodes]]]
for taget[tuple[[<ast.Name object at 0x7da1b088a290>, <ast.Name object at 0x7da1b088a110>]]] in starred[call[name[srcg].nodes, parameter[]]] begin[:]
call[name[g].add_node, parameter[name[n]]]
call[name[logger].info, parameter[constant[copying edges]]]
variable[num_edges] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b0889e10>, <ast.Name object at 0x7da1b088a050>, <ast.Name object at 0x7da1b088a080>]]] in starred[call[name[srcg].edges, parameter[]]] begin[:]
if compare[call[name[d]][constant[pred]] in name[relations]] begin[:]
<ast.AugAssign object at 0x7da1b088a3e0>
call[name[g].add_edge, parameter[name[x], name[y]]]
call[name[logger].info, parameter[call[constant[Filtered edges: {}].format, parameter[name[num_edges]]]]]
return[name[g]] | keyword[def] identifier[get_filtered_graph] ( identifier[self] , identifier[relations] = keyword[None] , identifier[prefix] = keyword[None] ):
literal[string]
identifier[self] . identifier[all_synonyms] ()
identifier[self] . identifier[all_obsoletes] ()
identifier[srcg] = identifier[self] . identifier[get_graph] ()
keyword[if] identifier[prefix] keyword[is] keyword[not] keyword[None] :
identifier[srcg] = identifier[srcg] . identifier[subgraph] ([ identifier[n] keyword[for] identifier[n] keyword[in] identifier[srcg] . identifier[nodes] () keyword[if] identifier[n] . identifier[startswith] ( identifier[prefix] + literal[string] )])
keyword[if] identifier[relations] keyword[is] keyword[None] :
identifier[logger] . identifier[info] ( literal[string] + identifier[str] ( identifier[self] ))
keyword[return] identifier[srcg]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] , identifier[relations] ))
identifier[g] = identifier[nx] . identifier[MultiDiGraph] ()
identifier[logger] . identifier[info] ( literal[string] )
keyword[for] ( identifier[n] , identifier[d] ) keyword[in] identifier[srcg] . identifier[nodes] ( identifier[data] = keyword[True] ):
identifier[g] . identifier[add_node] ( identifier[n] ,** identifier[d] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[num_edges] = literal[int]
keyword[for] ( identifier[x] , identifier[y] , identifier[d] ) keyword[in] identifier[srcg] . identifier[edges] ( identifier[data] = keyword[True] ):
keyword[if] identifier[d] [ literal[string] ] keyword[in] identifier[relations] :
identifier[num_edges] += literal[int]
identifier[g] . identifier[add_edge] ( identifier[x] , identifier[y] ,** identifier[d] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[num_edges] ))
keyword[return] identifier[g] | def get_filtered_graph(self, relations=None, prefix=None):
"""
Returns a networkx graph for the whole ontology, for a subset of relations
Only implemented for eager methods.
Implementation notes: currently this is not cached
Arguments
---------
- relations : list
list of object property IDs, e.g. subClassOf, BFO:0000050. If empty, uses all.
- prefix : String
if specified, create a subgraph using only classes with this prefix, e.g. ENVO, PATO, GO
Return
------
nx.MultiDiGraph
A networkx MultiDiGraph object representing the filtered ontology
"""
# trigger synonym cache
self.all_synonyms()
self.all_obsoletes()
# default method - wrap get_graph
srcg = self.get_graph()
if prefix is not None:
srcg = srcg.subgraph([n for n in srcg.nodes() if n.startswith(prefix + ':')]) # depends on [control=['if'], data=['prefix']]
if relations is None:
logger.info('No filtering on ' + str(self))
return srcg # depends on [control=['if'], data=[]]
logger.info('Filtering {} for {}'.format(self, relations))
g = nx.MultiDiGraph()
# TODO: copy full metadata
logger.info('copying nodes')
for (n, d) in srcg.nodes(data=True):
g.add_node(n, **d) # depends on [control=['for'], data=[]]
logger.info('copying edges')
num_edges = 0
for (x, y, d) in srcg.edges(data=True):
if d['pred'] in relations:
num_edges += 1
g.add_edge(x, y, **d) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
logger.info('Filtered edges: {}'.format(num_edges))
return g |
def _get_platform_patterns(spec, package, src_dir):
"""
yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
"""
raw_patterns = itertools.chain(
spec.get('', []),
spec.get(package, []),
)
return (
# Each pattern has to be converted to a platform-specific path
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
) | def function[_get_platform_patterns, parameter[spec, package, src_dir]]:
constant[
yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
]
variable[raw_patterns] assign[=] call[name[itertools].chain, parameter[call[name[spec].get, parameter[constant[], list[[]]]], call[name[spec].get, parameter[name[package], list[[]]]]]]
return[<ast.GeneratorExp object at 0x7da1b1b13a90>] | keyword[def] identifier[_get_platform_patterns] ( identifier[spec] , identifier[package] , identifier[src_dir] ):
literal[string]
identifier[raw_patterns] = identifier[itertools] . identifier[chain] (
identifier[spec] . identifier[get] ( literal[string] ,[]),
identifier[spec] . identifier[get] ( identifier[package] ,[]),
)
keyword[return] (
identifier[os] . identifier[path] . identifier[join] ( identifier[src_dir] , identifier[convert_path] ( identifier[pattern] ))
keyword[for] identifier[pattern] keyword[in] identifier[raw_patterns]
) | def _get_platform_patterns(spec, package, src_dir):
"""
yield platform-specific path patterns (suitable for glob
or fn_match) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
"""
raw_patterns = itertools.chain(spec.get('', []), spec.get(package, []))
# Each pattern has to be converted to a platform-specific path
return (os.path.join(src_dir, convert_path(pattern)) for pattern in raw_patterns) |
def fetch_assets(self):
""" download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
"""
# allow overwrites from the commandline
packages = set(
env.instance.config.get('bootstrap-packages', '').split())
packages.update(['python27'])
cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
items = sorted(self.bootstrap_files.items())
for filename, asset in items:
if asset.url:
if not exists(dirname(asset.local)):
os.makedirs(dirname(asset.local))
local(cmd.format(asset))
if filename == 'packagesite.txz':
# add packages to download
items.extend(self._fetch_packages(asset.local, packages)) | def function[fetch_assets, parameter[self]]:
constant[ download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
]
variable[packages] assign[=] call[name[set], parameter[call[call[name[env].instance.config.get, parameter[constant[bootstrap-packages], constant[]]].split, parameter[]]]]
call[name[packages].update, parameter[list[[<ast.Constant object at 0x7da20c990d90>]]]]
variable[cmd] assign[=] call[name[env].instance.config.get, parameter[constant[bootstrap-local-download-cmd], constant[wget -c -O "{0.local}" "{0.url}"]]]
variable[items] assign[=] call[name[sorted], parameter[call[name[self].bootstrap_files.items, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da20c992a70>, <ast.Name object at 0x7da20c991a20>]]] in starred[name[items]] begin[:]
if name[asset].url begin[:]
if <ast.UnaryOp object at 0x7da20c992f20> begin[:]
call[name[os].makedirs, parameter[call[name[dirname], parameter[name[asset].local]]]]
call[name[local], parameter[call[name[cmd].format, parameter[name[asset]]]]]
if compare[name[filename] equal[==] constant[packagesite.txz]] begin[:]
call[name[items].extend, parameter[call[name[self]._fetch_packages, parameter[name[asset].local, name[packages]]]]] | keyword[def] identifier[fetch_assets] ( identifier[self] ):
literal[string]
identifier[packages] = identifier[set] (
identifier[env] . identifier[instance] . identifier[config] . identifier[get] ( literal[string] , literal[string] ). identifier[split] ())
identifier[packages] . identifier[update] ([ literal[string] ])
identifier[cmd] = identifier[env] . identifier[instance] . identifier[config] . identifier[get] ( literal[string] , literal[string] )
identifier[items] = identifier[sorted] ( identifier[self] . identifier[bootstrap_files] . identifier[items] ())
keyword[for] identifier[filename] , identifier[asset] keyword[in] identifier[items] :
keyword[if] identifier[asset] . identifier[url] :
keyword[if] keyword[not] identifier[exists] ( identifier[dirname] ( identifier[asset] . identifier[local] )):
identifier[os] . identifier[makedirs] ( identifier[dirname] ( identifier[asset] . identifier[local] ))
identifier[local] ( identifier[cmd] . identifier[format] ( identifier[asset] ))
keyword[if] identifier[filename] == literal[string] :
identifier[items] . identifier[extend] ( identifier[self] . identifier[_fetch_packages] ( identifier[asset] . identifier[local] , identifier[packages] )) | def fetch_assets(self):
""" download bootstrap assets to control host.
If present on the control host they will be uploaded to the target host during bootstrapping.
"""
# allow overwrites from the commandline
packages = set(env.instance.config.get('bootstrap-packages', '').split())
packages.update(['python27'])
cmd = env.instance.config.get('bootstrap-local-download-cmd', 'wget -c -O "{0.local}" "{0.url}"')
items = sorted(self.bootstrap_files.items())
for (filename, asset) in items:
if asset.url:
if not exists(dirname(asset.local)):
os.makedirs(dirname(asset.local)) # depends on [control=['if'], data=[]]
local(cmd.format(asset)) # depends on [control=['if'], data=[]]
if filename == 'packagesite.txz':
# add packages to download
items.extend(self._fetch_packages(asset.local, packages)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def aws_to_unix_id(aws_key_id):
"""Converts a AWS Key ID into a UID"""
uid_bytes = hashlib.sha256(aws_key_id.encode()).digest()[-2:]
if USING_PYTHON2:
return 2000 + int(from_bytes(uid_bytes) // 2)
else:
return 2000 + (int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2) | def function[aws_to_unix_id, parameter[aws_key_id]]:
constant[Converts a AWS Key ID into a UID]
variable[uid_bytes] assign[=] call[call[call[name[hashlib].sha256, parameter[call[name[aws_key_id].encode, parameter[]]]].digest, parameter[]]][<ast.Slice object at 0x7da1b07ae050>]
if name[USING_PYTHON2] begin[:]
return[binary_operation[constant[2000] + call[name[int], parameter[binary_operation[call[name[from_bytes], parameter[name[uid_bytes]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]]]] | keyword[def] identifier[aws_to_unix_id] ( identifier[aws_key_id] ):
literal[string]
identifier[uid_bytes] = identifier[hashlib] . identifier[sha256] ( identifier[aws_key_id] . identifier[encode] ()). identifier[digest] ()[- literal[int] :]
keyword[if] identifier[USING_PYTHON2] :
keyword[return] literal[int] + identifier[int] ( identifier[from_bytes] ( identifier[uid_bytes] )// literal[int] )
keyword[else] :
keyword[return] literal[int] +( identifier[int] . identifier[from_bytes] ( identifier[uid_bytes] , identifier[byteorder] = identifier[sys] . identifier[byteorder] )// literal[int] ) | def aws_to_unix_id(aws_key_id):
"""Converts a AWS Key ID into a UID"""
uid_bytes = hashlib.sha256(aws_key_id.encode()).digest()[-2:]
if USING_PYTHON2:
return 2000 + int(from_bytes(uid_bytes) // 2) # depends on [control=['if'], data=[]]
else:
return 2000 + int.from_bytes(uid_bytes, byteorder=sys.byteorder) // 2 |
def cut_by_plane(self, plane, inverted=False):
'''
Like cut_across_axis, but works with an arbitrary plane. Keeps
vertices that lie in front of the plane (i.e. in the direction
of the plane normal).
inverted: When `True`, invert the logic, to keep the vertices
that lie behind the plane instead.
Return the original indices of the kept vertices.
'''
vertices_to_keep = plane.points_in_front(self.v, inverted=inverted, ret_indices=True)
self.keep_vertices(vertices_to_keep)
return vertices_to_keep | def function[cut_by_plane, parameter[self, plane, inverted]]:
constant[
Like cut_across_axis, but works with an arbitrary plane. Keeps
vertices that lie in front of the plane (i.e. in the direction
of the plane normal).
inverted: When `True`, invert the logic, to keep the vertices
that lie behind the plane instead.
Return the original indices of the kept vertices.
]
variable[vertices_to_keep] assign[=] call[name[plane].points_in_front, parameter[name[self].v]]
call[name[self].keep_vertices, parameter[name[vertices_to_keep]]]
return[name[vertices_to_keep]] | keyword[def] identifier[cut_by_plane] ( identifier[self] , identifier[plane] , identifier[inverted] = keyword[False] ):
literal[string]
identifier[vertices_to_keep] = identifier[plane] . identifier[points_in_front] ( identifier[self] . identifier[v] , identifier[inverted] = identifier[inverted] , identifier[ret_indices] = keyword[True] )
identifier[self] . identifier[keep_vertices] ( identifier[vertices_to_keep] )
keyword[return] identifier[vertices_to_keep] | def cut_by_plane(self, plane, inverted=False):
"""
Like cut_across_axis, but works with an arbitrary plane. Keeps
vertices that lie in front of the plane (i.e. in the direction
of the plane normal).
inverted: When `True`, invert the logic, to keep the vertices
that lie behind the plane instead.
Return the original indices of the kept vertices.
"""
vertices_to_keep = plane.points_in_front(self.v, inverted=inverted, ret_indices=True)
self.keep_vertices(vertices_to_keep)
return vertices_to_keep |
def call(self):
"""
call: ['mut'] ID ['(' parameters ')']
"""
is_mutable = False
if self.token.nature == Nature.MUT:
is_mutable = True
self._process(Nature.MUT)
identifier = Identifier(name=self.token.value)
self._process(Nature.ID)
if self.token.nature == Nature.LPAREN:
return FunctionCall(identifier=identifier, parameters=self.parameters())
else:
return Variable(identifier=identifier, is_mutable=is_mutable) | def function[call, parameter[self]]:
constant[
call: ['mut'] ID ['(' parameters ')']
]
variable[is_mutable] assign[=] constant[False]
if compare[name[self].token.nature equal[==] name[Nature].MUT] begin[:]
variable[is_mutable] assign[=] constant[True]
call[name[self]._process, parameter[name[Nature].MUT]]
variable[identifier] assign[=] call[name[Identifier], parameter[]]
call[name[self]._process, parameter[name[Nature].ID]]
if compare[name[self].token.nature equal[==] name[Nature].LPAREN] begin[:]
return[call[name[FunctionCall], parameter[]]] | keyword[def] identifier[call] ( identifier[self] ):
literal[string]
identifier[is_mutable] = keyword[False]
keyword[if] identifier[self] . identifier[token] . identifier[nature] == identifier[Nature] . identifier[MUT] :
identifier[is_mutable] = keyword[True]
identifier[self] . identifier[_process] ( identifier[Nature] . identifier[MUT] )
identifier[identifier] = identifier[Identifier] ( identifier[name] = identifier[self] . identifier[token] . identifier[value] )
identifier[self] . identifier[_process] ( identifier[Nature] . identifier[ID] )
keyword[if] identifier[self] . identifier[token] . identifier[nature] == identifier[Nature] . identifier[LPAREN] :
keyword[return] identifier[FunctionCall] ( identifier[identifier] = identifier[identifier] , identifier[parameters] = identifier[self] . identifier[parameters] ())
keyword[else] :
keyword[return] identifier[Variable] ( identifier[identifier] = identifier[identifier] , identifier[is_mutable] = identifier[is_mutable] ) | def call(self):
"""
call: ['mut'] ID ['(' parameters ')']
"""
is_mutable = False
if self.token.nature == Nature.MUT:
is_mutable = True
self._process(Nature.MUT) # depends on [control=['if'], data=[]]
identifier = Identifier(name=self.token.value)
self._process(Nature.ID)
if self.token.nature == Nature.LPAREN:
return FunctionCall(identifier=identifier, parameters=self.parameters()) # depends on [control=['if'], data=[]]
else:
return Variable(identifier=identifier, is_mutable=is_mutable) |
def _search_in_bases(type_):
"""Implementation detail."""
found = False
for base_type in type_.declaration.bases:
try:
found = internal_type_traits.get_by_name(
base_type.related_class, "element_type")
except runtime_errors.declaration_not_found_t:
pass
if found:
return found
raise RuntimeError(
("Unable to find 'element_type' declaration '%s'"
"in type '%s'.") % type_.decl_string) | def function[_search_in_bases, parameter[type_]]:
constant[Implementation detail.]
variable[found] assign[=] constant[False]
for taget[name[base_type]] in starred[name[type_].declaration.bases] begin[:]
<ast.Try object at 0x7da2041d8c70>
if name[found] begin[:]
return[name[found]]
<ast.Raise object at 0x7da1b1304910> | keyword[def] identifier[_search_in_bases] ( identifier[type_] ):
literal[string]
identifier[found] = keyword[False]
keyword[for] identifier[base_type] keyword[in] identifier[type_] . identifier[declaration] . identifier[bases] :
keyword[try] :
identifier[found] = identifier[internal_type_traits] . identifier[get_by_name] (
identifier[base_type] . identifier[related_class] , literal[string] )
keyword[except] identifier[runtime_errors] . identifier[declaration_not_found_t] :
keyword[pass]
keyword[if] identifier[found] :
keyword[return] identifier[found]
keyword[raise] identifier[RuntimeError] (
( literal[string]
literal[string] )% identifier[type_] . identifier[decl_string] ) | def _search_in_bases(type_):
"""Implementation detail."""
found = False
for base_type in type_.declaration.bases:
try:
found = internal_type_traits.get_by_name(base_type.related_class, 'element_type') # depends on [control=['try'], data=[]]
except runtime_errors.declaration_not_found_t:
pass # depends on [control=['except'], data=[]]
if found:
return found # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['base_type']]
raise RuntimeError("Unable to find 'element_type' declaration '%s'in type '%s'." % type_.decl_string) |
def density(args):
"""
%prog density test.clm
Estimate link density of contigs.
"""
p = OptionParser(density.__doc__)
p.add_option("--save", default=False, action="store_true",
help="Write log densitites of contigs to file")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
clmfile, = args
clm = CLMFile(clmfile)
pf = clmfile.rsplit(".", 1)[0]
if opts.save:
logdensities = clm.calculate_densities()
densityfile = pf + ".density"
fw = open(densityfile, "w")
for name, logd in logdensities.items():
s = clm.tig_to_size[name]
print("\t".join(str(x) for x in (name, s, logd)), file=fw)
fw.close()
logging.debug("Density written to `{}`".format(densityfile))
tourfile = clmfile.rsplit(".", 1)[0] + ".tour"
tour = clm.activate(tourfile=tourfile, backuptour=False)
clm.flip_all(tour)
clm.flip_whole(tour)
clm.flip_one(tour) | def function[density, parameter[args]]:
constant[
%prog density test.clm
Estimate link density of contigs.
]
variable[p] assign[=] call[name[OptionParser], parameter[name[density].__doc__]]
call[name[p].add_option, parameter[constant[--save]]]
call[name[p].set_cpus, parameter[]]
<ast.Tuple object at 0x7da18bc705e0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20e956d10>]]
<ast.Tuple object at 0x7da20e9549a0> assign[=] name[args]
variable[clm] assign[=] call[name[CLMFile], parameter[name[clmfile]]]
variable[pf] assign[=] call[call[name[clmfile].rsplit, parameter[constant[.], constant[1]]]][constant[0]]
if name[opts].save begin[:]
variable[logdensities] assign[=] call[name[clm].calculate_densities, parameter[]]
variable[densityfile] assign[=] binary_operation[name[pf] + constant[.density]]
variable[fw] assign[=] call[name[open], parameter[name[densityfile], constant[w]]]
for taget[tuple[[<ast.Name object at 0x7da20e957e50>, <ast.Name object at 0x7da20e954df0>]]] in starred[call[name[logdensities].items, parameter[]]] begin[:]
variable[s] assign[=] call[name[clm].tig_to_size][name[name]]
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da20e9560e0>]]]]
call[name[fw].close, parameter[]]
call[name[logging].debug, parameter[call[constant[Density written to `{}`].format, parameter[name[densityfile]]]]]
variable[tourfile] assign[=] binary_operation[call[call[name[clmfile].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.tour]]
variable[tour] assign[=] call[name[clm].activate, parameter[]]
call[name[clm].flip_all, parameter[name[tour]]]
call[name[clm].flip_whole, parameter[name[tour]]]
call[name[clm].flip_one, parameter[name[tour]]] | keyword[def] identifier[density] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[density] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = keyword[False] , identifier[action] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_cpus] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[clmfile] ,= identifier[args]
identifier[clm] = identifier[CLMFile] ( identifier[clmfile] )
identifier[pf] = identifier[clmfile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]
keyword[if] identifier[opts] . identifier[save] :
identifier[logdensities] = identifier[clm] . identifier[calculate_densities] ()
identifier[densityfile] = identifier[pf] + literal[string]
identifier[fw] = identifier[open] ( identifier[densityfile] , literal[string] )
keyword[for] identifier[name] , identifier[logd] keyword[in] identifier[logdensities] . identifier[items] ():
identifier[s] = identifier[clm] . identifier[tig_to_size] [ identifier[name] ]
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[name] , identifier[s] , identifier[logd] )), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] ()
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[densityfile] ))
identifier[tourfile] = identifier[clmfile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
identifier[tour] = identifier[clm] . identifier[activate] ( identifier[tourfile] = identifier[tourfile] , identifier[backuptour] = keyword[False] )
identifier[clm] . identifier[flip_all] ( identifier[tour] )
identifier[clm] . identifier[flip_whole] ( identifier[tour] )
identifier[clm] . identifier[flip_one] ( identifier[tour] ) | def density(args):
"""
%prog density test.clm
Estimate link density of contigs.
"""
p = OptionParser(density.__doc__)
p.add_option('--save', default=False, action='store_true', help='Write log densitites of contigs to file')
p.set_cpus()
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(clmfile,) = args
clm = CLMFile(clmfile)
pf = clmfile.rsplit('.', 1)[0]
if opts.save:
logdensities = clm.calculate_densities()
densityfile = pf + '.density'
fw = open(densityfile, 'w')
for (name, logd) in logdensities.items():
s = clm.tig_to_size[name]
print('\t'.join((str(x) for x in (name, s, logd))), file=fw) # depends on [control=['for'], data=[]]
fw.close()
logging.debug('Density written to `{}`'.format(densityfile)) # depends on [control=['if'], data=[]]
tourfile = clmfile.rsplit('.', 1)[0] + '.tour'
tour = clm.activate(tourfile=tourfile, backuptour=False)
clm.flip_all(tour)
clm.flip_whole(tour)
clm.flip_one(tour) |
def download(
self,
file_name: str = "",
block: bool = True,
progress: callable = None,
progress_args: tuple = ()
) -> "Message":
"""Bound method *download* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.download_media(message)
Example:
.. code-block:: python
message.download()
Args:
file_name (``str``, *optional*):
A custom *file_name* to be used instead of the one provided by Telegram.
By default, all files are downloaded in the *downloads* folder in your working directory.
You can also specify a path for downloading files in a custom location: paths that end with "/"
are considered directories. All non-existent folders will be created automatically.
block (``bool``, *optional*):
Blocks the code execution until the file has been downloaded.
Defaults to True.
progress (``callable``):
Pass a callback function to view the download progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Returns:
On success, the absolute path of the downloaded file as string is returned, None otherwise.
Raises:
:class:`RPCError <pyrogram.RPCError>`
``ValueError``: If the message doesn't contain any downloadable media
"""
return self._client.download_media(
message=self,
file_name=file_name,
block=block,
progress=progress,
progress_args=progress_args,
) | def function[download, parameter[self, file_name, block, progress, progress_args]]:
constant[Bound method *download* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.download_media(message)
Example:
.. code-block:: python
message.download()
Args:
file_name (``str``, *optional*):
A custom *file_name* to be used instead of the one provided by Telegram.
By default, all files are downloaded in the *downloads* folder in your working directory.
You can also specify a path for downloading files in a custom location: paths that end with "/"
are considered directories. All non-existent folders will be created automatically.
block (``bool``, *optional*):
Blocks the code execution until the file has been downloaded.
Defaults to True.
progress (``callable``):
Pass a callback function to view the download progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Returns:
On success, the absolute path of the downloaded file as string is returned, None otherwise.
Raises:
:class:`RPCError <pyrogram.RPCError>`
``ValueError``: If the message doesn't contain any downloadable media
]
return[call[name[self]._client.download_media, parameter[]]] | keyword[def] identifier[download] (
identifier[self] ,
identifier[file_name] : identifier[str] = literal[string] ,
identifier[block] : identifier[bool] = keyword[True] ,
identifier[progress] : identifier[callable] = keyword[None] ,
identifier[progress_args] : identifier[tuple] =()
)-> literal[string] :
literal[string]
keyword[return] identifier[self] . identifier[_client] . identifier[download_media] (
identifier[message] = identifier[self] ,
identifier[file_name] = identifier[file_name] ,
identifier[block] = identifier[block] ,
identifier[progress] = identifier[progress] ,
identifier[progress_args] = identifier[progress_args] ,
) | def download(self, file_name: str='', block: bool=True, progress: callable=None, progress_args: tuple=()) -> 'Message':
"""Bound method *download* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.download_media(message)
Example:
.. code-block:: python
message.download()
Args:
file_name (``str``, *optional*):
A custom *file_name* to be used instead of the one provided by Telegram.
By default, all files are downloaded in the *downloads* folder in your working directory.
You can also specify a path for downloading files in a custom location: paths that end with "/"
are considered directories. All non-existent folders will be created automatically.
block (``bool``, *optional*):
Blocks the code execution until the file has been downloaded.
Defaults to True.
progress (``callable``):
Pass a callback function to view the download progress.
The function must take *(client, current, total, \\*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Returns:
On success, the absolute path of the downloaded file as string is returned, None otherwise.
Raises:
:class:`RPCError <pyrogram.RPCError>`
``ValueError``: If the message doesn't contain any downloadable media
"""
return self._client.download_media(message=self, file_name=file_name, block=block, progress=progress, progress_args=progress_args) |
def ap_adc_encode(self, adc1, adc2, adc3, adc4, adc5, adc6):
'''
raw ADC output
adc1 : ADC output 1 (uint16_t)
adc2 : ADC output 2 (uint16_t)
adc3 : ADC output 3 (uint16_t)
adc4 : ADC output 4 (uint16_t)
adc5 : ADC output 5 (uint16_t)
adc6 : ADC output 6 (uint16_t)
'''
return MAVLink_ap_adc_message(adc1, adc2, adc3, adc4, adc5, adc6) | def function[ap_adc_encode, parameter[self, adc1, adc2, adc3, adc4, adc5, adc6]]:
constant[
raw ADC output
adc1 : ADC output 1 (uint16_t)
adc2 : ADC output 2 (uint16_t)
adc3 : ADC output 3 (uint16_t)
adc4 : ADC output 4 (uint16_t)
adc5 : ADC output 5 (uint16_t)
adc6 : ADC output 6 (uint16_t)
]
return[call[name[MAVLink_ap_adc_message], parameter[name[adc1], name[adc2], name[adc3], name[adc4], name[adc5], name[adc6]]]] | keyword[def] identifier[ap_adc_encode] ( identifier[self] , identifier[adc1] , identifier[adc2] , identifier[adc3] , identifier[adc4] , identifier[adc5] , identifier[adc6] ):
literal[string]
keyword[return] identifier[MAVLink_ap_adc_message] ( identifier[adc1] , identifier[adc2] , identifier[adc3] , identifier[adc4] , identifier[adc5] , identifier[adc6] ) | def ap_adc_encode(self, adc1, adc2, adc3, adc4, adc5, adc6):
"""
raw ADC output
adc1 : ADC output 1 (uint16_t)
adc2 : ADC output 2 (uint16_t)
adc3 : ADC output 3 (uint16_t)
adc4 : ADC output 4 (uint16_t)
adc5 : ADC output 5 (uint16_t)
adc6 : ADC output 6 (uint16_t)
"""
return MAVLink_ap_adc_message(adc1, adc2, adc3, adc4, adc5, adc6) |
def percentage_progress(self):
"""
Returns a float between 0 and 1, representing the current job's progress in its task.
If total_progress is not given or 0, just return self.progress.
:return: float corresponding to the total percentage progress of the job.
"""
if self.total_progress != 0:
return float(self.progress) / self.total_progress
else:
return self.progress | def function[percentage_progress, parameter[self]]:
constant[
Returns a float between 0 and 1, representing the current job's progress in its task.
If total_progress is not given or 0, just return self.progress.
:return: float corresponding to the total percentage progress of the job.
]
if compare[name[self].total_progress not_equal[!=] constant[0]] begin[:]
return[binary_operation[call[name[float], parameter[name[self].progress]] / name[self].total_progress]] | keyword[def] identifier[percentage_progress] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[total_progress] != literal[int] :
keyword[return] identifier[float] ( identifier[self] . identifier[progress] )/ identifier[self] . identifier[total_progress]
keyword[else] :
keyword[return] identifier[self] . identifier[progress] | def percentage_progress(self):
"""
Returns a float between 0 and 1, representing the current job's progress in its task.
If total_progress is not given or 0, just return self.progress.
:return: float corresponding to the total percentage progress of the job.
"""
if self.total_progress != 0:
return float(self.progress) / self.total_progress # depends on [control=['if'], data=[]]
else:
return self.progress |
def encode(self, entity):
"""
Generate string of cwr format for all possible combinations of fields,
accumulate and then elect the best. The best string it is who used most of all fields
:param entity:
:return:
"""
possible_results = []
entity_dict = self.get_entity_dict(entity)
record_field_encoders = self.get_record_fields_encoders()
for field_encoders in record_field_encoders:
result = self.try_encode(field_encoders, entity_dict)
if result:
possible_results.append({'result': result, 'len': len(field_encoders)})
cwr = self.head(entity) + self._get_best_result(possible_results) + "\r\n"
return cwr | def function[encode, parameter[self, entity]]:
constant[
Generate string of cwr format for all possible combinations of fields,
accumulate and then elect the best. The best string it is who used most of all fields
:param entity:
:return:
]
variable[possible_results] assign[=] list[[]]
variable[entity_dict] assign[=] call[name[self].get_entity_dict, parameter[name[entity]]]
variable[record_field_encoders] assign[=] call[name[self].get_record_fields_encoders, parameter[]]
for taget[name[field_encoders]] in starred[name[record_field_encoders]] begin[:]
variable[result] assign[=] call[name[self].try_encode, parameter[name[field_encoders], name[entity_dict]]]
if name[result] begin[:]
call[name[possible_results].append, parameter[dictionary[[<ast.Constant object at 0x7da1b1990400>, <ast.Constant object at 0x7da1b19914e0>], [<ast.Name object at 0x7da1b19931c0>, <ast.Call object at 0x7da1b1990100>]]]]
variable[cwr] assign[=] binary_operation[binary_operation[call[name[self].head, parameter[name[entity]]] + call[name[self]._get_best_result, parameter[name[possible_results]]]] + constant[
]]
return[name[cwr]] | keyword[def] identifier[encode] ( identifier[self] , identifier[entity] ):
literal[string]
identifier[possible_results] =[]
identifier[entity_dict] = identifier[self] . identifier[get_entity_dict] ( identifier[entity] )
identifier[record_field_encoders] = identifier[self] . identifier[get_record_fields_encoders] ()
keyword[for] identifier[field_encoders] keyword[in] identifier[record_field_encoders] :
identifier[result] = identifier[self] . identifier[try_encode] ( identifier[field_encoders] , identifier[entity_dict] )
keyword[if] identifier[result] :
identifier[possible_results] . identifier[append] ({ literal[string] : identifier[result] , literal[string] : identifier[len] ( identifier[field_encoders] )})
identifier[cwr] = identifier[self] . identifier[head] ( identifier[entity] )+ identifier[self] . identifier[_get_best_result] ( identifier[possible_results] )+ literal[string]
keyword[return] identifier[cwr] | def encode(self, entity):
"""
Generate string of cwr format for all possible combinations of fields,
accumulate and then elect the best. The best string it is who used most of all fields
:param entity:
:return:
"""
possible_results = []
entity_dict = self.get_entity_dict(entity)
record_field_encoders = self.get_record_fields_encoders()
for field_encoders in record_field_encoders:
result = self.try_encode(field_encoders, entity_dict)
if result:
possible_results.append({'result': result, 'len': len(field_encoders)}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field_encoders']]
cwr = self.head(entity) + self._get_best_result(possible_results) + '\r\n'
return cwr |
def cache_penalty_model(penalty_model, database=None):
    """Caching function for penaltymodel_cache.

    Args:
        penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to
            be cached.
        database (str, optional): The path to the desired sqlite database
            file. If None, will use the default.
    """
    # The cache only handles index-labelled nodes, so canonicalize the
    # graph labels first (without mutating the caller's penalty model).
    if not _is_index_labelled(penalty_model.graph):
        mapping, __ = _graph_canonicalization(penalty_model.graph)
        penalty_model = penalty_model.relabel_variables(mapping, inplace=False)

    # Connect to the database. Note that once the connection is made it
    # cannot be broken up between several processes.
    conn = cache_connect() if database is None else cache_connect(database)
    try:
        # Load into the database.
        with conn as cur:
            insert_penalty_model(cur, penalty_model)
    finally:
        # Close the connection even if the insert raises, so the sqlite
        # file handle is never leaked.
        conn.close()
constant[Caching function for penaltymodel_cache.
Args:
penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to
be cached.
database (str, optional): The path to the desired sqlite database
file. If None, will use the default.
]
if <ast.UnaryOp object at 0x7da18f00ea10> begin[:]
<ast.Tuple object at 0x7da18f00d3f0> assign[=] call[name[_graph_canonicalization], parameter[name[penalty_model].graph]]
variable[penalty_model] assign[=] call[name[penalty_model].relabel_variables, parameter[name[mapping]]]
if compare[name[database] is constant[None]] begin[:]
variable[conn] assign[=] call[name[cache_connect], parameter[]]
with name[conn] begin[:]
call[name[insert_penalty_model], parameter[name[cur], name[penalty_model]]]
call[name[conn].close, parameter[]] | keyword[def] identifier[cache_penalty_model] ( identifier[penalty_model] , identifier[database] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[_is_index_labelled] ( identifier[penalty_model] . identifier[graph] ):
identifier[mapping] , identifier[__] = identifier[_graph_canonicalization] ( identifier[penalty_model] . identifier[graph] )
identifier[penalty_model] = identifier[penalty_model] . identifier[relabel_variables] ( identifier[mapping] , identifier[inplace] = keyword[False] )
keyword[if] identifier[database] keyword[is] keyword[None] :
identifier[conn] = identifier[cache_connect] ()
keyword[else] :
identifier[conn] = identifier[cache_connect] ( identifier[database] )
keyword[with] identifier[conn] keyword[as] identifier[cur] :
identifier[insert_penalty_model] ( identifier[cur] , identifier[penalty_model] )
identifier[conn] . identifier[close] () | def cache_penalty_model(penalty_model, database=None):
"""Caching function for penaltymodel_cache.
Args:
penalty_model (:class:`penaltymodel.PenaltyModel`): Penalty model to
be cached.
database (str, optional): The path to the desired sqlite database
file. If None, will use the default.
"""
# only handles index-labelled nodes
if not _is_index_labelled(penalty_model.graph):
(mapping, __) = _graph_canonicalization(penalty_model.graph)
penalty_model = penalty_model.relabel_variables(mapping, inplace=False) # depends on [control=['if'], data=[]]
# connect to the database. Note that once the connection is made it cannot be
# broken up between several processes.
if database is None:
conn = cache_connect() # depends on [control=['if'], data=[]]
else:
conn = cache_connect(database)
# load into the database
with conn as cur:
insert_penalty_model(cur, penalty_model) # depends on [control=['with'], data=['cur']]
# close the connection
conn.close() |
def fmt_data(text, data_formating=None, data_type=None):
    """
    Format given text according to given data formating pattern or data type.

    A registered ``data_type`` converter takes precedence over an explicit
    ``data_formating`` pattern; with neither, the plain ``str()`` of the
    text is returned.
    """
    if data_type:
        return DATA_TYPES[data_type](text)
    # "{!s}" reproduces str(text) when no explicit pattern was supplied.
    pattern = str(data_formating) if data_formating else '{!s}'
    return pattern.format(text)
return str(text) | def function[fmt_data, parameter[text, data_formating, data_type]]:
constant[
Format given text according to given data formating pattern or data type.
]
if name[data_type] begin[:]
return[call[call[name[DATA_TYPES]][name[data_type]], parameter[name[text]]]]
return[call[name[str], parameter[name[text]]]] | keyword[def] identifier[fmt_data] ( identifier[text] , identifier[data_formating] = keyword[None] , identifier[data_type] = keyword[None] ):
literal[string]
keyword[if] identifier[data_type] :
keyword[return] identifier[DATA_TYPES] [ identifier[data_type] ]( identifier[text] )
keyword[elif] identifier[data_formating] :
keyword[return] identifier[str] ( identifier[data_formating] ). identifier[format] ( identifier[text] )
keyword[return] identifier[str] ( identifier[text] ) | def fmt_data(text, data_formating=None, data_type=None):
"""
Format given text according to given data formating pattern or data type.
"""
if data_type:
return DATA_TYPES[data_type](text) # depends on [control=['if'], data=[]]
elif data_formating:
return str(data_formating).format(text) # depends on [control=['if'], data=[]]
return str(text) |
def edit_txt(filename, substitutions, newname=None):
    """Primitive sed-like stream editor for free-form text files.

    Each entry of *substitutions* is a triplet
    ``(line_match_RE, search_RE, replacement)``: in every line matching
    *line_match_RE*, occurrences of *search_RE* are substituted with
    *replacement*, mimicking ``sed /line_match_RE/s/search_RE/replacement/``.

    Special replacement values:

    - ``None``: the rule is ignored entirely.
    - ``False``: a matching line is deleted (even if other rules match).

    :Arguments:
       *filename*
           input text file
       *substitutions*
           list of ``(line_match_RE, search_RE, replacement)`` triplets
       *newname*
           output filename; if ``None`` then *filename* is changed in
           place [``None``]

    .. note::
       * No sanity checks are performed on the substitution rules.
       * All rules are applied to each line in order, so an earlier
         substitution can create a match for a later rule.
    """
    if newname is None:
        newname = filename

    # Compile the rules up front; rules whose replacement is None are
    # dropped and never applied.
    rules = [{'lRE': re.compile(str(line_pat)),
              'sRE': re.compile(str(search_pat)),
              'repl': repl}
             for line_pat, search_pat, repl in substitutions
             if repl is not None]

    with tempfile.TemporaryFile() as scratch:
        with open(filename, 'rb') as src:
            logger.info("editing txt = {0!r} ({1:d} substitutions)".format(filename, len(substitutions)))
            for raw in src:
                line = raw.decode("utf-8")
                delete_line = False
                for rule in rules:
                    if rule['lRE'].match(line):
                        logger.debug('match: ' + line.rstrip())
                        if rule['repl'] is False:
                            # Special rule: drop this line. Remaining rules
                            # are still evaluated against it, as before.
                            delete_line = True
                        else:
                            line = rule['sRE'].sub(str(rule['repl']), line)
                            logger.debug('replaced: ' + line.rstrip())
                if delete_line:
                    logger.debug("Deleting line %r", line)
                else:
                    scratch.write(line.encode('utf-8'))
        scratch.seek(0)
        with open(newname, 'wb') as final:
            shutil.copyfileobj(scratch, final)
    logger.info("edited txt = {newname!r}".format(newname=newname))
constant[Primitive text file stream editor.
This function can be used to edit free-form text files such as the
topology file. By default it does an **in-place edit** of
*filename*. If *newname* is supplied then the edited
file is written to *newname*.
:Arguments:
*filename*
input text file
*substitutions*
substitution commands (see below for format)
*newname*
output filename; if ``None`` then *filename* is changed in
place [``None``]
*substitutions* is a list of triplets; the first two elements are regular
expression strings, the last is the substitution value. It mimics
``sed`` search and replace. The rules for *substitutions*:
.. productionlist::
substitutions: "[" search_replace_tuple, ... "]"
search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")"
line_match_RE: regular expression that selects the line (uses match)
search_RE: regular expression that is searched in the line
replacement: replacement string for search_RE
Running :func:`edit_txt` does pretty much what a simple ::
sed /line_match_RE/s/search_RE/replacement/
with repeated substitution commands does.
Special replacement values:
- ``None``: the rule is ignored
- ``False``: the line is deleted (even if other rules match)
.. note::
* No sanity checks are performed and the substitutions must be supplied
exactly as shown.
* All substitutions are applied to a line; thus the order of the substitution
commands may matter when one substitution generates a match for a subsequent rule.
* If replacement is set to ``None`` then the whole expression is ignored and
whatever is in the template is used. To unset values you must provided an
empty string or similar.
* Delete a matching line if replacement=``False``.
]
if compare[name[newname] is constant[None]] begin[:]
variable[newname] assign[=] name[filename]
variable[_substitutions] assign[=] <ast.ListComp object at 0x7da2044c13f0>
with call[name[tempfile].TemporaryFile, parameter[]] begin[:]
with call[name[open], parameter[name[filename], constant[rb]]] begin[:]
call[name[logger].info, parameter[call[constant[editing txt = {0!r} ({1:d} substitutions)].format, parameter[name[filename], call[name[len], parameter[name[substitutions]]]]]]]
for taget[name[line]] in starred[name[src]] begin[:]
variable[line] assign[=] call[name[line].decode, parameter[constant[utf-8]]]
variable[keep_line] assign[=] constant[True]
for taget[name[subst]] in starred[name[_substitutions]] begin[:]
variable[m] assign[=] call[call[name[subst]][constant[lRE]].match, parameter[name[line]]]
if name[m] begin[:]
call[name[logger].debug, parameter[binary_operation[constant[match: ] + call[name[line].rstrip, parameter[]]]]]
if compare[call[name[subst]][constant[repl]] is constant[False]] begin[:]
variable[keep_line] assign[=] constant[False]
if name[keep_line] begin[:]
call[name[target].write, parameter[call[name[line].encode, parameter[constant[utf-8]]]]]
call[name[target].seek, parameter[constant[0]]]
with call[name[open], parameter[name[newname], constant[wb]]] begin[:]
call[name[shutil].copyfileobj, parameter[name[target], name[final]]]
call[name[logger].info, parameter[call[constant[edited txt = {newname!r}].format, parameter[]]]] | keyword[def] identifier[edit_txt] ( identifier[filename] , identifier[substitutions] , identifier[newname] = keyword[None] ):
literal[string]
keyword[if] identifier[newname] keyword[is] keyword[None] :
identifier[newname] = identifier[filename]
identifier[_substitutions] =[{ literal[string] : identifier[re] . identifier[compile] ( identifier[str] ( identifier[lRE] )),
literal[string] : identifier[re] . identifier[compile] ( identifier[str] ( identifier[sRE] )),
literal[string] : identifier[repl] }
keyword[for] identifier[lRE] , identifier[sRE] , identifier[repl] keyword[in] identifier[substitutions] keyword[if] identifier[repl] keyword[is] keyword[not] keyword[None] ]
keyword[with] identifier[tempfile] . identifier[TemporaryFile] () keyword[as] identifier[target] :
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[src] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[filename] , identifier[len] ( identifier[substitutions] )))
keyword[for] identifier[line] keyword[in] identifier[src] :
identifier[line] = identifier[line] . identifier[decode] ( literal[string] )
identifier[keep_line] = keyword[True]
keyword[for] identifier[subst] keyword[in] identifier[_substitutions] :
identifier[m] = identifier[subst] [ literal[string] ]. identifier[match] ( identifier[line] )
keyword[if] identifier[m] :
identifier[logger] . identifier[debug] ( literal[string] + identifier[line] . identifier[rstrip] ())
keyword[if] identifier[subst] [ literal[string] ] keyword[is] keyword[False] :
identifier[keep_line] = keyword[False]
keyword[else] :
identifier[line] = identifier[subst] [ literal[string] ]. identifier[sub] ( identifier[str] ( identifier[subst] [ literal[string] ]), identifier[line] )
identifier[logger] . identifier[debug] ( literal[string] + identifier[line] . identifier[rstrip] ())
keyword[if] identifier[keep_line] :
identifier[target] . identifier[write] ( identifier[line] . identifier[encode] ( literal[string] ))
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[line] )
identifier[target] . identifier[seek] ( literal[int] )
keyword[with] identifier[open] ( identifier[newname] , literal[string] ) keyword[as] identifier[final] :
identifier[shutil] . identifier[copyfileobj] ( identifier[target] , identifier[final] )
identifier[logger] . identifier[info] ( literal[string] . identifier[format] (** identifier[vars] ())) | def edit_txt(filename, substitutions, newname=None):
"""Primitive text file stream editor.
This function can be used to edit free-form text files such as the
topology file. By default it does an **in-place edit** of
*filename*. If *newname* is supplied then the edited
file is written to *newname*.
:Arguments:
*filename*
input text file
*substitutions*
substitution commands (see below for format)
*newname*
output filename; if ``None`` then *filename* is changed in
place [``None``]
*substitutions* is a list of triplets; the first two elements are regular
expression strings, the last is the substitution value. It mimics
``sed`` search and replace. The rules for *substitutions*:
.. productionlist::
substitutions: "[" search_replace_tuple, ... "]"
search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")"
line_match_RE: regular expression that selects the line (uses match)
search_RE: regular expression that is searched in the line
replacement: replacement string for search_RE
Running :func:`edit_txt` does pretty much what a simple ::
sed /line_match_RE/s/search_RE/replacement/
with repeated substitution commands does.
Special replacement values:
- ``None``: the rule is ignored
- ``False``: the line is deleted (even if other rules match)
.. note::
* No sanity checks are performed and the substitutions must be supplied
exactly as shown.
* All substitutions are applied to a line; thus the order of the substitution
commands may matter when one substitution generates a match for a subsequent rule.
* If replacement is set to ``None`` then the whole expression is ignored and
whatever is in the template is used. To unset values you must provided an
empty string or similar.
* Delete a matching line if replacement=``False``.
"""
if newname is None:
newname = filename # depends on [control=['if'], data=['newname']]
# No sanity checks (figure out later how to give decent diagnostics).
# Filter out any rules that have None in replacement.
_substitutions = [{'lRE': re.compile(str(lRE)), 'sRE': re.compile(str(sRE)), 'repl': repl} for (lRE, sRE, repl) in substitutions if repl is not None]
with tempfile.TemporaryFile() as target:
with open(filename, 'rb') as src:
logger.info('editing txt = {0!r} ({1:d} substitutions)'.format(filename, len(substitutions)))
for line in src:
line = line.decode('utf-8')
keep_line = True
for subst in _substitutions:
m = subst['lRE'].match(line)
if m: # apply substition to this line?
logger.debug('match: ' + line.rstrip())
if subst['repl'] is False: # special rule: delete line
keep_line = False # depends on [control=['if'], data=[]]
else: # standard replacement
line = subst['sRE'].sub(str(subst['repl']), line)
logger.debug('replaced: ' + line.rstrip()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subst']]
if keep_line:
target.write(line.encode('utf-8')) # depends on [control=['if'], data=[]]
else:
logger.debug('Deleting line %r', line) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['src']]
target.seek(0)
with open(newname, 'wb') as final:
shutil.copyfileobj(target, final) # depends on [control=['with'], data=['final']] # depends on [control=['with'], data=['target']]
logger.info('edited txt = {newname!r}'.format(**vars())) |
def weights_concatenated(labels):
  """Assign weight 1.0 to the "target" part of the concatenated labels.

  The labels look like:
  source English I love you . ID1 target French Je t'aime . ID1 source
  English the cat ID1 target French le chat ID1 source English ...

  We want to assign weight 1.0 to all words in the target text (including the
  ID1 end symbol), but not to the source text or the boilerplate. In the
  above example, the target words that get positive weight are:
  Je t'aime . ID1 le chat ID1

  Args:
    labels: a Tensor of token ids. NOTE(review): the 4-entry paddings below
      imply rank 4, presumably (batch, length, 1, 1) — confirm with callers.

  Returns:
    a Tensor of per-position weights (1.0 inside targets, else 0.0).
  """
  # 1 wherever the label is the segment-end symbol (id == 1), else 0.
  eos_mask = tf.to_int32(tf.equal(labels, 1))
  # Exclusive cumsum along the length axis: number of end symbols strictly
  # before each position, i.e. the index of the segment the token is in.
  sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
  # Segments alternate source/target, so odd segment indices are targets.
  in_target = tf.equal(tf.mod(sentence_num, 2), 1)
  # first two tokens of each sentence are boilerplate.
  sentence_num_plus_one = sentence_num + 1
  # Shift the segment ids right by two along the length axis; a position
  # whose id equals the shifted id is at least two tokens into its segment,
  # i.e. past the boilerplate.
  shifted = tf.pad(sentence_num_plus_one,
                   [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
  nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
  # to_float is a module-level helper (not tf): cast the boolean mask.
  ret = to_float(tf.logical_and(nonboilerplate, in_target))
  return ret
constant[Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
]
variable[eos_mask] assign[=] call[name[tf].to_int32, parameter[call[name[tf].equal, parameter[name[labels], constant[1]]]]]
variable[sentence_num] assign[=] call[name[tf].cumsum, parameter[name[eos_mask]]]
variable[in_target] assign[=] call[name[tf].equal, parameter[call[name[tf].mod, parameter[name[sentence_num], constant[2]]], constant[1]]]
variable[sentence_num_plus_one] assign[=] binary_operation[name[sentence_num] + constant[1]]
variable[shifted] assign[=] call[call[name[tf].pad, parameter[name[sentence_num_plus_one], list[[<ast.List object at 0x7da1b205af20>, <ast.List object at 0x7da1b2059630>, <ast.List object at 0x7da1b205b790>, <ast.List object at 0x7da1b1ff1fc0>]]]]][tuple[[<ast.Slice object at 0x7da1b1ff1090>, <ast.Slice object at 0x7da1b1ff11b0>, <ast.Slice object at 0x7da1b1ff14b0>, <ast.Slice object at 0x7da1b1ff07f0>]]]
variable[nonboilerplate] assign[=] call[name[tf].equal, parameter[name[sentence_num_plus_one], name[shifted]]]
variable[ret] assign[=] call[name[to_float], parameter[call[name[tf].logical_and, parameter[name[nonboilerplate], name[in_target]]]]]
return[name[ret]] | keyword[def] identifier[weights_concatenated] ( identifier[labels] ):
literal[string]
identifier[eos_mask] = identifier[tf] . identifier[to_int32] ( identifier[tf] . identifier[equal] ( identifier[labels] , literal[int] ))
identifier[sentence_num] = identifier[tf] . identifier[cumsum] ( identifier[eos_mask] , identifier[axis] = literal[int] , identifier[exclusive] = keyword[True] )
identifier[in_target] = identifier[tf] . identifier[equal] ( identifier[tf] . identifier[mod] ( identifier[sentence_num] , literal[int] ), literal[int] )
identifier[sentence_num_plus_one] = identifier[sentence_num] + literal[int]
identifier[shifted] = identifier[tf] . identifier[pad] ( identifier[sentence_num_plus_one] ,
[[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ],[ literal[int] , literal[int] ]])[:,:- literal[int] ,:,:]
identifier[nonboilerplate] = identifier[tf] . identifier[equal] ( identifier[sentence_num_plus_one] , identifier[shifted] )
identifier[ret] = identifier[to_float] ( identifier[tf] . identifier[logical_and] ( identifier[nonboilerplate] , identifier[in_target] ))
keyword[return] identifier[ret] | def weights_concatenated(labels):
"""Assign weight 1.0 to the "target" part of the concatenated labels.
The labels look like:
source English I love you . ID1 target French Je t'aime . ID1 source
English the cat ID1 target French le chat ID1 source English ...
We want to assign weight 1.0 to all words in the target text (including the
ID1 end symbol), but not to the source text or the boilerplate. In the
above example, the target words that get positive weight are:
Je t'aime . ID1 le chat ID1
Args:
labels: a Tensor
Returns:
a Tensor
"""
eos_mask = tf.to_int32(tf.equal(labels, 1))
sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True)
in_target = tf.equal(tf.mod(sentence_num, 2), 1)
# first two tokens of each sentence are boilerplate.
sentence_num_plus_one = sentence_num + 1
shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :]
nonboilerplate = tf.equal(sentence_num_plus_one, shifted)
ret = to_float(tf.logical_and(nonboilerplate, in_target))
return ret |
def consume(self, key, default=None, current=None, print_on_success=False):
    """
    Consume a key from the configuration. When a key is consumed, it
    is removed from the configuration.

    If not found, the default is returned. If the current value is not
    None, it will be returned instead, but the key will still be
    considered consumed.
    """
    # Pop unconditionally so the key counts as consumed even when
    # ``current`` overrides the returned value.
    value = self._configuration.pop(key, default)
    if current is not None:
        # Match the documented contract: any non-None current value wins,
        # including falsy ones such as 0 or "" (the previous truthiness
        # test silently ignored those).
        return current
    if value and print_on_success:
        LOG.debug("Found %s: `%s`", key, ", ".join(value))
    if value and key in self._deprecated:
        LOG.warning(
            "Configuration file uses deprecated item `%s`: "
            "please migrate to its replacement `%s`",
            key,
            self._deprecated[key],
        )
    return value
constant[
Consume a key from the configuration. When a key is consumed, it
is removed from the configuration.
If not found, the default is returned. If the current value is not
None, it will be returned instead, but the key will still be
considered consumed.
]
variable[value] assign[=] call[name[self]._configuration.pop, parameter[name[key], name[default]]]
if name[current] begin[:]
return[name[current]]
if <ast.BoolOp object at 0x7da1b1bee710> begin[:]
call[name[LOG].debug, parameter[constant[Found %s: `%s`], name[key], call[constant[, ].join, parameter[name[value]]]]]
if <ast.BoolOp object at 0x7da1b1beed10> begin[:]
call[name[LOG].warning, parameter[constant[Configuration file uses deprecated item `%s`: please migrate to its replacement `%s`], name[key], call[name[self]._deprecated][name[key]]]]
return[name[value]] | keyword[def] identifier[consume] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] , identifier[current] = keyword[None] , identifier[print_on_success] = keyword[False] ):
literal[string]
identifier[value] = identifier[self] . identifier[_configuration] . identifier[pop] ( identifier[key] , identifier[default] )
keyword[if] identifier[current] :
keyword[return] identifier[current]
keyword[if] identifier[value] keyword[and] identifier[print_on_success] :
identifier[LOG] . identifier[debug] ( literal[string] , identifier[key] , literal[string] . identifier[join] ( identifier[value] ))
keyword[if] identifier[value] keyword[and] identifier[key] keyword[in] identifier[self] . identifier[_deprecated] :
identifier[LOG] . identifier[warning] (
literal[string]
literal[string] ,
identifier[key] ,
identifier[self] . identifier[_deprecated] [ identifier[key] ],
)
keyword[return] identifier[value] | def consume(self, key, default=None, current=None, print_on_success=False):
"""
Consume a key from the configuration. When a key is consumed, it
is removed from the configuration.
If not found, the default is returned. If the current value is not
None, it will be returned instead, but the key will still be
considered consumed.
"""
value = self._configuration.pop(key, default)
if current:
return current # depends on [control=['if'], data=[]]
if value and print_on_success:
LOG.debug('Found %s: `%s`', key, ', '.join(value)) # depends on [control=['if'], data=[]]
if value and key in self._deprecated:
LOG.warning('Configuration file uses deprecated item `%s`: please migrate to its replacement `%s`', key, self._deprecated[key]) # depends on [control=['if'], data=[]]
return value |
def getHostCaPath(self, name):
    '''
    Gets the path to the CA certificate that issued a given host keypair.

    Args:
        name (str): The name of the host keypair.

    Examples:
        Get the path to the CA cert which issued the cert for "myhost":

            mypath = cdir.getHostCaPath('myhost')

    Returns:
        str: The path if it exists, otherwise None.
    '''
    cert = self.getHostCert(name)
    return None if cert is None else self._getCaPath(cert)
constant[
Gets the path to the CA certificate that issued a given host keypair.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the CA cert which issue the cert for "myhost":
mypath = cdir.getHostCaPath('myhost')
Returns:
str: The path if exists.
]
variable[cert] assign[=] call[name[self].getHostCert, parameter[name[name]]]
if compare[name[cert] is constant[None]] begin[:]
return[constant[None]]
return[call[name[self]._getCaPath, parameter[name[cert]]]] | keyword[def] identifier[getHostCaPath] ( identifier[self] , identifier[name] ):
literal[string]
identifier[cert] = identifier[self] . identifier[getHostCert] ( identifier[name] )
keyword[if] identifier[cert] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[return] identifier[self] . identifier[_getCaPath] ( identifier[cert] ) | def getHostCaPath(self, name):
"""
Gets the path to the CA certificate that issued a given host keypair.
Args:
name (str): The name of the host keypair.
Examples:
Get the path to the CA cert which issue the cert for "myhost":
mypath = cdir.getHostCaPath('myhost')
Returns:
str: The path if exists.
"""
cert = self.getHostCert(name)
if cert is None:
return None # depends on [control=['if'], data=[]]
return self._getCaPath(cert) |
def make_zip(folder_path, output_filename):
    """Pack every non-zip file under *folder_path* (recursively) into a zip.

    Empty directories are not archived. Member names inside the archive
    are relative to *folder_path*. A relative *output_filename* is
    resolved inside *folder_path* (as before, the zip is written from
    within that directory).

    Example:
        make_zip('results', 'zips/results_2017-05-09.zip')
    """
    # Collect archive-relative paths of all regular files first, skipping
    # existing .zip archives so previous results are not nested. (The old
    # substring test `'zip' not in path` also dropped innocent files such
    # as "zipcode.txt"; test the extension instead.)
    members = []
    for root, _dirs, files in os.walk(folder_path):
        for name in files:
            if not name.lower().endswith('.zip'):
                # relpath is separator-safe, unlike replace(folder + '/').
                members.append(os.path.relpath(os.path.join(root, name), folder_path))

    cwd = os.getcwd()
    os.chdir(folder_path)
    try:
        with zipfile.ZipFile(output_filename, 'w') as archive:
            for member in members:
                archive.write(member)
    finally:
        # Always restore the working directory, even if zipping fails.
        os.chdir(cwd)
constant[将目录中除zip之外的文件打包成zip文件(包括子文件夹)
空文件夹不会被打包
example
----------------
make_zip('results','zips//招标信息结果_2017-05-09.zip')
]
variable[cwd] assign[=] call[name[os].getcwd, parameter[]]
variable[file_lists] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18ede6650>, <ast.Name object at 0x7da18ede5540>, <ast.Name object at 0x7da18ede5c90>]]] in starred[call[name[os].walk, parameter[name[folder_path]]]] begin[:]
for taget[name[file]] in starred[name[files]] begin[:]
variable[file_1] assign[=] call[call[name[os].path.join, parameter[name[root], name[file]]].replace, parameter[binary_operation[name[folder_path] + constant[/]], constant[]]]
if compare[constant[zip] <ast.NotIn object at 0x7da2590d7190> name[file_1]] begin[:]
call[name[file_lists].append, parameter[name[file_1]]]
call[name[os].chdir, parameter[name[folder_path]]]
with call[name[zipfile].ZipFile, parameter[name[output_filename], constant[w]]] begin[:]
for taget[name[file]] in starred[name[file_lists]] begin[:]
call[name[myzip].write, parameter[name[file]]]
call[name[os].chdir, parameter[name[cwd]]] | keyword[def] identifier[make_zip] ( identifier[folder_path] , identifier[output_filename] ):
literal[string]
identifier[cwd] = identifier[os] . identifier[getcwd] ()
identifier[file_lists] =[]
keyword[for] identifier[root] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[folder_path] ):
keyword[for] identifier[file] keyword[in] identifier[files] :
identifier[file_1] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[file] ). identifier[replace] ( identifier[folder_path] + literal[string] , literal[string] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[file_1] :
identifier[file_lists] . identifier[append] ( identifier[file_1] )
identifier[os] . identifier[chdir] ( identifier[folder_path] )
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[output_filename] , literal[string] ) keyword[as] identifier[myzip] :
keyword[for] identifier[file] keyword[in] identifier[file_lists] :
identifier[myzip] . identifier[write] ( identifier[file] )
identifier[os] . identifier[chdir] ( identifier[cwd] ) | def make_zip(folder_path, output_filename):
"""将目录中除zip之外的文件打包成zip文件(包括子文件夹)
空文件夹不会被打包
example
----------------
make_zip('results','zips//招标信息结果_2017-05-09.zip')
"""
cwd = os.getcwd()
# 获取需要打包的文件列表
file_lists = []
for (root, dirs, files) in os.walk(folder_path):
for file in files:
file_1 = os.path.join(root, file).replace(folder_path + '/', '')
if 'zip' not in file_1:
file_lists.append(file_1) # depends on [control=['if'], data=['file_1']] # depends on [control=['for'], data=['file']] # depends on [control=['for'], data=[]]
# 将文件列表打包成zip
os.chdir(folder_path)
with zipfile.ZipFile(output_filename, 'w') as myzip:
for file in file_lists:
myzip.write(file) # depends on [control=['for'], data=['file']] # depends on [control=['with'], data=['myzip']]
# 将工作目录切换回原始
os.chdir(cwd) |
def get_urls(self):
    """
    Prepend the recurrence-rule preview URL to the default admin URLs.
    """
    from django.conf.urls import patterns, url
    preview_urls = patterns(
        '',
        url(
            r'^preview/$',
            self.admin_site.admin_view(self.preview),
            name='icekit_events_recurrencerule_preview'
        ),
    )
    # Preview URL first so it wins over the catch-all admin patterns.
    return preview_urls + super(RecurrenceRuleAdmin, self).get_urls()
constant[
Add a preview URL.
]
from relative_module[django.conf.urls] import module[patterns], module[url]
variable[urls] assign[=] call[call[name[super], parameter[name[RecurrenceRuleAdmin], name[self]]].get_urls, parameter[]]
variable[my_urls] assign[=] call[name[patterns], parameter[constant[], call[name[url], parameter[constant[^preview/$], call[name[self].admin_site.admin_view, parameter[name[self].preview]]]]]]
return[binary_operation[name[my_urls] + name[urls]]] | keyword[def] identifier[get_urls] ( identifier[self] ):
literal[string]
keyword[from] identifier[django] . identifier[conf] . identifier[urls] keyword[import] identifier[patterns] , identifier[url]
identifier[urls] = identifier[super] ( identifier[RecurrenceRuleAdmin] , identifier[self] ). identifier[get_urls] ()
identifier[my_urls] = identifier[patterns] (
literal[string] ,
identifier[url] (
literal[string] ,
identifier[self] . identifier[admin_site] . identifier[admin_view] ( identifier[self] . identifier[preview] ),
identifier[name] = literal[string]
),
)
keyword[return] identifier[my_urls] + identifier[urls] | def get_urls(self):
"""
Add a preview URL.
"""
from django.conf.urls import patterns, url
urls = super(RecurrenceRuleAdmin, self).get_urls()
my_urls = patterns('', url('^preview/$', self.admin_site.admin_view(self.preview), name='icekit_events_recurrencerule_preview'))
return my_urls + urls |
def make_gym_env(name,
                 rl_env_max_episode_steps=-1,
                 maxskip_env=False,
                 rendered_env=False,
                 rendered_env_resize_to=None,
                 sticky_actions=False):
  """Instantiate a gym environment and apply the standard wrapper stack.

  NOTE: the returned environment may already be wrapped with TimeLimit!

  Args:
    name: `str` - base name of the gym env to make.
    rl_env_max_episode_steps: `int` or None - any value < 0 returns the env
      as-is; otherwise the requested time limit is imposed. None returns a
      wrapped env with no step limit.
    maxskip_env: whether to also apply the MaxAndSkip wrapper before the
      time limit.
    rendered_env: whether to force rendering for observations, for envs that
      do not natively render the scene for observations.
    rendered_env_resize_to: optional [height, width] to resize the native
      environment render to.
    sticky_actions: whether to apply sticky actions before MaxAndSkip.

  Returns:
    An instance of `gym.Env` or `gym.Wrapper`.
  """
  # Delegate all wrapping decisions to the shared helper.
  return gym_env_wrapper(gym.make(name), rl_env_max_episode_steps,
                         maxskip_env, rendered_env, rendered_env_resize_to,
                         sticky_actions)
constant[Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-in, otherwise we impose the requested timelimit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
]
variable[env] assign[=] call[name[gym].make, parameter[name[name]]]
return[call[name[gym_env_wrapper], parameter[name[env], name[rl_env_max_episode_steps], name[maxskip_env], name[rendered_env], name[rendered_env_resize_to], name[sticky_actions]]]] | keyword[def] identifier[make_gym_env] ( identifier[name] ,
identifier[rl_env_max_episode_steps] =- literal[int] ,
identifier[maxskip_env] = keyword[False] ,
identifier[rendered_env] = keyword[False] ,
identifier[rendered_env_resize_to] = keyword[None] ,
identifier[sticky_actions] = keyword[False] ):
literal[string]
identifier[env] = identifier[gym] . identifier[make] ( identifier[name] )
keyword[return] identifier[gym_env_wrapper] ( identifier[env] , identifier[rl_env_max_episode_steps] , identifier[maxskip_env] ,
identifier[rendered_env] , identifier[rendered_env_resize_to] , identifier[sticky_actions] ) | def make_gym_env(name, rl_env_max_episode_steps=-1, maxskip_env=False, rendered_env=False, rendered_env_resize_to=None, sticky_actions=False):
"""Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-in, otherwise we impose the requested timelimit. Setting this to
None returns a wrapped env that doesn't have a step limit.
maxskip_env: whether to also use MaxAndSkip wrapper before time limit.
rendered_env: whether to force render for observations. Use this for
environments that are not natively rendering the scene for observations.
rendered_env_resize_to: a list of [height, width] to change the original
resolution of the native environment render.
sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper.
Returns:
An instance of `gym.Env` or `gym.Wrapper`.
"""
env = gym.make(name)
return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env, rendered_env_resize_to, sticky_actions) |
def create(gandi, datacenter, bandwidth, ip_version, vlan, ip, attach,
           background):
    """Create a public or private ip
    """
    # A vlan (private network) only works with IPv4 addresses.
    if vlan and ip_version != 4:
        gandi.echo('You must have an --ip-version to 4 when having a vlan.')
        return
    # A fixed address only makes sense inside a vlan.
    if ip and not vlan:
        gandi.echo('You must have a --vlan when giving an --ip.')
        return

    vm_info = None
    if attach:
        vm_info = gandi.iaas.info(attach)

    # When attaching, the requested datacenter must match the vm's one.
    if vm_info and datacenter:
        requested_dc = gandi.datacenter.usable_id(datacenter)
        if requested_dc != vm_info['datacenter_id']:
            gandi.echo('The datacenter you provided does not match the '
                       'datacenter of the vm you want to attach to.')
            return

    # Default datacenter: the vm's own, or 'LU' when not attaching.
    if not datacenter:
        if vm_info:
            datacenter = vm_info['datacenter_id']
        else:
            datacenter = 'LU'

    # Warn (but do not abort) when the datacenter is scheduled to close.
    try:
        gandi.datacenter.is_opened(datacenter, 'iaas')
    except DatacenterLimited as exc:
        gandi.echo('/!\ Datacenter %s will be closed on %s, '
                   'please consider using another datacenter.' %
                   (datacenter, exc.date))

    return gandi.ip.create(ip_version, datacenter, bandwidth, attach,
                           vlan, ip, background)
constant[Create a public or private ip
]
if <ast.BoolOp object at 0x7da20c6e6b60> begin[:]
call[name[gandi].echo, parameter[constant[You must have an --ip-version to 4 when having a vlan.]]]
return[None]
if <ast.BoolOp object at 0x7da20c6e7670> begin[:]
call[name[gandi].echo, parameter[constant[You must have a --vlan when giving an --ip.]]]
return[None]
variable[vm_] assign[=] <ast.IfExp object at 0x7da20c6e4bb0>
if <ast.BoolOp object at 0x7da20c6e6e60> begin[:]
variable[dc_id] assign[=] call[name[gandi].datacenter.usable_id, parameter[name[datacenter]]]
if compare[name[dc_id] not_equal[!=] call[name[vm_]][constant[datacenter_id]]] begin[:]
call[name[gandi].echo, parameter[constant[The datacenter you provided does not match the datacenter of the vm you want to attach to.]]]
return[None]
if <ast.UnaryOp object at 0x7da20c6e6ad0> begin[:]
variable[datacenter] assign[=] <ast.IfExp object at 0x7da20c6e74c0>
<ast.Try object at 0x7da20c6e6cb0>
return[call[name[gandi].ip.create, parameter[name[ip_version], name[datacenter], name[bandwidth], name[attach], name[vlan], name[ip], name[background]]]] | keyword[def] identifier[create] ( identifier[gandi] , identifier[datacenter] , identifier[bandwidth] , identifier[ip_version] , identifier[vlan] , identifier[ip] , identifier[attach] ,
identifier[background] ):
literal[string]
keyword[if] identifier[ip_version] != literal[int] keyword[and] identifier[vlan] :
identifier[gandi] . identifier[echo] ( literal[string] )
keyword[return]
keyword[if] identifier[ip] keyword[and] keyword[not] identifier[vlan] :
identifier[gandi] . identifier[echo] ( literal[string] )
keyword[return]
identifier[vm_] = identifier[gandi] . identifier[iaas] . identifier[info] ( identifier[attach] ) keyword[if] identifier[attach] keyword[else] keyword[None]
keyword[if] identifier[datacenter] keyword[and] identifier[vm_] :
identifier[dc_id] = identifier[gandi] . identifier[datacenter] . identifier[usable_id] ( identifier[datacenter] )
keyword[if] identifier[dc_id] != identifier[vm_] [ literal[string] ]:
identifier[gandi] . identifier[echo] ( literal[string]
literal[string] )
keyword[return]
keyword[if] keyword[not] identifier[datacenter] :
identifier[datacenter] = identifier[vm_] [ literal[string] ] keyword[if] identifier[vm_] keyword[else] literal[string]
keyword[try] :
identifier[gandi] . identifier[datacenter] . identifier[is_opened] ( identifier[datacenter] , literal[string] )
keyword[except] identifier[DatacenterLimited] keyword[as] identifier[exc] :
identifier[gandi] . identifier[echo] ( literal[string]
literal[string] %
( identifier[datacenter] , identifier[exc] . identifier[date] ))
keyword[return] identifier[gandi] . identifier[ip] . identifier[create] ( identifier[ip_version] , identifier[datacenter] , identifier[bandwidth] , identifier[attach] ,
identifier[vlan] , identifier[ip] , identifier[background] ) | def create(gandi, datacenter, bandwidth, ip_version, vlan, ip, attach, background):
"""Create a public or private ip
"""
if ip_version != 4 and vlan:
gandi.echo('You must have an --ip-version to 4 when having a vlan.')
return # depends on [control=['if'], data=[]]
if ip and (not vlan):
gandi.echo('You must have a --vlan when giving an --ip.')
return # depends on [control=['if'], data=[]]
vm_ = gandi.iaas.info(attach) if attach else None
if datacenter and vm_:
dc_id = gandi.datacenter.usable_id(datacenter)
if dc_id != vm_['datacenter_id']:
gandi.echo('The datacenter you provided does not match the datacenter of the vm you want to attach to.')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if not datacenter:
datacenter = vm_['datacenter_id'] if vm_ else 'LU' # depends on [control=['if'], data=[]]
try:
gandi.datacenter.is_opened(datacenter, 'iaas') # depends on [control=['try'], data=[]]
except DatacenterLimited as exc:
gandi.echo('/!\\ Datacenter %s will be closed on %s, please consider using another datacenter.' % (datacenter, exc.date)) # depends on [control=['except'], data=['exc']]
return gandi.ip.create(ip_version, datacenter, bandwidth, attach, vlan, ip, background) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.