Column summary (all four columns are strings; values below are the min / max string lengths):

    code              75 / 104k
    code_sememe       47 / 309k
    token_type        215 / 214k
    code_dependency   75 / 155k
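If these rows are published as a Hugging Face dataset (an assumption; the dataset path below is a placeholder, not taken from this preview), they can be loaded and inspected roughly like this:

from datasets import load_dataset

# Placeholder path; substitute the real dataset identifier.
ds = load_dataset("user/python-code-sememe", split="train")

# Each row holds four parallel string views of one Python function.
row = ds[0]
for column in ("code", "code_sememe", "token_type", "code_dependency"):
    print(column, len(row[column]))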
def _adjust_ser_count(self, plotArea, new_ser_count):
    """
    Adjust the number of c:ser elements in *plotArea* to *new_ser_count*.
    Excess c:ser elements are deleted from the end, along with any xChart
    elements that are left empty as a result. Series elements are
    considered in xChart + series order. Any new c:ser elements required
    are added to the last xChart element and cloned from the last c:ser
    element in that xChart.
    """
    ser_count_diff = new_ser_count - len(plotArea.sers)
    if ser_count_diff > 0:
        self._add_cloned_sers(plotArea, ser_count_diff)
    elif ser_count_diff < 0:
        self._trim_ser_count_by(plotArea, abs(ser_count_diff))
def function[_adjust_ser_count, parameter[self, plotArea, new_ser_count]]: constant[ Adjust the number of c:ser elements in *plotArea* to *new_ser_count*. Excess c:ser elements are deleted from the end, along with any xChart elements that are left empty as a result. Series elements are considered in xChart + series order. Any new c:ser elements required are added to the last xChart element and cloned from the last c:ser element in that xChart. ] variable[ser_count_diff] assign[=] binary_operation[name[new_ser_count] - call[name[len], parameter[name[plotArea].sers]]] if compare[name[ser_count_diff] greater[>] constant[0]] begin[:] call[name[self]._add_cloned_sers, parameter[name[plotArea], name[ser_count_diff]]]
keyword[def] identifier[_adjust_ser_count] ( identifier[self] , identifier[plotArea] , identifier[new_ser_count] ): literal[string] identifier[ser_count_diff] = identifier[new_ser_count] - identifier[len] ( identifier[plotArea] . identifier[sers] ) keyword[if] identifier[ser_count_diff] > literal[int] : identifier[self] . identifier[_add_cloned_sers] ( identifier[plotArea] , identifier[ser_count_diff] ) keyword[elif] identifier[ser_count_diff] < literal[int] : identifier[self] . identifier[_trim_ser_count_by] ( identifier[plotArea] , identifier[abs] ( identifier[ser_count_diff] ))
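The token_type row above flattens the lexer view into space-separated items such as keyword[def] and identifier[plotArea], with bare punctuation left untagged. A rough, illustrative way to split such a value back into (category, lexeme) pairs; this helper is not part of any published tooling for the dataset:

import re

# Typed tokens look like "keyword[def]"; anything else is untyped punctuation.
TYPED = re.compile(r"^(\w+)\[(.*)\]$")

def parse_token_stream(token_type: str):
    """Return a list of (category, lexeme) pairs; category is None for punctuation."""
    pairs = []
    for tok in token_type.split():
        m = TYPED.match(tok)
        pairs.append((m.group(1), m.group(2)) if m else (None, tok))
    return pairs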
def _adjust_ser_count(self, plotArea, new_ser_count): """ Adjust the number of c:ser elements in *plotArea* to *new_ser_count*. Excess c:ser elements are deleted from the end, along with any xChart elements that are left empty as a result. Series elements are considered in xChart + series order. Any new c:ser elements required are added to the last xChart element and cloned from the last c:ser element in that xChart. """ ser_count_diff = new_ser_count - len(plotArea.sers) if ser_count_diff > 0: self._add_cloned_sers(plotArea, ser_count_diff) # depends on [control=['if'], data=['ser_count_diff']] elif ser_count_diff < 0: self._trim_ser_count_by(plotArea, abs(ser_count_diff)) # depends on [control=['if'], data=['ser_count_diff']]
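The code_dependency row above is the same function with control- and data-flow markers appended as trailing "# depends on [...]" comments. A minimal sketch of stripping those markers to recover plain source, assuming each marker runs to the end of its line in the stored string (this helper is hypothetical, not dataset tooling):

import re

# Drop everything from "# depends on [control=" to the end of the line.
DEP_MARKER = re.compile(r"\s*# depends on \[control=.*$", re.MULTILINE)

def strip_dependency_markers(code_dependency: str) -> str:
    """Return the annotated source with the dependency comments removed."""
    return DEP_MARKER.sub("", code_dependency)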
def push_resource_cache(resourceid, info):
    """
    Cache resource specific information

    :param resourceid: Resource id as string
    :param info: Dict to push
    :return: Nothing
    """
    if not resourceid:
        raise ResourceInitError("Resource id missing")
    if not DutInformationList._cache.get(resourceid):
        DutInformationList._cache[resourceid] = dict()
    DutInformationList._cache[resourceid] = merge(DutInformationList._cache[resourceid], info)
def function[push_resource_cache, parameter[resourceid, info]]: constant[ Cache resource specific information :param resourceid: Resource id as string :param info: Dict to push :return: Nothing ] if <ast.UnaryOp object at 0x7da1b0c35780> begin[:] <ast.Raise object at 0x7da1b0c373d0> if <ast.UnaryOp object at 0x7da1b0c36d40> begin[:] call[name[DutInformationList]._cache][name[resourceid]] assign[=] call[name[dict], parameter[]] call[name[DutInformationList]._cache][name[resourceid]] assign[=] call[name[merge], parameter[call[name[DutInformationList]._cache][name[resourceid]], name[info]]]
keyword[def] identifier[push_resource_cache] ( identifier[resourceid] , identifier[info] ): literal[string] keyword[if] keyword[not] identifier[resourceid] : keyword[raise] identifier[ResourceInitError] ( literal[string] ) keyword[if] keyword[not] identifier[DutInformationList] . identifier[_cache] . identifier[get] ( identifier[resourceid] ): identifier[DutInformationList] . identifier[_cache] [ identifier[resourceid] ]= identifier[dict] () identifier[DutInformationList] . identifier[_cache] [ identifier[resourceid] ]= identifier[merge] ( identifier[DutInformationList] . identifier[_cache] [ identifier[resourceid] ], identifier[info] )
def push_resource_cache(resourceid, info): """ Cache resource specific information :param resourceid: Resource id as string :param info: Dict to push :return: Nothing """ if not resourceid: raise ResourceInitError('Resource id missing') # depends on [control=['if'], data=[]] if not DutInformationList._cache.get(resourceid): DutInformationList._cache[resourceid] = dict() # depends on [control=['if'], data=[]] DutInformationList._cache[resourceid] = merge(DutInformationList._cache[resourceid], info)
def check_comment_belongs_to_record(comid, recid):
    """
    Return True if the comment is indeed part of given record (even if
    comment or/and record have been "deleted"). Else return False.

    :param comid: the id of the comment to check membership
    :param recid: the recid of the record we want to check if comment belongs to
    """
    query = """SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s"""
    params = (comid,)
    res = run_sql(query, params)
    if res and res[0][0] == recid:
        return True
    return False
def function[check_comment_belongs_to_record, parameter[comid, recid]]: constant[ Return True if the comment is indeed part of given record (even if comment or/and record have been "deleted"). Else return False. :param comid: the id of the comment to check membership :param recid: the recid of the record we want to check if comment belongs to ] variable[query] assign[=] constant[SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s] variable[params] assign[=] tuple[[<ast.Name object at 0x7da2043464d0>]] variable[res] assign[=] call[name[run_sql], parameter[name[query], name[params]]] if <ast.BoolOp object at 0x7da204345ed0> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[check_comment_belongs_to_record] ( identifier[comid] , identifier[recid] ): literal[string] identifier[query] = literal[string] identifier[params] =( identifier[comid] ,) identifier[res] = identifier[run_sql] ( identifier[query] , identifier[params] ) keyword[if] identifier[res] keyword[and] identifier[res] [ literal[int] ][ literal[int] ]== identifier[recid] : keyword[return] keyword[True] keyword[return] keyword[False]
def check_comment_belongs_to_record(comid, recid): """ Return True if the comment is indeed part of given record (even if comment or/and record have been "deleted"). Else return False. :param comid: the id of the comment to check membership :param recid: the recid of the record we want to check if comment belongs to """ query = 'SELECT id_bibrec from "cmtRECORDCOMMENT" WHERE id=%s' params = (comid,) res = run_sql(query, params) if res and res[0][0] == recid: return True # depends on [control=['if'], data=[]] return False
def post_comment(self, sharekey=None, comment=None):
    """
    Post a comment on behalf of the current user to the SharedFile with
    the given sharekey.

    Args:
        sharekey (str): Sharekey of the SharedFile to which you'd like
            to post a comment.
        comment (str): Text of the comment to post.

    Returns:
        Comment object.
    """
    endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)
    post_data = {'body': comment}
    data = self._make_request("POST", endpoint=endpoint, data=post_data)
    return Comment.NewFromJSON(data)
def function[post_comment, parameter[self, sharekey, comment]]: constant[ Post a comment on behalf of the current user to the SharedFile with the given sharekey. Args: sharekey (str): Sharekey of the SharedFile to which you'd like to post a comment. comment (str): Text of the comment to post. Returns: Comment object. ] variable[endpoint] assign[=] call[constant[/api/sharedfile/{0}/comments].format, parameter[name[sharekey]]] variable[post_data] assign[=] dictionary[[<ast.Constant object at 0x7da204565a50>], [<ast.Name object at 0x7da204565690>]] variable[data] assign[=] call[name[self]._make_request, parameter[constant[POST]]] return[call[name[Comment].NewFromJSON, parameter[name[data]]]]
keyword[def] identifier[post_comment] ( identifier[self] , identifier[sharekey] = keyword[None] , identifier[comment] = keyword[None] ): literal[string] identifier[endpoint] = literal[string] . identifier[format] ( identifier[sharekey] ) identifier[post_data] ={ literal[string] : identifier[comment] } identifier[data] = identifier[self] . identifier[_make_request] ( literal[string] , identifier[endpoint] = identifier[endpoint] , identifier[data] = identifier[post_data] ) keyword[return] identifier[Comment] . identifier[NewFromJSON] ( identifier[data] )
def post_comment(self, sharekey=None, comment=None): """ Post a comment on behalf of the current user to the SharedFile with the given sharekey. Args: sharekey (str): Sharekey of the SharedFile to which you'd like to post a comment. comment (str): Text of the comment to post. Returns: Comment object. """ endpoint = '/api/sharedfile/{0}/comments'.format(sharekey) post_data = {'body': comment} data = self._make_request('POST', endpoint=endpoint, data=post_data) return Comment.NewFromJSON(data)
def BytesIO(*args, **kwargs):
    """BytesIO constructor shim for the async wrapper."""
    raw = sync_io.BytesIO(*args, **kwargs)
    return AsyncBytesIOWrapper(raw)
def function[BytesIO, parameter[]]: constant[BytesIO constructor shim for the async wrapper.] variable[raw] assign[=] call[name[sync_io].BytesIO, parameter[<ast.Starred object at 0x7da204620a30>]] return[call[name[AsyncBytesIOWrapper], parameter[name[raw]]]]
keyword[def] identifier[BytesIO] (* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[raw] = identifier[sync_io] . identifier[BytesIO] (* identifier[args] ,** identifier[kwargs] ) keyword[return] identifier[AsyncBytesIOWrapper] ( identifier[raw] )
def BytesIO(*args, **kwargs): """BytesIO constructor shim for the async wrapper.""" raw = sync_io.BytesIO(*args, **kwargs) return AsyncBytesIOWrapper(raw)
def siblingsId(self) -> Tuple[CtsReference, CtsReference]:
    """ Siblings Identifiers of the passage

    :rtype: (str, str)
    """
    self._raise_depth()
    if not self._text:
        raise MissingAttribute("CapitainsCtsPassage was initiated without CtsTextMetadata object")
    if self._prev_next:
        return self._prev_next

    document_references = self._text.getReffs(level=self.depth)

    if self.reference.is_range():
        start, end = self.reference.start, self.reference.end
        range_length = len(self.getReffs(level=0))
    else:
        start = end = self.reference.start
        range_length = 1

    start = document_references.index(start)
    end = document_references.index(end)

    if start == 0:
        # If the passage is already at the beginning
        _prev = None
    elif start - range_length < 0:
        if start == end:
            _prev = document_references[0]
        else:
            _prev = "{}-{}".format(document_references[0], document_references[start-1])
    else:
        if start == end:
            _prev = document_references[start-1]
        else:
            _prev = "{}-{}".format(document_references[start-range_length], document_references[start-1])

    if start + 1 == len(document_references) or end + 1 == len(document_references):
        # If the passage is already at the end
        _next = None
    elif end + range_length > len(document_references):
        if start == end:
            _next = document_references[-1]
        else:
            _next = "{}-{}".format(document_references[end+1], document_references[-1])
    else:
        if start == end:
            _next = document_references[end+1]
        else:
            _next = "{}-{}".format(document_references[end+1], document_references[end + range_length])

    self._prev_next = (CtsReference(_prev), CtsReference(_next))
    return self._prev_next
def function[siblingsId, parameter[self]]: constant[ Siblings Identifiers of the passage :rtype: (str, str) ] call[name[self]._raise_depth, parameter[]] if <ast.UnaryOp object at 0x7da18bc72710> begin[:] <ast.Raise object at 0x7da18bc71840> if name[self]._prev_next begin[:] return[name[self]._prev_next] variable[document_references] assign[=] call[name[self]._text.getReffs, parameter[]] if call[name[self].reference.is_range, parameter[]] begin[:] <ast.Tuple object at 0x7da18bc70910> assign[=] tuple[[<ast.Attribute object at 0x7da18bc71a80>, <ast.Attribute object at 0x7da18bc726b0>]] variable[range_length] assign[=] call[name[len], parameter[call[name[self].getReffs, parameter[]]]] variable[start] assign[=] call[name[document_references].index, parameter[name[start]]] variable[end] assign[=] call[name[document_references].index, parameter[name[end]]] if compare[name[start] equal[==] constant[0]] begin[:] variable[_prev] assign[=] constant[None] if <ast.BoolOp object at 0x7da2047e9e70> begin[:] variable[_next] assign[=] constant[None] name[self]._prev_next assign[=] tuple[[<ast.Call object at 0x7da18bcc91b0>, <ast.Call object at 0x7da18bccb6d0>]] return[name[self]._prev_next]
keyword[def] identifier[siblingsId] ( identifier[self] )-> identifier[Tuple] [ identifier[CtsReference] , identifier[CtsReference] ]: literal[string] identifier[self] . identifier[_raise_depth] () keyword[if] keyword[not] identifier[self] . identifier[_text] : keyword[raise] identifier[MissingAttribute] ( literal[string] ) keyword[if] identifier[self] . identifier[_prev_next] : keyword[return] identifier[self] . identifier[_prev_next] identifier[document_references] = identifier[self] . identifier[_text] . identifier[getReffs] ( identifier[level] = identifier[self] . identifier[depth] ) keyword[if] identifier[self] . identifier[reference] . identifier[is_range] (): identifier[start] , identifier[end] = identifier[self] . identifier[reference] . identifier[start] , identifier[self] . identifier[reference] . identifier[end] identifier[range_length] = identifier[len] ( identifier[self] . identifier[getReffs] ( identifier[level] = literal[int] )) keyword[else] : identifier[start] = identifier[end] = identifier[self] . identifier[reference] . identifier[start] identifier[range_length] = literal[int] identifier[start] = identifier[document_references] . identifier[index] ( identifier[start] ) identifier[end] = identifier[document_references] . identifier[index] ( identifier[end] ) keyword[if] identifier[start] == literal[int] : identifier[_prev] = keyword[None] keyword[elif] identifier[start] - identifier[range_length] < literal[int] : keyword[if] identifier[start] == identifier[end] : identifier[_prev] = identifier[document_references] [ literal[int] ] keyword[else] : identifier[_prev] = literal[string] . identifier[format] ( identifier[document_references] [ literal[int] ], identifier[document_references] [ identifier[start] - literal[int] ]) keyword[else] : keyword[if] identifier[start] == identifier[end] : identifier[_prev] = identifier[document_references] [ identifier[start] - literal[int] ] keyword[else] : identifier[_prev] = literal[string] . identifier[format] ( identifier[document_references] [ identifier[start] - identifier[range_length] ], identifier[document_references] [ identifier[start] - literal[int] ]) keyword[if] identifier[start] + literal[int] == identifier[len] ( identifier[document_references] ) keyword[or] identifier[end] + literal[int] == identifier[len] ( identifier[document_references] ): identifier[_next] = keyword[None] keyword[elif] identifier[end] + identifier[range_length] > identifier[len] ( identifier[document_references] ): keyword[if] identifier[start] == identifier[end] : identifier[_next] = identifier[document_references] [- literal[int] ] keyword[else] : identifier[_next] = literal[string] . identifier[format] ( identifier[document_references] [ identifier[end] + literal[int] ], identifier[document_references] [- literal[int] ]) keyword[else] : keyword[if] identifier[start] == identifier[end] : identifier[_next] = identifier[document_references] [ identifier[end] + literal[int] ] keyword[else] : identifier[_next] = literal[string] . identifier[format] ( identifier[document_references] [ identifier[end] + literal[int] ], identifier[document_references] [ identifier[end] + identifier[range_length] ]) identifier[self] . identifier[_prev_next] =( identifier[CtsReference] ( identifier[_prev] ), identifier[CtsReference] ( identifier[_next] )) keyword[return] identifier[self] . identifier[_prev_next]
def siblingsId(self) -> Tuple[CtsReference, CtsReference]: """ Siblings Identifiers of the passage :rtype: (str, str) """ self._raise_depth() if not self._text: raise MissingAttribute('CapitainsCtsPassage was initiated without CtsTextMetadata object') # depends on [control=['if'], data=[]] if self._prev_next: return self._prev_next # depends on [control=['if'], data=[]] document_references = self._text.getReffs(level=self.depth) if self.reference.is_range(): (start, end) = (self.reference.start, self.reference.end) range_length = len(self.getReffs(level=0)) # depends on [control=['if'], data=[]] else: start = end = self.reference.start range_length = 1 start = document_references.index(start) end = document_references.index(end) if start == 0: # If the passage is already at the beginning _prev = None # depends on [control=['if'], data=[]] elif start - range_length < 0: if start == end: _prev = document_references[0] # depends on [control=['if'], data=[]] else: _prev = '{}-{}'.format(document_references[0], document_references[start - 1]) # depends on [control=['if'], data=[]] elif start == end: _prev = document_references[start - 1] # depends on [control=['if'], data=['start']] else: _prev = '{}-{}'.format(document_references[start - range_length], document_references[start - 1]) if start + 1 == len(document_references) or end + 1 == len(document_references): # If the passage is already at the end _next = None # depends on [control=['if'], data=[]] elif end + range_length > len(document_references): if start == end: _next = document_references[-1] # depends on [control=['if'], data=[]] else: _next = '{}-{}'.format(document_references[end + 1], document_references[-1]) # depends on [control=['if'], data=[]] elif start == end: _next = document_references[end + 1] # depends on [control=['if'], data=['end']] else: _next = '{}-{}'.format(document_references[end + 1], document_references[end + range_length]) self._prev_next = (CtsReference(_prev), CtsReference(_next)) return self._prev_next
def focus_prev(self):
    """focus previous message in depth first order"""
    mid = self.get_selected_mid()
    localroot = self._sanitize_position((mid,))
    if localroot == self.get_focus()[1]:
        newpos = self._tree.prev_position(mid)
        if newpos is not None:
            newpos = self._sanitize_position((newpos,))
    else:
        newpos = localroot
    if newpos is not None:
        self.body.set_focus(newpos)
def function[focus_prev, parameter[self]]: constant[focus previous message in depth first order] variable[mid] assign[=] call[name[self].get_selected_mid, parameter[]] variable[localroot] assign[=] call[name[self]._sanitize_position, parameter[tuple[[<ast.Name object at 0x7da1b07d1cf0>]]]] if compare[name[localroot] equal[==] call[call[name[self].get_focus, parameter[]]][constant[1]]] begin[:] variable[newpos] assign[=] call[name[self]._tree.prev_position, parameter[name[mid]]] if compare[name[newpos] is_not constant[None]] begin[:] variable[newpos] assign[=] call[name[self]._sanitize_position, parameter[tuple[[<ast.Name object at 0x7da1b07d0a60>]]]] if compare[name[newpos] is_not constant[None]] begin[:] call[name[self].body.set_focus, parameter[name[newpos]]]
keyword[def] identifier[focus_prev] ( identifier[self] ): literal[string] identifier[mid] = identifier[self] . identifier[get_selected_mid] () identifier[localroot] = identifier[self] . identifier[_sanitize_position] (( identifier[mid] ,)) keyword[if] identifier[localroot] == identifier[self] . identifier[get_focus] ()[ literal[int] ]: identifier[newpos] = identifier[self] . identifier[_tree] . identifier[prev_position] ( identifier[mid] ) keyword[if] identifier[newpos] keyword[is] keyword[not] keyword[None] : identifier[newpos] = identifier[self] . identifier[_sanitize_position] (( identifier[newpos] ,)) keyword[else] : identifier[newpos] = identifier[localroot] keyword[if] identifier[newpos] keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[body] . identifier[set_focus] ( identifier[newpos] )
def focus_prev(self): """focus previous message in depth first order""" mid = self.get_selected_mid() localroot = self._sanitize_position((mid,)) if localroot == self.get_focus()[1]: newpos = self._tree.prev_position(mid) if newpos is not None: newpos = self._sanitize_position((newpos,)) # depends on [control=['if'], data=['newpos']] # depends on [control=['if'], data=[]] else: newpos = localroot if newpos is not None: self.body.set_focus(newpos) # depends on [control=['if'], data=['newpos']]
def _fix_gatk_header(exist_files, out_file, config):
    """Ensure consistent headers for VCF concatenation.

    Fixes problems for genomes that start with chrM by reheadering the first
    file. These files do haploid variant calling which lack the PID phasing
    key/value pair in FORMAT, so initial chrM samples cause errors during
    concatenation due to the lack of header merging.

    This fixes this by updating the first header.
    """
    from bcbio.variation import ploidy
    c, base_file = exist_files[0]
    replace_file = base_file
    items = [{"config": config}]
    if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1:
        for c, x in exist_files[1:]:
            if ploidy.get_ploidy(items, (c, 1, 2)) > 1:
                replace_file = x
                break
    base_fix_file = os.path.join(os.path.dirname(out_file),
                                 "%s-fixheader%s" % utils.splitext_plus(os.path.basename(base_file)))
    with file_transaction(config, base_fix_file) as tx_out_file:
        header_file = "%s-header.vcf" % utils.splitext_plus(tx_out_file)[0]
        do.run("zgrep ^# %s > %s" % (replace_file, header_file), "Prepare header file for merging")
        resources = config_utils.get_resources("picard", config)
        ropts = []
        if "options" in resources:
            ropts += [str(x) for x in resources.get("options", [])]
        do.run("%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s" %
               (utils.get_java_clprep(), header_file, base_file, base_fix_file, " ".join(ropts)),
               "Reheader initial VCF file in merge")
    bgzip_and_index(base_fix_file, config)
    return [base_fix_file] + [x for (c, x) in exist_files[1:]]
def function[_fix_gatk_header, parameter[exist_files, out_file, config]]: constant[Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header. ] from relative_module[bcbio.variation] import module[ploidy] <ast.Tuple object at 0x7da1b18465f0> assign[=] call[name[exist_files]][constant[0]] variable[replace_file] assign[=] name[base_file] variable[items] assign[=] list[[<ast.Dict object at 0x7da1b1846380>]] if compare[call[name[ploidy].get_ploidy, parameter[name[items]]] equal[==] constant[1]] begin[:] for taget[tuple[[<ast.Name object at 0x7da1b1847cd0>, <ast.Name object at 0x7da1b1846800>]]] in starred[call[name[exist_files]][<ast.Slice object at 0x7da1b1847b20>]] begin[:] if compare[call[name[ploidy].get_ploidy, parameter[name[items], tuple[[<ast.Name object at 0x7da1b18452a0>, <ast.Constant object at 0x7da1b1845ae0>, <ast.Constant object at 0x7da1b1847880>]]]] greater[>] constant[1]] begin[:] variable[replace_file] assign[=] name[x] break variable[base_fix_file] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[out_file]]], binary_operation[constant[%s-fixheader%s] <ast.Mod object at 0x7da2590d6920> call[name[utils].splitext_plus, parameter[call[name[os].path.basename, parameter[name[base_file]]]]]]]] with call[name[file_transaction], parameter[name[config], name[base_fix_file]]] begin[:] variable[header_file] assign[=] binary_operation[constant[%s-header.vcf] <ast.Mod object at 0x7da2590d6920> call[call[name[utils].splitext_plus, parameter[name[tx_out_file]]]][constant[0]]] call[name[do].run, parameter[binary_operation[constant[zgrep ^# %s > %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1844d00>, <ast.Name object at 0x7da1b1847e50>]]], constant[Prepare header file for merging]]] variable[resources] assign[=] call[name[config_utils].get_resources, parameter[constant[picard], name[config]]] variable[ropts] assign[=] list[[]] if compare[constant[options] in name[resources]] begin[:] <ast.AugAssign object at 0x7da1b18478e0> call[name[do].run, parameter[binary_operation[constant[%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b18473d0>, <ast.Name object at 0x7da1b1844970>, <ast.Name object at 0x7da1b1846dd0>, <ast.Name object at 0x7da1b18464a0>, <ast.Call object at 0x7da1b18449d0>]]], constant[Reheader initial VCF file in merge]]] call[name[bgzip_and_index], parameter[name[base_fix_file], name[config]]] return[binary_operation[list[[<ast.Name object at 0x7da1b1847a90>]] + <ast.ListComp object at 0x7da1b1846f20>]]
keyword[def] identifier[_fix_gatk_header] ( identifier[exist_files] , identifier[out_file] , identifier[config] ): literal[string] keyword[from] identifier[bcbio] . identifier[variation] keyword[import] identifier[ploidy] identifier[c] , identifier[base_file] = identifier[exist_files] [ literal[int] ] identifier[replace_file] = identifier[base_file] identifier[items] =[{ literal[string] : identifier[config] }] keyword[if] identifier[ploidy] . identifier[get_ploidy] ( identifier[items] , identifier[region] =( identifier[c] , literal[int] , literal[int] ))== literal[int] : keyword[for] identifier[c] , identifier[x] keyword[in] identifier[exist_files] [ literal[int] :]: keyword[if] identifier[ploidy] . identifier[get_ploidy] ( identifier[items] ,( identifier[c] , literal[int] , literal[int] ))> literal[int] : identifier[replace_file] = identifier[x] keyword[break] identifier[base_fix_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[out_file] ), literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[os] . identifier[path] . identifier[basename] ( identifier[base_file] ))) keyword[with] identifier[file_transaction] ( identifier[config] , identifier[base_fix_file] ) keyword[as] identifier[tx_out_file] : identifier[header_file] = literal[string] % identifier[utils] . identifier[splitext_plus] ( identifier[tx_out_file] )[ literal[int] ] identifier[do] . identifier[run] ( literal[string] %( identifier[replace_file] , identifier[header_file] ), literal[string] ) identifier[resources] = identifier[config_utils] . identifier[get_resources] ( literal[string] , identifier[config] ) identifier[ropts] =[] keyword[if] literal[string] keyword[in] identifier[resources] : identifier[ropts] +=[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[resources] . identifier[get] ( literal[string] ,[])] identifier[do] . identifier[run] ( literal[string] % ( identifier[utils] . identifier[get_java_clprep] (), identifier[header_file] , identifier[base_file] , identifier[base_fix_file] , literal[string] . identifier[join] ( identifier[ropts] )), literal[string] ) identifier[bgzip_and_index] ( identifier[base_fix_file] , identifier[config] ) keyword[return] [ identifier[base_fix_file] ]+[ identifier[x] keyword[for] ( identifier[c] , identifier[x] ) keyword[in] identifier[exist_files] [ literal[int] :]]
def _fix_gatk_header(exist_files, out_file, config): """Ensure consistent headers for VCF concatenation. Fixes problems for genomes that start with chrM by reheadering the first file. These files do haploid variant calling which lack the PID phasing key/value pair in FORMAT, so initial chrM samples cause errors during concatenation due to the lack of header merging. This fixes this by updating the first header. """ from bcbio.variation import ploidy (c, base_file) = exist_files[0] replace_file = base_file items = [{'config': config}] if ploidy.get_ploidy(items, region=(c, 1, 2)) == 1: for (c, x) in exist_files[1:]: if ploidy.get_ploidy(items, (c, 1, 2)) > 1: replace_file = x break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] base_fix_file = os.path.join(os.path.dirname(out_file), '%s-fixheader%s' % utils.splitext_plus(os.path.basename(base_file))) with file_transaction(config, base_fix_file) as tx_out_file: header_file = '%s-header.vcf' % utils.splitext_plus(tx_out_file)[0] do.run('zgrep ^# %s > %s' % (replace_file, header_file), 'Prepare header file for merging') resources = config_utils.get_resources('picard', config) ropts = [] if 'options' in resources: ropts += [str(x) for x in resources.get('options', [])] # depends on [control=['if'], data=['resources']] do.run('%s && picard FixVcfHeader HEADER=%s INPUT=%s OUTPUT=%s %s' % (utils.get_java_clprep(), header_file, base_file, base_fix_file, ' '.join(ropts)), 'Reheader initial VCF file in merge') # depends on [control=['with'], data=['tx_out_file']] bgzip_and_index(base_fix_file, config) return [base_fix_file] + [x for (c, x) in exist_files[1:]]
def parse(self):
    """
    Parses the CSS contents and returns the cleaned CSS as a string

    :returns: The cleaned CSS
    :rtype: str
    """
    # Build the HTML tree
    self.tree = self._build_tree(self.html_contents)

    # Parse the CSS contents
    self.stylesheet = self.parser.parse_stylesheet(self.css_contents)

    # Get the cleaned CSS contents
    self.cleaned_css = self._clean_css()
def function[parse, parameter[self]]: constant[ Parses the CSS contents and returns the cleaned CSS as a string :returns: The cleaned CSS :rtype: str ] name[self].tree assign[=] call[name[self]._build_tree, parameter[name[self].html_contents]] name[self].stylesheet assign[=] call[name[self].parser.parse_stylesheet, parameter[name[self].css_contents]] name[self].cleaned_css assign[=] call[name[self]._clean_css, parameter[]]
keyword[def] identifier[parse] ( identifier[self] ): literal[string] identifier[self] . identifier[tree] = identifier[self] . identifier[_build_tree] ( identifier[self] . identifier[html_contents] ) identifier[self] . identifier[stylesheet] = identifier[self] . identifier[parser] . identifier[parse_stylesheet] ( identifier[self] . identifier[css_contents] ) identifier[self] . identifier[cleaned_css] = identifier[self] . identifier[_clean_css] ()
def parse(self): """ Parses the CSS contents and returns the cleaned CSS as a string :returns: The cleaned CSS :rtype: str """ # Build the HTML tree self.tree = self._build_tree(self.html_contents) # Parse the CSS contents self.stylesheet = self.parser.parse_stylesheet(self.css_contents) # Get the cleaned CSS contents self.cleaned_css = self._clean_css()
def paginate_query(self, query, paginate_info):
    """Paginate query according to jsonapi 1.0

    :param Query query: sqlalchemy queryset
    :param dict paginate_info: pagination information
    :return Query: the paginated query
    """
    if int(paginate_info.get('size', 1)) == 0:
        return query

    page_size = int(paginate_info.get('size', 0)) or current_app.config['PAGE_SIZE']
    query = query.limit(page_size)

    if paginate_info.get('number'):
        query = query.offset((int(paginate_info['number']) - 1) * page_size)

    return query
def function[paginate_query, parameter[self, query, paginate_info]]: constant[Paginate query according to jsonapi 1.0 :param Query query: sqlalchemy queryset :param dict paginate_info: pagination information :return Query: the paginated query ] if compare[call[name[int], parameter[call[name[paginate_info].get, parameter[constant[size], constant[1]]]]] equal[==] constant[0]] begin[:] return[name[query]] variable[page_size] assign[=] <ast.BoolOp object at 0x7da1b1642fe0> variable[query] assign[=] call[name[query].limit, parameter[name[page_size]]] if call[name[paginate_info].get, parameter[constant[number]]] begin[:] variable[query] assign[=] call[name[query].offset, parameter[binary_operation[binary_operation[call[name[int], parameter[call[name[paginate_info]][constant[number]]]] - constant[1]] * name[page_size]]]] return[name[query]]
keyword[def] identifier[paginate_query] ( identifier[self] , identifier[query] , identifier[paginate_info] ): literal[string] keyword[if] identifier[int] ( identifier[paginate_info] . identifier[get] ( literal[string] , literal[int] ))== literal[int] : keyword[return] identifier[query] identifier[page_size] = identifier[int] ( identifier[paginate_info] . identifier[get] ( literal[string] , literal[int] )) keyword[or] identifier[current_app] . identifier[config] [ literal[string] ] identifier[query] = identifier[query] . identifier[limit] ( identifier[page_size] ) keyword[if] identifier[paginate_info] . identifier[get] ( literal[string] ): identifier[query] = identifier[query] . identifier[offset] (( identifier[int] ( identifier[paginate_info] [ literal[string] ])- literal[int] )* identifier[page_size] ) keyword[return] identifier[query]
def paginate_query(self, query, paginate_info): """Paginate query according to jsonapi 1.0 :param Query query: sqlalchemy queryset :param dict paginate_info: pagination information :return Query: the paginated query """ if int(paginate_info.get('size', 1)) == 0: return query # depends on [control=['if'], data=[]] page_size = int(paginate_info.get('size', 0)) or current_app.config['PAGE_SIZE'] query = query.limit(page_size) if paginate_info.get('number'): query = query.offset((int(paginate_info['number']) - 1) * page_size) # depends on [control=['if'], data=[]] return query
def engine(self):
    """Return Render Engine."""
    return self.backend({
        'APP_DIRS': True,
        'DIRS': [str(ROOT / self.backend.app_dirname)],
        'NAME': 'djangoforms',
        'OPTIONS': {},
    })
def function[engine, parameter[self]]: constant[Return Render Engine.] return[call[name[self].backend, parameter[dictionary[[<ast.Constant object at 0x7da1b1ed60b0>, <ast.Constant object at 0x7da1b1ed6770>, <ast.Constant object at 0x7da1b1ed6650>, <ast.Constant object at 0x7da1b1ed73d0>], [<ast.Constant object at 0x7da1b1ed6bf0>, <ast.List object at 0x7da1b1ed71c0>, <ast.Constant object at 0x7da1b1ed5cf0>, <ast.Dict object at 0x7da1b1ed5450>]]]]]
keyword[def] identifier[engine] ( identifier[self] ): literal[string] keyword[return] identifier[self] . identifier[backend] ({ literal[string] : keyword[True] , literal[string] :[ identifier[str] ( identifier[ROOT] / identifier[self] . identifier[backend] . identifier[app_dirname] )], literal[string] : literal[string] , literal[string] :{}, })
def engine(self): """Return Render Engine.""" return self.backend({'APP_DIRS': True, 'DIRS': [str(ROOT / self.backend.app_dirname)], 'NAME': 'djangoforms', 'OPTIONS': {}})
def merge_or_link(self, input_args, raw_folder, local_base="sample"):
    """
    This function standardizes various input possibilities by converting
    either .bam, .fastq, or .fastq.gz files into a local file; merging
    those if multiple files given.

    :param list input_args: This is a list of arguments, each one is a
        class of inputs (which can in turn be a string or a list).
        Typically, input_args is a list with 2 elements: first a list of
        read1 files; second an (optional!) list of read2 files.
    :param str raw_folder: Name/path of folder for the merge/link.
    :param str local_base: Usually the sample name. This (plus file
        extension) will be the name of the local file linked (or merged)
        by this function.
    """
    self.make_sure_path_exists(raw_folder)

    if not isinstance(input_args, list):
        raise Exception("Input must be a list")

    if any(isinstance(i, list) for i in input_args):
        # We have a list of lists. Process each individually.
        local_input_files = list()
        n_input_files = len(filter(bool, input_args))
        print("Number of input file sets:\t\t" + str(n_input_files))

        for input_i, input_arg in enumerate(input_args):
            # Count how many non-null items there are in the list;
            # we only append _R1 (etc.) if there are multiple input files.
            if n_input_files > 1:
                local_base_extended = local_base + "_R" + str(input_i + 1)
            else:
                local_base_extended = local_base
            if input_arg:
                out = self.merge_or_link(
                    input_arg, raw_folder, local_base_extended)
                print("Local input file: '{}'".format(out))
                # Make sure file exists:
                if not os.path.isfile(out):
                    print("Not a file: '{}'".format(out))
                local_input_files.append(out)

        return local_input_files
    else:
        # We have a list of individual arguments. Merge them.
        if len(input_args) == 1:
            # Only one argument in this list. A single input file; we just link
            # it, regardless of file type:
            # Pull the value out of the list
            input_arg = input_args[0]
            input_ext = self.get_input_ext(input_arg)

            # Convert to absolute path
            if not os.path.isabs(input_arg):
                input_arg = os.path.abspath(input_arg)

            # Link it to into the raw folder
            local_input_abs = os.path.join(raw_folder, local_base + input_ext)
            self.pm.run(
                "ln -sf " + input_arg + " " + local_input_abs,
                target=local_input_abs, shell=True)
            # return the local (linked) filename absolute path
            return local_input_abs
        else:
            # Otherwise, there are multiple inputs.
            # If more than 1 input file is given, then these are to be merged
            # if they are in bam format.
            if all([self.get_input_ext(x) == ".bam" for x in input_args]):
                sample_merged = local_base + ".merged.bam"
                output_merge = os.path.join(raw_folder, sample_merged)
                cmd = self.merge_bams(input_args, output_merge)
                self.pm.run(cmd, output_merge)
                cmd2 = self.validate_bam(output_merge)
                self.pm.run(cmd, output_merge, nofail=True)
                return output_merge

            # if multiple fastq
            if all([self.get_input_ext(x) == ".fastq.gz" for x in input_args]):
                sample_merged_gz = local_base + ".merged.fastq.gz"
                output_merge_gz = os.path.join(raw_folder, sample_merged_gz)
                #cmd1 = self.ziptool + "-d -c " + " ".join(input_args) + " > " + output_merge
                #cmd2 = self.ziptool + " " + output_merge
                #self.pm.run([cmd1, cmd2], output_merge_gz)
                # you can save yourself the decompression/recompression:
                cmd = "cat " + " ".join(input_args) + " > " + output_merge_gz
                self.pm.run(cmd, output_merge_gz)
                return output_merge_gz

            if all([self.get_input_ext(x) == ".fastq" for x in input_args]):
                sample_merged = local_base + ".merged.fastq"
                output_merge = os.path.join(raw_folder, sample_merged)
                cmd = "cat " + " ".join(input_args) + " > " + output_merge
                self.pm.run(cmd, output_merge)
                return output_merge

            # At this point, we don't recognize the input file types or they
            # do not match.
            raise NotImplementedError(
                "Input files must be of the same type, and can only "
                "merge bam or fastq.")
def function[merge_or_link, parameter[self, input_args, raw_folder, local_base]]: constant[ This function standardizes various input possibilities by converting either .bam, .fastq, or .fastq.gz files into a local file; merging those if multiple files given. :param list input_args: This is a list of arguments, each one is a class of inputs (which can in turn be a string or a list). Typically, input_args is a list with 2 elements: first a list of read1 files; second an (optional!) list of read2 files. :param str raw_folder: Name/path of folder for the merge/link. :param str local_base: Usually the sample name. This (plus file extension) will be the name of the local file linked (or merged) by this function. ] call[name[self].make_sure_path_exists, parameter[name[raw_folder]]] if <ast.UnaryOp object at 0x7da1b032a4d0> begin[:] <ast.Raise object at 0x7da1b0328130> if call[name[any], parameter[<ast.GeneratorExp object at 0x7da1b0328d30>]] begin[:] variable[local_input_files] assign[=] call[name[list], parameter[]] variable[n_input_files] assign[=] call[name[len], parameter[call[name[filter], parameter[name[bool], name[input_args]]]]] call[name[print], parameter[binary_operation[constant[Number of input file sets: ] + call[name[str], parameter[name[n_input_files]]]]]] for taget[tuple[[<ast.Name object at 0x7da1b03db490>, <ast.Name object at 0x7da1b03db9a0>]]] in starred[call[name[enumerate], parameter[name[input_args]]]] begin[:] if compare[name[n_input_files] greater[>] constant[1]] begin[:] variable[local_base_extended] assign[=] binary_operation[binary_operation[name[local_base] + constant[_R]] + call[name[str], parameter[binary_operation[name[input_i] + constant[1]]]]] if name[input_arg] begin[:] variable[out] assign[=] call[name[self].merge_or_link, parameter[name[input_arg], name[raw_folder], name[local_base_extended]]] call[name[print], parameter[call[constant[Local input file: '{}'].format, parameter[name[out]]]]] if <ast.UnaryOp object at 0x7da1b03e00d0> begin[:] call[name[print], parameter[call[constant[Not a file: '{}'].format, parameter[name[out]]]]] call[name[local_input_files].append, parameter[name[out]]] return[name[local_input_files]]
keyword[def] identifier[merge_or_link] ( identifier[self] , identifier[input_args] , identifier[raw_folder] , identifier[local_base] = literal[string] ): literal[string] identifier[self] . identifier[make_sure_path_exists] ( identifier[raw_folder] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[input_args] , identifier[list] ): keyword[raise] identifier[Exception] ( literal[string] ) keyword[if] identifier[any] ( identifier[isinstance] ( identifier[i] , identifier[list] ) keyword[for] identifier[i] keyword[in] identifier[input_args] ): identifier[local_input_files] = identifier[list] () identifier[n_input_files] = identifier[len] ( identifier[filter] ( identifier[bool] , identifier[input_args] )) identifier[print] ( literal[string] + identifier[str] ( identifier[n_input_files] )) keyword[for] identifier[input_i] , identifier[input_arg] keyword[in] identifier[enumerate] ( identifier[input_args] ): keyword[if] identifier[n_input_files] > literal[int] : identifier[local_base_extended] = identifier[local_base] + literal[string] + identifier[str] ( identifier[input_i] + literal[int] ) keyword[else] : identifier[local_base_extended] = identifier[local_base] keyword[if] identifier[input_arg] : identifier[out] = identifier[self] . identifier[merge_or_link] ( identifier[input_arg] , identifier[raw_folder] , identifier[local_base_extended] ) identifier[print] ( literal[string] . identifier[format] ( identifier[out] )) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[out] ): identifier[print] ( literal[string] . identifier[format] ( identifier[out] )) identifier[local_input_files] . identifier[append] ( identifier[out] ) keyword[return] identifier[local_input_files] keyword[else] : keyword[if] identifier[len] ( identifier[input_args] )== literal[int] : identifier[input_arg] = identifier[input_args] [ literal[int] ] identifier[input_ext] = identifier[self] . identifier[get_input_ext] ( identifier[input_arg] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isabs] ( identifier[input_arg] ): identifier[input_arg] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[input_arg] ) identifier[local_input_abs] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_folder] , identifier[local_base] + identifier[input_ext] ) identifier[self] . identifier[pm] . identifier[run] ( literal[string] + identifier[input_arg] + literal[string] + identifier[local_input_abs] , identifier[target] = identifier[local_input_abs] , identifier[shell] = keyword[True] ) keyword[return] identifier[local_input_abs] keyword[else] : keyword[if] identifier[all] ([ identifier[self] . identifier[get_input_ext] ( identifier[x] )== literal[string] keyword[for] identifier[x] keyword[in] identifier[input_args] ]): identifier[sample_merged] = identifier[local_base] + literal[string] identifier[output_merge] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_folder] , identifier[sample_merged] ) identifier[cmd] = identifier[self] . identifier[merge_bams] ( identifier[input_args] , identifier[output_merge] ) identifier[self] . identifier[pm] . identifier[run] ( identifier[cmd] , identifier[output_merge] ) identifier[cmd2] = identifier[self] . identifier[validate_bam] ( identifier[output_merge] ) identifier[self] . identifier[pm] . 
identifier[run] ( identifier[cmd] , identifier[output_merge] , identifier[nofail] = keyword[True] ) keyword[return] identifier[output_merge] keyword[if] identifier[all] ([ identifier[self] . identifier[get_input_ext] ( identifier[x] )== literal[string] keyword[for] identifier[x] keyword[in] identifier[input_args] ]): identifier[sample_merged_gz] = identifier[local_base] + literal[string] identifier[output_merge_gz] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_folder] , identifier[sample_merged_gz] ) identifier[cmd] = literal[string] + literal[string] . identifier[join] ( identifier[input_args] )+ literal[string] + identifier[output_merge_gz] identifier[self] . identifier[pm] . identifier[run] ( identifier[cmd] , identifier[output_merge_gz] ) keyword[return] identifier[output_merge_gz] keyword[if] identifier[all] ([ identifier[self] . identifier[get_input_ext] ( identifier[x] )== literal[string] keyword[for] identifier[x] keyword[in] identifier[input_args] ]): identifier[sample_merged] = identifier[local_base] + literal[string] identifier[output_merge] = identifier[os] . identifier[path] . identifier[join] ( identifier[raw_folder] , identifier[sample_merged] ) identifier[cmd] = literal[string] + literal[string] . identifier[join] ( identifier[input_args] )+ literal[string] + identifier[output_merge] identifier[self] . identifier[pm] . identifier[run] ( identifier[cmd] , identifier[output_merge] ) keyword[return] identifier[output_merge] keyword[raise] identifier[NotImplementedError] ( literal[string] literal[string] )
def merge_or_link(self, input_args, raw_folder, local_base='sample'): """ This function standardizes various input possibilities by converting either .bam, .fastq, or .fastq.gz files into a local file; merging those if multiple files given. :param list input_args: This is a list of arguments, each one is a class of inputs (which can in turn be a string or a list). Typically, input_args is a list with 2 elements: first a list of read1 files; second an (optional!) list of read2 files. :param str raw_folder: Name/path of folder for the merge/link. :param str local_base: Usually the sample name. This (plus file extension) will be the name of the local file linked (or merged) by this function. """ self.make_sure_path_exists(raw_folder) if not isinstance(input_args, list): raise Exception('Input must be a list') # depends on [control=['if'], data=[]] if any((isinstance(i, list) for i in input_args)): # We have a list of lists. Process each individually. local_input_files = list() n_input_files = len(filter(bool, input_args)) print('Number of input file sets:\t\t' + str(n_input_files)) for (input_i, input_arg) in enumerate(input_args): # Count how many non-null items there are in the list; # we only append _R1 (etc.) if there are multiple input files. if n_input_files > 1: local_base_extended = local_base + '_R' + str(input_i + 1) # depends on [control=['if'], data=[]] else: local_base_extended = local_base if input_arg: out = self.merge_or_link(input_arg, raw_folder, local_base_extended) print("Local input file: '{}'".format(out)) # Make sure file exists: if not os.path.isfile(out): print("Not a file: '{}'".format(out)) # depends on [control=['if'], data=[]] local_input_files.append(out) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] return local_input_files # depends on [control=['if'], data=[]] # We have a list of individual arguments. Merge them. elif len(input_args) == 1: # Only one argument in this list. A single input file; we just link # it, regardless of file type: # Pull the value out of the list input_arg = input_args[0] input_ext = self.get_input_ext(input_arg) # Convert to absolute path if not os.path.isabs(input_arg): input_arg = os.path.abspath(input_arg) # depends on [control=['if'], data=[]] # Link it to into the raw folder local_input_abs = os.path.join(raw_folder, local_base + input_ext) self.pm.run('ln -sf ' + input_arg + ' ' + local_input_abs, target=local_input_abs, shell=True) # return the local (linked) filename absolute path return local_input_abs # depends on [control=['if'], data=[]] else: # Otherwise, there are multiple inputs. # If more than 1 input file is given, then these are to be merged # if they are in bam format. 
if all([self.get_input_ext(x) == '.bam' for x in input_args]): sample_merged = local_base + '.merged.bam' output_merge = os.path.join(raw_folder, sample_merged) cmd = self.merge_bams(input_args, output_merge) self.pm.run(cmd, output_merge) cmd2 = self.validate_bam(output_merge) self.pm.run(cmd, output_merge, nofail=True) return output_merge # depends on [control=['if'], data=[]] # if multiple fastq if all([self.get_input_ext(x) == '.fastq.gz' for x in input_args]): sample_merged_gz = local_base + '.merged.fastq.gz' output_merge_gz = os.path.join(raw_folder, sample_merged_gz) #cmd1 = self.ziptool + "-d -c " + " ".join(input_args) + " > " + output_merge #cmd2 = self.ziptool + " " + output_merge #self.pm.run([cmd1, cmd2], output_merge_gz) # you can save yourself the decompression/recompression: cmd = 'cat ' + ' '.join(input_args) + ' > ' + output_merge_gz self.pm.run(cmd, output_merge_gz) return output_merge_gz # depends on [control=['if'], data=[]] if all([self.get_input_ext(x) == '.fastq' for x in input_args]): sample_merged = local_base + '.merged.fastq' output_merge = os.path.join(raw_folder, sample_merged) cmd = 'cat ' + ' '.join(input_args) + ' > ' + output_merge self.pm.run(cmd, output_merge) return output_merge # depends on [control=['if'], data=[]] # At this point, we don't recognize the input file types or they # do not match. raise NotImplementedError('Input files must be of the same type, and can only merge bam or fastq.')
def morphTo(self, region):
    """ Change shape of this region to match the given ``Region`` object """
    if not region or not isinstance(region, Region):
        raise TypeError("morphTo expected a Region object")
    self.setROI(region)
    return self
def function[morphTo, parameter[self, region]]: constant[ Change shape of this region to match the given ``Region`` object ] if <ast.BoolOp object at 0x7da2041db520> begin[:] <ast.Raise object at 0x7da2041d8460> call[name[self].setROI, parameter[name[region]]] return[name[self]]
keyword[def] identifier[morphTo] ( identifier[self] , identifier[region] ): literal[string] keyword[if] keyword[not] identifier[region] keyword[or] keyword[not] identifier[isinstance] ( identifier[region] , identifier[Region] ): keyword[raise] identifier[TypeError] ( literal[string] ) identifier[self] . identifier[setROI] ( identifier[region] ) keyword[return] identifier[self]
def morphTo(self, region): """ Change shape of this region to match the given ``Region`` object """ if not region or not isinstance(region, Region): raise TypeError('morphTo expected a Region object') # depends on [control=['if'], data=[]] self.setROI(region) return self
def find_minimum_spanning_forest_as_subgraphs(graph):
    """Calculates the minimum spanning forest and returns a list of trees as subgraphs."""
    forest = find_minimum_spanning_forest(graph)
    list_of_subgraphs = [get_subgraph_from_edge_list(graph, edge_list) for edge_list in forest]

    return list_of_subgraphs
def function[find_minimum_spanning_forest_as_subgraphs, parameter[graph]]: constant[Calculates the minimum spanning forest and returns a list of trees as subgraphs.] variable[forest] assign[=] call[name[find_minimum_spanning_forest], parameter[name[graph]]] variable[list_of_subgraphs] assign[=] <ast.ListComp object at 0x7da1b287c910> return[name[list_of_subgraphs]]
keyword[def] identifier[find_minimum_spanning_forest_as_subgraphs] ( identifier[graph] ): literal[string] identifier[forest] = identifier[find_minimum_spanning_forest] ( identifier[graph] ) identifier[list_of_subgraphs] =[ identifier[get_subgraph_from_edge_list] ( identifier[graph] , identifier[edge_list] ) keyword[for] identifier[edge_list] keyword[in] identifier[forest] ] keyword[return] identifier[list_of_subgraphs]
def find_minimum_spanning_forest_as_subgraphs(graph): """Calculates the minimum spanning forest and returns a list of trees as subgraphs.""" forest = find_minimum_spanning_forest(graph) list_of_subgraphs = [get_subgraph_from_edge_list(graph, edge_list) for edge_list in forest] return list_of_subgraphs
def extract_fields(document_data, prefix_path, expand_dots=False):
    """Do depth-first walk of tree, yielding field_path, value"""
    if not document_data:
        yield prefix_path, _EmptyDict
    else:
        for key, value in sorted(six.iteritems(document_data)):
            if expand_dots:
                sub_key = FieldPath.from_string(key)
            else:
                sub_key = FieldPath(key)
            field_path = FieldPath(*(prefix_path.parts + sub_key.parts))
            if isinstance(value, dict):
                for s_path, s_value in extract_fields(value, field_path):
                    yield s_path, s_value
            else:
                yield field_path, value
def function[extract_fields, parameter[document_data, prefix_path, expand_dots]]: constant[Do depth-first walk of tree, yielding field_path, value] if <ast.UnaryOp object at 0x7da204564310> begin[:] <ast.Yield object at 0x7da2045640a0>
keyword[def] identifier[extract_fields] ( identifier[document_data] , identifier[prefix_path] , identifier[expand_dots] = keyword[False] ): literal[string] keyword[if] keyword[not] identifier[document_data] : keyword[yield] identifier[prefix_path] , identifier[_EmptyDict] keyword[else] : keyword[for] identifier[key] , identifier[value] keyword[in] identifier[sorted] ( identifier[six] . identifier[iteritems] ( identifier[document_data] )): keyword[if] identifier[expand_dots] : identifier[sub_key] = identifier[FieldPath] . identifier[from_string] ( identifier[key] ) keyword[else] : identifier[sub_key] = identifier[FieldPath] ( identifier[key] ) identifier[field_path] = identifier[FieldPath] (*( identifier[prefix_path] . identifier[parts] + identifier[sub_key] . identifier[parts] )) keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ): keyword[for] identifier[s_path] , identifier[s_value] keyword[in] identifier[extract_fields] ( identifier[value] , identifier[field_path] ): keyword[yield] identifier[s_path] , identifier[s_value] keyword[else] : keyword[yield] identifier[field_path] , identifier[value]
def extract_fields(document_data, prefix_path, expand_dots=False): """Do depth-first walk of tree, yielding field_path, value""" if not document_data: yield (prefix_path, _EmptyDict) # depends on [control=['if'], data=[]] else: for (key, value) in sorted(six.iteritems(document_data)): if expand_dots: sub_key = FieldPath.from_string(key) # depends on [control=['if'], data=[]] else: sub_key = FieldPath(key) field_path = FieldPath(*prefix_path.parts + sub_key.parts) if isinstance(value, dict): for (s_path, s_value) in extract_fields(value, field_path): yield (s_path, s_value) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: yield (field_path, value) # depends on [control=['for'], data=[]]
def ProcessSources(
    self, source_path_specs, storage_writer, resolver_context,
    processing_configuration, filter_find_specs=None,
    status_update_callback=None):
  """Processes the sources.

  Args:
    source_path_specs (list[dfvfs.PathSpec]): path specifications of
        the sources to process.
    storage_writer (StorageWriter): storage writer for a session storage.
    resolver_context (dfvfs.Context): resolver context.
    processing_configuration (ProcessingConfiguration): processing
        configuration.
    filter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications
        used in path specification extraction.
    status_update_callback (Optional[function]): callback function for status
        updates.

  Returns:
    ProcessingStatus: processing status.
  """
  parser_mediator = parsers_mediator.ParserMediator(
      storage_writer, self.knowledge_base,
      artifacts_filter_helper=self._artifacts_filter_helper,
      preferred_year=processing_configuration.preferred_year,
      resolver_context=resolver_context,
      temporary_directory=processing_configuration.temporary_directory)

  parser_mediator.SetEventExtractionConfiguration(
      processing_configuration.event_extraction)

  parser_mediator.SetInputSourceConfiguration(
      processing_configuration.input_source)

  extraction_worker = worker.EventExtractionWorker(
      parser_filter_expression=(
          processing_configuration.parser_filter_expression))

  extraction_worker.SetExtractionConfiguration(
      processing_configuration.extraction)

  self._processing_configuration = processing_configuration
  self._status_update_callback = status_update_callback

  logger.debug('Processing started.')

  parser_mediator.StartProfiling(
      self._processing_configuration.profiling, self._name,
      self._process_information)
  self._StartProfiling(self._processing_configuration.profiling)

  if self._processing_profiler:
    extraction_worker.SetProcessingProfiler(self._processing_profiler)

  if self._serializers_profiler:
    storage_writer.SetSerializersProfiler(self._serializers_profiler)

  if self._storage_profiler:
    storage_writer.SetStorageProfiler(self._storage_profiler)

  storage_writer.Open()
  storage_writer.WriteSessionStart()

  try:
    storage_writer.WritePreprocessingInformation(self.knowledge_base)

    self._ProcessSources(
        source_path_specs, extraction_worker, parser_mediator,
        storage_writer, filter_find_specs=filter_find_specs)

  finally:
    storage_writer.WriteSessionCompletion(aborted=self._abort)
    storage_writer.Close()

    if self._processing_profiler:
      extraction_worker.SetProcessingProfiler(None)

    if self._serializers_profiler:
      storage_writer.SetSerializersProfiler(None)

    if self._storage_profiler:
      storage_writer.SetStorageProfiler(None)

    self._StopProfiling()
    parser_mediator.StopProfiling()

  if self._abort:
    logger.debug('Processing aborted.')
    self._processing_status.aborted = True
  else:
    logger.debug('Processing completed.')

  self._processing_configuration = None
  self._status_update_callback = None

  return self._processing_status
def function[ProcessSources, parameter[self, source_path_specs, storage_writer, resolver_context, processing_configuration, filter_find_specs, status_update_callback]]: constant[Processes the sources. Args: source_path_specs (list[dfvfs.PathSpec]): path specifications of the sources to process. storage_writer (StorageWriter): storage writer for a session storage. resolver_context (dfvfs.Context): resolver context. processing_configuration (ProcessingConfiguration): processing configuration. filter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications used in path specification extraction. status_update_callback (Optional[function]): callback function for status updates. Returns: ProcessingStatus: processing status. ] variable[parser_mediator] assign[=] call[name[parsers_mediator].ParserMediator, parameter[name[storage_writer], name[self].knowledge_base]] call[name[parser_mediator].SetEventExtractionConfiguration, parameter[name[processing_configuration].event_extraction]] call[name[parser_mediator].SetInputSourceConfiguration, parameter[name[processing_configuration].input_source]] variable[extraction_worker] assign[=] call[name[worker].EventExtractionWorker, parameter[]] call[name[extraction_worker].SetExtractionConfiguration, parameter[name[processing_configuration].extraction]] name[self]._processing_configuration assign[=] name[processing_configuration] name[self]._status_update_callback assign[=] name[status_update_callback] call[name[logger].debug, parameter[constant[Processing started.]]] call[name[parser_mediator].StartProfiling, parameter[name[self]._processing_configuration.profiling, name[self]._name, name[self]._process_information]] call[name[self]._StartProfiling, parameter[name[self]._processing_configuration.profiling]] if name[self]._processing_profiler begin[:] call[name[extraction_worker].SetProcessingProfiler, parameter[name[self]._processing_profiler]] if name[self]._serializers_profiler begin[:] call[name[storage_writer].SetSerializersProfiler, parameter[name[self]._serializers_profiler]] if name[self]._storage_profiler begin[:] call[name[storage_writer].SetStorageProfiler, parameter[name[self]._storage_profiler]] call[name[storage_writer].Open, parameter[]] call[name[storage_writer].WriteSessionStart, parameter[]] <ast.Try object at 0x7da1b26ae380> if name[self]._abort begin[:] call[name[logger].debug, parameter[constant[Processing aborted.]]] name[self]._processing_status.aborted assign[=] constant[True] name[self]._processing_configuration assign[=] constant[None] name[self]._status_update_callback assign[=] constant[None] return[name[self]._processing_status]
keyword[def] identifier[ProcessSources] ( identifier[self] , identifier[source_path_specs] , identifier[storage_writer] , identifier[resolver_context] , identifier[processing_configuration] , identifier[filter_find_specs] = keyword[None] , identifier[status_update_callback] = keyword[None] ): literal[string] identifier[parser_mediator] = identifier[parsers_mediator] . identifier[ParserMediator] ( identifier[storage_writer] , identifier[self] . identifier[knowledge_base] , identifier[artifacts_filter_helper] = identifier[self] . identifier[_artifacts_filter_helper] , identifier[preferred_year] = identifier[processing_configuration] . identifier[preferred_year] , identifier[resolver_context] = identifier[resolver_context] , identifier[temporary_directory] = identifier[processing_configuration] . identifier[temporary_directory] ) identifier[parser_mediator] . identifier[SetEventExtractionConfiguration] ( identifier[processing_configuration] . identifier[event_extraction] ) identifier[parser_mediator] . identifier[SetInputSourceConfiguration] ( identifier[processing_configuration] . identifier[input_source] ) identifier[extraction_worker] = identifier[worker] . identifier[EventExtractionWorker] ( identifier[parser_filter_expression] =( identifier[processing_configuration] . identifier[parser_filter_expression] )) identifier[extraction_worker] . identifier[SetExtractionConfiguration] ( identifier[processing_configuration] . identifier[extraction] ) identifier[self] . identifier[_processing_configuration] = identifier[processing_configuration] identifier[self] . identifier[_status_update_callback] = identifier[status_update_callback] identifier[logger] . identifier[debug] ( literal[string] ) identifier[parser_mediator] . identifier[StartProfiling] ( identifier[self] . identifier[_processing_configuration] . identifier[profiling] , identifier[self] . identifier[_name] , identifier[self] . identifier[_process_information] ) identifier[self] . identifier[_StartProfiling] ( identifier[self] . identifier[_processing_configuration] . identifier[profiling] ) keyword[if] identifier[self] . identifier[_processing_profiler] : identifier[extraction_worker] . identifier[SetProcessingProfiler] ( identifier[self] . identifier[_processing_profiler] ) keyword[if] identifier[self] . identifier[_serializers_profiler] : identifier[storage_writer] . identifier[SetSerializersProfiler] ( identifier[self] . identifier[_serializers_profiler] ) keyword[if] identifier[self] . identifier[_storage_profiler] : identifier[storage_writer] . identifier[SetStorageProfiler] ( identifier[self] . identifier[_storage_profiler] ) identifier[storage_writer] . identifier[Open] () identifier[storage_writer] . identifier[WriteSessionStart] () keyword[try] : identifier[storage_writer] . identifier[WritePreprocessingInformation] ( identifier[self] . identifier[knowledge_base] ) identifier[self] . identifier[_ProcessSources] ( identifier[source_path_specs] , identifier[extraction_worker] , identifier[parser_mediator] , identifier[storage_writer] , identifier[filter_find_specs] = identifier[filter_find_specs] ) keyword[finally] : identifier[storage_writer] . identifier[WriteSessionCompletion] ( identifier[aborted] = identifier[self] . identifier[_abort] ) identifier[storage_writer] . identifier[Close] () keyword[if] identifier[self] . identifier[_processing_profiler] : identifier[extraction_worker] . identifier[SetProcessingProfiler] ( keyword[None] ) keyword[if] identifier[self] . 
identifier[_serializers_profiler] : identifier[storage_writer] . identifier[SetSerializersProfiler] ( keyword[None] ) keyword[if] identifier[self] . identifier[_storage_profiler] : identifier[storage_writer] . identifier[SetStorageProfiler] ( keyword[None] ) identifier[self] . identifier[_StopProfiling] () identifier[parser_mediator] . identifier[StopProfiling] () keyword[if] identifier[self] . identifier[_abort] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[_processing_status] . identifier[aborted] = keyword[True] keyword[else] : identifier[logger] . identifier[debug] ( literal[string] ) identifier[self] . identifier[_processing_configuration] = keyword[None] identifier[self] . identifier[_status_update_callback] = keyword[None] keyword[return] identifier[self] . identifier[_processing_status]
def ProcessSources(self, source_path_specs, storage_writer, resolver_context, processing_configuration, filter_find_specs=None, status_update_callback=None): """Processes the sources. Args: source_path_specs (list[dfvfs.PathSpec]): path specifications of the sources to process. storage_writer (StorageWriter): storage writer for a session storage. resolver_context (dfvfs.Context): resolver context. processing_configuration (ProcessingConfiguration): processing configuration. filter_find_specs (Optional[list[dfvfs.FindSpec]]): find specifications used in path specification extraction. status_update_callback (Optional[function]): callback function for status updates. Returns: ProcessingStatus: processing status. """ parser_mediator = parsers_mediator.ParserMediator(storage_writer, self.knowledge_base, artifacts_filter_helper=self._artifacts_filter_helper, preferred_year=processing_configuration.preferred_year, resolver_context=resolver_context, temporary_directory=processing_configuration.temporary_directory) parser_mediator.SetEventExtractionConfiguration(processing_configuration.event_extraction) parser_mediator.SetInputSourceConfiguration(processing_configuration.input_source) extraction_worker = worker.EventExtractionWorker(parser_filter_expression=processing_configuration.parser_filter_expression) extraction_worker.SetExtractionConfiguration(processing_configuration.extraction) self._processing_configuration = processing_configuration self._status_update_callback = status_update_callback logger.debug('Processing started.') parser_mediator.StartProfiling(self._processing_configuration.profiling, self._name, self._process_information) self._StartProfiling(self._processing_configuration.profiling) if self._processing_profiler: extraction_worker.SetProcessingProfiler(self._processing_profiler) # depends on [control=['if'], data=[]] if self._serializers_profiler: storage_writer.SetSerializersProfiler(self._serializers_profiler) # depends on [control=['if'], data=[]] if self._storage_profiler: storage_writer.SetStorageProfiler(self._storage_profiler) # depends on [control=['if'], data=[]] storage_writer.Open() storage_writer.WriteSessionStart() try: storage_writer.WritePreprocessingInformation(self.knowledge_base) self._ProcessSources(source_path_specs, extraction_worker, parser_mediator, storage_writer, filter_find_specs=filter_find_specs) # depends on [control=['try'], data=[]] finally: storage_writer.WriteSessionCompletion(aborted=self._abort) storage_writer.Close() if self._processing_profiler: extraction_worker.SetProcessingProfiler(None) # depends on [control=['if'], data=[]] if self._serializers_profiler: storage_writer.SetSerializersProfiler(None) # depends on [control=['if'], data=[]] if self._storage_profiler: storage_writer.SetStorageProfiler(None) # depends on [control=['if'], data=[]] self._StopProfiling() parser_mediator.StopProfiling() if self._abort: logger.debug('Processing aborted.') self._processing_status.aborted = True # depends on [control=['if'], data=[]] else: logger.debug('Processing completed.') self._processing_configuration = None self._status_update_callback = None return self._processing_status
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False, **kwargs): """ Serialize a Python object into a YAML stream with OrderedDict and default_flow_style defaulted to False. If stream is None, return the produced string instead. OrderedDict reference: http://stackoverflow.com/a/21912744 default_flow_style reference: http://stackoverflow.com/a/18210750 :param data: python object to be serialized :param stream: to be serialized to :param Dumper: base Dumper class to extend. :param kwargs: arguments to pass to to_dict :return: stream if provided, string if stream is None """ class OrderedDumper(dumper_cls): pass def dict_representer(dumper, data): return dumper.represent_mapping( yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()) OrderedDumper.add_representer(OrderedDict, dict_representer) obj_dict = to_dict(obj, **kwargs) return yaml.dump(obj_dict, stream, OrderedDumper, default_flow_style=default_flow_style)
def function[to_yaml, parameter[obj, stream, dumper_cls, default_flow_style]]: constant[ Serialize a Python object into a YAML stream with OrderedDict and default_flow_style defaulted to False. If stream is None, return the produced string instead. OrderedDict reference: http://stackoverflow.com/a/21912744 default_flow_style reference: http://stackoverflow.com/a/18210750 :param data: python object to be serialized :param stream: to be serialized to :param Dumper: base Dumper class to extend. :param kwargs: arguments to pass to to_dict :return: stream if provided, string if stream is None ] class class[OrderedDumper, parameter[]] begin[:] pass def function[dict_representer, parameter[dumper, data]]: return[call[name[dumper].represent_mapping, parameter[name[yaml].resolver.BaseResolver.DEFAULT_MAPPING_TAG, call[name[data].items, parameter[]]]]] call[name[OrderedDumper].add_representer, parameter[name[OrderedDict], name[dict_representer]]] variable[obj_dict] assign[=] call[name[to_dict], parameter[name[obj]]] return[call[name[yaml].dump, parameter[name[obj_dict], name[stream], name[OrderedDumper]]]]
keyword[def] identifier[to_yaml] ( identifier[obj] , identifier[stream] = keyword[None] , identifier[dumper_cls] = identifier[yaml] . identifier[Dumper] , identifier[default_flow_style] = keyword[False] , ** identifier[kwargs] ): literal[string] keyword[class] identifier[OrderedDumper] ( identifier[dumper_cls] ): keyword[pass] keyword[def] identifier[dict_representer] ( identifier[dumper] , identifier[data] ): keyword[return] identifier[dumper] . identifier[represent_mapping] ( identifier[yaml] . identifier[resolver] . identifier[BaseResolver] . identifier[DEFAULT_MAPPING_TAG] , identifier[data] . identifier[items] ()) identifier[OrderedDumper] . identifier[add_representer] ( identifier[OrderedDict] , identifier[dict_representer] ) identifier[obj_dict] = identifier[to_dict] ( identifier[obj] ,** identifier[kwargs] ) keyword[return] identifier[yaml] . identifier[dump] ( identifier[obj_dict] , identifier[stream] , identifier[OrderedDumper] , identifier[default_flow_style] = identifier[default_flow_style] )
def to_yaml(obj, stream=None, dumper_cls=yaml.Dumper, default_flow_style=False, **kwargs): """ Serialize a Python object into a YAML stream with OrderedDict and default_flow_style defaulted to False. If stream is None, return the produced string instead. OrderedDict reference: http://stackoverflow.com/a/21912744 default_flow_style reference: http://stackoverflow.com/a/18210750 :param data: python object to be serialized :param stream: to be serialized to :param Dumper: base Dumper class to extend. :param kwargs: arguments to pass to to_dict :return: stream if provided, string if stream is None """ class OrderedDumper(dumper_cls): pass def dict_representer(dumper, data): return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()) OrderedDumper.add_representer(OrderedDict, dict_representer) obj_dict = to_dict(obj, **kwargs) return yaml.dump(obj_dict, stream, OrderedDumper, default_flow_style=default_flow_style)
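The custom-Dumper trick above can be exercised on its own with PyYAML; a small sketch that skips the module's to_dict step and dumps an OrderedDict directly:

from collections import OrderedDict
import yaml

class OrderedDumper(yaml.Dumper):
    pass

def _dict_representer(dumper, data):
    # Emit OrderedDict entries in insertion order instead of PyYAML's default key sort.
    return dumper.represent_mapping(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items())

OrderedDumper.add_representer(OrderedDict, _dict_representer)

print(yaml.dump(OrderedDict([('b', 2), ('a', 1)]),
                Dumper=OrderedDumper, default_flow_style=False))
# b: 2
# a: 1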
def Overlay(child, parent): """Adds hint attributes to a child hint if they are not defined.""" for arg in child, parent: if not isinstance(arg, collections.Mapping): raise DefinitionError("Trying to merge badly defined hints. Child: %s, " "Parent: %s" % (type(child), type(parent))) for attr in ["fix", "format", "problem", "summary"]: if not child.get(attr): child[attr] = parent.get(attr, "").strip() return child
def function[Overlay, parameter[child, parent]]: constant[Adds hint attributes to a child hint if they are not defined.] for taget[name[arg]] in starred[tuple[[<ast.Name object at 0x7da1b1cc13c0>, <ast.Name object at 0x7da1b1cc31c0>]]] begin[:] if <ast.UnaryOp object at 0x7da1b1cc23b0> begin[:] <ast.Raise object at 0x7da1b1cc3ca0> for taget[name[attr]] in starred[list[[<ast.Constant object at 0x7da1b1cc2cb0>, <ast.Constant object at 0x7da1b1cc0490>, <ast.Constant object at 0x7da1b1cc3610>, <ast.Constant object at 0x7da1b1cc2590>]]] begin[:] if <ast.UnaryOp object at 0x7da1b1cc1b70> begin[:] call[name[child]][name[attr]] assign[=] call[call[name[parent].get, parameter[name[attr], constant[]]].strip, parameter[]] return[name[child]]
keyword[def] identifier[Overlay] ( identifier[child] , identifier[parent] ): literal[string] keyword[for] identifier[arg] keyword[in] identifier[child] , identifier[parent] : keyword[if] keyword[not] identifier[isinstance] ( identifier[arg] , identifier[collections] . identifier[Mapping] ): keyword[raise] identifier[DefinitionError] ( literal[string] literal[string] %( identifier[type] ( identifier[child] ), identifier[type] ( identifier[parent] ))) keyword[for] identifier[attr] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]: keyword[if] keyword[not] identifier[child] . identifier[get] ( identifier[attr] ): identifier[child] [ identifier[attr] ]= identifier[parent] . identifier[get] ( identifier[attr] , literal[string] ). identifier[strip] () keyword[return] identifier[child]
def Overlay(child, parent): """Adds hint attributes to a child hint if they are not defined.""" for arg in (child, parent): if not isinstance(arg, collections.Mapping): raise DefinitionError('Trying to merge badly defined hints. Child: %s, Parent: %s' % (type(child), type(parent))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['arg']] for attr in ['fix', 'format', 'problem', 'summary']: if not child.get(attr): child[attr] = parent.get(attr, '').strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['attr']] return child
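Illustrative call, relying only on the Overlay definition above (note that on Python 3.10+ the isinstance check would need collections.abc.Mapping rather than collections.Mapping):

child = {'problem': 'disk nearly full', 'fix': ''}
parent = {'fix': 'expand the volume ', 'summary': 'capacity check'}
Overlay(child, parent)
# child is filled in place for every falsy or missing attribute, with parent values stripped:
# {'problem': 'disk nearly full', 'fix': 'expand the volume', 'format': '', 'summary': 'capacity check'}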
def prepare_relationships(db, known_tables): """Enrich the registered Models with SQLAlchemy ``relationships`` so that related tables are correctly processed up by the admin. """ inspector = reflection.Inspector.from_engine(db.engine) for cls in set(known_tables.values()): for foreign_key in inspector.get_foreign_keys(cls.__tablename__): if foreign_key['referred_table'] in known_tables: other = known_tables[foreign_key['referred_table']] constrained_column = foreign_key['constrained_columns'] if other not in cls.__related_tables__ and cls not in ( other.__related_tables__) and other != cls: cls.__related_tables__.add(other) # Add a SQLAlchemy relationship as an attribute # on the class setattr(cls, other.__table__.name, relationship( other.__name__, backref=db.backref( cls.__name__.lower()), foreign_keys=str(cls.__name__) + '.' + ''.join(constrained_column)))
def function[prepare_relationships, parameter[db, known_tables]]: constant[Enrich the registered Models with SQLAlchemy ``relationships`` so that related tables are correctly processed up by the admin. ] variable[inspector] assign[=] call[name[reflection].Inspector.from_engine, parameter[name[db].engine]] for taget[name[cls]] in starred[call[name[set], parameter[call[name[known_tables].values, parameter[]]]]] begin[:] for taget[name[foreign_key]] in starred[call[name[inspector].get_foreign_keys, parameter[name[cls].__tablename__]]] begin[:] if compare[call[name[foreign_key]][constant[referred_table]] in name[known_tables]] begin[:] variable[other] assign[=] call[name[known_tables]][call[name[foreign_key]][constant[referred_table]]] variable[constrained_column] assign[=] call[name[foreign_key]][constant[constrained_columns]] if <ast.BoolOp object at 0x7da18f58ff70> begin[:] call[name[cls].__related_tables__.add, parameter[name[other]]] call[name[setattr], parameter[name[cls], name[other].__table__.name, call[name[relationship], parameter[name[other].__name__]]]]
keyword[def] identifier[prepare_relationships] ( identifier[db] , identifier[known_tables] ): literal[string] identifier[inspector] = identifier[reflection] . identifier[Inspector] . identifier[from_engine] ( identifier[db] . identifier[engine] ) keyword[for] identifier[cls] keyword[in] identifier[set] ( identifier[known_tables] . identifier[values] ()): keyword[for] identifier[foreign_key] keyword[in] identifier[inspector] . identifier[get_foreign_keys] ( identifier[cls] . identifier[__tablename__] ): keyword[if] identifier[foreign_key] [ literal[string] ] keyword[in] identifier[known_tables] : identifier[other] = identifier[known_tables] [ identifier[foreign_key] [ literal[string] ]] identifier[constrained_column] = identifier[foreign_key] [ literal[string] ] keyword[if] identifier[other] keyword[not] keyword[in] identifier[cls] . identifier[__related_tables__] keyword[and] identifier[cls] keyword[not] keyword[in] ( identifier[other] . identifier[__related_tables__] ) keyword[and] identifier[other] != identifier[cls] : identifier[cls] . identifier[__related_tables__] . identifier[add] ( identifier[other] ) identifier[setattr] ( identifier[cls] , identifier[other] . identifier[__table__] . identifier[name] , identifier[relationship] ( identifier[other] . identifier[__name__] , identifier[backref] = identifier[db] . identifier[backref] ( identifier[cls] . identifier[__name__] . identifier[lower] ()), identifier[foreign_keys] = identifier[str] ( identifier[cls] . identifier[__name__] )+ literal[string] + literal[string] . identifier[join] ( identifier[constrained_column] )))
def prepare_relationships(db, known_tables): """Enrich the registered Models with SQLAlchemy ``relationships`` so that related tables are correctly processed up by the admin. """ inspector = reflection.Inspector.from_engine(db.engine) for cls in set(known_tables.values()): for foreign_key in inspector.get_foreign_keys(cls.__tablename__): if foreign_key['referred_table'] in known_tables: other = known_tables[foreign_key['referred_table']] constrained_column = foreign_key['constrained_columns'] if other not in cls.__related_tables__ and cls not in other.__related_tables__ and (other != cls): cls.__related_tables__.add(other) # Add a SQLAlchemy relationship as an attribute # on the class setattr(cls, other.__table__.name, relationship(other.__name__, backref=db.backref(cls.__name__.lower()), foreign_keys=str(cls.__name__) + '.' + ''.join(constrained_column))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['known_tables']] # depends on [control=['for'], data=['foreign_key']] # depends on [control=['for'], data=['cls']]
def run_basic_group(): """Run the basic phase group example. In this example, there are no terminal phases; all phases are run. """ test = htf.Test(htf.PhaseGroup( setup=[setup_phase], main=[main_phase], teardown=[teardown_phase], )) test.execute()
def function[run_basic_group, parameter[]]: constant[Run the basic phase group example. In this example, there are no terminal phases; all phases are run. ] variable[test] assign[=] call[name[htf].Test, parameter[call[name[htf].PhaseGroup, parameter[]]]] call[name[test].execute, parameter[]]
keyword[def] identifier[run_basic_group] (): literal[string] identifier[test] = identifier[htf] . identifier[Test] ( identifier[htf] . identifier[PhaseGroup] ( identifier[setup] =[ identifier[setup_phase] ], identifier[main] =[ identifier[main_phase] ], identifier[teardown] =[ identifier[teardown_phase] ], )) identifier[test] . identifier[execute] ()
def run_basic_group(): """Run the basic phase group example. In this example, there are no terminal phases; all phases are run. """ test = htf.Test(htf.PhaseGroup(setup=[setup_phase], main=[main_phase], teardown=[teardown_phase])) test.execute()
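The three phase names are assumed to be plain openhtf phase callables defined elsewhere in the example; a hedged sketch of what they might look like:

import openhtf as htf  # run_basic_group above assumes this import

def setup_phase(test):
    test.logger.info('setting up')

def main_phase(test):
    test.logger.info('doing the measurement work')

def teardown_phase(test):
    test.logger.info('cleaning up')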
def splitname(path): """Split a path into a directory, name, and extensions.""" dirpath, filename = os.path.split(path) # we don't use os.path.splitext here because we want all extensions, # not just the last, to be put in exts name, exts = filename.split(os.extsep, 1) return dirpath, name, exts
def function[splitname, parameter[path]]: constant[Split a path into a directory, name, and extensions.] <ast.Tuple object at 0x7da20c6e7e50> assign[=] call[name[os].path.split, parameter[name[path]]] <ast.Tuple object at 0x7da20c6e4dc0> assign[=] call[name[filename].split, parameter[name[os].extsep, constant[1]]] return[tuple[[<ast.Name object at 0x7da20c6e5210>, <ast.Name object at 0x7da20c6e6a10>, <ast.Name object at 0x7da20c6e6f50>]]]
keyword[def] identifier[splitname] ( identifier[path] ): literal[string] identifier[dirpath] , identifier[filename] = identifier[os] . identifier[path] . identifier[split] ( identifier[path] ) identifier[name] , identifier[exts] = identifier[filename] . identifier[split] ( identifier[os] . identifier[extsep] , literal[int] ) keyword[return] identifier[dirpath] , identifier[name] , identifier[exts]
def splitname(path): """Split a path into a directory, name, and extensions.""" (dirpath, filename) = os.path.split(path) # we don't use os.path.splitext here because we want all extensions, # not just the last, to be put in exts (name, exts) = filename.split(os.extsep, 1) return (dirpath, name, exts)
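Quick usage check (note the helper assumes the filename contains at least one extension; a bare name would make the two-way unpack of split() raise ValueError):

dirpath, name, exts = splitname('/data/archive/report.tar.gz')
# dirpath == '/data/archive', name == 'report', exts == 'tar.gz'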
def _filter_db_instances_by_status(awsclient, db_instances, status_list): """helper to select dbinstances. :param awsclient: :param db_instances: :param status_list: :return: list of db_instances that match the filter """ client_rds = awsclient.get_client('rds') db_instances_with_status = [] for db in db_instances: response = client_rds.describe_db_instances( DBInstanceIdentifier=db ) for entry in response.get('DBInstances', []): if entry['DBInstanceStatus'] in status_list: db_instances_with_status.append(db) return db_instances_with_status
def function[_filter_db_instances_by_status, parameter[awsclient, db_instances, status_list]]: constant[helper to select dbinstances. :param awsclient: :param db_instances: :param status_list: :return: list of db_instances that match the filter ] variable[client_rds] assign[=] call[name[awsclient].get_client, parameter[constant[rds]]] variable[db_instances_with_status] assign[=] list[[]] for taget[name[db]] in starred[name[db_instances]] begin[:] variable[response] assign[=] call[name[client_rds].describe_db_instances, parameter[]] for taget[name[entry]] in starred[call[name[response].get, parameter[constant[DBInstances], list[[]]]]] begin[:] if compare[call[name[entry]][constant[DBInstanceStatus]] in name[status_list]] begin[:] call[name[db_instances_with_status].append, parameter[name[db]]] return[name[db_instances_with_status]]
keyword[def] identifier[_filter_db_instances_by_status] ( identifier[awsclient] , identifier[db_instances] , identifier[status_list] ): literal[string] identifier[client_rds] = identifier[awsclient] . identifier[get_client] ( literal[string] ) identifier[db_instances_with_status] =[] keyword[for] identifier[db] keyword[in] identifier[db_instances] : identifier[response] = identifier[client_rds] . identifier[describe_db_instances] ( identifier[DBInstanceIdentifier] = identifier[db] ) keyword[for] identifier[entry] keyword[in] identifier[response] . identifier[get] ( literal[string] ,[]): keyword[if] identifier[entry] [ literal[string] ] keyword[in] identifier[status_list] : identifier[db_instances_with_status] . identifier[append] ( identifier[db] ) keyword[return] identifier[db_instances_with_status]
def _filter_db_instances_by_status(awsclient, db_instances, status_list): """helper to select dbinstances. :param awsclient: :param db_instances: :param status_list: :return: list of db_instances that match the filter """ client_rds = awsclient.get_client('rds') db_instances_with_status = [] for db in db_instances: response = client_rds.describe_db_instances(DBInstanceIdentifier=db) for entry in response.get('DBInstances', []): if entry['DBInstanceStatus'] in status_list: db_instances_with_status.append(db) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry']] # depends on [control=['for'], data=['db']] return db_instances_with_status
def get_peer_cert_chain(self): """ Retrieve the other side's certificate (if any) :return: A list of X509 instances giving the peer's certificate chain, or None if it does not have one. """ cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl) if cert_stack == _ffi.NULL: return None result = [] for i in range(_lib.sk_X509_num(cert_stack)): # TODO could incref instead of dup here cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, i)) pycert = X509._from_raw_x509_ptr(cert) result.append(pycert) return result
def function[get_peer_cert_chain, parameter[self]]: constant[ Retrieve the other side's certificate (if any) :return: A list of X509 instances giving the peer's certificate chain, or None if it does not have one. ] variable[cert_stack] assign[=] call[name[_lib].SSL_get_peer_cert_chain, parameter[name[self]._ssl]] if compare[name[cert_stack] equal[==] name[_ffi].NULL] begin[:] return[constant[None]] variable[result] assign[=] list[[]] for taget[name[i]] in starred[call[name[range], parameter[call[name[_lib].sk_X509_num, parameter[name[cert_stack]]]]]] begin[:] variable[cert] assign[=] call[name[_lib].X509_dup, parameter[call[name[_lib].sk_X509_value, parameter[name[cert_stack], name[i]]]]] variable[pycert] assign[=] call[name[X509]._from_raw_x509_ptr, parameter[name[cert]]] call[name[result].append, parameter[name[pycert]]] return[name[result]]
keyword[def] identifier[get_peer_cert_chain] ( identifier[self] ): literal[string] identifier[cert_stack] = identifier[_lib] . identifier[SSL_get_peer_cert_chain] ( identifier[self] . identifier[_ssl] ) keyword[if] identifier[cert_stack] == identifier[_ffi] . identifier[NULL] : keyword[return] keyword[None] identifier[result] =[] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[_lib] . identifier[sk_X509_num] ( identifier[cert_stack] )): identifier[cert] = identifier[_lib] . identifier[X509_dup] ( identifier[_lib] . identifier[sk_X509_value] ( identifier[cert_stack] , identifier[i] )) identifier[pycert] = identifier[X509] . identifier[_from_raw_x509_ptr] ( identifier[cert] ) identifier[result] . identifier[append] ( identifier[pycert] ) keyword[return] identifier[result]
def get_peer_cert_chain(self): """ Retrieve the other side's certificate (if any) :return: A list of X509 instances giving the peer's certificate chain, or None if it does not have one. """ cert_stack = _lib.SSL_get_peer_cert_chain(self._ssl) if cert_stack == _ffi.NULL: return None # depends on [control=['if'], data=[]] result = [] for i in range(_lib.sk_X509_num(cert_stack)): # TODO could incref instead of dup here cert = _lib.X509_dup(_lib.sk_X509_value(cert_stack, i)) pycert = X509._from_raw_x509_ptr(cert) result.append(pycert) # depends on [control=['for'], data=['i']] return result
def get_absolute_url(self): """Returns the default URL for this dashboard. The default URL is defined as the URL pattern with ``name="index"`` in the URLconf for the :class:`~horizon.Panel` specified by :attr:`~horizon.Dashboard.default_panel`. """ try: return self._registered(self.default_panel).get_absolute_url() except Exception: # Logging here since this will often be called in a template # where the exception would be hidden. LOG.exception("Error reversing absolute URL for %s.", self) raise
def function[get_absolute_url, parameter[self]]: constant[Returns the default URL for this dashboard. The default URL is defined as the URL pattern with ``name="index"`` in the URLconf for the :class:`~horizon.Panel` specified by :attr:`~horizon.Dashboard.default_panel`. ] <ast.Try object at 0x7da1b19d98a0>
keyword[def] identifier[get_absolute_url] ( identifier[self] ): literal[string] keyword[try] : keyword[return] identifier[self] . identifier[_registered] ( identifier[self] . identifier[default_panel] ). identifier[get_absolute_url] () keyword[except] identifier[Exception] : identifier[LOG] . identifier[exception] ( literal[string] , identifier[self] ) keyword[raise]
def get_absolute_url(self): """Returns the default URL for this dashboard. The default URL is defined as the URL pattern with ``name="index"`` in the URLconf for the :class:`~horizon.Panel` specified by :attr:`~horizon.Dashboard.default_panel`. """ try: return self._registered(self.default_panel).get_absolute_url() # depends on [control=['try'], data=[]] except Exception: # Logging here since this will often be called in a template # where the exception would be hidden. LOG.exception('Error reversing absolute URL for %s.', self) raise # depends on [control=['except'], data=[]]
def cmd(send, msg, args): """Queries WolframAlpha. Syntax: {command} <expression> """ if not msg: send("Evaluate what?") return params = {'format': 'plaintext', 'reinterpret': 'true', 'input': msg, 'appid': args['config']['api']['wolframapikey']} req = get('http://api.wolframalpha.com/v2/query', params=params) if req.status_code == 403: send("WolframAlpha is having issues.") return if not req.content: send("WolframAlpha returned an empty response.") return xml = fromstring(req.content) output = xml.findall('./pod') key = args['config']['api']['bitlykey'] url = get_short("http://www.wolframalpha.com/input/?i=%s" % quote(msg), key) text = "No output found." for x in output: if 'primary' in x.keys(): text = x.find('./subpod/plaintext').text if text is None: send("No Output parsable") else: # Only send the first three lines of output for t in text.splitlines()[:3]: send(t) send("See %s for more info" % url)
def function[cmd, parameter[send, msg, args]]: constant[Queries WolframAlpha. Syntax: {command} <expression> ] if <ast.UnaryOp object at 0x7da1b20d64a0> begin[:] call[name[send], parameter[constant[Evaluate what?]]] return[None] variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b20d5cc0>, <ast.Constant object at 0x7da1b20d58d0>, <ast.Constant object at 0x7da1b20d4b80>, <ast.Constant object at 0x7da1b20d5510>], [<ast.Constant object at 0x7da1b20d65c0>, <ast.Constant object at 0x7da1b20d5ae0>, <ast.Name object at 0x7da1b20d4be0>, <ast.Subscript object at 0x7da1b20d5720>]] variable[req] assign[=] call[name[get], parameter[constant[http://api.wolframalpha.com/v2/query]]] if compare[name[req].status_code equal[==] constant[403]] begin[:] call[name[send], parameter[constant[WolframAlpha is having issues.]]] return[None] if <ast.UnaryOp object at 0x7da1b20d6290> begin[:] call[name[send], parameter[constant[WolframAlpha returned an empty response.]]] return[None] variable[xml] assign[=] call[name[fromstring], parameter[name[req].content]] variable[output] assign[=] call[name[xml].findall, parameter[constant[./pod]]] variable[key] assign[=] call[call[call[name[args]][constant[config]]][constant[api]]][constant[bitlykey]] variable[url] assign[=] call[name[get_short], parameter[binary_operation[constant[http://www.wolframalpha.com/input/?i=%s] <ast.Mod object at 0x7da2590d6920> call[name[quote], parameter[name[msg]]]], name[key]]] variable[text] assign[=] constant[No output found.] for taget[name[x]] in starred[name[output]] begin[:] if compare[constant[primary] in call[name[x].keys, parameter[]]] begin[:] variable[text] assign[=] call[name[x].find, parameter[constant[./subpod/plaintext]]].text if compare[name[text] is constant[None]] begin[:] call[name[send], parameter[constant[No Output parsable]]] call[name[send], parameter[binary_operation[constant[See %s for more info] <ast.Mod object at 0x7da2590d6920> name[url]]]]
keyword[def] identifier[cmd] ( identifier[send] , identifier[msg] , identifier[args] ): literal[string] keyword[if] keyword[not] identifier[msg] : identifier[send] ( literal[string] ) keyword[return] identifier[params] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[msg] , literal[string] : identifier[args] [ literal[string] ][ literal[string] ][ literal[string] ]} identifier[req] = identifier[get] ( literal[string] , identifier[params] = identifier[params] ) keyword[if] identifier[req] . identifier[status_code] == literal[int] : identifier[send] ( literal[string] ) keyword[return] keyword[if] keyword[not] identifier[req] . identifier[content] : identifier[send] ( literal[string] ) keyword[return] identifier[xml] = identifier[fromstring] ( identifier[req] . identifier[content] ) identifier[output] = identifier[xml] . identifier[findall] ( literal[string] ) identifier[key] = identifier[args] [ literal[string] ][ literal[string] ][ literal[string] ] identifier[url] = identifier[get_short] ( literal[string] % identifier[quote] ( identifier[msg] ), identifier[key] ) identifier[text] = literal[string] keyword[for] identifier[x] keyword[in] identifier[output] : keyword[if] literal[string] keyword[in] identifier[x] . identifier[keys] (): identifier[text] = identifier[x] . identifier[find] ( literal[string] ). identifier[text] keyword[if] identifier[text] keyword[is] keyword[None] : identifier[send] ( literal[string] ) keyword[else] : keyword[for] identifier[t] keyword[in] identifier[text] . identifier[splitlines] ()[: literal[int] ]: identifier[send] ( identifier[t] ) identifier[send] ( literal[string] % identifier[url] )
def cmd(send, msg, args): """Queries WolframAlpha. Syntax: {command} <expression> """ if not msg: send('Evaluate what?') return # depends on [control=['if'], data=[]] params = {'format': 'plaintext', 'reinterpret': 'true', 'input': msg, 'appid': args['config']['api']['wolframapikey']} req = get('http://api.wolframalpha.com/v2/query', params=params) if req.status_code == 403: send('WolframAlpha is having issues.') return # depends on [control=['if'], data=[]] if not req.content: send('WolframAlpha returned an empty response.') return # depends on [control=['if'], data=[]] xml = fromstring(req.content) output = xml.findall('./pod') key = args['config']['api']['bitlykey'] url = get_short('http://www.wolframalpha.com/input/?i=%s' % quote(msg), key) text = 'No output found.' for x in output: if 'primary' in x.keys(): text = x.find('./subpod/plaintext').text # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] if text is None: send('No Output parsable') # depends on [control=['if'], data=[]] else: # Only send the first three lines of output for t in text.splitlines()[:3]: send(t) # depends on [control=['for'], data=['t']] send('See %s for more info' % url)
def _set_ethernet(self, v, load=False): """ Setter method for ethernet, mapped from YANG variable /interface/ethernet (list) If this variable is read-only (config: false) in the source YANG file, then _set_ethernet is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ethernet() directly. YANG Description: The list of Ethernet interfaces in the managed device. Each row represents a Ethernet interface. The list provides a way to discover all the physical interfaces in a managed device. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",ethernet.ethernet, yang_name="ethernet", rest_name="Ethernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of Ethernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'Ethernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'callpoint': u'interface_phyintf', u'cli-mode-name': u'conf-if-eth-$(name)'}}), is_container='list', yang_name="ethernet", rest_name="Ethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of Ethernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'Ethernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'callpoint': u'interface_phyintf', u'cli-mode-name': u'conf-if-eth-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ethernet must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",ethernet.ethernet, yang_name="ethernet", rest_name="Ethernet", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of Ethernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'Ethernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'callpoint': u'interface_phyintf', u'cli-mode-name': u'conf-if-eth-$(name)'}}), is_container='list', yang_name="ethernet", rest_name="Ethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of Ethernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'Ethernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'callpoint': u'interface_phyintf', u'cli-mode-name': 
u'conf-if-eth-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True)""", }) self.__ethernet = t if hasattr(self, '_set'): self._set()
def function[_set_ethernet, parameter[self, v, load]]: constant[ Setter method for ethernet, mapped from YANG variable /interface/ethernet (list) If this variable is read-only (config: false) in the source YANG file, then _set_ethernet is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ethernet() directly. YANG Description: The list of Ethernet interfaces in the managed device. Each row represents a Ethernet interface. The list provides a way to discover all the physical interfaces in a managed device. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18eb55e40> name[self].__ethernet assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_ethernet] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGListType] ( literal[string] , identifier[ethernet] . identifier[ethernet] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[is_container] = literal[string] , identifier[user_ordered] = keyword[True] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[yang_keys] = literal[string] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}), identifier[is_container] = literal[string] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__ethernet] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_ethernet(self, v, load=False): """ Setter method for ethernet, mapped from YANG variable /interface/ethernet (list) If this variable is read-only (config: false) in the source YANG file, then _set_ethernet is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ethernet() directly. YANG Description: The list of Ethernet interfaces in the managed device. Each row represents a Ethernet interface. The list provides a way to discover all the physical interfaces in a managed device. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=YANGListType('name', ethernet.ethernet, yang_name='ethernet', rest_name='Ethernet', parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'The list of Ethernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'Ethernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'callpoint': u'interface_phyintf', u'cli-mode-name': u'conf-if-eth-$(name)'}}), is_container='list', yang_name='ethernet', rest_name='Ethernet', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of Ethernet interfaces.', u'cli-no-key-completion': None, u'alt-name': u'Ethernet', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL', u'cli-suppress-no': None, u'cli-suppress-show-path': None, u'cli-custom-range-actionpoint': u'NsmRangeCliActionpoint', u'cli-custom-range-enumerator': u'NsmRangeCliActionpoint', u'cli-no-match-completion': None, u'callpoint': u'interface_phyintf', u'cli-mode-name': u'conf-if-eth-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='list', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'ethernet must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("name",ethernet.ethernet, yang_name="ethernet", rest_name="Ethernet", parent=self, is_container=\'list\', user_ordered=True, path_helper=self._path_helper, yang_keys=\'name\', extensions={u\'tailf-common\': {u\'info\': u\'The list of Ethernet interfaces.\', u\'cli-no-key-completion\': None, u\'alt-name\': u\'Ethernet\', u\'sort-priority\': u\'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL\', u\'cli-suppress-no\': None, u\'cli-suppress-show-path\': None, u\'cli-custom-range-actionpoint\': u\'NsmRangeCliActionpoint\', u\'cli-custom-range-enumerator\': u\'NsmRangeCliActionpoint\', u\'cli-no-match-completion\': None, u\'callpoint\': u\'interface_phyintf\', u\'cli-mode-name\': u\'conf-if-eth-$(name)\'}}), is_container=\'list\', yang_name="ethernet", rest_name="Ethernet", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'The list of Ethernet interfaces.\', u\'cli-no-key-completion\': None, u\'alt-name\': u\'Ethernet\', u\'sort-priority\': u\'RUNNCFG_LEVEL_INTERFACE_TYPE_PHYSICAL\', u\'cli-suppress-no\': None, u\'cli-suppress-show-path\': None, u\'cli-custom-range-actionpoint\': u\'NsmRangeCliActionpoint\', 
u\'cli-custom-range-enumerator\': u\'NsmRangeCliActionpoint\', u\'cli-no-match-completion\': None, u\'callpoint\': u\'interface_phyintf\', u\'cli-mode-name\': u\'conf-if-eth-$(name)\'}}, namespace=\'urn:brocade.com:mgmt:brocade-interface\', defining_module=\'brocade-interface\', yang_type=\'list\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__ethernet = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def _handle_lines(self): """Assemble incoming data into per-line packets.""" while b'\xdd' in self._buffer: linebuf, self._buffer = self._buffer.rsplit(b'\xdd', 1) line = linebuf[-19:] self._buffer += linebuf[:-19] if self._valid_packet(line): self._handle_raw_packet(line) else: self.logger.warning('dropping invalid data: %s', binascii.hexlify(line))
def function[_handle_lines, parameter[self]]: constant[Assemble incoming data into per-line packets.] while compare[constant[b'\xdd'] in name[self]._buffer] begin[:] <ast.Tuple object at 0x7da1b2780fa0> assign[=] call[name[self]._buffer.rsplit, parameter[constant[b'\xdd'], constant[1]]] variable[line] assign[=] call[name[linebuf]][<ast.Slice object at 0x7da1b27828c0>] <ast.AugAssign object at 0x7da1b2716350> if call[name[self]._valid_packet, parameter[name[line]]] begin[:] call[name[self]._handle_raw_packet, parameter[name[line]]]
keyword[def] identifier[_handle_lines] ( identifier[self] ): literal[string] keyword[while] literal[string] keyword[in] identifier[self] . identifier[_buffer] : identifier[linebuf] , identifier[self] . identifier[_buffer] = identifier[self] . identifier[_buffer] . identifier[rsplit] ( literal[string] , literal[int] ) identifier[line] = identifier[linebuf] [- literal[int] :] identifier[self] . identifier[_buffer] += identifier[linebuf] [:- literal[int] ] keyword[if] identifier[self] . identifier[_valid_packet] ( identifier[line] ): identifier[self] . identifier[_handle_raw_packet] ( identifier[line] ) keyword[else] : identifier[self] . identifier[logger] . identifier[warning] ( literal[string] , identifier[binascii] . identifier[hexlify] ( identifier[line] ))
def _handle_lines(self): """Assemble incoming data into per-line packets.""" while b'\xdd' in self._buffer: (linebuf, self._buffer) = self._buffer.rsplit(b'\xdd', 1) line = linebuf[-19:] self._buffer += linebuf[:-19] if self._valid_packet(line): self._handle_raw_packet(line) # depends on [control=['if'], data=[]] else: self.logger.warning('dropping invalid data: %s', binascii.hexlify(line)) # depends on [control=['while'], data=[]]
def profile(fun, *args, **kwargs): """ Profile a function. """ timer_name = kwargs.pop("prof_name", None) if not timer_name: module = inspect.getmodule(fun) c = [module.__name__] parentclass = labtypes.get_class_that_defined_method(fun) if parentclass: c.append(parentclass.__name__) c.append(fun.__name__) timer_name = ".".join(c) start(timer_name) ret = fun(*args, **kwargs) stop(timer_name) return ret
def function[profile, parameter[fun]]: constant[ Profile a function. ] variable[timer_name] assign[=] call[name[kwargs].pop, parameter[constant[prof_name], constant[None]]] if <ast.UnaryOp object at 0x7da18c4cf7f0> begin[:] variable[module] assign[=] call[name[inspect].getmodule, parameter[name[fun]]] variable[c] assign[=] list[[<ast.Attribute object at 0x7da18c4cdcc0>]] variable[parentclass] assign[=] call[name[labtypes].get_class_that_defined_method, parameter[name[fun]]] if name[parentclass] begin[:] call[name[c].append, parameter[name[parentclass].__name__]] call[name[c].append, parameter[name[fun].__name__]] variable[timer_name] assign[=] call[constant[.].join, parameter[name[c]]] call[name[start], parameter[name[timer_name]]] variable[ret] assign[=] call[name[fun], parameter[<ast.Starred object at 0x7da1b17df4c0>]] call[name[stop], parameter[name[timer_name]]] return[name[ret]]
keyword[def] identifier[profile] ( identifier[fun] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[timer_name] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] ) keyword[if] keyword[not] identifier[timer_name] : identifier[module] = identifier[inspect] . identifier[getmodule] ( identifier[fun] ) identifier[c] =[ identifier[module] . identifier[__name__] ] identifier[parentclass] = identifier[labtypes] . identifier[get_class_that_defined_method] ( identifier[fun] ) keyword[if] identifier[parentclass] : identifier[c] . identifier[append] ( identifier[parentclass] . identifier[__name__] ) identifier[c] . identifier[append] ( identifier[fun] . identifier[__name__] ) identifier[timer_name] = literal[string] . identifier[join] ( identifier[c] ) identifier[start] ( identifier[timer_name] ) identifier[ret] = identifier[fun] (* identifier[args] ,** identifier[kwargs] ) identifier[stop] ( identifier[timer_name] ) keyword[return] identifier[ret]
def profile(fun, *args, **kwargs): """ Profile a function. """ timer_name = kwargs.pop('prof_name', None) if not timer_name: module = inspect.getmodule(fun) c = [module.__name__] parentclass = labtypes.get_class_that_defined_method(fun) if parentclass: c.append(parentclass.__name__) # depends on [control=['if'], data=[]] c.append(fun.__name__) timer_name = '.'.join(c) # depends on [control=['if'], data=[]] start(timer_name) ret = fun(*args, **kwargs) stop(timer_name) return ret
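A usage sketch, assuming the surrounding module's start()/stop() timer helpers are available (they are referenced but not defined in the snippet):

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

# Times the call under an explicit timer name; without prof_name the name is
# derived from the function's module (and defining class, if any).
result = profile(fib, 20, prof_name='demo.fib')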
def duration( days=0, # type: float seconds=0, # type: float microseconds=0, # type: float milliseconds=0, # type: float minutes=0, # type: float hours=0, # type: float weeks=0, # type: float years=0, # type: float months=0, # type: float ): # type: (...) -> Duration """ Create a Duration instance. """ return Duration( days=days, seconds=seconds, microseconds=microseconds, milliseconds=milliseconds, minutes=minutes, hours=hours, weeks=weeks, years=years, months=months, )
def function[duration, parameter[days, seconds, microseconds, milliseconds, minutes, hours, weeks, years, months]]: constant[ Create a Duration instance. ] return[call[name[Duration], parameter[]]]
keyword[def] identifier[duration] ( identifier[days] = literal[int] , identifier[seconds] = literal[int] , identifier[microseconds] = literal[int] , identifier[milliseconds] = literal[int] , identifier[minutes] = literal[int] , identifier[hours] = literal[int] , identifier[weeks] = literal[int] , identifier[years] = literal[int] , identifier[months] = literal[int] , ): literal[string] keyword[return] identifier[Duration] ( identifier[days] = identifier[days] , identifier[seconds] = identifier[seconds] , identifier[microseconds] = identifier[microseconds] , identifier[milliseconds] = identifier[milliseconds] , identifier[minutes] = identifier[minutes] , identifier[hours] = identifier[hours] , identifier[weeks] = identifier[weeks] , identifier[years] = identifier[years] , identifier[months] = identifier[months] , )
def duration(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0, years=0, months=0): # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: float # type: (...) -> Duration '\n Create a Duration instance.\n ' return Duration(days=days, seconds=seconds, microseconds=microseconds, milliseconds=milliseconds, minutes=minutes, hours=hours, weeks=weeks, years=years, months=months)
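Example call; assuming Duration behaves like datetime.timedelta for the fixed units (pendulum's Duration does), the total comes out as expected:

d = duration(days=1, hours=5, minutes=30)
assert d.total_seconds() == 1 * 86400 + 5 * 3600 + 30 * 60  # 106200.0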
def ParseDict(js_dict, message, ignore_unknown_fields=False): """Parses a JSON dictionary representation into a message. Args: js_dict: Dict representation of a JSON message. message: A protocol buffer message to merge into. ignore_unknown_fields: If True, do not raise errors for unknown fields. Returns: The same message passed as argument. """ parser = _Parser(ignore_unknown_fields) parser.ConvertMessage(js_dict, message) return message
def function[ParseDict, parameter[js_dict, message, ignore_unknown_fields]]: constant[Parses a JSON dictionary representation into a message. Args: js_dict: Dict representation of a JSON message. message: A protocol buffer message to merge into. ignore_unknown_fields: If True, do not raise errors for unknown fields. Returns: The same message passed as argument. ] variable[parser] assign[=] call[name[_Parser], parameter[name[ignore_unknown_fields]]] call[name[parser].ConvertMessage, parameter[name[js_dict], name[message]]] return[name[message]]
keyword[def] identifier[ParseDict] ( identifier[js_dict] , identifier[message] , identifier[ignore_unknown_fields] = keyword[False] ): literal[string] identifier[parser] = identifier[_Parser] ( identifier[ignore_unknown_fields] ) identifier[parser] . identifier[ConvertMessage] ( identifier[js_dict] , identifier[message] ) keyword[return] identifier[message]
def ParseDict(js_dict, message, ignore_unknown_fields=False): """Parses a JSON dictionary representation into a message. Args: js_dict: Dict representation of a JSON message. message: A protocol buffer message to merge into. ignore_unknown_fields: If True, do not raise errors for unknown fields. Returns: The same message passed as argument. """ parser = _Parser(ignore_unknown_fields) parser.ConvertMessage(js_dict, message) return message
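A quick usage sketch with protobuf's well-known Struct type (which models an arbitrary JSON object), assuming this is the ParseDict exposed by google.protobuf.json_format:

from google.protobuf import struct_pb2

msg = struct_pb2.Struct()
ParseDict({'name': 'demo', 'count': 3}, msg)
# msg now carries both fields; JSON numbers are stored as doubles, so
# msg['count'] == 3.0. Unknown fields on ordinary messages would raise
# unless ignore_unknown_fields=True is passed.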
def _get_batch_name(sample): """Retrieve batch name for use in SV calling outputs. Handles multiple batches split via SV calling. """ batch = dd.get_batch(sample) or dd.get_sample_name(sample) if isinstance(batch, (list, tuple)) and len(batch) > 1: batch = dd.get_sample_name(sample) return batch
def function[_get_batch_name, parameter[sample]]: constant[Retrieve batch name for use in SV calling outputs. Handles multiple batches split via SV calling. ] variable[batch] assign[=] <ast.BoolOp object at 0x7da1b2344670> if <ast.BoolOp object at 0x7da1b23456f0> begin[:] variable[batch] assign[=] call[name[dd].get_sample_name, parameter[name[sample]]] return[name[batch]]
keyword[def] identifier[_get_batch_name] ( identifier[sample] ): literal[string] identifier[batch] = identifier[dd] . identifier[get_batch] ( identifier[sample] ) keyword[or] identifier[dd] . identifier[get_sample_name] ( identifier[sample] ) keyword[if] identifier[isinstance] ( identifier[batch] ,( identifier[list] , identifier[tuple] )) keyword[and] identifier[len] ( identifier[batch] )> literal[int] : identifier[batch] = identifier[dd] . identifier[get_sample_name] ( identifier[sample] ) keyword[return] identifier[batch]
def _get_batch_name(sample): """Retrieve batch name for use in SV calling outputs. Handles multiple batches split via SV calling. """ batch = dd.get_batch(sample) or dd.get_sample_name(sample) if isinstance(batch, (list, tuple)) and len(batch) > 1: batch = dd.get_sample_name(sample) # depends on [control=['if'], data=[]] return batch
def get_slow_provider(self, timeout: int): """ Get web3 provider for slow queries. Default `HTTPProvider` timeouts after 10 seconds :param provider: Configured Web3 provider :param timeout: Timeout to configure for internal requests (default is 10) :return: A new web3 provider with the `slow_provider_timeout` """ if isinstance(self.w3_provider, AutoProvider): return HTTPProvider(endpoint_uri='http://localhost:8545', request_kwargs={'timeout': timeout}) elif isinstance(self.w3_provider, HTTPProvider): return HTTPProvider(endpoint_uri=self.w3_provider.endpoint_uri, request_kwargs={'timeout': timeout}) else: return self.w3_provider
def function[get_slow_provider, parameter[self, timeout]]: constant[ Get web3 provider for slow queries. Default `HTTPProvider` timeouts after 10 seconds :param provider: Configured Web3 provider :param timeout: Timeout to configure for internal requests (default is 10) :return: A new web3 provider with the `slow_provider_timeout` ] if call[name[isinstance], parameter[name[self].w3_provider, name[AutoProvider]]] begin[:] return[call[name[HTTPProvider], parameter[]]]
keyword[def] identifier[get_slow_provider] ( identifier[self] , identifier[timeout] : identifier[int] ): literal[string] keyword[if] identifier[isinstance] ( identifier[self] . identifier[w3_provider] , identifier[AutoProvider] ): keyword[return] identifier[HTTPProvider] ( identifier[endpoint_uri] = literal[string] , identifier[request_kwargs] ={ literal[string] : identifier[timeout] }) keyword[elif] identifier[isinstance] ( identifier[self] . identifier[w3_provider] , identifier[HTTPProvider] ): keyword[return] identifier[HTTPProvider] ( identifier[endpoint_uri] = identifier[self] . identifier[w3_provider] . identifier[endpoint_uri] , identifier[request_kwargs] ={ literal[string] : identifier[timeout] }) keyword[else] : keyword[return] identifier[self] . identifier[w3_provider]
def get_slow_provider(self, timeout: int): """ Get web3 provider for slow queries. Default `HTTPProvider` timeouts after 10 seconds :param provider: Configured Web3 provider :param timeout: Timeout to configure for internal requests (default is 10) :return: A new web3 provider with the `slow_provider_timeout` """ if isinstance(self.w3_provider, AutoProvider): return HTTPProvider(endpoint_uri='http://localhost:8545', request_kwargs={'timeout': timeout}) # depends on [control=['if'], data=[]] elif isinstance(self.w3_provider, HTTPProvider): return HTTPProvider(endpoint_uri=self.w3_provider.endpoint_uri, request_kwargs={'timeout': timeout}) # depends on [control=['if'], data=[]] else: return self.w3_provider
def delete_credit_card(self, *, customer_id, credit_card_id): """ Delete a credit card (Token) associated with a user. Args: customer_id: Identifier of the client of whom you are going to delete the token. credit_card_id: Identifier of the token to be deleted. Returns: """ fmt = 'customers/{}/creditCards/{}'.format(customer_id, credit_card_id) return self.client._delete(self.url + fmt, headers=self.get_headers())
def function[delete_credit_card, parameter[self]]: constant[ Delete a credit card (Token) associated with a user. Args: customer_id: Identifier of the client of whom you are going to delete the token. credit_card_id: Identifier of the token to be deleted. Returns: ] variable[fmt] assign[=] call[constant[customers/{}/creditCards/{}].format, parameter[name[customer_id], name[credit_card_id]]] return[call[name[self].client._delete, parameter[binary_operation[name[self].url + name[fmt]]]]]
keyword[def] identifier[delete_credit_card] ( identifier[self] ,*, identifier[customer_id] , identifier[credit_card_id] ): literal[string] identifier[fmt] = literal[string] . identifier[format] ( identifier[customer_id] , identifier[credit_card_id] ) keyword[return] identifier[self] . identifier[client] . identifier[_delete] ( identifier[self] . identifier[url] + identifier[fmt] , identifier[headers] = identifier[self] . identifier[get_headers] ())
def delete_credit_card(self, *, customer_id, credit_card_id): """ Delete a credit card (Token) associated with a user. Args: customer_id: Identifier of the client of whom you are going to delete the token. credit_card_id: Identifier of the token to be deleted. Returns: """ fmt = 'customers/{}/creditCards/{}'.format(customer_id, credit_card_id) return self.client._delete(self.url + fmt, headers=self.get_headers())
async def get_timezone(self) -> Optional[tzinfo]: """ We can't exactly know the time zone of the user from what Facebook gives (fucking morons) but we can still give something that'll work until next DST. """ u = await self._get_user() diff = float(u.get('timezone', 0)) * 3600.0 return tz.tzoffset('ITC', diff)
<ast.AsyncFunctionDef object at 0x7da18dc07a60>
keyword[async] keyword[def] identifier[get_timezone] ( identifier[self] )-> identifier[Optional] [ identifier[tzinfo] ]: literal[string] identifier[u] = keyword[await] identifier[self] . identifier[_get_user] () identifier[diff] = identifier[float] ( identifier[u] . identifier[get] ( literal[string] , literal[int] ))* literal[int] keyword[return] identifier[tz] . identifier[tzoffset] ( literal[string] , identifier[diff] )
async def get_timezone(self) -> Optional[tzinfo]: """ We can't exactly know the time zone of the user from what Facebook gives (fucking morons) but we can still give something that'll work until next DST. """ u = await self._get_user() diff = float(u.get('timezone', 0)) * 3600.0 return tz.tzoffset('ITC', diff)
def add_node(self, node, offset): """Add a Node object to nodes dictionary, calculating its coordinates using offset Parameters ---------- node : a Node object offset : float number between 0 and 1 that sets the distance from the start point at which the node will be placed """ # calculate x,y from offset considering axis start and end points width = self.end[0] - self.start[0] height = self.end[1] - self.start[1] node.x = self.start[0] + (width * offset) node.y = self.start[1] + (height * offset) self.nodes[node.ID] = node
def function[add_node, parameter[self, node, offset]]: constant[Add a Node object to nodes dictionary, calculating its coordinates using offset Parameters ---------- node : a Node object offset : float number between 0 and 1 that sets the distance from the start point at which the node will be placed ] variable[width] assign[=] binary_operation[call[name[self].end][constant[0]] - call[name[self].start][constant[0]]] variable[height] assign[=] binary_operation[call[name[self].end][constant[1]] - call[name[self].start][constant[1]]] name[node].x assign[=] binary_operation[call[name[self].start][constant[0]] + binary_operation[name[width] * name[offset]]] name[node].y assign[=] binary_operation[call[name[self].start][constant[1]] + binary_operation[name[height] * name[offset]]] call[name[self].nodes][name[node].ID] assign[=] name[node]
keyword[def] identifier[add_node] ( identifier[self] , identifier[node] , identifier[offset] ): literal[string] identifier[width] = identifier[self] . identifier[end] [ literal[int] ]- identifier[self] . identifier[start] [ literal[int] ] identifier[height] = identifier[self] . identifier[end] [ literal[int] ]- identifier[self] . identifier[start] [ literal[int] ] identifier[node] . identifier[x] = identifier[self] . identifier[start] [ literal[int] ]+( identifier[width] * identifier[offset] ) identifier[node] . identifier[y] = identifier[self] . identifier[start] [ literal[int] ]+( identifier[height] * identifier[offset] ) identifier[self] . identifier[nodes] [ identifier[node] . identifier[ID] ]= identifier[node]
def add_node(self, node, offset): """Add a Node object to nodes dictionary, calculating its coordinates using offset Parameters ---------- node : a Node object offset : float number between 0 and 1 that sets the distance from the start point at which the node will be placed """ # calculate x,y from offset considering axis start and end points width = self.end[0] - self.start[0] height = self.end[1] - self.start[1] node.x = self.start[0] + width * offset node.y = self.start[1] + height * offset self.nodes[node.ID] = node
def _pad_zeros(self, bunch_stack): """ :type bunch_stack: list of list """ min_len = min(map(len, bunch_stack)) for i in range(len(bunch_stack)): bunch_stack[i] = bunch_stack[i][:min_len]
def function[_pad_zeros, parameter[self, bunch_stack]]: constant[ :type bunch_stack: list of list ] variable[min_len] assign[=] call[name[min], parameter[call[name[map], parameter[name[len], name[bunch_stack]]]]] for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[bunch_stack]]]]]] begin[:] call[name[bunch_stack]][name[i]] assign[=] call[call[name[bunch_stack]][name[i]]][<ast.Slice object at 0x7da1b039ab30>]
keyword[def] identifier[_pad_zeros] ( identifier[self] , identifier[bunch_stack] ): literal[string] identifier[min_len] = identifier[min] ( identifier[map] ( identifier[len] , identifier[bunch_stack] )) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[bunch_stack] )): identifier[bunch_stack] [ identifier[i] ]= identifier[bunch_stack] [ identifier[i] ][: identifier[min_len] ]
def _pad_zeros(self, bunch_stack): """ :type bunch_stack: list of list """ min_len = min(map(len, bunch_stack)) for i in range(len(bunch_stack)): bunch_stack[i] = bunch_stack[i][:min_len] # depends on [control=['for'], data=['i']]
def transmit(self, bytes, protocol=None): '''Gain exclusive access to card during APDU transmission for if this decorator decorates a PCSCCardConnection.''' data, sw1, sw2 = CardConnectionDecorator.transmit( self, bytes, protocol) return data, sw1, sw2
def function[transmit, parameter[self, bytes, protocol]]: constant[Gain exclusive access to card during APDU transmission for if this decorator decorates a PCSCCardConnection.] <ast.Tuple object at 0x7da1b23ee6b0> assign[=] call[name[CardConnectionDecorator].transmit, parameter[name[self], name[bytes], name[protocol]]] return[tuple[[<ast.Name object at 0x7da1b23eca30>, <ast.Name object at 0x7da1b23ef0d0>, <ast.Name object at 0x7da1b23ee560>]]]
keyword[def] identifier[transmit] ( identifier[self] , identifier[bytes] , identifier[protocol] = keyword[None] ): literal[string] identifier[data] , identifier[sw1] , identifier[sw2] = identifier[CardConnectionDecorator] . identifier[transmit] ( identifier[self] , identifier[bytes] , identifier[protocol] ) keyword[return] identifier[data] , identifier[sw1] , identifier[sw2]
def transmit(self, bytes, protocol=None): """Gain exclusive access to card during APDU transmission for if this decorator decorates a PCSCCardConnection.""" (data, sw1, sw2) = CardConnectionDecorator.transmit(self, bytes, protocol) return (data, sw1, sw2)
def tag(self, tokens): """Return a list of ((token, tag), label) tuples for a given list of (token, tag) tuples.""" # Lazy load model first time we tag if not self._loaded_model: self.load(self.model) features = [self._get_features(tokens, i) for i in range(len(tokens))] labels = self._tagger.tag(features) tagged_sent = list(zip(tokens, labels)) return tagged_sent
def function[tag, parameter[self, tokens]]: constant[Return a list of ((token, tag), label) tuples for a given list of (token, tag) tuples.] if <ast.UnaryOp object at 0x7da1b12c7fa0> begin[:] call[name[self].load, parameter[name[self].model]] variable[features] assign[=] <ast.ListComp object at 0x7da1b12c7b50> variable[labels] assign[=] call[name[self]._tagger.tag, parameter[name[features]]] variable[tagged_sent] assign[=] call[name[list], parameter[call[name[zip], parameter[name[tokens], name[labels]]]]] return[name[tagged_sent]]
keyword[def] identifier[tag] ( identifier[self] , identifier[tokens] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[_loaded_model] : identifier[self] . identifier[load] ( identifier[self] . identifier[model] ) identifier[features] =[ identifier[self] . identifier[_get_features] ( identifier[tokens] , identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[tokens] ))] identifier[labels] = identifier[self] . identifier[_tagger] . identifier[tag] ( identifier[features] ) identifier[tagged_sent] = identifier[list] ( identifier[zip] ( identifier[tokens] , identifier[labels] )) keyword[return] identifier[tagged_sent]
def tag(self, tokens): """Return a list of ((token, tag), label) tuples for a given list of (token, tag) tuples.""" # Lazy load model first time we tag if not self._loaded_model: self.load(self.model) # depends on [control=['if'], data=[]] features = [self._get_features(tokens, i) for i in range(len(tokens))] labels = self._tagger.tag(features) tagged_sent = list(zip(tokens, labels)) return tagged_sent
def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True): """Find the common intersection with another surface. Args: other (Surface): Other surface to intersect with. strategy (Optional[~bezier.curve.IntersectionStrategy]): The intersection algorithm to use. Defaults to geometric. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the algorithm as it proceeds. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: List[Union[~bezier.curved_polygon.CurvedPolygon, \ ~bezier.surface.Surface]]: List of intersections (possibly empty). Raises: TypeError: If ``other`` is not a surface (and ``_verify=True``). NotImplementedError: If at least one of the surfaces isn't two-dimensional (and ``_verify=True``). ValueError: If ``strategy`` is not a valid :class:`.IntersectionStrategy`. """ if _verify: if not isinstance(other, Surface): raise TypeError( "Can only intersect with another surface", "Received", other, ) if self._dimension != 2 or other._dimension != 2: raise NotImplementedError( "Intersection only implemented in 2D" ) if strategy == _STRATEGY.GEOMETRIC: do_intersect = _surface_intersection.geometric_intersect elif strategy == _STRATEGY.ALGEBRAIC: do_intersect = _surface_intersection.algebraic_intersect else: raise ValueError("Unexpected strategy.", strategy) edge_infos, contained, all_edge_nodes = do_intersect( self._nodes, self._degree, other._nodes, other._degree, _verify ) if edge_infos is None: if contained: return [self] else: return [other] else: return [ _make_intersection(edge_info, all_edge_nodes) for edge_info in edge_infos ]
def function[intersect, parameter[self, other, strategy, _verify]]: constant[Find the common intersection with another surface. Args: other (Surface): Other surface to intersect with. strategy (Optional[~bezier.curve.IntersectionStrategy]): The intersection algorithm to use. Defaults to geometric. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the algorithm as it proceeds. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: List[Union[~bezier.curved_polygon.CurvedPolygon, ~bezier.surface.Surface]]: List of intersections (possibly empty). Raises: TypeError: If ``other`` is not a surface (and ``_verify=True``). NotImplementedError: If at least one of the surfaces isn't two-dimensional (and ``_verify=True``). ValueError: If ``strategy`` is not a valid :class:`.IntersectionStrategy`. ] if name[_verify] begin[:] if <ast.UnaryOp object at 0x7da18dc9bc70> begin[:] <ast.Raise object at 0x7da18dc99780> if <ast.BoolOp object at 0x7da18dc9add0> begin[:] <ast.Raise object at 0x7da18dc9aad0> if compare[name[strategy] equal[==] name[_STRATEGY].GEOMETRIC] begin[:] variable[do_intersect] assign[=] name[_surface_intersection].geometric_intersect <ast.Tuple object at 0x7da18dc99330> assign[=] call[name[do_intersect], parameter[name[self]._nodes, name[self]._degree, name[other]._nodes, name[other]._degree, name[_verify]]] if compare[name[edge_infos] is constant[None]] begin[:] if name[contained] begin[:] return[list[[<ast.Name object at 0x7da18dc9a260>]]]
keyword[def] identifier[intersect] ( identifier[self] , identifier[other] , identifier[strategy] = identifier[_STRATEGY] . identifier[GEOMETRIC] , identifier[_verify] = keyword[True] ): literal[string] keyword[if] identifier[_verify] : keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[Surface] ): keyword[raise] identifier[TypeError] ( literal[string] , literal[string] , identifier[other] , ) keyword[if] identifier[self] . identifier[_dimension] != literal[int] keyword[or] identifier[other] . identifier[_dimension] != literal[int] : keyword[raise] identifier[NotImplementedError] ( literal[string] ) keyword[if] identifier[strategy] == identifier[_STRATEGY] . identifier[GEOMETRIC] : identifier[do_intersect] = identifier[_surface_intersection] . identifier[geometric_intersect] keyword[elif] identifier[strategy] == identifier[_STRATEGY] . identifier[ALGEBRAIC] : identifier[do_intersect] = identifier[_surface_intersection] . identifier[algebraic_intersect] keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] , identifier[strategy] ) identifier[edge_infos] , identifier[contained] , identifier[all_edge_nodes] = identifier[do_intersect] ( identifier[self] . identifier[_nodes] , identifier[self] . identifier[_degree] , identifier[other] . identifier[_nodes] , identifier[other] . identifier[_degree] , identifier[_verify] ) keyword[if] identifier[edge_infos] keyword[is] keyword[None] : keyword[if] identifier[contained] : keyword[return] [ identifier[self] ] keyword[else] : keyword[return] [ identifier[other] ] keyword[else] : keyword[return] [ identifier[_make_intersection] ( identifier[edge_info] , identifier[all_edge_nodes] ) keyword[for] identifier[edge_info] keyword[in] identifier[edge_infos] ]
def intersect(self, other, strategy=_STRATEGY.GEOMETRIC, _verify=True): """Find the common intersection with another surface. Args: other (Surface): Other surface to intersect with. strategy (Optional[~bezier.curve.IntersectionStrategy]): The intersection algorithm to use. Defaults to geometric. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the algorithm as it proceeds. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: List[Union[~bezier.curved_polygon.CurvedPolygon, ~bezier.surface.Surface]]: List of intersections (possibly empty). Raises: TypeError: If ``other`` is not a surface (and ``_verify=True``). NotImplementedError: If at least one of the surfaces isn't two-dimensional (and ``_verify=True``). ValueError: If ``strategy`` is not a valid :class:`.IntersectionStrategy`. """ if _verify: if not isinstance(other, Surface): raise TypeError('Can only intersect with another surface', 'Received', other) # depends on [control=['if'], data=[]] if self._dimension != 2 or other._dimension != 2: raise NotImplementedError('Intersection only implemented in 2D') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if strategy == _STRATEGY.GEOMETRIC: do_intersect = _surface_intersection.geometric_intersect # depends on [control=['if'], data=[]] elif strategy == _STRATEGY.ALGEBRAIC: do_intersect = _surface_intersection.algebraic_intersect # depends on [control=['if'], data=[]] else: raise ValueError('Unexpected strategy.', strategy) (edge_infos, contained, all_edge_nodes) = do_intersect(self._nodes, self._degree, other._nodes, other._degree, _verify) if edge_infos is None: if contained: return [self] # depends on [control=['if'], data=[]] else: return [other] # depends on [control=['if'], data=[]] else: return [_make_intersection(edge_info, all_edge_nodes) for edge_info in edge_infos]
def model_fn(features, labels, mode, params, config):
  """Builds the model function for use in an Estimator.

  Arguments:
    features: The input features for the Estimator.
    labels: The labels, unused here.
    mode: Signifies whether it is train or test or predict.
    params: Some hyperparameters as a dictionary.
    config: The RunConfig, unused here.

  Returns:
    EstimatorSpec: A tf.estimator.EstimatorSpec instance.
  """
  del labels, config
  # Set up the model's learnable parameters.
  logit_concentration = tf.compat.v1.get_variable(
      "logit_concentration",
      shape=[1, params["num_topics"]],
      initializer=tf.compat.v1.initializers.constant(
          _softplus_inverse(params["prior_initial_value"])))
  concentration = _clip_dirichlet_parameters(
      tf.nn.softplus(logit_concentration))
  num_words = features.shape[1]
  topics_words_logits = tf.compat.v1.get_variable(
      "topics_words_logits",
      shape=[params["num_topics"], num_words],
      initializer=tf.compat.v1.glorot_normal_initializer())
  topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
  # Compute expected log-likelihood. First, sample from the variational
  # distribution; second, compute the log-likelihood given the sample.
  lda_variational = make_lda_variational(
      params["activation"],
      params["num_topics"],
      params["layer_sizes"])
  with ed.tape() as variational_tape:
    _ = lda_variational(features)
  with ed.tape() as model_tape:
    with ed.interception(
        make_value_setter(topics=variational_tape["topics_posterior"])):
      posterior_predictive = latent_dirichlet_allocation(concentration,
                                                         topics_words)
  log_likelihood = posterior_predictive.distribution.log_prob(features)
  tf.compat.v1.summary.scalar("log_likelihood",
                              tf.reduce_mean(input_tensor=log_likelihood))
  # Compute the KL-divergence between two Dirichlets analytically.
  # The sampled KL does not work well for "sparse" distributions
  # (see Appendix D of [2]).
  kl = variational_tape["topics_posterior"].distribution.kl_divergence(
      model_tape["topics"].distribution)
  tf.compat.v1.summary.scalar("kl", tf.reduce_mean(input_tensor=kl))
  # Ensure that the KL is non-negative (up to a very small slack).
  # Negative KL can happen due to numerical instability.
  with tf.control_dependencies(
      [tf.compat.v1.assert_greater(kl, -1e-3, message="kl")]):
    kl = tf.identity(kl)
  elbo = log_likelihood - kl
  avg_elbo = tf.reduce_mean(input_tensor=elbo)
  tf.compat.v1.summary.scalar("elbo", avg_elbo)
  loss = -avg_elbo
  # Perform variational inference by minimizing the -ELBO.
  global_step = tf.compat.v1.train.get_or_create_global_step()
  optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
  # This implements the "burn-in" for prior parameters (see Appendix D of [2]).
  # For the first prior_burn_in_steps steps they are fixed, and then trained
  # jointly with the other parameters.
  grads_and_vars = optimizer.compute_gradients(loss)
  grads_and_vars_except_prior = [
      x for x in grads_and_vars if x[1] != logit_concentration]

  def train_op_except_prior():
    return optimizer.apply_gradients(
        grads_and_vars_except_prior,
        global_step=global_step)

  def train_op_all():
    return optimizer.apply_gradients(
        grads_and_vars,
        global_step=global_step)

  train_op = tf.cond(
      pred=global_step < params["prior_burn_in_steps"],
      true_fn=train_op_except_prior,
      false_fn=train_op_all)
  # The perplexity is an exponent of the average negative ELBO per word.
  words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
  log_perplexity = -elbo / words_per_document
  tf.compat.v1.summary.scalar(
      "perplexity", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
  (log_perplexity_tensor,
   log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
  perplexity_tensor = tf.exp(log_perplexity_tensor)
  # Obtain the topics summary. Implemented as a py_func for simplicity.
  topics = tf.compat.v1.py_func(
      functools.partial(get_topics_strings, vocabulary=params["vocabulary"]),
      [topics_words, concentration],
      tf.string,
      stateful=False)
  tf.compat.v1.summary.text("topics", topics)
  return tf.estimator.EstimatorSpec(
      mode=mode,
      loss=loss,
      train_op=train_op,
      eval_metric_ops={
          "elbo": tf.compat.v1.metrics.mean(elbo),
          "log_likelihood": tf.compat.v1.metrics.mean(log_likelihood),
          "kl": tf.compat.v1.metrics.mean(kl),
          "perplexity": (perplexity_tensor, log_perplexity_update),
          "topics": (topics, tf.no_op()),
      },
  )
def function[model_fn, parameter[features, labels, mode, params, config]]:
    constant[Builds the model function for use in an Estimator.

  Arguments:
    features: The input features for the Estimator.
    labels: The labels, unused here.
    mode: Signifies whether it is train or test or predict.
    params: Some hyperparameters as a dictionary.
    config: The RunConfig, unused here.

  Returns:
    EstimatorSpec: A tf.estimator.EstimatorSpec instance.
  ]
    <ast.Delete object at 0x7da1b0237d90>
    variable[logit_concentration] assign[=] call[name[tf].compat.v1.get_variable, parameter[constant[logit_concentration]]]
    variable[concentration] assign[=] call[name[_clip_dirichlet_parameters], parameter[call[name[tf].nn.softplus, parameter[name[logit_concentration]]]]]
    variable[num_words] assign[=] call[name[features].shape][constant[1]]
    variable[topics_words_logits] assign[=] call[name[tf].compat.v1.get_variable, parameter[constant[topics_words_logits]]]
    variable[topics_words] assign[=] call[name[tf].nn.softmax, parameter[name[topics_words_logits]]]
    variable[lda_variational] assign[=] call[name[make_lda_variational], parameter[call[name[params]][constant[activation]], call[name[params]][constant[num_topics]], call[name[params]][constant[layer_sizes]]]]
    with call[name[ed].tape, parameter[]] begin[:]
        variable[_] assign[=] call[name[lda_variational], parameter[name[features]]]
    with call[name[ed].tape, parameter[]] begin[:]
        with call[name[ed].interception, parameter[call[name[make_value_setter], parameter[]]]] begin[:]
            variable[posterior_predictive] assign[=] call[name[latent_dirichlet_allocation], parameter[name[concentration], name[topics_words]]]
    variable[log_likelihood] assign[=] call[name[posterior_predictive].distribution.log_prob, parameter[name[features]]]
    call[name[tf].compat.v1.summary.scalar, parameter[constant[log_likelihood], call[name[tf].reduce_mean, parameter[]]]]
    variable[kl] assign[=] call[call[name[variational_tape]][constant[topics_posterior]].distribution.kl_divergence, parameter[call[name[model_tape]][constant[topics]].distribution]]
    call[name[tf].compat.v1.summary.scalar, parameter[constant[kl], call[name[tf].reduce_mean, parameter[]]]]
    with call[name[tf].control_dependencies, parameter[list[[<ast.Call object at 0x7da1b0234a00>]]]] begin[:]
        variable[kl] assign[=] call[name[tf].identity, parameter[name[kl]]]
    variable[elbo] assign[=] binary_operation[name[log_likelihood] - name[kl]]
    variable[avg_elbo] assign[=] call[name[tf].reduce_mean, parameter[]]
    call[name[tf].compat.v1.summary.scalar, parameter[constant[elbo], name[avg_elbo]]]
    variable[loss] assign[=] <ast.UnaryOp object at 0x7da1b0234250>
    variable[global_step] assign[=] call[name[tf].compat.v1.train.get_or_create_global_step, parameter[]]
    variable[optimizer] assign[=] call[name[tf].compat.v1.train.AdamOptimizer, parameter[call[name[params]][constant[learning_rate]]]]
    variable[grads_and_vars] assign[=] call[name[optimizer].compute_gradients, parameter[name[loss]]]
    variable[grads_and_vars_except_prior] assign[=] <ast.ListComp object at 0x7da1b02ca4d0>
    def function[train_op_except_prior, parameter[]]:
        return[call[name[optimizer].apply_gradients, parameter[name[grads_and_vars_except_prior]]]]
    def function[train_op_all, parameter[]]:
        return[call[name[optimizer].apply_gradients, parameter[name[grads_and_vars]]]]
    variable[train_op] assign[=] call[name[tf].cond, parameter[]]
    variable[words_per_document] assign[=] call[name[tf].reduce_sum, parameter[]]
    variable[log_perplexity] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b02cbca0> / name[words_per_document]]
    call[name[tf].compat.v1.summary.scalar, parameter[constant[perplexity], call[name[tf].exp, parameter[call[name[tf].reduce_mean, parameter[]]]]]]
    <ast.Tuple object at 0x7da1b02c9090> assign[=] call[name[tf].compat.v1.metrics.mean, parameter[name[log_perplexity]]]
    variable[perplexity_tensor] assign[=] call[name[tf].exp, parameter[name[log_perplexity_tensor]]]
    variable[topics] assign[=] call[name[tf].compat.v1.py_func, parameter[call[name[functools].partial, parameter[name[get_topics_strings]]], list[[<ast.Name object at 0x7da1b02ca680>, <ast.Name object at 0x7da1b02ca5c0>]], name[tf].string]]
    call[name[tf].compat.v1.summary.text, parameter[constant[topics], name[topics]]]
    return[call[name[tf].estimator.EstimatorSpec, parameter[]]]
keyword[def] identifier[model_fn] ( identifier[features] , identifier[labels] , identifier[mode] , identifier[params] , identifier[config] ): literal[string] keyword[del] identifier[labels] , identifier[config] identifier[logit_concentration] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[get_variable] ( literal[string] , identifier[shape] =[ literal[int] , identifier[params] [ literal[string] ]], identifier[initializer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[initializers] . identifier[constant] ( identifier[_softplus_inverse] ( identifier[params] [ literal[string] ]))) identifier[concentration] = identifier[_clip_dirichlet_parameters] ( identifier[tf] . identifier[nn] . identifier[softplus] ( identifier[logit_concentration] )) identifier[num_words] = identifier[features] . identifier[shape] [ literal[int] ] identifier[topics_words_logits] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[get_variable] ( literal[string] , identifier[shape] =[ identifier[params] [ literal[string] ], identifier[num_words] ], identifier[initializer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[glorot_normal_initializer] ()) identifier[topics_words] = identifier[tf] . identifier[nn] . identifier[softmax] ( identifier[topics_words_logits] , identifier[axis] =- literal[int] ) identifier[lda_variational] = identifier[make_lda_variational] ( identifier[params] [ literal[string] ], identifier[params] [ literal[string] ], identifier[params] [ literal[string] ]) keyword[with] identifier[ed] . identifier[tape] () keyword[as] identifier[variational_tape] : identifier[_] = identifier[lda_variational] ( identifier[features] ) keyword[with] identifier[ed] . identifier[tape] () keyword[as] identifier[model_tape] : keyword[with] identifier[ed] . identifier[interception] ( identifier[make_value_setter] ( identifier[topics] = identifier[variational_tape] [ literal[string] ])): identifier[posterior_predictive] = identifier[latent_dirichlet_allocation] ( identifier[concentration] , identifier[topics_words] ) identifier[log_likelihood] = identifier[posterior_predictive] . identifier[distribution] . identifier[log_prob] ( identifier[features] ) identifier[tf] . identifier[compat] . identifier[v1] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[tf] . identifier[reduce_mean] ( identifier[input_tensor] = identifier[log_likelihood] )) identifier[kl] = identifier[variational_tape] [ literal[string] ]. identifier[distribution] . identifier[kl_divergence] ( identifier[model_tape] [ literal[string] ]. identifier[distribution] ) identifier[tf] . identifier[compat] . identifier[v1] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[tf] . identifier[reduce_mean] ( identifier[input_tensor] = identifier[kl] )) keyword[with] identifier[tf] . identifier[control_dependencies] ( [ identifier[tf] . identifier[compat] . identifier[v1] . identifier[assert_greater] ( identifier[kl] ,- literal[int] , identifier[message] = literal[string] )]): identifier[kl] = identifier[tf] . identifier[identity] ( identifier[kl] ) identifier[elbo] = identifier[log_likelihood] - identifier[kl] identifier[avg_elbo] = identifier[tf] . identifier[reduce_mean] ( identifier[input_tensor] = identifier[elbo] ) identifier[tf] . identifier[compat] . identifier[v1] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[avg_elbo] ) identifier[loss] =- identifier[avg_elbo] identifier[global_step] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[train] . identifier[get_or_create_global_step] () identifier[optimizer] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[train] . identifier[AdamOptimizer] ( identifier[params] [ literal[string] ]) identifier[grads_and_vars] = identifier[optimizer] . identifier[compute_gradients] ( identifier[loss] ) identifier[grads_and_vars_except_prior] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[grads_and_vars] keyword[if] identifier[x] [ literal[int] ]!= identifier[logit_concentration] ] keyword[def] identifier[train_op_except_prior] (): keyword[return] identifier[optimizer] . identifier[apply_gradients] ( identifier[grads_and_vars_except_prior] , identifier[global_step] = identifier[global_step] ) keyword[def] identifier[train_op_all] (): keyword[return] identifier[optimizer] . identifier[apply_gradients] ( identifier[grads_and_vars] , identifier[global_step] = identifier[global_step] ) identifier[train_op] = identifier[tf] . identifier[cond] ( identifier[pred] = identifier[global_step] < identifier[params] [ literal[string] ], identifier[true_fn] = identifier[train_op_except_prior] , identifier[false_fn] = identifier[train_op_all] ) identifier[words_per_document] = identifier[tf] . identifier[reduce_sum] ( identifier[input_tensor] = identifier[features] , identifier[axis] = literal[int] ) identifier[log_perplexity] =- identifier[elbo] / identifier[words_per_document] identifier[tf] . identifier[compat] . identifier[v1] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[tf] . identifier[exp] ( identifier[tf] . identifier[reduce_mean] ( identifier[input_tensor] = identifier[log_perplexity] ))) ( identifier[log_perplexity_tensor] , identifier[log_perplexity_update] )= identifier[tf] . identifier[compat] . identifier[v1] . identifier[metrics] . identifier[mean] ( identifier[log_perplexity] ) identifier[perplexity_tensor] = identifier[tf] . identifier[exp] ( identifier[log_perplexity_tensor] ) identifier[topics] = identifier[tf] . identifier[compat] . identifier[v1] . identifier[py_func] ( identifier[functools] . identifier[partial] ( identifier[get_topics_strings] , identifier[vocabulary] = identifier[params] [ literal[string] ]), [ identifier[topics_words] , identifier[concentration] ], identifier[tf] . identifier[string] , identifier[stateful] = keyword[False] ) identifier[tf] . identifier[compat] . identifier[v1] . identifier[summary] . identifier[text] ( literal[string] , identifier[topics] ) keyword[return] identifier[tf] . identifier[estimator] . identifier[EstimatorSpec] ( identifier[mode] = identifier[mode] , identifier[loss] = identifier[loss] , identifier[train_op] = identifier[train_op] , identifier[eval_metric_ops] ={ literal[string] : identifier[tf] . identifier[compat] . identifier[v1] . identifier[metrics] . identifier[mean] ( identifier[elbo] ), literal[string] : identifier[tf] . identifier[compat] . identifier[v1] . identifier[metrics] . identifier[mean] ( identifier[log_likelihood] ), literal[string] : identifier[tf] . identifier[compat] . identifier[v1] . identifier[metrics] . identifier[mean] ( identifier[kl] ), literal[string] :( identifier[perplexity_tensor] , identifier[log_perplexity_update] ), literal[string] :( identifier[topics] , identifier[tf] . identifier[no_op] ()), }, )
def model_fn(features, labels, mode, params, config):
    """Builds the model function for use in an Estimator.

  Arguments:
    features: The input features for the Estimator.
    labels: The labels, unused here.
    mode: Signifies whether it is train or test or predict.
    params: Some hyperparameters as a dictionary.
    config: The RunConfig, unused here.

  Returns:
    EstimatorSpec: A tf.estimator.EstimatorSpec instance.
  """
    del labels, config
    # Set up the model's learnable parameters.
    logit_concentration = tf.compat.v1.get_variable('logit_concentration', shape=[1, params['num_topics']], initializer=tf.compat.v1.initializers.constant(_softplus_inverse(params['prior_initial_value'])))
    concentration = _clip_dirichlet_parameters(tf.nn.softplus(logit_concentration))
    num_words = features.shape[1]
    topics_words_logits = tf.compat.v1.get_variable('topics_words_logits', shape=[params['num_topics'], num_words], initializer=tf.compat.v1.glorot_normal_initializer())
    topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
    # Compute expected log-likelihood. First, sample from the variational
    # distribution; second, compute the log-likelihood given the sample.
    lda_variational = make_lda_variational(params['activation'], params['num_topics'], params['layer_sizes'])
    with ed.tape() as variational_tape:
        _ = lda_variational(features) # depends on [control=['with'], data=[]]
    with ed.tape() as model_tape:
        with ed.interception(make_value_setter(topics=variational_tape['topics_posterior'])):
            posterior_predictive = latent_dirichlet_allocation(concentration, topics_words) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
    log_likelihood = posterior_predictive.distribution.log_prob(features)
    tf.compat.v1.summary.scalar('log_likelihood', tf.reduce_mean(input_tensor=log_likelihood))
    # Compute the KL-divergence between two Dirichlets analytically.
    # The sampled KL does not work well for "sparse" distributions
    # (see Appendix D of [2]).
    kl = variational_tape['topics_posterior'].distribution.kl_divergence(model_tape['topics'].distribution)
    tf.compat.v1.summary.scalar('kl', tf.reduce_mean(input_tensor=kl))
    # Ensure that the KL is non-negative (up to a very small slack).
    # Negative KL can happen due to numerical instability.
    with tf.control_dependencies([tf.compat.v1.assert_greater(kl, -0.001, message='kl')]):
        kl = tf.identity(kl) # depends on [control=['with'], data=[]]
    elbo = log_likelihood - kl
    avg_elbo = tf.reduce_mean(input_tensor=elbo)
    tf.compat.v1.summary.scalar('elbo', avg_elbo)
    loss = -avg_elbo
    # Perform variational inference by minimizing the -ELBO.
    global_step = tf.compat.v1.train.get_or_create_global_step()
    optimizer = tf.compat.v1.train.AdamOptimizer(params['learning_rate'])
    # This implements the "burn-in" for prior parameters (see Appendix D of [2]).
    # For the first prior_burn_in_steps steps they are fixed, and then trained
    # jointly with the other parameters.
    grads_and_vars = optimizer.compute_gradients(loss)
    grads_and_vars_except_prior = [x for x in grads_and_vars if x[1] != logit_concentration]

    def train_op_except_prior():
        return optimizer.apply_gradients(grads_and_vars_except_prior, global_step=global_step)

    def train_op_all():
        return optimizer.apply_gradients(grads_and_vars, global_step=global_step)
    train_op = tf.cond(pred=global_step < params['prior_burn_in_steps'], true_fn=train_op_except_prior, false_fn=train_op_all)
    # The perplexity is an exponent of the average negative ELBO per word.
    words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
    log_perplexity = -elbo / words_per_document
    tf.compat.v1.summary.scalar('perplexity', tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
    (log_perplexity_tensor, log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
    perplexity_tensor = tf.exp(log_perplexity_tensor)
    # Obtain the topics summary. Implemented as a py_func for simplicity.
    topics = tf.compat.v1.py_func(functools.partial(get_topics_strings, vocabulary=params['vocabulary']), [topics_words, concentration], tf.string, stateful=False)
    tf.compat.v1.summary.text('topics', topics)
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, eval_metric_ops={'elbo': tf.compat.v1.metrics.mean(elbo), 'log_likelihood': tf.compat.v1.metrics.mean(log_likelihood), 'kl': tf.compat.v1.metrics.mean(kl), 'perplexity': (perplexity_tensor, log_perplexity_update), 'topics': (topics, tf.no_op())})
def set_extent_size(self, length, units): """ Sets the volume group extent size in the given units:: from lvm2py import * lvm = LVM() vg = lvm.get_vg("myvg", "w") vg.set_extent_size(2, "MiB") *Args:* * length (int): The desired length size. * units (str): The desired units ("MiB", "GiB", etc...). *Raises:* * HandleError, CommitError, KeyError .. note:: The VolumeGroup instance must be in write mode, otherwise CommitError is raised. """ size = length * size_units[units] self.open() ext = lvm_vg_set_extent_size(self.handle, c_ulong(size)) self._commit() self.close() if ext != 0: raise CommitError("Failed to set extent size.")
def function[set_extent_size, parameter[self, length, units]]: constant[ Sets the volume group extent size in the given units:: from lvm2py import * lvm = LVM() vg = lvm.get_vg("myvg", "w") vg.set_extent_size(2, "MiB") *Args:* * length (int): The desired length size. * units (str): The desired units ("MiB", "GiB", etc...). *Raises:* * HandleError, CommitError, KeyError .. note:: The VolumeGroup instance must be in write mode, otherwise CommitError is raised. ] variable[size] assign[=] binary_operation[name[length] * call[name[size_units]][name[units]]] call[name[self].open, parameter[]] variable[ext] assign[=] call[name[lvm_vg_set_extent_size], parameter[name[self].handle, call[name[c_ulong], parameter[name[size]]]]] call[name[self]._commit, parameter[]] call[name[self].close, parameter[]] if compare[name[ext] not_equal[!=] constant[0]] begin[:] <ast.Raise object at 0x7da1b0f50400>
keyword[def] identifier[set_extent_size] ( identifier[self] , identifier[length] , identifier[units] ): literal[string] identifier[size] = identifier[length] * identifier[size_units] [ identifier[units] ] identifier[self] . identifier[open] () identifier[ext] = identifier[lvm_vg_set_extent_size] ( identifier[self] . identifier[handle] , identifier[c_ulong] ( identifier[size] )) identifier[self] . identifier[_commit] () identifier[self] . identifier[close] () keyword[if] identifier[ext] != literal[int] : keyword[raise] identifier[CommitError] ( literal[string] )
def set_extent_size(self, length, units): """ Sets the volume group extent size in the given units:: from lvm2py import * lvm = LVM() vg = lvm.get_vg("myvg", "w") vg.set_extent_size(2, "MiB") *Args:* * length (int): The desired length size. * units (str): The desired units ("MiB", "GiB", etc...). *Raises:* * HandleError, CommitError, KeyError .. note:: The VolumeGroup instance must be in write mode, otherwise CommitError is raised. """ size = length * size_units[units] self.open() ext = lvm_vg_set_extent_size(self.handle, c_ulong(size)) self._commit() self.close() if ext != 0: raise CommitError('Failed to set extent size.') # depends on [control=['if'], data=[]]
def allocate(self): """Initializes libvirt resources.""" disk_path = self.provider_image self._hypervisor = libvirt.open( self.configuration.get('hypervisor', 'vbox:///session')) self._domain = domain_create(self._hypervisor, self.identifier, self.configuration['domain'], disk_path)
def function[allocate, parameter[self]]: constant[Initializes libvirt resources.] variable[disk_path] assign[=] name[self].provider_image name[self]._hypervisor assign[=] call[name[libvirt].open, parameter[call[name[self].configuration.get, parameter[constant[hypervisor], constant[vbox:///session]]]]] name[self]._domain assign[=] call[name[domain_create], parameter[name[self]._hypervisor, name[self].identifier, call[name[self].configuration][constant[domain]], name[disk_path]]]
keyword[def] identifier[allocate] ( identifier[self] ): literal[string] identifier[disk_path] = identifier[self] . identifier[provider_image] identifier[self] . identifier[_hypervisor] = identifier[libvirt] . identifier[open] ( identifier[self] . identifier[configuration] . identifier[get] ( literal[string] , literal[string] )) identifier[self] . identifier[_domain] = identifier[domain_create] ( identifier[self] . identifier[_hypervisor] , identifier[self] . identifier[identifier] , identifier[self] . identifier[configuration] [ literal[string] ], identifier[disk_path] )
def allocate(self): """Initializes libvirt resources.""" disk_path = self.provider_image self._hypervisor = libvirt.open(self.configuration.get('hypervisor', 'vbox:///session')) self._domain = domain_create(self._hypervisor, self.identifier, self.configuration['domain'], disk_path)
def as_uninitialized(fn): """ Decorator: call fn with the parameterized_instance's initialization flag set to False, then revert the flag. (Used to decorate Parameterized methods that must alter a constant Parameter.) """ @wraps(fn) def override_initialization(self_,*args,**kw): parameterized_instance = self_.self original_initialized=parameterized_instance.initialized parameterized_instance.initialized=False fn(parameterized_instance,*args,**kw) parameterized_instance.initialized=original_initialized return override_initialization
def function[as_uninitialized, parameter[fn]]: constant[ Decorator: call fn with the parameterized_instance's initialization flag set to False, then revert the flag. (Used to decorate Parameterized methods that must alter a constant Parameter.) ] def function[override_initialization, parameter[self_]]: variable[parameterized_instance] assign[=] name[self_].self variable[original_initialized] assign[=] name[parameterized_instance].initialized name[parameterized_instance].initialized assign[=] constant[False] call[name[fn], parameter[name[parameterized_instance], <ast.Starred object at 0x7da18f810be0>]] name[parameterized_instance].initialized assign[=] name[original_initialized] return[name[override_initialization]]
keyword[def] identifier[as_uninitialized] ( identifier[fn] ): literal[string] @ identifier[wraps] ( identifier[fn] ) keyword[def] identifier[override_initialization] ( identifier[self_] ,* identifier[args] ,** identifier[kw] ): identifier[parameterized_instance] = identifier[self_] . identifier[self] identifier[original_initialized] = identifier[parameterized_instance] . identifier[initialized] identifier[parameterized_instance] . identifier[initialized] = keyword[False] identifier[fn] ( identifier[parameterized_instance] ,* identifier[args] ,** identifier[kw] ) identifier[parameterized_instance] . identifier[initialized] = identifier[original_initialized] keyword[return] identifier[override_initialization]
def as_uninitialized(fn): """ Decorator: call fn with the parameterized_instance's initialization flag set to False, then revert the flag. (Used to decorate Parameterized methods that must alter a constant Parameter.) """ @wraps(fn) def override_initialization(self_, *args, **kw): parameterized_instance = self_.self original_initialized = parameterized_instance.initialized parameterized_instance.initialized = False fn(parameterized_instance, *args, **kw) parameterized_instance.initialized = original_initialized return override_initialization
def cleanall(self,str,cleanslash=0): """Deals with things like: 1./ accents with a slashes and converts them to entities. Example: \', \`,\^ 2./ Some 'missed' incomplete entities or &#x00b4; ( floating apostroph ) and the like. Example: Milos&caron;evic --> Milo&scaron;evic Marti&#00b4;nez --> Mart&iacute;nez 3./ Get rid of remaining numeric entities. Converts them from an aproximation table or set to unknown. 4./ If option 'cleanslash' is set takes 'dangerous' radical action with slashes. Gets rid of all of them. Also converts 'l/a' to '&lstrok;a'. Maybe cases in which this is substituting too much? """ retstr = self.re_accent.sub(self.__sub_accent,str) retstr = self.re_missent.sub(self.__sub_missent,retstr) # retstr = self.re_missent_space.sub('',retstr) retstr = self.re_morenum.sub(self.__sub_morenum,retstr) # 11/5/02 AA - add translation of &rsquo; and &rsquor; into # single quote character retstr = re.sub(r'&rsquor?;',"'",retstr) if cleanslash: retstr = re.sub(r'\\','',retstr) retstr = re.sub(r'([Ll])/','&\g<1>strok;',retstr) return retstr
def function[cleanall, parameter[self, str, cleanslash]]: constant[Deals with things like: 1./ accents with a slashes and converts them to entities. Example: ', \`,\^ 2./ Some 'missed' incomplete entities or &#x00b4; ( floating apostroph ) and the like. Example: Milos&caron;evic --> Milo&scaron;evic Marti&#00b4;nez --> Mart&iacute;nez 3./ Get rid of remaining numeric entities. Converts them from an aproximation table or set to unknown. 4./ If option 'cleanslash' is set takes 'dangerous' radical action with slashes. Gets rid of all of them. Also converts 'l/a' to '&lstrok;a'. Maybe cases in which this is substituting too much? ] variable[retstr] assign[=] call[name[self].re_accent.sub, parameter[name[self].__sub_accent, name[str]]] variable[retstr] assign[=] call[name[self].re_missent.sub, parameter[name[self].__sub_missent, name[retstr]]] variable[retstr] assign[=] call[name[self].re_morenum.sub, parameter[name[self].__sub_morenum, name[retstr]]] variable[retstr] assign[=] call[name[re].sub, parameter[constant[&rsquor?;], constant['], name[retstr]]] if name[cleanslash] begin[:] variable[retstr] assign[=] call[name[re].sub, parameter[constant[\\], constant[], name[retstr]]] variable[retstr] assign[=] call[name[re].sub, parameter[constant[([Ll])/], constant[&\g<1>strok;], name[retstr]]] return[name[retstr]]
keyword[def] identifier[cleanall] ( identifier[self] , identifier[str] , identifier[cleanslash] = literal[int] ): literal[string] identifier[retstr] = identifier[self] . identifier[re_accent] . identifier[sub] ( identifier[self] . identifier[__sub_accent] , identifier[str] ) identifier[retstr] = identifier[self] . identifier[re_missent] . identifier[sub] ( identifier[self] . identifier[__sub_missent] , identifier[retstr] ) identifier[retstr] = identifier[self] . identifier[re_morenum] . identifier[sub] ( identifier[self] . identifier[__sub_morenum] , identifier[retstr] ) identifier[retstr] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[retstr] ) keyword[if] identifier[cleanslash] : identifier[retstr] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[retstr] ) identifier[retstr] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[retstr] ) keyword[return] identifier[retstr]
def cleanall(self, str, cleanslash=0): """Deals with things like: 1./ accents with a slashes and converts them to entities. Example: ', \\`,\\^ 2./ Some 'missed' incomplete entities or &#x00b4; ( floating apostroph ) and the like. Example: Milos&caron;evic --> Milo&scaron;evic Marti&#00b4;nez --> Mart&iacute;nez 3./ Get rid of remaining numeric entities. Converts them from an aproximation table or set to unknown. 4./ If option 'cleanslash' is set takes 'dangerous' radical action with slashes. Gets rid of all of them. Also converts 'l/a' to '&lstrok;a'. Maybe cases in which this is substituting too much? """ retstr = self.re_accent.sub(self.__sub_accent, str) retstr = self.re_missent.sub(self.__sub_missent, retstr) # retstr = self.re_missent_space.sub('',retstr) retstr = self.re_morenum.sub(self.__sub_morenum, retstr) # 11/5/02 AA - add translation of &rsquo; and &rsquor; into # single quote character retstr = re.sub('&rsquor?;', "'", retstr) if cleanslash: retstr = re.sub('\\\\', '', retstr) retstr = re.sub('([Ll])/', '&\\g<1>strok;', retstr) # depends on [control=['if'], data=[]] return retstr
def visit_default(self, node): """check the node line number and check it if not yet done""" if not node.is_statement: return if not node.root().pure_python: return # XXX block visit of child nodes prev_sibl = node.previous_sibling() if prev_sibl is not None: prev_line = prev_sibl.fromlineno else: # The line on which a finally: occurs in a try/finally # is not directly represented in the AST. We infer it # by taking the last line of the body and adding 1, which # should be the line of finally: if ( isinstance(node.parent, nodes.TryFinally) and node in node.parent.finalbody ): prev_line = node.parent.body[0].tolineno + 1 else: prev_line = node.parent.statement().fromlineno line = node.fromlineno assert line, node if prev_line == line and self._visited_lines.get(line) != 2: self._check_multi_statement_line(node, line) return if line in self._visited_lines: return try: tolineno = node.blockstart_tolineno except AttributeError: tolineno = node.tolineno assert tolineno, node lines = [] for line in range(line, tolineno + 1): self._visited_lines[line] = 1 try: lines.append(self._lines[line].rstrip()) except KeyError: lines.append("")
def function[visit_default, parameter[self, node]]: constant[check the node line number and check it if not yet done] if <ast.UnaryOp object at 0x7da1b059db70> begin[:] return[None] if <ast.UnaryOp object at 0x7da1b059e830> begin[:] return[None] variable[prev_sibl] assign[=] call[name[node].previous_sibling, parameter[]] if compare[name[prev_sibl] is_not constant[None]] begin[:] variable[prev_line] assign[=] name[prev_sibl].fromlineno variable[line] assign[=] name[node].fromlineno assert[name[line]] if <ast.BoolOp object at 0x7da1b020d630> begin[:] call[name[self]._check_multi_statement_line, parameter[name[node], name[line]]] return[None] if compare[name[line] in name[self]._visited_lines] begin[:] return[None] <ast.Try object at 0x7da1b020e230> assert[name[tolineno]] variable[lines] assign[=] list[[]] for taget[name[line]] in starred[call[name[range], parameter[name[line], binary_operation[name[tolineno] + constant[1]]]]] begin[:] call[name[self]._visited_lines][name[line]] assign[=] constant[1] <ast.Try object at 0x7da1b020d210>
keyword[def] identifier[visit_default] ( identifier[self] , identifier[node] ): literal[string] keyword[if] keyword[not] identifier[node] . identifier[is_statement] : keyword[return] keyword[if] keyword[not] identifier[node] . identifier[root] (). identifier[pure_python] : keyword[return] identifier[prev_sibl] = identifier[node] . identifier[previous_sibling] () keyword[if] identifier[prev_sibl] keyword[is] keyword[not] keyword[None] : identifier[prev_line] = identifier[prev_sibl] . identifier[fromlineno] keyword[else] : keyword[if] ( identifier[isinstance] ( identifier[node] . identifier[parent] , identifier[nodes] . identifier[TryFinally] ) keyword[and] identifier[node] keyword[in] identifier[node] . identifier[parent] . identifier[finalbody] ): identifier[prev_line] = identifier[node] . identifier[parent] . identifier[body] [ literal[int] ]. identifier[tolineno] + literal[int] keyword[else] : identifier[prev_line] = identifier[node] . identifier[parent] . identifier[statement] (). identifier[fromlineno] identifier[line] = identifier[node] . identifier[fromlineno] keyword[assert] identifier[line] , identifier[node] keyword[if] identifier[prev_line] == identifier[line] keyword[and] identifier[self] . identifier[_visited_lines] . identifier[get] ( identifier[line] )!= literal[int] : identifier[self] . identifier[_check_multi_statement_line] ( identifier[node] , identifier[line] ) keyword[return] keyword[if] identifier[line] keyword[in] identifier[self] . identifier[_visited_lines] : keyword[return] keyword[try] : identifier[tolineno] = identifier[node] . identifier[blockstart_tolineno] keyword[except] identifier[AttributeError] : identifier[tolineno] = identifier[node] . identifier[tolineno] keyword[assert] identifier[tolineno] , identifier[node] identifier[lines] =[] keyword[for] identifier[line] keyword[in] identifier[range] ( identifier[line] , identifier[tolineno] + literal[int] ): identifier[self] . identifier[_visited_lines] [ identifier[line] ]= literal[int] keyword[try] : identifier[lines] . identifier[append] ( identifier[self] . identifier[_lines] [ identifier[line] ]. identifier[rstrip] ()) keyword[except] identifier[KeyError] : identifier[lines] . identifier[append] ( literal[string] )
def visit_default(self, node): """check the node line number and check it if not yet done""" if not node.is_statement: return # depends on [control=['if'], data=[]] if not node.root().pure_python: return # XXX block visit of child nodes # depends on [control=['if'], data=[]] prev_sibl = node.previous_sibling() if prev_sibl is not None: prev_line = prev_sibl.fromlineno # depends on [control=['if'], data=['prev_sibl']] # The line on which a finally: occurs in a try/finally # is not directly represented in the AST. We infer it # by taking the last line of the body and adding 1, which # should be the line of finally: elif isinstance(node.parent, nodes.TryFinally) and node in node.parent.finalbody: prev_line = node.parent.body[0].tolineno + 1 # depends on [control=['if'], data=[]] else: prev_line = node.parent.statement().fromlineno line = node.fromlineno assert line, node if prev_line == line and self._visited_lines.get(line) != 2: self._check_multi_statement_line(node, line) return # depends on [control=['if'], data=[]] if line in self._visited_lines: return # depends on [control=['if'], data=[]] try: tolineno = node.blockstart_tolineno # depends on [control=['try'], data=[]] except AttributeError: tolineno = node.tolineno # depends on [control=['except'], data=[]] assert tolineno, node lines = [] for line in range(line, tolineno + 1): self._visited_lines[line] = 1 try: lines.append(self._lines[line].rstrip()) # depends on [control=['try'], data=[]] except KeyError: lines.append('') # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['line']]
def run_vep(in_file, data): """Annotate input VCF file with Ensembl variant effect predictor. """ if not vcfutils.vcf_has_variants(in_file): return None out_file = utils.append_stem(in_file, "-vepeffects") assert in_file.endswith(".gz") and out_file.endswith(".gz") if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: vep_dir, ensembl_name = prep_vep_cache(data["genome_build"], tz.get_in(["reference", "fasta", "base"], data)) if vep_dir: cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1) fork_args = ["--fork", str(cores)] if cores > 1 else [] vep = config_utils.get_program("vep", data["config"]) # HGVS requires a bgzip compressed, faidx indexed input file or is unusable slow if dd.get_ref_file_compressed(data): hgvs_compatible = True config_args = ["--fasta", dd.get_ref_file_compressed(data)] else: hgvs_compatible = False config_args = ["--fasta", dd.get_ref_file(data)] if vcfanno.is_human(data): plugin_fns = {"loftee": _get_loftee, "maxentscan": _get_maxentscan,"genesplicer": _get_genesplicer, "spliceregion": _get_spliceregion, "G2P": _get_G2P} plugins = ["loftee", "G2P"] if "vep_splicesite_annotations" in dd.get_tools_on(data): # "genesplicer" too unstable so currently removed plugins += ["maxentscan", "spliceregion"] for plugin in plugins: plugin_args = plugin_fns[plugin](data) config_args += plugin_args config_args += ["--sift", "b", "--polyphen", "b"] if hgvs_compatible: config_args += ["--hgvsg","--hgvs", "--shift_hgvs", "1"] if (dd.get_effects_transcripts(data).startswith("canonical") or tz.get_in(("config", "algorithm", "clinical_reporting"), data)): config_args += ["--most_severe"] else: config_args += ["--flag_pick_allele_gene"] if ensembl_name.endswith("_merged"): config_args += ["--merged"] ensembl_name = ensembl_name.replace("_merged", "") resources = config_utils.get_resources("vep", data["config"]) extra_args = [str(x) for x in resources.get("options", [])] cmd = [vep, "--vcf", "-o", "stdout", "-i", in_file] + fork_args + extra_args + \ ["--species", ensembl_name, "--no_stats", "--cache", "--offline", "--dir", vep_dir, "--symbol", "--numbers", "--biotype", "--total_length", "--canonical", "--gene_phenotype", "--ccds", "--uniprot", "--domains", "--regulatory", "--protein", "--tsl", "--appris", "--af", "--max_af", "--af_1kg", "--af_esp", "--af_gnomad", "--pubmed", "--variant_class", "--allele_number"] + config_args perl_exports = utils.get_perl_exports() # Remove empty fields (';;') which can cause parsing errors downstream cmd = "%s && %s | sed '/^#/! s/;;/;/g' | bgzip -c > %s" % (perl_exports, " ".join(cmd), tx_out_file) do.run(cmd, "Ensembl variant effect predictor", data) if utils.file_exists(out_file): return vcfutils.bgzip_and_index(out_file, data["config"])
def function[run_vep, parameter[in_file, data]]: constant[Annotate input VCF file with Ensembl variant effect predictor. ] if <ast.UnaryOp object at 0x7da18f09dc90> begin[:] return[constant[None]] variable[out_file] assign[=] call[name[utils].append_stem, parameter[name[in_file], constant[-vepeffects]]] assert[<ast.BoolOp object at 0x7da18f09e590>] if <ast.UnaryOp object at 0x7da18f09fb20> begin[:] with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:] <ast.Tuple object at 0x7da18f09e470> assign[=] call[name[prep_vep_cache], parameter[call[name[data]][constant[genome_build]], call[name[tz].get_in, parameter[list[[<ast.Constant object at 0x7da18f09cfd0>, <ast.Constant object at 0x7da18f09fc40>, <ast.Constant object at 0x7da18f09f580>]], name[data]]]]] if name[vep_dir] begin[:] variable[cores] assign[=] call[name[tz].get_in, parameter[tuple[[<ast.Constant object at 0x7da18f09e3b0>, <ast.Constant object at 0x7da18f09ccd0>, <ast.Constant object at 0x7da18f09ffa0>]], name[data], constant[1]]] variable[fork_args] assign[=] <ast.IfExp object at 0x7da18f09e830> variable[vep] assign[=] call[name[config_utils].get_program, parameter[constant[vep], call[name[data]][constant[config]]]] if call[name[dd].get_ref_file_compressed, parameter[name[data]]] begin[:] variable[hgvs_compatible] assign[=] constant[True] variable[config_args] assign[=] list[[<ast.Constant object at 0x7da18f09fb50>, <ast.Call object at 0x7da18f09ce50>]] if call[name[vcfanno].is_human, parameter[name[data]]] begin[:] variable[plugin_fns] assign[=] dictionary[[<ast.Constant object at 0x7da18f09efb0>, <ast.Constant object at 0x7da18f09fca0>, <ast.Constant object at 0x7da18f09eb60>, <ast.Constant object at 0x7da18f09f490>, <ast.Constant object at 0x7da18f09cca0>], [<ast.Name object at 0x7da18f09c310>, <ast.Name object at 0x7da18f09dcc0>, <ast.Name object at 0x7da18f09fbe0>, <ast.Name object at 0x7da18f09ec50>, <ast.Name object at 0x7da18f09f700>]] variable[plugins] assign[=] list[[<ast.Constant object at 0x7da18f09f1c0>, <ast.Constant object at 0x7da18f09db70>]] if compare[constant[vep_splicesite_annotations] in call[name[dd].get_tools_on, parameter[name[data]]]] begin[:] <ast.AugAssign object at 0x7da18f09fdf0> for taget[name[plugin]] in starred[name[plugins]] begin[:] variable[plugin_args] assign[=] call[call[name[plugin_fns]][name[plugin]], parameter[name[data]]] <ast.AugAssign object at 0x7da18f09d5d0> <ast.AugAssign object at 0x7da18f09d480> if name[hgvs_compatible] begin[:] <ast.AugAssign object at 0x7da18f09cb50> if <ast.BoolOp object at 0x7da18f09fe80> begin[:] <ast.AugAssign object at 0x7da1b26aff70> if call[name[ensembl_name].endswith, parameter[constant[_merged]]] begin[:] <ast.AugAssign object at 0x7da1b26aed10> variable[ensembl_name] assign[=] call[name[ensembl_name].replace, parameter[constant[_merged], constant[]]] variable[resources] assign[=] call[name[config_utils].get_resources, parameter[constant[vep], call[name[data]][constant[config]]]] variable[extra_args] assign[=] <ast.ListComp object at 0x7da1b26aea10> variable[cmd] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[list[[<ast.Name object at 0x7da1b26af100>, <ast.Constant object at 0x7da1b26af9a0>, <ast.Constant object at 0x7da1b26ae9b0>, <ast.Constant object at 0x7da1b26afca0>, <ast.Constant object at 0x7da1b26ada80>, <ast.Name object at 0x7da1b26ad5d0>]] + name[fork_args]] + name[extra_args]] + list[[<ast.Constant object at 0x7da1b26aecb0>, <ast.Name object at 0x7da1b26ac9d0>, <ast.Constant object at 
0x7da1b26aea40>, <ast.Constant object at 0x7da1b26ad1b0>, <ast.Constant object at 0x7da1b26af1f0>, <ast.Constant object at 0x7da1b26ae740>, <ast.Name object at 0x7da1b26af760>, <ast.Constant object at 0x7da1b26acf10>, <ast.Constant object at 0x7da1b26ad5a0>, <ast.Constant object at 0x7da1b26ae440>, <ast.Constant object at 0x7da1b26ad630>, <ast.Constant object at 0x7da1b26ae830>, <ast.Constant object at 0x7da1b26aceb0>, <ast.Constant object at 0x7da1b26ac100>, <ast.Constant object at 0x7da1b26acfa0>, <ast.Constant object at 0x7da1b26ae650>, <ast.Constant object at 0x7da1b26aea70>, <ast.Constant object at 0x7da1b26ad3c0>, <ast.Constant object at 0x7da1b26af370>, <ast.Constant object at 0x7da1b26aeb00>, <ast.Constant object at 0x7da1b26aca60>, <ast.Constant object at 0x7da1b26ad4e0>, <ast.Constant object at 0x7da1b26ac580>, <ast.Constant object at 0x7da1b26ae7d0>, <ast.Constant object at 0x7da1b26acd00>, <ast.Constant object at 0x7da1b26ae470>, <ast.Constant object at 0x7da1b26afbe0>, <ast.Constant object at 0x7da1b26ae500>]]] + name[config_args]] variable[perl_exports] assign[=] call[name[utils].get_perl_exports, parameter[]] variable[cmd] assign[=] binary_operation[constant[%s && %s | sed '/^#/! s/;;/;/g' | bgzip -c > %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ae410>, <ast.Call object at 0x7da1b26ad540>, <ast.Name object at 0x7da1b26adff0>]]] call[name[do].run, parameter[name[cmd], constant[Ensembl variant effect predictor], name[data]]] if call[name[utils].file_exists, parameter[name[out_file]]] begin[:] return[call[name[vcfutils].bgzip_and_index, parameter[name[out_file], call[name[data]][constant[config]]]]]
keyword[def] identifier[run_vep] ( identifier[in_file] , identifier[data] ): literal[string] keyword[if] keyword[not] identifier[vcfutils] . identifier[vcf_has_variants] ( identifier[in_file] ): keyword[return] keyword[None] identifier[out_file] = identifier[utils] . identifier[append_stem] ( identifier[in_file] , literal[string] ) keyword[assert] identifier[in_file] . identifier[endswith] ( literal[string] ) keyword[and] identifier[out_file] . identifier[endswith] ( literal[string] ) keyword[if] keyword[not] identifier[utils] . identifier[file_exists] ( identifier[out_file] ): keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[tx_out_file] : identifier[vep_dir] , identifier[ensembl_name] = identifier[prep_vep_cache] ( identifier[data] [ literal[string] ], identifier[tz] . identifier[get_in] ([ literal[string] , literal[string] , literal[string] ], identifier[data] )) keyword[if] identifier[vep_dir] : identifier[cores] = identifier[tz] . identifier[get_in] (( literal[string] , literal[string] , literal[string] ), identifier[data] , literal[int] ) identifier[fork_args] =[ literal[string] , identifier[str] ( identifier[cores] )] keyword[if] identifier[cores] > literal[int] keyword[else] [] identifier[vep] = identifier[config_utils] . identifier[get_program] ( literal[string] , identifier[data] [ literal[string] ]) keyword[if] identifier[dd] . identifier[get_ref_file_compressed] ( identifier[data] ): identifier[hgvs_compatible] = keyword[True] identifier[config_args] =[ literal[string] , identifier[dd] . identifier[get_ref_file_compressed] ( identifier[data] )] keyword[else] : identifier[hgvs_compatible] = keyword[False] identifier[config_args] =[ literal[string] , identifier[dd] . identifier[get_ref_file] ( identifier[data] )] keyword[if] identifier[vcfanno] . identifier[is_human] ( identifier[data] ): identifier[plugin_fns] ={ literal[string] : identifier[_get_loftee] , literal[string] : identifier[_get_maxentscan] , literal[string] : identifier[_get_genesplicer] , literal[string] : identifier[_get_spliceregion] , literal[string] : identifier[_get_G2P] } identifier[plugins] =[ literal[string] , literal[string] ] keyword[if] literal[string] keyword[in] identifier[dd] . identifier[get_tools_on] ( identifier[data] ): identifier[plugins] +=[ literal[string] , literal[string] ] keyword[for] identifier[plugin] keyword[in] identifier[plugins] : identifier[plugin_args] = identifier[plugin_fns] [ identifier[plugin] ]( identifier[data] ) identifier[config_args] += identifier[plugin_args] identifier[config_args] +=[ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] identifier[hgvs_compatible] : identifier[config_args] +=[ literal[string] , literal[string] , literal[string] , literal[string] ] keyword[if] ( identifier[dd] . identifier[get_effects_transcripts] ( identifier[data] ). identifier[startswith] ( literal[string] ) keyword[or] identifier[tz] . identifier[get_in] (( literal[string] , literal[string] , literal[string] ), identifier[data] )): identifier[config_args] +=[ literal[string] ] keyword[else] : identifier[config_args] +=[ literal[string] ] keyword[if] identifier[ensembl_name] . identifier[endswith] ( literal[string] ): identifier[config_args] +=[ literal[string] ] identifier[ensembl_name] = identifier[ensembl_name] . identifier[replace] ( literal[string] , literal[string] ) identifier[resources] = identifier[config_utils] . 
identifier[get_resources] ( literal[string] , identifier[data] [ literal[string] ]) identifier[extra_args] =[ identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[resources] . identifier[get] ( literal[string] ,[])] identifier[cmd] =[ identifier[vep] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[in_file] ]+ identifier[fork_args] + identifier[extra_args] +[ literal[string] , identifier[ensembl_name] , literal[string] , literal[string] , literal[string] , literal[string] , identifier[vep_dir] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]+ identifier[config_args] identifier[perl_exports] = identifier[utils] . identifier[get_perl_exports] () identifier[cmd] = literal[string] %( identifier[perl_exports] , literal[string] . identifier[join] ( identifier[cmd] ), identifier[tx_out_file] ) identifier[do] . identifier[run] ( identifier[cmd] , literal[string] , identifier[data] ) keyword[if] identifier[utils] . identifier[file_exists] ( identifier[out_file] ): keyword[return] identifier[vcfutils] . identifier[bgzip_and_index] ( identifier[out_file] , identifier[data] [ literal[string] ])
def run_vep(in_file, data): """Annotate input VCF file with Ensembl variant effect predictor. """ if not vcfutils.vcf_has_variants(in_file): return None # depends on [control=['if'], data=[]] out_file = utils.append_stem(in_file, '-vepeffects') assert in_file.endswith('.gz') and out_file.endswith('.gz') if not utils.file_exists(out_file): with file_transaction(data, out_file) as tx_out_file: (vep_dir, ensembl_name) = prep_vep_cache(data['genome_build'], tz.get_in(['reference', 'fasta', 'base'], data)) if vep_dir: cores = tz.get_in(('config', 'algorithm', 'num_cores'), data, 1) fork_args = ['--fork', str(cores)] if cores > 1 else [] vep = config_utils.get_program('vep', data['config']) # HGVS requires a bgzip compressed, faidx indexed input file or is unusable slow if dd.get_ref_file_compressed(data): hgvs_compatible = True config_args = ['--fasta', dd.get_ref_file_compressed(data)] # depends on [control=['if'], data=[]] else: hgvs_compatible = False config_args = ['--fasta', dd.get_ref_file(data)] if vcfanno.is_human(data): plugin_fns = {'loftee': _get_loftee, 'maxentscan': _get_maxentscan, 'genesplicer': _get_genesplicer, 'spliceregion': _get_spliceregion, 'G2P': _get_G2P} plugins = ['loftee', 'G2P'] if 'vep_splicesite_annotations' in dd.get_tools_on(data): # "genesplicer" too unstable so currently removed plugins += ['maxentscan', 'spliceregion'] # depends on [control=['if'], data=[]] for plugin in plugins: plugin_args = plugin_fns[plugin](data) config_args += plugin_args # depends on [control=['for'], data=['plugin']] config_args += ['--sift', 'b', '--polyphen', 'b'] if hgvs_compatible: config_args += ['--hgvsg', '--hgvs', '--shift_hgvs', '1'] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if dd.get_effects_transcripts(data).startswith('canonical') or tz.get_in(('config', 'algorithm', 'clinical_reporting'), data): config_args += ['--most_severe'] # depends on [control=['if'], data=[]] else: config_args += ['--flag_pick_allele_gene'] if ensembl_name.endswith('_merged'): config_args += ['--merged'] ensembl_name = ensembl_name.replace('_merged', '') # depends on [control=['if'], data=[]] resources = config_utils.get_resources('vep', data['config']) extra_args = [str(x) for x in resources.get('options', [])] cmd = [vep, '--vcf', '-o', 'stdout', '-i', in_file] + fork_args + extra_args + ['--species', ensembl_name, '--no_stats', '--cache', '--offline', '--dir', vep_dir, '--symbol', '--numbers', '--biotype', '--total_length', '--canonical', '--gene_phenotype', '--ccds', '--uniprot', '--domains', '--regulatory', '--protein', '--tsl', '--appris', '--af', '--max_af', '--af_1kg', '--af_esp', '--af_gnomad', '--pubmed', '--variant_class', '--allele_number'] + config_args perl_exports = utils.get_perl_exports() # Remove empty fields (';;') which can cause parsing errors downstream cmd = "%s && %s | sed '/^#/! s/;;/;/g' | bgzip -c > %s" % (perl_exports, ' '.join(cmd), tx_out_file) do.run(cmd, 'Ensembl variant effect predictor', data) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['tx_out_file']] # depends on [control=['if'], data=[]] if utils.file_exists(out_file): return vcfutils.bgzip_and_index(out_file, data['config']) # depends on [control=['if'], data=[]]
def onCancelButton(self, event):
    """
    Quit grid with warning if unsaved changes present
    """
    if self.grid.changes:
        dlg1 = wx.MessageDialog(self, caption="Message:",
                                message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
                                style=wx.OK|wx.CANCEL)
        result = dlg1.ShowModal()
        if result == wx.ID_OK:
            dlg1.Destroy()
            self.Destroy()
    else:
        self.Destroy()
    if self.main_frame:
        self.main_frame.Show()
        self.main_frame.Raise()
def function[onCancelButton, parameter[self, event]]: constant[ Quit grid with warning if unsaved changes present ] if name[self].grid.changes begin[:] variable[dlg1] assign[=] call[name[wx].MessageDialog, parameter[name[self]]] variable[result] assign[=] call[name[dlg1].ShowModal, parameter[]] if compare[name[result] equal[==] name[wx].ID_OK] begin[:] call[name[dlg1].Destroy, parameter[]] call[name[self].Destroy, parameter[]] if name[self].main_frame begin[:] call[name[self].main_frame.Show, parameter[]] call[name[self].main_frame.Raise, parameter[]]
keyword[def] identifier[onCancelButton] ( identifier[self] , identifier[event] ): literal[string] keyword[if] identifier[self] . identifier[grid] . identifier[changes] : identifier[dlg1] = identifier[wx] . identifier[MessageDialog] ( identifier[self] , identifier[caption] = literal[string] , identifier[message] = literal[string] , identifier[style] = identifier[wx] . identifier[OK] | identifier[wx] . identifier[CANCEL] ) identifier[result] = identifier[dlg1] . identifier[ShowModal] () keyword[if] identifier[result] == identifier[wx] . identifier[ID_OK] : identifier[dlg1] . identifier[Destroy] () identifier[self] . identifier[Destroy] () keyword[else] : identifier[self] . identifier[Destroy] () keyword[if] identifier[self] . identifier[main_frame] : identifier[self] . identifier[main_frame] . identifier[Show] () identifier[self] . identifier[main_frame] . identifier[Raise] ()
def onCancelButton(self, event): """ Quit grid with warning if unsaved changes present """ if self.grid.changes: dlg1 = wx.MessageDialog(self, caption='Message:', message='Are you sure you want to exit this grid?\nYour changes will not be saved.\n ', style=wx.OK | wx.CANCEL) result = dlg1.ShowModal() if result == wx.ID_OK: dlg1.Destroy() self.Destroy() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: self.Destroy() if self.main_frame: self.main_frame.Show() self.main_frame.Raise() # depends on [control=['if'], data=[]]
def _fill_and_one_pad_stride(stride, n, data_format=DATA_FORMAT_NHWC):
    """Expands the provided stride to size n and pads it with 1s."""
    if isinstance(stride, numbers.Integral) or (
            isinstance(stride, collections.Iterable) and len(stride) <= n):
        if data_format.startswith("NC"):
            return (1, 1,) + _fill_shape(stride, n)
        elif data_format.startswith("N") and data_format.endswith("C"):
            return (1,) + _fill_shape(stride, n) + (1,)
        else:
            raise ValueError(
                "Invalid data_format {:s}. Must start with N and have a channel dim "
                "either follow the N dim or come at the end".format(data_format))
    elif isinstance(stride, collections.Iterable) and len(stride) == n + 2:
        return stride
    else:
        raise base.IncompatibleShapeError(
            "stride is {} ({}), must be either a positive integer or an iterable of"
            " positive integers of size {}".format(stride, type(stride), n))
def function[_fill_and_one_pad_stride, parameter[stride, n, data_format]]: constant[Expands the provided stride to size n and pads it with 1s.] if <ast.BoolOp object at 0x7da1b1c62650> begin[:] if call[name[data_format].startswith, parameter[constant[NC]]] begin[:] return[binary_operation[tuple[[<ast.Constant object at 0x7da1b1c60eb0>, <ast.Constant object at 0x7da1b1c62470>]] + call[name[_fill_shape], parameter[name[stride], name[n]]]]]
keyword[def] identifier[_fill_and_one_pad_stride] ( identifier[stride] , identifier[n] , identifier[data_format] = identifier[DATA_FORMAT_NHWC] ): literal[string] keyword[if] identifier[isinstance] ( identifier[stride] , identifier[numbers] . identifier[Integral] ) keyword[or] ( identifier[isinstance] ( identifier[stride] , identifier[collections] . identifier[Iterable] ) keyword[and] identifier[len] ( identifier[stride] )<= identifier[n] ): keyword[if] identifier[data_format] . identifier[startswith] ( literal[string] ): keyword[return] ( literal[int] , literal[int] ,)+ identifier[_fill_shape] ( identifier[stride] , identifier[n] ) keyword[elif] identifier[data_format] . identifier[startswith] ( literal[string] ) keyword[and] identifier[data_format] . identifier[endswith] ( literal[string] ): keyword[return] ( literal[int] ,)+ identifier[_fill_shape] ( identifier[stride] , identifier[n] )+( literal[int] ,) keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[data_format] )) keyword[elif] identifier[isinstance] ( identifier[stride] , identifier[collections] . identifier[Iterable] ) keyword[and] identifier[len] ( identifier[stride] )== identifier[n] + literal[int] : keyword[return] identifier[stride] keyword[else] : keyword[raise] identifier[base] . identifier[IncompatibleShapeError] ( literal[string] literal[string] . identifier[format] ( identifier[stride] , identifier[type] ( identifier[stride] ), identifier[n] ))
def _fill_and_one_pad_stride(stride, n, data_format=DATA_FORMAT_NHWC): """Expands the provided stride to size n and pads it with 1s.""" if isinstance(stride, numbers.Integral) or (isinstance(stride, collections.Iterable) and len(stride) <= n): if data_format.startswith('NC'): return (1, 1) + _fill_shape(stride, n) # depends on [control=['if'], data=[]] elif data_format.startswith('N') and data_format.endswith('C'): return (1,) + _fill_shape(stride, n) + (1,) # depends on [control=['if'], data=[]] else: raise ValueError('Invalid data_format {:s}. Must start with N and have a channel dim either follow the N dim or come at the end'.format(data_format)) # depends on [control=['if'], data=[]] elif isinstance(stride, collections.Iterable) and len(stride) == n + 2: return stride # depends on [control=['if'], data=[]] else: raise base.IncompatibleShapeError('stride is {} ({}), must be either a positive integer or an iterable of positive integers of size {}'.format(stride, type(stride), n))
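To make the NHWC branch above concrete, here is a stripped-down sketch of the same expand-and-pad rule. The helper below is an illustrative simplification, not the module's own _fill_shape, and it only handles the scalar/iterable cases:

# Standalone sketch (assumed simplification, not the library's helper): expand a
# scalar or per-dimension stride to length n, then pad with 1s for the batch and
# channel dimensions, as the NHWC branch above does.
def one_pad_stride_nhwc(stride, n):
    if isinstance(stride, int):
        stride = (stride,) * n          # e.g. 2 -> (2, 2) when n == 2
    return (1,) + tuple(stride) + (1,)  # 1 for the batch dim, 1 for the channel dim

assert one_pad_stride_nhwc(2, 2) == (1, 2, 2, 1)
assert one_pad_stride_nhwc((3, 1), 2) == (1, 3, 1, 1)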
def _create_listening_stream(self, pull_addr):
    """
    Create a stream listening for Requests. The `self._recv_callback`
    method is associated with incoming requests.
    """
    sock = self._zmq_context.socket(zmq.PULL)
    sock.connect(pull_addr)
    stream = ZMQStream(sock, io_loop=self.io_loop)
    return stream
def function[_create_listening_stream, parameter[self, pull_addr]]: constant[ Create a stream listening for Requests. The `self._recv_callback` method is asociated with incoming requests. ] variable[sock] assign[=] call[name[self]._zmq_context.socket, parameter[name[zmq].PULL]] call[name[sock].connect, parameter[name[pull_addr]]] variable[stream] assign[=] call[name[ZMQStream], parameter[name[sock]]] return[name[stream]]
keyword[def] identifier[_create_listening_stream] ( identifier[self] , identifier[pull_addr] ): literal[string] identifier[sock] = identifier[self] . identifier[_zmq_context] . identifier[socket] ( identifier[zmq] . identifier[PULL] ) identifier[sock] . identifier[connect] ( identifier[pull_addr] ) identifier[stream] = identifier[ZMQStream] ( identifier[sock] , identifier[io_loop] = identifier[self] . identifier[io_loop] ) keyword[return] identifier[stream]
def _create_listening_stream(self, pull_addr): """ Create a stream listening for Requests. The `self._recv_callback` method is asociated with incoming requests. """ sock = self._zmq_context.socket(zmq.PULL) sock.connect(pull_addr) stream = ZMQStream(sock, io_loop=self.io_loop) return stream
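For context, a hedged sketch of how such a PULL stream is typically consumed with pyzmq's ZMQStream; the address and handler name are illustrative, and newer pyzmq versions pick up the running IO loop automatically instead of taking an io_loop argument:

import zmq
from tornado.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream

def recv_callback(frames):          # illustrative handler
    print("received request:", frames)

ctx = zmq.Context.instance()
sock = ctx.socket(zmq.PULL)
sock.connect("tcp://127.0.0.1:5555")  # illustrative pull address
stream = ZMQStream(sock)              # attaches to the current IOLoop
stream.on_recv(recv_callback)         # called with each multipart message
IOLoop.current().start()              # blocks; run until interrupted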
def render_to_response(self, context, **response_kwargs):
    """ Generates the appropriate response. """
    filename = os.path.basename(self.object.file.name)

    # Try to guess the content type of the given file
    content_type, _ = mimetypes.guess_type(self.object.file.name)
    if not content_type:
        content_type = 'text/plain'

    response = HttpResponse(self.object.file, content_type=content_type)
    response['Content-Disposition'] = 'attachment; filename={}'.format(filename)

    return response
def function[render_to_response, parameter[self, context]]: constant[ Generates the appropriate response. ] variable[filename] assign[=] call[name[os].path.basename, parameter[name[self].object.file.name]] <ast.Tuple object at 0x7da20c794fd0> assign[=] call[name[mimetypes].guess_type, parameter[name[self].object.file.name]] if <ast.UnaryOp object at 0x7da20c795390> begin[:] variable[content_type] assign[=] constant[text/plain] variable[response] assign[=] call[name[HttpResponse], parameter[name[self].object.file]] call[name[response]][constant[Content-Disposition]] assign[=] call[constant[attachment; filename={}].format, parameter[name[filename]]] return[name[response]]
keyword[def] identifier[render_to_response] ( identifier[self] , identifier[context] ,** identifier[response_kwargs] ): literal[string] identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[self] . identifier[object] . identifier[file] . identifier[name] ) identifier[content_type] , identifier[_] = identifier[mimetypes] . identifier[guess_type] ( identifier[self] . identifier[object] . identifier[file] . identifier[name] ) keyword[if] keyword[not] identifier[content_type] : identifier[content_type] = literal[string] identifier[response] = identifier[HttpResponse] ( identifier[self] . identifier[object] . identifier[file] , identifier[content_type] = identifier[content_type] ) identifier[response] [ literal[string] ]= literal[string] . identifier[format] ( identifier[filename] ) keyword[return] identifier[response]
def render_to_response(self, context, **response_kwargs): """ Generates the appropriate response. """ filename = os.path.basename(self.object.file.name) # Try to guess the content type of the given file (content_type, _) = mimetypes.guess_type(self.object.file.name) if not content_type: content_type = 'text/plain' # depends on [control=['if'], data=[]] response = HttpResponse(self.object.file, content_type=content_type) response['Content-Disposition'] = 'attachment; filename={}'.format(filename) return response
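The two moving parts above are the content-type guess and the Content-Disposition header; a minimal, Django-free illustration of both (file names are made up):

import mimetypes
import os

for name in ("report.pdf", "README"):
    content_type, _ = mimetypes.guess_type(name)
    content_type = content_type or 'text/plain'   # same fallback as above
    header = 'attachment; filename={}'.format(os.path.basename(name))
    print(name, content_type, header)
# report.pdf typically maps to application/pdf; README has no known
# extension, so the text/plain fallback is used.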
def _prune(self, pop_candidates):
    """Choose a subset of the candidate states to continue on to the next generation.

    :param pop_candidates: The set of candidate states.
    """
    return set(
        sorted(pop_candidates, key=self._score, reverse=True)[:self.args.max_pop]
    )
def function[_prune, parameter[self, pop_candidates]]: constant[Choose a subset of the candidate states to continue on to the next generation. :param pop_candidates: The set of candidate states. ] return[call[name[set], parameter[call[call[name[sorted], parameter[name[pop_candidates]]]][<ast.Slice object at 0x7da1b07cfac0>]]]]
keyword[def] identifier[_prune] ( identifier[self] , identifier[pop_candidates] ): literal[string] keyword[return] identifier[set] ( identifier[sorted] ( identifier[pop_candidates] , identifier[key] = identifier[self] . identifier[_score] , identifier[reverse] = keyword[True] ) [: identifier[self] . identifier[args] . identifier[max_pop] ] )
def _prune(self, pop_candidates): """Choose a subset of the candidate states to continue on to the next generation. :param pop_candidates: The set of candidate states. """ return set(sorted(pop_candidates, key=self._score, reverse=True)[:self.args.max_pop])
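Since _score and args.max_pop live on the instance, here is a self-contained sketch of the same selection rule with a made-up score function and population cap:

def prune(pop_candidates, score, max_pop):
    # keep only the max_pop highest-scoring candidates
    return set(sorted(pop_candidates, key=score, reverse=True)[:max_pop])

print(prune({"a", "ab", "abcd"}, score=len, max_pop=2))
# -> {'ab', 'abcd'} (set display order may vary)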
def find_country(session, code):
    """Find a country.

    Find a country by its ISO-3166 `code` (i.e. ES for Spain, US for
    United States of America) using the given `session`.

    When the country does not exist the function will return `None`.

    :param session: database session
    :param code: ISO-3166 code of the country to find

    :return: a country object; `None` when the country does not exist
    """
    country = session.query(Country).\
        filter(Country.code == code).first()

    return country
def function[find_country, parameter[session, code]]: constant[Find a country. Find a country by its ISO-3166 `code` (i.e ES for Spain, US for United States of America) using the given `session. When the country does not exist the function will return `None`. :param session: database session :param code: ISO-3166 code of the country to find :return: a country object; `None` when the country does not exist ] variable[country] assign[=] call[call[call[name[session].query, parameter[name[Country]]].filter, parameter[compare[name[Country].code equal[==] name[code]]]].first, parameter[]] return[name[country]]
keyword[def] identifier[find_country] ( identifier[session] , identifier[code] ): literal[string] identifier[country] = identifier[session] . identifier[query] ( identifier[Country] ). identifier[filter] ( identifier[Country] . identifier[code] == identifier[code] ). identifier[first] () keyword[return] identifier[country]
def find_country(session, code): """Find a country. Find a country by its ISO-3166 `code` (i.e ES for Spain, US for United States of America) using the given `session. When the country does not exist the function will return `None`. :param session: database session :param code: ISO-3166 code of the country to find :return: a country object; `None` when the country does not exist """ country = session.query(Country).filter(Country.code == code).first() return country
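A hedged usage sketch; the engine URL is illustrative, and the Country table is assumed to have been created elsewhere by the project's own schema setup:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite:///countries.db")  # illustrative database URL
session = sessionmaker(bind=engine)()

country = find_country(session, "ES")
if country is None:
    print("no country registered with code ES")
else:
    print(country.code)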
def validate_image(image, number_tiles):
    """Basic sanity checks prior to performing a split."""
    TILE_LIMIT = 99 * 99

    try:
        number_tiles = int(number_tiles)
    except:
        raise ValueError('number_tiles could not be cast to integer.')

    if number_tiles > TILE_LIMIT or number_tiles < 2:
        raise ValueError('Number of tiles must be between 2 and {} (you '
                         'asked for {}).'.format(TILE_LIMIT, number_tiles))
def function[validate_image, parameter[image, number_tiles]]: constant[Basic sanity checks prior to performing a split.] variable[TILE_LIMIT] assign[=] binary_operation[constant[99] * constant[99]] <ast.Try object at 0x7da1b0657c10> if <ast.BoolOp object at 0x7da1b0657970> begin[:] <ast.Raise object at 0x7da1b0762860>
keyword[def] identifier[validate_image] ( identifier[image] , identifier[number_tiles] ): literal[string] identifier[TILE_LIMIT] = literal[int] * literal[int] keyword[try] : identifier[number_tiles] = identifier[int] ( identifier[number_tiles] ) keyword[except] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[number_tiles] > identifier[TILE_LIMIT] keyword[or] identifier[number_tiles] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[TILE_LIMIT] , identifier[number_tiles] ))
def validate_image(image, number_tiles): """Basic sanity checks prior to performing a split.""" TILE_LIMIT = 99 * 99 try: number_tiles = int(number_tiles) # depends on [control=['try'], data=[]] except: raise ValueError('number_tiles could not be cast to integer.') # depends on [control=['except'], data=[]] if number_tiles > TILE_LIMIT or number_tiles < 2: raise ValueError('Number of tiles must be between 2 and {} (you asked for {}).'.format(TILE_LIMIT, number_tiles)) # depends on [control=['if'], data=[]]
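A quick check of the bounds logic; the image argument is not inspected by the validation itself, so None stands in for it here:

validate_image(None, "9")        # fine: "9" casts to 9 tiles
try:
    validate_image(None, 1)      # below the minimum of 2 tiles
except ValueError as err:
    print(err)
try:
    validate_image(None, "lots")  # not castable to int
except ValueError as err:
    print(err)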
def all_library_calls(self):
    """ recursive version of library calls
    """
    if self._all_library_calls is None:
        self._all_library_calls = self._explore_functions(lambda x: x.library_calls)
    return self._all_library_calls
def function[all_library_calls, parameter[self]]: constant[ recursive version of library calls ] if compare[name[self]._all_library_calls is constant[None]] begin[:] name[self]._all_library_calls assign[=] call[name[self]._explore_functions, parameter[<ast.Lambda object at 0x7da1b17d4eb0>]] return[name[self]._all_library_calls]
keyword[def] identifier[all_library_calls] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[_all_library_calls] keyword[is] keyword[None] : identifier[self] . identifier[_all_library_calls] = identifier[self] . identifier[_explore_functions] ( keyword[lambda] identifier[x] : identifier[x] . identifier[library_calls] ) keyword[return] identifier[self] . identifier[_all_library_calls]
def all_library_calls(self): """ recursive version of library calls """ if self._all_library_calls is None: self._all_library_calls = self._explore_functions(lambda x: x.library_calls) # depends on [control=['if'], data=[]] return self._all_library_calls
def constraint(self, n=-1, fid=0):
    """Obtain the set of orthogonal equations that make the solution
    of the rank deficient normal equations possible.

    :param fid: the id of the sub-fitter (numerical)
    """
    c = self._getval("constr", fid)
    if n < 0 or n > self.deficiency(fid):
        return c
    else:
        raise RuntimeError("Not yet implemented")
def function[constraint, parameter[self, n, fid]]: constant[Obtain the set of orthogonal equations that make the solution of the rank deficient normal equations possible. :param fid: the id of the sub-fitter (numerical) ] variable[c] assign[=] call[name[self]._getval, parameter[constant[constr], name[fid]]] if <ast.BoolOp object at 0x7da18dc04d60> begin[:] return[name[c]]
keyword[def] identifier[constraint] ( identifier[self] , identifier[n] =- literal[int] , identifier[fid] = literal[int] ): literal[string] identifier[c] = identifier[self] . identifier[_getval] ( literal[string] , identifier[fid] ) keyword[if] identifier[n] < literal[int] keyword[or] identifier[n] > identifier[self] . identifier[deficiency] ( identifier[fid] ): keyword[return] identifier[c] keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] )
def constraint(self, n=-1, fid=0): """Obtain the set of orthogonal equations that make the solution of the rank deficient normal equations possible. :param fid: the id of the sub-fitter (numerical) """ c = self._getval('constr', fid) if n < 0 or n > self.deficiency(fid): return c # depends on [control=['if'], data=[]] else: raise RuntimeError('Not yet implemented')
def listen(identifier):
    """ Launch a listener and return the compactor context. """
    context = Context()
    process = WebProcess(identifier)
    context.spawn(process)
    log.info("Launching PID %s", process.pid)
    return process, context
def function[listen, parameter[identifier]]: constant[ Launch a listener and return the compactor context. ] variable[context] assign[=] call[name[Context], parameter[]] variable[process] assign[=] call[name[WebProcess], parameter[name[identifier]]] call[name[context].spawn, parameter[name[process]]] call[name[log].info, parameter[constant[Launching PID %s], name[process].pid]] return[tuple[[<ast.Name object at 0x7da1b1605870>, <ast.Name object at 0x7da1b16046a0>]]]
keyword[def] identifier[listen] ( identifier[identifier] ): literal[string] identifier[context] = identifier[Context] () identifier[process] = identifier[WebProcess] ( identifier[identifier] ) identifier[context] . identifier[spawn] ( identifier[process] ) identifier[log] . identifier[info] ( literal[string] , identifier[process] . identifier[pid] ) keyword[return] identifier[process] , identifier[context]
def listen(identifier): """ Launch a listener and return the compactor context. """ context = Context() process = WebProcess(identifier) context.spawn(process) log.info('Launching PID %s', process.pid) return (process, context)
def cos_values(period=360):
    """
    Provides an infinite source of values representing a cosine wave (from -1
    to +1) which repeats every *period* values. For example, to produce a
    "siren" effect with a couple of LEDs that repeats once a second::

        from gpiozero import PWMLED
        from gpiozero.tools import cos_values, scaled, inverted
        from signal import pause

        red = PWMLED(2)
        blue = PWMLED(3)
        red.source_delay = 0.01
        blue.source_delay = red.source_delay
        red.source = scaled(cos_values(100), 0, 1, -1, 1)
        blue.source = inverted(red)
        pause()

    If you require a different range than -1 to +1, see :func:`scaled`.
    """
    angles = (2 * pi * i / period for i in range(period))
    for a in cycle(angles):
        yield cos(a)
def function[cos_values, parameter[period]]: constant[ Provides an infinite source of values representing a cosine wave (from -1 to +1) which repeats every *period* values. For example, to produce a "siren" effect with a couple of LEDs that repeats once a second:: from gpiozero import PWMLED from gpiozero.tools import cos_values, scaled, inverted from signal import pause red = PWMLED(2) blue = PWMLED(3) red.source_delay = 0.01 blue.source_delay = red.source_delay red.source = scaled(cos_values(100), 0, 1, -1, 1) blue.source = inverted(red) pause() If you require a different range than -1 to +1, see :func:`scaled`. ] variable[angles] assign[=] <ast.GeneratorExp object at 0x7da18f09e1a0> for taget[name[a]] in starred[call[name[cycle], parameter[name[angles]]]] begin[:] <ast.Yield object at 0x7da18f720b20>
keyword[def] identifier[cos_values] ( identifier[period] = literal[int] ): literal[string] identifier[angles] =( literal[int] * identifier[pi] * identifier[i] / identifier[period] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[period] )) keyword[for] identifier[a] keyword[in] identifier[cycle] ( identifier[angles] ): keyword[yield] identifier[cos] ( identifier[a] )
def cos_values(period=360): """ Provides an infinite source of values representing a cosine wave (from -1 to +1) which repeats every *period* values. For example, to produce a "siren" effect with a couple of LEDs that repeats once a second:: from gpiozero import PWMLED from gpiozero.tools import cos_values, scaled, inverted from signal import pause red = PWMLED(2) blue = PWMLED(3) red.source_delay = 0.01 blue.source_delay = red.source_delay red.source = scaled(cos_values(100), 0, 1, -1, 1) blue.source = inverted(red) pause() If you require a different range than -1 to +1, see :func:`scaled`. """ angles = (2 * pi * i / period for i in range(period)) for a in cycle(angles): yield cos(a) # depends on [control=['for'], data=['a']]
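The docstring's example needs GPIO hardware; a hardware-free look at the generator itself (using cos_values as defined above, whose pi/cos/cycle imports come from its module) takes one period of four samples:

from itertools import islice

samples = list(islice(cos_values(4), 4))
print(samples)   # roughly [1.0, 0.0, -1.0, 0.0]
assert all(-1.0 <= v <= 1.0 for v in samples)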
def create(output_prefix, grid_flux_filename, wavelength_filenames, clobber=False, grid_flux_filename_format="csv", **kwargs): """ Create a new *sick* model from files describing the parameter names, fluxes, and wavelengths. """ if not clobber: # Check to make sure the output files won't exist already. output_suffixes = (".yaml", ".pkl", "-wavelengths.memmap", "-intensities.memmap") for path in [output_prefix + suffix for suffix in output_suffixes]: if os.path.exists(path): raise IOError("output filename {} already exists".format(path)) # Read the grid_flux filename. # param1 param2 param3 param4 channelname1 channelname2 kwds = kwargs.pop("__grid_flux_filename_kwargs", {}) kwds.update({"format": grid_flux_filename_format}) grid_flux_tbl = Table.read(grid_flux_filename, **kwds) # Distinguish column names between parameters (real numbers) and filenames str_columns = \ np.array([_[1].startswith("|S") for _ in grid_flux_tbl.dtype.descr]) # Check the number of channels provided. if str_columns.sum() != len(wavelength_filenames): raise ValueError("expected {0} wavelength filenames because {1} has {0}" " string columns ({2}) but found {3} wavelength filenames".format( sum(str_columns), grid_flux_filename, ", ".join(np.array(grid_flux_tbl.colnames)[str_columns]), len(wavelength_filenames))) # Create a record array of the grid points. grid_points = \ grid_flux_tbl.as_array()[np.array(grid_flux_tbl.colnames)[~str_columns]] # To-do: make sure they are all floats. # Sort the grid points. grid_indices = grid_points.argsort(order=grid_points.dtype.names) grid_points = grid_points[grid_indices] grid_flux_tbl = grid_flux_tbl[grid_indices] # Check the wavelength filenames. channel_wavelengths = np.array(map(load_simple_data, wavelength_filenames)) # Sort the channels by starting wavelength. c_indices = np.argsort([each.min() for each in channel_wavelengths]) channel_names = np.array(grid_flux_tbl.colnames)[str_columns][c_indices] channel_wavelengths = channel_wavelengths[c_indices] channel_sizes = [len(_) for _ in channel_wavelengths] num_pixels = sum(channel_sizes) # Create the model YAML file. with open(output_prefix + ".yaml", "w") as fp: header = "\n".join([ "# Model created on {0}".format(strftime("%Y-%m-%d %H:%M:%S")), "# Grid parameters: {0}".format(", ".join(grid_points.dtype.names)), "# Channel names: {0}".format(", ".join(channel_names)) ]) fp.write(header + "\n" + yaml.safe_dump({ "model_grid": { "grid_points": output_prefix + ".pkl", "intensities": output_prefix + "-intensities.memmap", "wavelengths": output_prefix + "-wavelengths.memmap" }}, stream=None, allow_unicode=True, default_flow_style=False)) # Create the pickled model file, with meta data. metadata = { "grid_flux_filename": grid_flux_filename, "wavelength_filenames": wavelength_filenames, "channel_names": channel_names, "channel_sizes": channel_sizes, "channel_resolutions": [float("inf")] * len(channel_names), "sick_version": sick_version } logger.debug("Dumping grid points and metadata to file") with open(output_prefix + ".pkl", "wb") as fp: pickle.dump((grid_points, metadata), fp, -1) # Create the memory-mapped dispersion file. logger.debug("Creating memory-mapped dispersion file.") wavelengths_memmap = np.memmap(output_prefix + "-wavelengths.memmap", dtype="float32", mode="w+", shape=(num_pixels, )) wavelengths_memmap[:] = np.hstack(channel_wavelengths) wavelengths_memmap.flush() del wavelengths_memmap # Create the memory-mapped intensities file. 
logger.debug("Creating memory-mapped intensities file.") intensities_memmap = np.memmap(output_prefix + "-intensities.memmap", shape=(grid_points.size, num_pixels), dtype="float32", mode="w+") n = len(grid_flux_tbl) for i, row in enumerate(grid_flux_tbl): logger.debug("Loading point {0}/{1} into the intensities map"\ .format(i + 1, n)) j = 0 for channel_name in channel_names: try: data = load_simple_data(row[channel_name]) except: logger.exception("Could not load data from {0} for channel {1}"\ .format(row[channel_name], channel_name)) raise intensities_memmap[i, j:j + data.size] = data j += data.size intensities_memmap.flush() del intensities_memmap return True
def function[create, parameter[output_prefix, grid_flux_filename, wavelength_filenames, clobber, grid_flux_filename_format]]: constant[ Create a new *sick* model from files describing the parameter names, fluxes, and wavelengths. ] if <ast.UnaryOp object at 0x7da18dc9ba30> begin[:] variable[output_suffixes] assign[=] tuple[[<ast.Constant object at 0x7da18dc99510>, <ast.Constant object at 0x7da18dc9bc10>, <ast.Constant object at 0x7da18dc9b490>, <ast.Constant object at 0x7da18dc98ee0>]] for taget[name[path]] in starred[<ast.ListComp object at 0x7da18dc99a80>] begin[:] if call[name[os].path.exists, parameter[name[path]]] begin[:] <ast.Raise object at 0x7da18dc9a470> variable[kwds] assign[=] call[name[kwargs].pop, parameter[constant[__grid_flux_filename_kwargs], dictionary[[], []]]] call[name[kwds].update, parameter[dictionary[[<ast.Constant object at 0x7da18dc9ba90>], [<ast.Name object at 0x7da18dc9ace0>]]]] variable[grid_flux_tbl] assign[=] call[name[Table].read, parameter[name[grid_flux_filename]]] variable[str_columns] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da18dc9bbb0>]] if compare[call[name[str_columns].sum, parameter[]] not_equal[!=] call[name[len], parameter[name[wavelength_filenames]]]] begin[:] <ast.Raise object at 0x7da18dc99360> variable[grid_points] assign[=] call[call[name[grid_flux_tbl].as_array, parameter[]]][call[call[name[np].array, parameter[name[grid_flux_tbl].colnames]]][<ast.UnaryOp object at 0x7da18dc9ae60>]] variable[grid_indices] assign[=] call[name[grid_points].argsort, parameter[]] variable[grid_points] assign[=] call[name[grid_points]][name[grid_indices]] variable[grid_flux_tbl] assign[=] call[name[grid_flux_tbl]][name[grid_indices]] variable[channel_wavelengths] assign[=] call[name[np].array, parameter[call[name[map], parameter[name[load_simple_data], name[wavelength_filenames]]]]] variable[c_indices] assign[=] call[name[np].argsort, parameter[<ast.ListComp object at 0x7da18dc990c0>]] variable[channel_names] assign[=] call[call[call[name[np].array, parameter[name[grid_flux_tbl].colnames]]][name[str_columns]]][name[c_indices]] variable[channel_wavelengths] assign[=] call[name[channel_wavelengths]][name[c_indices]] variable[channel_sizes] assign[=] <ast.ListComp object at 0x7da18dc99c00> variable[num_pixels] assign[=] call[name[sum], parameter[name[channel_sizes]]] with call[name[open], parameter[binary_operation[name[output_prefix] + constant[.yaml]], constant[w]]] begin[:] variable[header] assign[=] call[constant[ ].join, parameter[list[[<ast.Call object at 0x7da18dc99810>, <ast.Call object at 0x7da18f00c610>, <ast.Call object at 0x7da18f00ee60>]]]] call[name[fp].write, parameter[binary_operation[binary_operation[name[header] + constant[ ]] + call[name[yaml].safe_dump, parameter[dictionary[[<ast.Constant object at 0x7da18f00ec80>], [<ast.Dict object at 0x7da18f00f070>]]]]]]] variable[metadata] assign[=] dictionary[[<ast.Constant object at 0x7da18f00e1a0>, <ast.Constant object at 0x7da18f00c130>, <ast.Constant object at 0x7da18f00d9f0>, <ast.Constant object at 0x7da18f00ea70>, <ast.Constant object at 0x7da18f00d330>, <ast.Constant object at 0x7da18f00ffa0>], [<ast.Name object at 0x7da18f00fd90>, <ast.Name object at 0x7da18f00e410>, <ast.Name object at 0x7da18f00c340>, <ast.Name object at 0x7da18f00e8c0>, <ast.BinOp object at 0x7da18f00d630>, <ast.Name object at 0x7da18f00e2f0>]] call[name[logger].debug, parameter[constant[Dumping grid points and metadata to file]]] with call[name[open], parameter[binary_operation[name[output_prefix] + 
constant[.pkl]], constant[wb]]] begin[:] call[name[pickle].dump, parameter[tuple[[<ast.Name object at 0x7da18f00fb50>, <ast.Name object at 0x7da18f00d030>]], name[fp], <ast.UnaryOp object at 0x7da18f00c310>]] call[name[logger].debug, parameter[constant[Creating memory-mapped dispersion file.]]] variable[wavelengths_memmap] assign[=] call[name[np].memmap, parameter[binary_operation[name[output_prefix] + constant[-wavelengths.memmap]]]] call[name[wavelengths_memmap]][<ast.Slice object at 0x7da18f00fac0>] assign[=] call[name[np].hstack, parameter[name[channel_wavelengths]]] call[name[wavelengths_memmap].flush, parameter[]] <ast.Delete object at 0x7da18f00e3e0> call[name[logger].debug, parameter[constant[Creating memory-mapped intensities file.]]] variable[intensities_memmap] assign[=] call[name[np].memmap, parameter[binary_operation[name[output_prefix] + constant[-intensities.memmap]]]] variable[n] assign[=] call[name[len], parameter[name[grid_flux_tbl]]] for taget[tuple[[<ast.Name object at 0x7da18f00f0a0>, <ast.Name object at 0x7da18f00d720>]]] in starred[call[name[enumerate], parameter[name[grid_flux_tbl]]]] begin[:] call[name[logger].debug, parameter[call[constant[Loading point {0}/{1} into the intensities map].format, parameter[binary_operation[name[i] + constant[1]], name[n]]]]] variable[j] assign[=] constant[0] for taget[name[channel_name]] in starred[name[channel_names]] begin[:] <ast.Try object at 0x7da18f00f6a0> call[name[intensities_memmap]][tuple[[<ast.Name object at 0x7da18dc05a20>, <ast.Slice object at 0x7da18dc067d0>]]] assign[=] name[data] <ast.AugAssign object at 0x7da18dc042b0> call[name[intensities_memmap].flush, parameter[]] <ast.Delete object at 0x7da18dc071c0> return[constant[True]]
keyword[def] identifier[create] ( identifier[output_prefix] , identifier[grid_flux_filename] , identifier[wavelength_filenames] , identifier[clobber] = keyword[False] , identifier[grid_flux_filename_format] = literal[string] ,** identifier[kwargs] ): literal[string] keyword[if] keyword[not] identifier[clobber] : identifier[output_suffixes] =( literal[string] , literal[string] , literal[string] , literal[string] ) keyword[for] identifier[path] keyword[in] [ identifier[output_prefix] + identifier[suffix] keyword[for] identifier[suffix] keyword[in] identifier[output_suffixes] ]: keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ): keyword[raise] identifier[IOError] ( literal[string] . identifier[format] ( identifier[path] )) identifier[kwds] = identifier[kwargs] . identifier[pop] ( literal[string] ,{}) identifier[kwds] . identifier[update] ({ literal[string] : identifier[grid_flux_filename_format] }) identifier[grid_flux_tbl] = identifier[Table] . identifier[read] ( identifier[grid_flux_filename] ,** identifier[kwds] ) identifier[str_columns] = identifier[np] . identifier[array] ([ identifier[_] [ literal[int] ]. identifier[startswith] ( literal[string] ) keyword[for] identifier[_] keyword[in] identifier[grid_flux_tbl] . identifier[dtype] . identifier[descr] ]) keyword[if] identifier[str_columns] . identifier[sum] ()!= identifier[len] ( identifier[wavelength_filenames] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[sum] ( identifier[str_columns] ), identifier[grid_flux_filename] , literal[string] . identifier[join] ( identifier[np] . identifier[array] ( identifier[grid_flux_tbl] . identifier[colnames] )[ identifier[str_columns] ]), identifier[len] ( identifier[wavelength_filenames] ))) identifier[grid_points] = identifier[grid_flux_tbl] . identifier[as_array] ()[ identifier[np] . identifier[array] ( identifier[grid_flux_tbl] . identifier[colnames] )[~ identifier[str_columns] ]] identifier[grid_indices] = identifier[grid_points] . identifier[argsort] ( identifier[order] = identifier[grid_points] . identifier[dtype] . identifier[names] ) identifier[grid_points] = identifier[grid_points] [ identifier[grid_indices] ] identifier[grid_flux_tbl] = identifier[grid_flux_tbl] [ identifier[grid_indices] ] identifier[channel_wavelengths] = identifier[np] . identifier[array] ( identifier[map] ( identifier[load_simple_data] , identifier[wavelength_filenames] )) identifier[c_indices] = identifier[np] . identifier[argsort] ([ identifier[each] . identifier[min] () keyword[for] identifier[each] keyword[in] identifier[channel_wavelengths] ]) identifier[channel_names] = identifier[np] . identifier[array] ( identifier[grid_flux_tbl] . identifier[colnames] )[ identifier[str_columns] ][ identifier[c_indices] ] identifier[channel_wavelengths] = identifier[channel_wavelengths] [ identifier[c_indices] ] identifier[channel_sizes] =[ identifier[len] ( identifier[_] ) keyword[for] identifier[_] keyword[in] identifier[channel_wavelengths] ] identifier[num_pixels] = identifier[sum] ( identifier[channel_sizes] ) keyword[with] identifier[open] ( identifier[output_prefix] + literal[string] , literal[string] ) keyword[as] identifier[fp] : identifier[header] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[strftime] ( literal[string] )), literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[grid_points] . identifier[dtype] . 
identifier[names] )), literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[channel_names] )) ]) identifier[fp] . identifier[write] ( identifier[header] + literal[string] + identifier[yaml] . identifier[safe_dump] ({ literal[string] :{ literal[string] : identifier[output_prefix] + literal[string] , literal[string] : identifier[output_prefix] + literal[string] , literal[string] : identifier[output_prefix] + literal[string] }}, identifier[stream] = keyword[None] , identifier[allow_unicode] = keyword[True] , identifier[default_flow_style] = keyword[False] )) identifier[metadata] ={ literal[string] : identifier[grid_flux_filename] , literal[string] : identifier[wavelength_filenames] , literal[string] : identifier[channel_names] , literal[string] : identifier[channel_sizes] , literal[string] :[ identifier[float] ( literal[string] )]* identifier[len] ( identifier[channel_names] ), literal[string] : identifier[sick_version] } identifier[logger] . identifier[debug] ( literal[string] ) keyword[with] identifier[open] ( identifier[output_prefix] + literal[string] , literal[string] ) keyword[as] identifier[fp] : identifier[pickle] . identifier[dump] (( identifier[grid_points] , identifier[metadata] ), identifier[fp] ,- literal[int] ) identifier[logger] . identifier[debug] ( literal[string] ) identifier[wavelengths_memmap] = identifier[np] . identifier[memmap] ( identifier[output_prefix] + literal[string] , identifier[dtype] = literal[string] , identifier[mode] = literal[string] , identifier[shape] =( identifier[num_pixels] ,)) identifier[wavelengths_memmap] [:]= identifier[np] . identifier[hstack] ( identifier[channel_wavelengths] ) identifier[wavelengths_memmap] . identifier[flush] () keyword[del] identifier[wavelengths_memmap] identifier[logger] . identifier[debug] ( literal[string] ) identifier[intensities_memmap] = identifier[np] . identifier[memmap] ( identifier[output_prefix] + literal[string] , identifier[shape] =( identifier[grid_points] . identifier[size] , identifier[num_pixels] ), identifier[dtype] = literal[string] , identifier[mode] = literal[string] ) identifier[n] = identifier[len] ( identifier[grid_flux_tbl] ) keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[grid_flux_tbl] ): identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[i] + literal[int] , identifier[n] )) identifier[j] = literal[int] keyword[for] identifier[channel_name] keyword[in] identifier[channel_names] : keyword[try] : identifier[data] = identifier[load_simple_data] ( identifier[row] [ identifier[channel_name] ]) keyword[except] : identifier[logger] . identifier[exception] ( literal[string] . identifier[format] ( identifier[row] [ identifier[channel_name] ], identifier[channel_name] )) keyword[raise] identifier[intensities_memmap] [ identifier[i] , identifier[j] : identifier[j] + identifier[data] . identifier[size] ]= identifier[data] identifier[j] += identifier[data] . identifier[size] identifier[intensities_memmap] . identifier[flush] () keyword[del] identifier[intensities_memmap] keyword[return] keyword[True]
def create(output_prefix, grid_flux_filename, wavelength_filenames, clobber=False, grid_flux_filename_format='csv', **kwargs): """ Create a new *sick* model from files describing the parameter names, fluxes, and wavelengths. """ if not clobber: # Check to make sure the output files won't exist already. output_suffixes = ('.yaml', '.pkl', '-wavelengths.memmap', '-intensities.memmap') for path in [output_prefix + suffix for suffix in output_suffixes]: if os.path.exists(path): raise IOError('output filename {} already exists'.format(path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']] # depends on [control=['if'], data=[]] # Read the grid_flux filename. # param1 param2 param3 param4 channelname1 channelname2 kwds = kwargs.pop('__grid_flux_filename_kwargs', {}) kwds.update({'format': grid_flux_filename_format}) grid_flux_tbl = Table.read(grid_flux_filename, **kwds) # Distinguish column names between parameters (real numbers) and filenames str_columns = np.array([_[1].startswith('|S') for _ in grid_flux_tbl.dtype.descr]) # Check the number of channels provided. if str_columns.sum() != len(wavelength_filenames): raise ValueError('expected {0} wavelength filenames because {1} has {0} string columns ({2}) but found {3} wavelength filenames'.format(sum(str_columns), grid_flux_filename, ', '.join(np.array(grid_flux_tbl.colnames)[str_columns]), len(wavelength_filenames))) # depends on [control=['if'], data=[]] # Create a record array of the grid points. grid_points = grid_flux_tbl.as_array()[np.array(grid_flux_tbl.colnames)[~str_columns]] # To-do: make sure they are all floats. # Sort the grid points. grid_indices = grid_points.argsort(order=grid_points.dtype.names) grid_points = grid_points[grid_indices] grid_flux_tbl = grid_flux_tbl[grid_indices] # Check the wavelength filenames. channel_wavelengths = np.array(map(load_simple_data, wavelength_filenames)) # Sort the channels by starting wavelength. c_indices = np.argsort([each.min() for each in channel_wavelengths]) channel_names = np.array(grid_flux_tbl.colnames)[str_columns][c_indices] channel_wavelengths = channel_wavelengths[c_indices] channel_sizes = [len(_) for _ in channel_wavelengths] num_pixels = sum(channel_sizes) # Create the model YAML file. with open(output_prefix + '.yaml', 'w') as fp: header = '\n'.join(['# Model created on {0}'.format(strftime('%Y-%m-%d %H:%M:%S')), '# Grid parameters: {0}'.format(', '.join(grid_points.dtype.names)), '# Channel names: {0}'.format(', '.join(channel_names))]) fp.write(header + '\n' + yaml.safe_dump({'model_grid': {'grid_points': output_prefix + '.pkl', 'intensities': output_prefix + '-intensities.memmap', 'wavelengths': output_prefix + '-wavelengths.memmap'}}, stream=None, allow_unicode=True, default_flow_style=False)) # depends on [control=['with'], data=['fp']] # Create the pickled model file, with meta data. metadata = {'grid_flux_filename': grid_flux_filename, 'wavelength_filenames': wavelength_filenames, 'channel_names': channel_names, 'channel_sizes': channel_sizes, 'channel_resolutions': [float('inf')] * len(channel_names), 'sick_version': sick_version} logger.debug('Dumping grid points and metadata to file') with open(output_prefix + '.pkl', 'wb') as fp: pickle.dump((grid_points, metadata), fp, -1) # depends on [control=['with'], data=['fp']] # Create the memory-mapped dispersion file. 
logger.debug('Creating memory-mapped dispersion file.') wavelengths_memmap = np.memmap(output_prefix + '-wavelengths.memmap', dtype='float32', mode='w+', shape=(num_pixels,)) wavelengths_memmap[:] = np.hstack(channel_wavelengths) wavelengths_memmap.flush() del wavelengths_memmap # Create the memory-mapped intensities file. logger.debug('Creating memory-mapped intensities file.') intensities_memmap = np.memmap(output_prefix + '-intensities.memmap', shape=(grid_points.size, num_pixels), dtype='float32', mode='w+') n = len(grid_flux_tbl) for (i, row) in enumerate(grid_flux_tbl): logger.debug('Loading point {0}/{1} into the intensities map'.format(i + 1, n)) j = 0 for channel_name in channel_names: try: data = load_simple_data(row[channel_name]) # depends on [control=['try'], data=[]] except: logger.exception('Could not load data from {0} for channel {1}'.format(row[channel_name], channel_name)) raise # depends on [control=['except'], data=[]] intensities_memmap[i, j:j + data.size] = data j += data.size # depends on [control=['for'], data=['channel_name']] # depends on [control=['for'], data=[]] intensities_memmap.flush() del intensities_memmap return True
def interfaces(root):
    '''
    Generate a dictionary with all available interfaces relative to root.
    Symlinks are not followed.

    CLI example:
     .. code-block:: bash

        salt '*' sysfs.interfaces block/bcache0/bcache

    Output example:
     .. code-block:: json

       {
          "r": [
            "state",
            "partial_stripes_expensive",
            "writeback_rate_debug",
            "stripe_size",
            "dirty_data",
            "stats_total/cache_hits",
            "stats_total/cache_bypass_misses",
            "stats_total/bypassed",
            "stats_total/cache_readaheads",
            "stats_total/cache_hit_ratio",
            "stats_total/cache_miss_collisions",
            "stats_total/cache_misses",
            "stats_total/cache_bypass_hits"
          ],
          "rw": [
            "writeback_rate",
            "writeback_rate_update_seconds",
            "cache_mode",
            "writeback_delay",
            "label",
            "writeback_running",
            "writeback_metadata",
            "running",
            "writeback_rate_p_term_inverse",
            "sequential_cutoff",
            "writeback_percent",
            "writeback_rate_d_term",
            "readahead"
          ],
          "w": [
            "stop",
            "clear_stats",
            "attach",
            "detach"
          ]
       }

    .. note::
      * 'r' interfaces are read-only
      * 'w' interfaces are write-only (e.g. actions)
      * 'rw' are interfaces that can both be read or written
    '''
    root = target(root)
    if root is False or not os.path.isdir(root):
        log.error('SysFS %s not a dir', root)
        return False

    readwrites = []
    reads = []
    writes = []

    for path, _, files in salt.utils.path.os_walk(root, followlinks=False):
        for afile in files:
            canpath = os.path.join(path, afile)

            if not os.path.isfile(canpath):
                continue

            stat_mode = os.stat(canpath).st_mode
            is_r = bool(stat.S_IRUSR & stat_mode)
            is_w = bool(stat.S_IWUSR & stat_mode)

            relpath = os.path.relpath(canpath, root)
            if is_w:
                if is_r:
                    readwrites.append(relpath)
                else:
                    writes.append(relpath)
            elif is_r:
                reads.append(relpath)
            else:
                log.warning('Unable to find any interfaces in %s', canpath)

    return {
        'r': reads,
        'w': writes,
        'rw': readwrites
    }
def function[interfaces, parameter[root]]: constant[ Generate a dictionary with all available interfaces relative to root. Symlinks are not followed. CLI example: .. code-block:: bash salt '*' sysfs.interfaces block/bcache0/bcache Output example: .. code-block:: json { "r": [ "state", "partial_stripes_expensive", "writeback_rate_debug", "stripe_size", "dirty_data", "stats_total/cache_hits", "stats_total/cache_bypass_misses", "stats_total/bypassed", "stats_total/cache_readaheads", "stats_total/cache_hit_ratio", "stats_total/cache_miss_collisions", "stats_total/cache_misses", "stats_total/cache_bypass_hits", ], "rw": [ "writeback_rate", "writeback_rate_update_seconds", "cache_mode", "writeback_delay", "label", "writeback_running", "writeback_metadata", "running", "writeback_rate_p_term_inverse", "sequential_cutoff", "writeback_percent", "writeback_rate_d_term", "readahead" ], "w": [ "stop", "clear_stats", "attach", "detach" ] } .. note:: * 'r' interfaces are read-only * 'w' interfaces are write-only (e.g. actions) * 'rw' are interfaces that can both be read or written ] variable[root] assign[=] call[name[target], parameter[name[root]]] if <ast.BoolOp object at 0x7da18f09dc90> begin[:] call[name[log].error, parameter[constant[SysFS %s not a dir], name[root]]] return[constant[False]] variable[readwrites] assign[=] list[[]] variable[reads] assign[=] list[[]] variable[writes] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da18f09cbe0>, <ast.Name object at 0x7da18f09d9f0>, <ast.Name object at 0x7da18f09cd30>]]] in starred[call[name[salt].utils.path.os_walk, parameter[name[root]]]] begin[:] for taget[name[afile]] in starred[name[files]] begin[:] variable[canpath] assign[=] call[name[os].path.join, parameter[name[path], name[afile]]] if <ast.UnaryOp object at 0x7da18f09fac0> begin[:] continue variable[stat_mode] assign[=] call[name[os].stat, parameter[name[canpath]]].st_mode variable[is_r] assign[=] call[name[bool], parameter[binary_operation[name[stat].S_IRUSR <ast.BitAnd object at 0x7da2590d6b60> name[stat_mode]]]] variable[is_w] assign[=] call[name[bool], parameter[binary_operation[name[stat].S_IWUSR <ast.BitAnd object at 0x7da2590d6b60> name[stat_mode]]]] variable[relpath] assign[=] call[name[os].path.relpath, parameter[name[canpath], name[root]]] if name[is_w] begin[:] if name[is_r] begin[:] call[name[readwrites].append, parameter[name[relpath]]] return[dictionary[[<ast.Constant object at 0x7da18f09e320>, <ast.Constant object at 0x7da18f09cfd0>, <ast.Constant object at 0x7da18f09c790>], [<ast.Name object at 0x7da18f09c040>, <ast.Name object at 0x7da18f09e2c0>, <ast.Name object at 0x7da18f09d780>]]]
keyword[def] identifier[interfaces] ( identifier[root] ): literal[string] identifier[root] = identifier[target] ( identifier[root] ) keyword[if] identifier[root] keyword[is] keyword[False] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[root] ): identifier[log] . identifier[error] ( literal[string] , identifier[root] ) keyword[return] keyword[False] identifier[readwrites] =[] identifier[reads] =[] identifier[writes] =[] keyword[for] identifier[path] , identifier[_] , identifier[files] keyword[in] identifier[salt] . identifier[utils] . identifier[path] . identifier[os_walk] ( identifier[root] , identifier[followlinks] = keyword[False] ): keyword[for] identifier[afile] keyword[in] identifier[files] : identifier[canpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[afile] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[canpath] ): keyword[continue] identifier[stat_mode] = identifier[os] . identifier[stat] ( identifier[canpath] ). identifier[st_mode] identifier[is_r] = identifier[bool] ( identifier[stat] . identifier[S_IRUSR] & identifier[stat_mode] ) identifier[is_w] = identifier[bool] ( identifier[stat] . identifier[S_IWUSR] & identifier[stat_mode] ) identifier[relpath] = identifier[os] . identifier[path] . identifier[relpath] ( identifier[canpath] , identifier[root] ) keyword[if] identifier[is_w] : keyword[if] identifier[is_r] : identifier[readwrites] . identifier[append] ( identifier[relpath] ) keyword[else] : identifier[writes] . identifier[append] ( identifier[relpath] ) keyword[elif] identifier[is_r] : identifier[reads] . identifier[append] ( identifier[relpath] ) keyword[else] : identifier[log] . identifier[warning] ( literal[string] , identifier[canpath] ) keyword[return] { literal[string] : identifier[reads] , literal[string] : identifier[writes] , literal[string] : identifier[readwrites] }
def interfaces(root): """ Generate a dictionary with all available interfaces relative to root. Symlinks are not followed. CLI example: .. code-block:: bash salt '*' sysfs.interfaces block/bcache0/bcache Output example: .. code-block:: json { "r": [ "state", "partial_stripes_expensive", "writeback_rate_debug", "stripe_size", "dirty_data", "stats_total/cache_hits", "stats_total/cache_bypass_misses", "stats_total/bypassed", "stats_total/cache_readaheads", "stats_total/cache_hit_ratio", "stats_total/cache_miss_collisions", "stats_total/cache_misses", "stats_total/cache_bypass_hits", ], "rw": [ "writeback_rate", "writeback_rate_update_seconds", "cache_mode", "writeback_delay", "label", "writeback_running", "writeback_metadata", "running", "writeback_rate_p_term_inverse", "sequential_cutoff", "writeback_percent", "writeback_rate_d_term", "readahead" ], "w": [ "stop", "clear_stats", "attach", "detach" ] } .. note:: * 'r' interfaces are read-only * 'w' interfaces are write-only (e.g. actions) * 'rw' are interfaces that can both be read or written """ root = target(root) if root is False or not os.path.isdir(root): log.error('SysFS %s not a dir', root) return False # depends on [control=['if'], data=[]] readwrites = [] reads = [] writes = [] for (path, _, files) in salt.utils.path.os_walk(root, followlinks=False): for afile in files: canpath = os.path.join(path, afile) if not os.path.isfile(canpath): continue # depends on [control=['if'], data=[]] stat_mode = os.stat(canpath).st_mode is_r = bool(stat.S_IRUSR & stat_mode) is_w = bool(stat.S_IWUSR & stat_mode) relpath = os.path.relpath(canpath, root) if is_w: if is_r: readwrites.append(relpath) # depends on [control=['if'], data=[]] else: writes.append(relpath) # depends on [control=['if'], data=[]] elif is_r: reads.append(relpath) # depends on [control=['if'], data=[]] else: log.warning('Unable to find any interfaces in %s', canpath) # depends on [control=['for'], data=['afile']] # depends on [control=['for'], data=[]] return {'r': reads, 'w': writes, 'rw': readwrites}
def read(self) -> None:
    """Call method |NetCDFFile.read| of all handled |NetCDFFile| objects.
    """
    for folder in self.folders.values():
        for file_ in folder.values():
            file_.read()
def function[read, parameter[self]]: constant[Call method |NetCDFFile.read| of all handled |NetCDFFile| objects. ] for taget[name[folder]] in starred[call[name[self].folders.values, parameter[]]] begin[:] for taget[name[file_]] in starred[call[name[folder].values, parameter[]]] begin[:] call[name[file_].read, parameter[]]
keyword[def] identifier[read] ( identifier[self] )-> keyword[None] : literal[string] keyword[for] identifier[folder] keyword[in] identifier[self] . identifier[folders] . identifier[values] (): keyword[for] identifier[file_] keyword[in] identifier[folder] . identifier[values] (): identifier[file_] . identifier[read] ()
def read(self) -> None: """Call method |NetCDFFile.read| of all handled |NetCDFFile| objects. """ for folder in self.folders.values(): for file_ in folder.values(): file_.read() # depends on [control=['for'], data=['file_']] # depends on [control=['for'], data=['folder']]
def tail(self, n=5):
    """Return Series with the last n values.

    Parameters
    ----------
    n : int
        Number of values.

    Returns
    -------
    Series
        Series containing the last n values.

    Examples
    --------
    >>> sr = bl.Series(np.arange(3))
    >>> print(sr.tail(2).evaluate())
    <BLANKLINE>
    ---  --
      1   1
      2   2

    """
    if self._length is not None:
        length = self._length
    else:
        length = self._lazy_len().weld_expr

    return _series_tail(self, self.index.tail(n), length, n)
def function[tail, parameter[self, n]]: constant[Return Series with the last n values. Parameters ---------- n : int Number of values. Returns ------- Series Series containing the last n values. Examples -------- >>> sr = bl.Series(np.arange(3)) >>> print(sr.tail(2).evaluate()) <BLANKLINE> --- -- 1 1 2 2 ] if compare[name[self]._length is_not constant[None]] begin[:] variable[length] assign[=] name[self]._length return[call[name[_series_tail], parameter[name[self], call[name[self].index.tail, parameter[name[n]]], name[length], name[n]]]]
keyword[def] identifier[tail] ( identifier[self] , identifier[n] = literal[int] ): literal[string] keyword[if] identifier[self] . identifier[_length] keyword[is] keyword[not] keyword[None] : identifier[length] = identifier[self] . identifier[_length] keyword[else] : identifier[length] = identifier[self] . identifier[_lazy_len] (). identifier[weld_expr] keyword[return] identifier[_series_tail] ( identifier[self] , identifier[self] . identifier[index] . identifier[tail] ( identifier[n] ), identifier[length] , identifier[n] )
def tail(self, n=5): """Return Series with the last n values. Parameters ---------- n : int Number of values. Returns ------- Series Series containing the last n values. Examples -------- >>> sr = bl.Series(np.arange(3)) >>> print(sr.tail(2).evaluate()) <BLANKLINE> --- -- 1 1 2 2 """ if self._length is not None: length = self._length # depends on [control=['if'], data=[]] else: length = self._lazy_len().weld_expr return _series_tail(self, self.index.tail(n), length, n)
def validate(filename, verbose=False):
    """
    Validate file and return JSON result as dictionary.
    "filename" can be a file name or an HTTP URL.
    Return "" if the validator does not return valid JSON.
    Raise OSError if curl command returns an error status.
    """
    # is_css = filename.endswith(".css")
    is_remote = filename.startswith("http://") or filename.startswith(
        "https://")
    with tempfile.TemporaryFile() if is_remote else open(
            filename, "rb") as f:

        if is_remote:
            r = requests.get(filename, verify=False)
            f.write(r.content)
            f.seek(0)

        # if is_css:
        #     cmd = (
        #         "curl -sF \"file=@%s;type=text/css\" -F output=json -F warning=0 %s"
        #         % (quoted_filename, CSS_VALIDATOR_URL))
        #     _ = cmd
        # else:
        r = requests.post(
            HTML_VALIDATOR_URL,
            files={"file": (filename, f, "text/html")},
            data={
                "out": "json",
                "showsource": "yes",
            },
            verify=False)

    return r.json()
def function[validate, parameter[filename, verbose]]: constant[ Validate file and return JSON result as dictionary. "filename" can be a file name or an HTTP URL. Return "" if the validator does not return valid JSON. Raise OSError if curl command returns an error status. ] variable[is_remote] assign[=] <ast.BoolOp object at 0x7da1b0cfdb40> with <ast.IfExp object at 0x7da1b0cfc850> begin[:] if name[is_remote] begin[:] variable[r] assign[=] call[name[requests].get, parameter[name[filename]]] call[name[f].write, parameter[name[r].content]] call[name[f].seek, parameter[constant[0]]] variable[r] assign[=] call[name[requests].post, parameter[name[HTML_VALIDATOR_URL]]] return[call[name[r].json, parameter[]]]
keyword[def] identifier[validate] ( identifier[filename] , identifier[verbose] = keyword[False] ): literal[string] identifier[is_remote] = identifier[filename] . identifier[startswith] ( literal[string] ) keyword[or] identifier[filename] . identifier[startswith] ( literal[string] ) keyword[with] identifier[tempfile] . identifier[TemporaryFile] () keyword[if] identifier[is_remote] keyword[else] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[f] : keyword[if] identifier[is_remote] : identifier[r] = identifier[requests] . identifier[get] ( identifier[filename] , identifier[verify] = keyword[False] ) identifier[f] . identifier[write] ( identifier[r] . identifier[content] ) identifier[f] . identifier[seek] ( literal[int] ) identifier[r] = identifier[requests] . identifier[post] ( identifier[HTML_VALIDATOR_URL] , identifier[files] ={ literal[string] :( identifier[filename] , identifier[f] , literal[string] )}, identifier[data] ={ literal[string] : literal[string] , literal[string] : literal[string] , }, identifier[verify] = keyword[False] ) keyword[return] identifier[r] . identifier[json] ()
def validate(filename, verbose=False): """ Validate file and return JSON result as dictionary. "filename" can be a file name or an HTTP URL. Return "" if the validator does not return valid JSON. Raise OSError if curl command returns an error status. """ # is_css = filename.endswith(".css") is_remote = filename.startswith('http://') or filename.startswith('https://') with tempfile.TemporaryFile() if is_remote else open(filename, 'rb') as f: if is_remote: r = requests.get(filename, verify=False) f.write(r.content) f.seek(0) # depends on [control=['if'], data=[]] # if is_css: # cmd = ( # "curl -sF \"file=@%s;type=text/css\" -F output=json -F warning=0 %s" # % (quoted_filename, CSS_VALIDATOR_URL)) # _ = cmd # else: r = requests.post(HTML_VALIDATOR_URL, files={'file': (filename, f, 'text/html')}, data={'out': 'json', 'showsource': 'yes'}, verify=False) # depends on [control=['with'], data=['f']] return r.json()
def get_assessment_part_bank_session(self):
    """Gets the ``OsidSession`` to lookup assessment part/bank mappings for assessment parts.

    return: (osid.assessment.authoring.AssessmentPartBankSession) - an
            ``AssessmentPartBankSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_part_bank()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_part_bank()`` is ``true``.*

    """
    if not self.supports_assessment_part_bank():
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.AssessmentPartBankSession(runtime=self._runtime)
def function[get_assessment_part_bank_session, parameter[self]]: constant[Gets the ``OsidSession`` to lookup assessment part/bank mappings for assessment parts. return: (osid.assessment.authoring.AssessmentPartBankSession) - an ``AssessmentPartBankSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_bank()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_bank()`` is ``true``.* ] if <ast.UnaryOp object at 0x7da1b0a21c30> begin[:] <ast.Raise object at 0x7da1b0a22ad0> return[call[name[sessions].AssessmentPartBankSession, parameter[]]]
keyword[def] identifier[get_assessment_part_bank_session] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[supports_assessment_part_bank] (): keyword[raise] identifier[errors] . identifier[Unimplemented] () keyword[return] identifier[sessions] . identifier[AssessmentPartBankSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] )
def get_assessment_part_bank_session(self): """Gets the ``OsidSession`` to lookup assessment part/bank mappings for assessment parts. return: (osid.assessment.authoring.AssessmentPartBankSession) - an ``AssessmentPartBankSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_bank()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_bank()`` is ``true``.* """ if not self.supports_assessment_part_bank(): raise errors.Unimplemented() # depends on [control=['if'], data=[]] # pylint: disable=no-member return sessions.AssessmentPartBankSession(runtime=self._runtime)
def sort(imports, separate=True, import_before_from=True, **classify_kwargs):
    """Sort import objects into groups.

    :param list imports: FromImport / ImportImport objects
    :param bool separate: Whether to classify and return separate segments
        of imports based on classification.
    :param bool import_before_from: Whether to sort `import ...` imports
        before `from ...` imports.

    For example:

        from os import path
        from aspy import refactor_imports

        import sys
        import pyramid

        separate = True, import_before_from = True

            import sys
            from os import path

            import pyramid
            from aspy import refactor_imports

        separate = True, import_before_from = False

            from os import path
            import sys

            import pyramid
            from aspy import refactor_imports

        separate = False, import_before_from = True

            import pyramid
            import sys
            from aspy import refactor_imports
            from os import path

        separate = False, import_before_from = False

            from aspy import refactor_imports
            from os import path
            import pyramid
            import sys
    """
    if separate:
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            )
        types = ImportType.__all__
    else:
        # A little cheaty, this allows future imports to sort before others
        def classify_func(obj):
            return classify_import(
                obj.import_statement.module, **classify_kwargs
            ) == ImportType.FUTURE
        types = [True, False]

    if import_before_from:
        def sort_within(obj):
            return (CLS_TO_INDEX[type(obj)],) + obj.sort_key
    else:
        def sort_within(obj):
            return tuple(obj.sort_key)

    # Partition the imports
    imports_partitioned = collections.defaultdict(list)
    for import_obj in imports:
        imports_partitioned[classify_func(import_obj)].append(import_obj)

    # sort each of the segments
    for segment_key, val in imports_partitioned.items():
        imports_partitioned[segment_key] = sorted(val, key=sort_within)

    return tuple(
        tuple(imports_partitioned[key])
        for key in types
        if key in imports_partitioned
    )
def function[sort, parameter[imports, separate, import_before_from]]: constant[Sort import objects into groups. :param list imports: FromImport / ImportImport objects :param bool separate: Whether to classify and return separate segments of imports based on classification. :param bool import_before_from: Whether to sort `import ...` imports before `from ...` imports. For example: from os import path from aspy import refactor_imports import sys import pyramid separate = True, import_before_from = True import sys from os import path import pyramid from aspy import refactor_imports separate = True, import_before_from = False from os import path import sys import pyramid from aspy import refactor_imports separate = False, import_before_from = True import pyramid import sys from aspy import refactor_imports from os import path separate = False, import_before_from = False from aspy import refactor_imports from os import path import pyramid import sys ] if name[separate] begin[:] def function[classify_func, parameter[obj]]: return[call[name[classify_import], parameter[name[obj].import_statement.module]]] variable[types] assign[=] name[ImportType].__all__ if name[import_before_from] begin[:] def function[sort_within, parameter[obj]]: return[binary_operation[tuple[[<ast.Subscript object at 0x7da1b1b9ebf0>]] + name[obj].sort_key]] variable[imports_partitioned] assign[=] call[name[collections].defaultdict, parameter[name[list]]] for taget[name[import_obj]] in starred[name[imports]] begin[:] call[call[name[imports_partitioned]][call[name[classify_func], parameter[name[import_obj]]]].append, parameter[name[import_obj]]] for taget[tuple[[<ast.Name object at 0x7da1b1b9caf0>, <ast.Name object at 0x7da1b1b9f010>]]] in starred[call[name[imports_partitioned].items, parameter[]]] begin[:] call[name[imports_partitioned]][name[segment_key]] assign[=] call[name[sorted], parameter[name[val]]] return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18eb55510>]]]
keyword[def] identifier[sort] ( identifier[imports] , identifier[separate] = keyword[True] , identifier[import_before_from] = keyword[True] ,** identifier[classify_kwargs] ): literal[string] keyword[if] identifier[separate] : keyword[def] identifier[classify_func] ( identifier[obj] ): keyword[return] identifier[classify_import] ( identifier[obj] . identifier[import_statement] . identifier[module] ,** identifier[classify_kwargs] ) identifier[types] = identifier[ImportType] . identifier[__all__] keyword[else] : keyword[def] identifier[classify_func] ( identifier[obj] ): keyword[return] identifier[classify_import] ( identifier[obj] . identifier[import_statement] . identifier[module] ,** identifier[classify_kwargs] )== identifier[ImportType] . identifier[FUTURE] identifier[types] =[ keyword[True] , keyword[False] ] keyword[if] identifier[import_before_from] : keyword[def] identifier[sort_within] ( identifier[obj] ): keyword[return] ( identifier[CLS_TO_INDEX] [ identifier[type] ( identifier[obj] )],)+ identifier[obj] . identifier[sort_key] keyword[else] : keyword[def] identifier[sort_within] ( identifier[obj] ): keyword[return] identifier[tuple] ( identifier[obj] . identifier[sort_key] ) identifier[imports_partitioned] = identifier[collections] . identifier[defaultdict] ( identifier[list] ) keyword[for] identifier[import_obj] keyword[in] identifier[imports] : identifier[imports_partitioned] [ identifier[classify_func] ( identifier[import_obj] )]. identifier[append] ( identifier[import_obj] ) keyword[for] identifier[segment_key] , identifier[val] keyword[in] identifier[imports_partitioned] . identifier[items] (): identifier[imports_partitioned] [ identifier[segment_key] ]= identifier[sorted] ( identifier[val] , identifier[key] = identifier[sort_within] ) keyword[return] identifier[tuple] ( identifier[tuple] ( identifier[imports_partitioned] [ identifier[key] ]) keyword[for] identifier[key] keyword[in] identifier[types] keyword[if] identifier[key] keyword[in] identifier[imports_partitioned] )
def sort(imports, separate=True, import_before_from=True, **classify_kwargs): """Sort import objects into groups. :param list imports: FromImport / ImportImport objects :param bool separate: Whether to classify and return separate segments of imports based on classification. :param bool import_before_from: Whether to sort `import ...` imports before `from ...` imports. For example: from os import path from aspy import refactor_imports import sys import pyramid separate = True, import_before_from = True import sys from os import path import pyramid from aspy import refactor_imports separate = True, import_before_from = False from os import path import sys import pyramid from aspy import refactor_imports separate = False, import_before_from = True import pyramid import sys from aspy import refactor_imports from os import path separate = False, import_before_from = False from aspy import refactor_imports from os import path import pyramid import sys """ if separate: def classify_func(obj): return classify_import(obj.import_statement.module, **classify_kwargs) types = ImportType.__all__ # depends on [control=['if'], data=[]] else: # A little cheaty, this allows future imports to sort before others def classify_func(obj): return classify_import(obj.import_statement.module, **classify_kwargs) == ImportType.FUTURE types = [True, False] if import_before_from: def sort_within(obj): return (CLS_TO_INDEX[type(obj)],) + obj.sort_key # depends on [control=['if'], data=[]] else: def sort_within(obj): return tuple(obj.sort_key) # Partition the imports imports_partitioned = collections.defaultdict(list) for import_obj in imports: imports_partitioned[classify_func(import_obj)].append(import_obj) # depends on [control=['for'], data=['import_obj']] # sort each of the segments for (segment_key, val) in imports_partitioned.items(): imports_partitioned[segment_key] = sorted(val, key=sort_within) # depends on [control=['for'], data=[]] return tuple((tuple(imports_partitioned[key]) for key in types if key in imports_partitioned))
def read_int(self):
    """
    Reads an integer. The size depends on the architecture.
    Reads a 4 byte little-endian signed int on 32 bit arch
    Reads an 8 byte little-endian signed int on 64 bit arch
    """
    if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64:
        return int.from_bytes(self.read(8), byteorder='little', signed=True)
    else:
        return int.from_bytes(self.read(4), byteorder='little', signed=True)
def function[read_int, parameter[self]]: constant[ Reads an integer. The size depends on the architecture. Reads a 4 byte small-endian singed int on 32 bit arch Reads an 8 byte small-endian singed int on 64 bit arch ] if compare[name[self].reader.sysinfo.ProcessorArchitecture equal[==] name[PROCESSOR_ARCHITECTURE].AMD64] begin[:] return[call[name[int].from_bytes, parameter[call[name[self].read, parameter[constant[8]]]]]]
keyword[def] identifier[read_int] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[reader] . identifier[sysinfo] . identifier[ProcessorArchitecture] == identifier[PROCESSOR_ARCHITECTURE] . identifier[AMD64] : keyword[return] identifier[int] . identifier[from_bytes] ( identifier[self] . identifier[read] ( literal[int] ), identifier[byteorder] = literal[string] , identifier[signed] = keyword[True] ) keyword[else] : keyword[return] identifier[int] . identifier[from_bytes] ( identifier[self] . identifier[read] ( literal[int] ), identifier[byteorder] = literal[string] , identifier[signed] = keyword[True] )
def read_int(self): """ Reads an integer. The size depends on the architecture. Reads a 4 byte small-endian singed int on 32 bit arch Reads an 8 byte small-endian singed int on 64 bit arch """ if self.reader.sysinfo.ProcessorArchitecture == PROCESSOR_ARCHITECTURE.AMD64: return int.from_bytes(self.read(8), byteorder='little', signed=True) # depends on [control=['if'], data=[]] else: return int.from_bytes(self.read(4), byteorder='little', signed=True)
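For reference, a small illustration of the little-endian signed decoding that read_int above performs; only the standard-library int.from_bytes is used and the byte strings are made-up values:

    int.from_bytes(b'\x2a\x00\x00\x00', byteorder='little', signed=True)   # 42, as on a 4 byte read
    int.from_bytes(b'\xff' * 8, byteorder='little', signed=True)           # -1, as on an 8 byte read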
def _write_config_file(batch_id, caller_names, base_dir, data):
    """Write YAML configuration to generate an ensemble set of combined calls.
    """
    config_dir = utils.safe_makedir(os.path.join(base_dir, "config"))
    config_file = os.path.join(config_dir, "{0}-ensemble.yaml".format(batch_id))
    algorithm = data["config"]["algorithm"]
    econfig = {"ensemble": algorithm["ensemble"],
               "names": caller_names,
               "prep-inputs": False}
    intervals = validate.get_analysis_intervals(data, None, base_dir)
    if intervals:
        econfig["intervals"] = os.path.abspath(intervals)
    with open(config_file, "w") as out_handle:
        yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False)
    return config_file
def function[_write_config_file, parameter[batch_id, caller_names, base_dir, data]]: constant[Write YAML configuration to generate an ensemble set of combined calls. ] variable[config_dir] assign[=] call[name[utils].safe_makedir, parameter[call[name[os].path.join, parameter[name[base_dir], constant[config]]]]] variable[config_file] assign[=] call[name[os].path.join, parameter[name[config_dir], call[constant[{0}-ensemble.yaml].format, parameter[name[batch_id]]]]] variable[algorithm] assign[=] call[call[name[data]][constant[config]]][constant[algorithm]] variable[econfig] assign[=] dictionary[[<ast.Constant object at 0x7da1b18a0610>, <ast.Constant object at 0x7da1b18a2cb0>, <ast.Constant object at 0x7da1b18a28f0>], [<ast.Subscript object at 0x7da1b18a14e0>, <ast.Name object at 0x7da1b18a2710>, <ast.Constant object at 0x7da1b18a3b80>]] variable[intervals] assign[=] call[name[validate].get_analysis_intervals, parameter[name[data], constant[None], name[base_dir]]] if name[intervals] begin[:] call[name[econfig]][constant[intervals]] assign[=] call[name[os].path.abspath, parameter[name[intervals]]] with call[name[open], parameter[name[config_file], constant[w]]] begin[:] call[name[yaml].safe_dump, parameter[name[econfig], name[out_handle]]] return[name[config_file]]
keyword[def] identifier[_write_config_file] ( identifier[batch_id] , identifier[caller_names] , identifier[base_dir] , identifier[data] ): literal[string] identifier[config_dir] = identifier[utils] . identifier[safe_makedir] ( identifier[os] . identifier[path] . identifier[join] ( identifier[base_dir] , literal[string] )) identifier[config_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[config_dir] , literal[string] . identifier[format] ( identifier[batch_id] )) identifier[algorithm] = identifier[data] [ literal[string] ][ literal[string] ] identifier[econfig] ={ literal[string] : identifier[algorithm] [ literal[string] ], literal[string] : identifier[caller_names] , literal[string] : keyword[False] } identifier[intervals] = identifier[validate] . identifier[get_analysis_intervals] ( identifier[data] , keyword[None] , identifier[base_dir] ) keyword[if] identifier[intervals] : identifier[econfig] [ literal[string] ]= identifier[os] . identifier[path] . identifier[abspath] ( identifier[intervals] ) keyword[with] identifier[open] ( identifier[config_file] , literal[string] ) keyword[as] identifier[out_handle] : identifier[yaml] . identifier[safe_dump] ( identifier[econfig] , identifier[out_handle] , identifier[allow_unicode] = keyword[False] , identifier[default_flow_style] = keyword[False] ) keyword[return] identifier[config_file]
def _write_config_file(batch_id, caller_names, base_dir, data): """Write YAML configuration to generate an ensemble set of combined calls. """ config_dir = utils.safe_makedir(os.path.join(base_dir, 'config')) config_file = os.path.join(config_dir, '{0}-ensemble.yaml'.format(batch_id)) algorithm = data['config']['algorithm'] econfig = {'ensemble': algorithm['ensemble'], 'names': caller_names, 'prep-inputs': False} intervals = validate.get_analysis_intervals(data, None, base_dir) if intervals: econfig['intervals'] = os.path.abspath(intervals) # depends on [control=['if'], data=[]] with open(config_file, 'w') as out_handle: yaml.safe_dump(econfig, out_handle, allow_unicode=False, default_flow_style=False) # depends on [control=['with'], data=['out_handle']] return config_file
def interpret_element(element_type: str, text: str, span: str) -> Element:
    """ Construct an Element instance from regexp groups. """
    return Element(element_type, interpret_span(span), text)
def function[interpret_element, parameter[element_type, text, span]]: constant[ Construct an Element instance from regexp groups. ] return[call[name[Element], parameter[name[element_type], call[name[interpret_span], parameter[name[span]]], name[text]]]]
keyword[def] identifier[interpret_element] ( identifier[element_type] : identifier[str] , identifier[text] : identifier[str] , identifier[span] : identifier[str] )-> identifier[Element] : literal[string] keyword[return] identifier[Element] ( identifier[element_type] , identifier[interpret_span] ( identifier[span] ), identifier[text] )
def interpret_element(element_type: str, text: str, span: str) -> Element: """ Construct an Element instance from regexp groups. """ return Element(element_type, interpret_span(span), text)
def _assemble_from_unit_mappings(arg, errors, box, tz):
    """
    assemble the unit specified fields from the arg (DataFrame)
    Return a Series for actual parsing

    Parameters
    ----------
    arg : DataFrame
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'

        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    box : boolean

        - If True, return a DatetimeIndex
        - If False, return an array
    tz : None or 'utc'

    Returns
    -------
    Series
    """
    from pandas import to_timedelta, to_numeric, DataFrame
    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")

    # replace passed unit with _unit_map
    def f(value):
        if value in _unit_map:
            return _unit_map[value]

        # m is case significant
        if value.lower() in _unit_map:
            return _unit_map[value.lower()]

        return value

    unit = {k: f(k) for k in arg.keys()}
    unit_rev = {v: k for k, v in unit.items()}

    # we require at least Ymd
    required = ['year', 'month', 'day']
    req = sorted(list(set(required) - set(unit_rev.keys())))
    if len(req):
        raise ValueError("to assemble mappings requires at least that "
                         "[year, month, day] be specified: [{required}] "
                         "is missing".format(required=','.join(req)))

    # keys we don't recognize
    excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
    if len(excess):
        raise ValueError("extra keys have been passed "
                         "to the datetime assemblage: "
                         "[{excess}]".format(excess=','.join(excess)))

    def coerce(values):
        # we allow coercion to if errors allows
        values = to_numeric(values, errors=errors)

        # prevent overflow in case of int8 or int16
        if is_integer_dtype(values):
            values = values.astype('int64', copy=False)
        return values

    values = (coerce(arg[unit_rev['year']]) * 10000 +
              coerce(arg[unit_rev['month']]) * 100 +
              coerce(arg[unit_rev['day']]))
    try:
        values = to_datetime(values, format='%Y%m%d', errors=errors, utc=tz)
    except (TypeError, ValueError) as e:
        raise ValueError("cannot assemble the "
                         "datetimes: {error}".format(error=e))

    for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
        value = unit_rev.get(u)
        if value is not None and value in arg:
            try:
                values += to_timedelta(coerce(arg[value]),
                                       unit=u,
                                       errors=errors)
            except (TypeError, ValueError) as e:
                raise ValueError("cannot assemble the datetimes [{value}]: "
                                 "{error}".format(value=value, error=e))

    if not box:
        return values.values
    return values
def function[_assemble_from_unit_mappings, parameter[arg, errors, box, tz]]: constant[ assemble the unit specified fields from the arg (DataFrame) Return a Series for actual parsing Parameters ---------- arg : DataFrame errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input box : boolean - If True, return a DatetimeIndex - If False, return an array tz : None or 'utc' Returns ------- Series ] from relative_module[pandas] import module[to_timedelta], module[to_numeric], module[DataFrame] variable[arg] assign[=] call[name[DataFrame], parameter[name[arg]]] if <ast.UnaryOp object at 0x7da2047eb4f0> begin[:] <ast.Raise object at 0x7da2047eb460> def function[f, parameter[value]]: if compare[name[value] in name[_unit_map]] begin[:] return[call[name[_unit_map]][name[value]]] if compare[call[name[value].lower, parameter[]] in name[_unit_map]] begin[:] return[call[name[_unit_map]][call[name[value].lower, parameter[]]]] return[name[value]] variable[unit] assign[=] <ast.DictComp object at 0x7da2047eb310> variable[unit_rev] assign[=] <ast.DictComp object at 0x7da2047e9810> variable[required] assign[=] list[[<ast.Constant object at 0x7da2047e8c40>, <ast.Constant object at 0x7da2047eb970>, <ast.Constant object at 0x7da2047e9600>]] variable[req] assign[=] call[name[sorted], parameter[call[name[list], parameter[binary_operation[call[name[set], parameter[name[required]]] - call[name[set], parameter[call[name[unit_rev].keys, parameter[]]]]]]]]] if call[name[len], parameter[name[req]]] begin[:] <ast.Raise object at 0x7da2047e8730> variable[excess] assign[=] call[name[sorted], parameter[call[name[list], parameter[binary_operation[call[name[set], parameter[call[name[unit_rev].keys, parameter[]]]] - call[name[set], parameter[call[name[_unit_map].values, parameter[]]]]]]]]] if call[name[len], parameter[name[excess]]] begin[:] <ast.Raise object at 0x7da2047e8070> def function[coerce, parameter[values]]: variable[values] assign[=] call[name[to_numeric], parameter[name[values]]] if call[name[is_integer_dtype], parameter[name[values]]] begin[:] variable[values] assign[=] call[name[values].astype, parameter[constant[int64]]] return[name[values]] variable[values] assign[=] binary_operation[binary_operation[binary_operation[call[name[coerce], parameter[call[name[arg]][call[name[unit_rev]][constant[year]]]]] * constant[10000]] + binary_operation[call[name[coerce], parameter[call[name[arg]][call[name[unit_rev]][constant[month]]]]] * constant[100]]] + call[name[coerce], parameter[call[name[arg]][call[name[unit_rev]][constant[day]]]]]] <ast.Try object at 0x7da18eb545b0> for taget[name[u]] in starred[list[[<ast.Constant object at 0x7da18eb56800>, <ast.Constant object at 0x7da18eb56f80>, <ast.Constant object at 0x7da18eb57c70>, <ast.Constant object at 0x7da18eb54bb0>, <ast.Constant object at 0x7da18eb553c0>, <ast.Constant object at 0x7da18eb568c0>]]] begin[:] variable[value] assign[=] call[name[unit_rev].get, parameter[name[u]]] if <ast.BoolOp object at 0x7da18eb55e70> begin[:] <ast.Try object at 0x7da18eb54670> if <ast.UnaryOp object at 0x7da1b26acf70> begin[:] return[name[values].values] return[name[values]]
keyword[def] identifier[_assemble_from_unit_mappings] ( identifier[arg] , identifier[errors] , identifier[box] , identifier[tz] ): literal[string] keyword[from] identifier[pandas] keyword[import] identifier[to_timedelta] , identifier[to_numeric] , identifier[DataFrame] identifier[arg] = identifier[DataFrame] ( identifier[arg] ) keyword[if] keyword[not] identifier[arg] . identifier[columns] . identifier[is_unique] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[def] identifier[f] ( identifier[value] ): keyword[if] identifier[value] keyword[in] identifier[_unit_map] : keyword[return] identifier[_unit_map] [ identifier[value] ] keyword[if] identifier[value] . identifier[lower] () keyword[in] identifier[_unit_map] : keyword[return] identifier[_unit_map] [ identifier[value] . identifier[lower] ()] keyword[return] identifier[value] identifier[unit] ={ identifier[k] : identifier[f] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[arg] . identifier[keys] ()} identifier[unit_rev] ={ identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[unit] . identifier[items] ()} identifier[required] =[ literal[string] , literal[string] , literal[string] ] identifier[req] = identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[required] )- identifier[set] ( identifier[unit_rev] . identifier[keys] ()))) keyword[if] identifier[len] ( identifier[req] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[required] = literal[string] . identifier[join] ( identifier[req] ))) identifier[excess] = identifier[sorted] ( identifier[list] ( identifier[set] ( identifier[unit_rev] . identifier[keys] ())- identifier[set] ( identifier[_unit_map] . identifier[values] ()))) keyword[if] identifier[len] ( identifier[excess] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] literal[string] . identifier[format] ( identifier[excess] = literal[string] . identifier[join] ( identifier[excess] ))) keyword[def] identifier[coerce] ( identifier[values] ): identifier[values] = identifier[to_numeric] ( identifier[values] , identifier[errors] = identifier[errors] ) keyword[if] identifier[is_integer_dtype] ( identifier[values] ): identifier[values] = identifier[values] . identifier[astype] ( literal[string] , identifier[copy] = keyword[False] ) keyword[return] identifier[values] identifier[values] =( identifier[coerce] ( identifier[arg] [ identifier[unit_rev] [ literal[string] ]])* literal[int] + identifier[coerce] ( identifier[arg] [ identifier[unit_rev] [ literal[string] ]])* literal[int] + identifier[coerce] ( identifier[arg] [ identifier[unit_rev] [ literal[string] ]])) keyword[try] : identifier[values] = identifier[to_datetime] ( identifier[values] , identifier[format] = literal[string] , identifier[errors] = identifier[errors] , identifier[utc] = identifier[tz] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[e] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[error] = identifier[e] )) keyword[for] identifier[u] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]: identifier[value] = identifier[unit_rev] . 
identifier[get] ( identifier[u] ) keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[value] keyword[in] identifier[arg] : keyword[try] : identifier[values] += identifier[to_timedelta] ( identifier[coerce] ( identifier[arg] [ identifier[value] ]), identifier[unit] = identifier[u] , identifier[errors] = identifier[errors] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ) keyword[as] identifier[e] : keyword[raise] identifier[ValueError] ( literal[string] literal[string] . identifier[format] ( identifier[value] = identifier[value] , identifier[error] = identifier[e] )) keyword[if] keyword[not] identifier[box] : keyword[return] identifier[values] . identifier[values] keyword[return] identifier[values]
def _assemble_from_unit_mappings(arg, errors, box, tz): """ assemble the unit specified fields from the arg (DataFrame) Return a Series for actual parsing Parameters ---------- arg : DataFrame errors : {'ignore', 'raise', 'coerce'}, default 'raise' - If 'raise', then invalid parsing will raise an exception - If 'coerce', then invalid parsing will be set as NaT - If 'ignore', then invalid parsing will return the input box : boolean - If True, return a DatetimeIndex - If False, return an array tz : None or 'utc' Returns ------- Series """ from pandas import to_timedelta, to_numeric, DataFrame arg = DataFrame(arg) if not arg.columns.is_unique: raise ValueError('cannot assemble with duplicate keys') # depends on [control=['if'], data=[]] # replace passed unit with _unit_map def f(value): if value in _unit_map: return _unit_map[value] # depends on [control=['if'], data=['value', '_unit_map']] # m is case significant if value.lower() in _unit_map: return _unit_map[value.lower()] # depends on [control=['if'], data=['_unit_map']] return value unit = {k: f(k) for k in arg.keys()} unit_rev = {v: k for (k, v) in unit.items()} # we require at least Ymd required = ['year', 'month', 'day'] req = sorted(list(set(required) - set(unit_rev.keys()))) if len(req): raise ValueError('to assemble mappings requires at least that [year, month, day] be specified: [{required}] is missing'.format(required=','.join(req))) # depends on [control=['if'], data=[]] # keys we don't recognize excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values()))) if len(excess): raise ValueError('extra keys have been passed to the datetime assemblage: [{excess}]'.format(excess=','.join(excess))) # depends on [control=['if'], data=[]] def coerce(values): # we allow coercion to if errors allows values = to_numeric(values, errors=errors) # prevent overflow in case of int8 or int16 if is_integer_dtype(values): values = values.astype('int64', copy=False) # depends on [control=['if'], data=[]] return values values = coerce(arg[unit_rev['year']]) * 10000 + coerce(arg[unit_rev['month']]) * 100 + coerce(arg[unit_rev['day']]) try: values = to_datetime(values, format='%Y%m%d', errors=errors, utc=tz) # depends on [control=['try'], data=[]] except (TypeError, ValueError) as e: raise ValueError('cannot assemble the datetimes: {error}'.format(error=e)) # depends on [control=['except'], data=['e']] for u in ['h', 'm', 's', 'ms', 'us', 'ns']: value = unit_rev.get(u) if value is not None and value in arg: try: values += to_timedelta(coerce(arg[value]), unit=u, errors=errors) # depends on [control=['try'], data=[]] except (TypeError, ValueError) as e: raise ValueError('cannot assemble the datetimes [{value}]: {error}'.format(value=value, error=e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['u']] if not box: return values.values # depends on [control=['if'], data=[]] return values
def _get_client_creds_from_request(self, request):
    """Return client credentials based on the current request.

    According to the rfc6749, client MAY use the HTTP Basic authentication
    scheme as defined in [RFC2617] to authenticate with the authorization
    server. The client identifier is encoded using the
    "application/x-www-form-urlencoded" encoding algorithm per Appendix B,
    and the encoded value is used as the username; the client password is
    encoded using the same algorithm and used as the password. The
    authorization server MUST support the HTTP Basic authentication scheme
    for authenticating clients that were issued a client password.
    See `Section 2.3.1`_.

    .. _`Section 2.3.1`: https://tools.ietf.org/html/rfc6749#section-2.3.1
    """
    if request.client_id is not None:
        return request.client_id, request.client_secret

    auth = request.headers.get('Authorization')
    # If Werkzeug successfully parsed the Authorization header,
    # `extract_params` helper will replace the header with a parsed dict,
    # otherwise, there is nothing useful in the header and we just skip it.
    if isinstance(auth, dict):
        return auth['username'], auth['password']

    return None, None
def function[_get_client_creds_from_request, parameter[self, request]]: constant[Return client credentials based on the current request. According to the rfc6749, client MAY use the HTTP Basic authentication scheme as defined in [RFC2617] to authenticate with the authorization server. The client identifier is encoded using the "application/x-www-form-urlencoded" encoding algorithm per Appendix B, and the encoded value is used as the username; the client password is encoded using the same algorithm and used as the password. The authorization server MUST support the HTTP Basic authentication scheme for authenticating clients that were issued a client password. See `Section 2.3.1`_. .. _`Section 2.3.1`: https://tools.ietf.org/html/rfc6749#section-2.3.1 ] if compare[name[request].client_id is_not constant[None]] begin[:] return[tuple[[<ast.Attribute object at 0x7da1b02590f0>, <ast.Attribute object at 0x7da1b025a2f0>]]] variable[auth] assign[=] call[name[request].headers.get, parameter[constant[Authorization]]] if call[name[isinstance], parameter[name[auth], name[dict]]] begin[:] return[tuple[[<ast.Subscript object at 0x7da1b0258970>, <ast.Subscript object at 0x7da1b0259090>]]] return[tuple[[<ast.Constant object at 0x7da1b0258640>, <ast.Constant object at 0x7da1b025a530>]]]
keyword[def] identifier[_get_client_creds_from_request] ( identifier[self] , identifier[request] ): literal[string] keyword[if] identifier[request] . identifier[client_id] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[request] . identifier[client_id] , identifier[request] . identifier[client_secret] identifier[auth] = identifier[request] . identifier[headers] . identifier[get] ( literal[string] ) keyword[if] identifier[isinstance] ( identifier[auth] , identifier[dict] ): keyword[return] identifier[auth] [ literal[string] ], identifier[auth] [ literal[string] ] keyword[return] keyword[None] , keyword[None]
def _get_client_creds_from_request(self, request): """Return client credentials based on the current request. According to the rfc6749, client MAY use the HTTP Basic authentication scheme as defined in [RFC2617] to authenticate with the authorization server. The client identifier is encoded using the "application/x-www-form-urlencoded" encoding algorithm per Appendix B, and the encoded value is used as the username; the client password is encoded using the same algorithm and used as the password. The authorization server MUST support the HTTP Basic authentication scheme for authenticating clients that were issued a client password. See `Section 2.3.1`_. .. _`Section 2.3.1`: https://tools.ietf.org/html/rfc6749#section-2.3.1 """ if request.client_id is not None: return (request.client_id, request.client_secret) # depends on [control=['if'], data=[]] auth = request.headers.get('Authorization') # If Werkzeug successfully parsed the Authorization header, # `extract_params` helper will replace the header with a parsed dict, # otherwise, there is nothing useful in the header and we just skip it. if isinstance(auth, dict): return (auth['username'], auth['password']) # depends on [control=['if'], data=[]] return (None, None)
def get_archives_to_prune(archives, hook_data):
    """Return list of keys to delete."""
    files_to_skip = []
    for i in ['current_archive_filename', 'old_archive_filename']:
        if hook_data.get(i):
            files_to_skip.append(hook_data[i])

    archives.sort(key=itemgetter('LastModified'),
                  reverse=False)  # sort from oldest to newest

    # Drop all but last 15 files
    return [i['Key'] for i in archives[:-15] if i['Key'] not in files_to_skip]
def function[get_archives_to_prune, parameter[archives, hook_data]]: constant[Return list of keys to delete.] variable[files_to_skip] assign[=] list[[]] for taget[name[i]] in starred[list[[<ast.Constant object at 0x7da1b072ec80>, <ast.Constant object at 0x7da1b072e770>]]] begin[:] if call[name[hook_data].get, parameter[name[i]]] begin[:] call[name[files_to_skip].append, parameter[call[name[hook_data]][name[i]]]] call[name[archives].sort, parameter[]] return[<ast.ListComp object at 0x7da1b072e5f0>]
keyword[def] identifier[get_archives_to_prune] ( identifier[archives] , identifier[hook_data] ): literal[string] identifier[files_to_skip] =[] keyword[for] identifier[i] keyword[in] [ literal[string] , literal[string] ]: keyword[if] identifier[hook_data] . identifier[get] ( identifier[i] ): identifier[files_to_skip] . identifier[append] ( identifier[hook_data] [ identifier[i] ]) identifier[archives] . identifier[sort] ( identifier[key] = identifier[itemgetter] ( literal[string] ), identifier[reverse] = keyword[False] ) keyword[return] [ identifier[i] [ literal[string] ] keyword[for] identifier[i] keyword[in] identifier[archives] [:- literal[int] ] keyword[if] identifier[i] [ literal[string] ] keyword[not] keyword[in] identifier[files_to_skip] ]
def get_archives_to_prune(archives, hook_data): """Return list of keys to delete.""" files_to_skip = [] for i in ['current_archive_filename', 'old_archive_filename']: if hook_data.get(i): files_to_skip.append(hook_data[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] archives.sort(key=itemgetter('LastModified'), reverse=False) # sort from oldest to newest # Drop all but last 15 files return [i['Key'] for i in archives[:-15] if i['Key'] not in files_to_skip]
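A minimal usage sketch for get_archives_to_prune above, using made-up S3-style listing entries; the 'Key' and 'LastModified' fields follow the code, everything else is hypothetical:

    archives = [{'Key': 'app-%03d.zip' % i, 'LastModified': i} for i in range(20)]
    hook_data = {'current_archive_filename': 'app-003.zip'}
    # returns the keys of the 5 oldest entries, minus the protected filename
    to_delete = get_archives_to_prune(archives, hook_data)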
def rgetattr(obj, attr, *args):
    """Get attr that handles dots in attr name."""
    def _getattr(obj, attr):
        return getattr(obj, attr, *args)
    return functools.reduce(_getattr, [obj] + attr.split("."))
def function[rgetattr, parameter[obj, attr]]: constant[Get attr that handles dots in attr name.] def function[_getattr, parameter[obj, attr]]: return[call[name[getattr], parameter[name[obj], name[attr], <ast.Starred object at 0x7da1b20105b0>]]] return[call[name[functools].reduce, parameter[name[_getattr], binary_operation[list[[<ast.Name object at 0x7da1b2010b80>]] + call[name[attr].split, parameter[constant[.]]]]]]]
keyword[def] identifier[rgetattr] ( identifier[obj] , identifier[attr] ,* identifier[args] ): literal[string] keyword[def] identifier[_getattr] ( identifier[obj] , identifier[attr] ): keyword[return] identifier[getattr] ( identifier[obj] , identifier[attr] ,* identifier[args] ) keyword[return] identifier[functools] . identifier[reduce] ( identifier[_getattr] ,[ identifier[obj] ]+ identifier[attr] . identifier[split] ( literal[string] ))
def rgetattr(obj, attr, *args): """Get attr that handles dots in attr name.""" def _getattr(obj, attr): return getattr(obj, attr, *args) return functools.reduce(_getattr, [obj] + attr.split('.'))
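A short usage sketch for rgetattr above (the nested object is made up for illustration and rgetattr is assumed to be in scope):

    import types
    cfg = types.SimpleNamespace(db=types.SimpleNamespace(host='localhost'))
    rgetattr(cfg, 'db.host')          # 'localhost'
    rgetattr(cfg, 'db.port', 5432)    # falls back to the default for the missing leaf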
def structural_similarity(document_1, document_2):
    """
    Computes the structural similarity between two DOM Trees

    :param document_1: html string
    :param document_2: html string
    :return: float
    """
    try:
        document_1 = lxml.html.parse(StringIO(document_1))
        document_2 = lxml.html.parse(StringIO(document_2))
    except Exception as e:
        print(e)
        return 0

    tags1 = get_tags(document_1)
    tags2 = get_tags(document_2)
    diff = difflib.SequenceMatcher()
    diff.set_seq1(tags1)
    diff.set_seq2(tags2)
    return diff.ratio()
def function[structural_similarity, parameter[document_1, document_2]]: constant[ Computes the structural similarity between two DOM Trees :param document_1: html string :param document_2: html string :return: int ] <ast.Try object at 0x7da1b0ebd6f0> variable[tags1] assign[=] call[name[get_tags], parameter[name[document_1]]] variable[tags2] assign[=] call[name[get_tags], parameter[name[document_2]]] variable[diff] assign[=] call[name[difflib].SequenceMatcher, parameter[]] call[name[diff].set_seq1, parameter[name[tags1]]] call[name[diff].set_seq2, parameter[name[tags2]]] return[call[name[diff].ratio, parameter[]]]
keyword[def] identifier[structural_similarity] ( identifier[document_1] , identifier[document_2] ): literal[string] keyword[try] : identifier[document_1] = identifier[lxml] . identifier[html] . identifier[parse] ( identifier[StringIO] ( identifier[document_1] )) identifier[document_2] = identifier[lxml] . identifier[html] . identifier[parse] ( identifier[StringIO] ( identifier[document_2] )) keyword[except] identifier[Exception] keyword[as] identifier[e] : identifier[print] ( identifier[e] ) keyword[return] literal[int] identifier[tags1] = identifier[get_tags] ( identifier[document_1] ) identifier[tags2] = identifier[get_tags] ( identifier[document_2] ) identifier[diff] = identifier[difflib] . identifier[SequenceMatcher] () identifier[diff] . identifier[set_seq1] ( identifier[tags1] ) identifier[diff] . identifier[set_seq2] ( identifier[tags2] ) keyword[return] identifier[diff] . identifier[ratio] ()
def structural_similarity(document_1, document_2): """ Computes the structural similarity between two DOM Trees :param document_1: html string :param document_2: html string :return: int """ try: document_1 = lxml.html.parse(StringIO(document_1)) document_2 = lxml.html.parse(StringIO(document_2)) # depends on [control=['try'], data=[]] except Exception as e: print(e) return 0 # depends on [control=['except'], data=['e']] tags1 = get_tags(document_1) tags2 = get_tags(document_2) diff = difflib.SequenceMatcher() diff.set_seq1(tags1) diff.set_seq2(tags2) return diff.ratio()
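A brief usage sketch for structural_similarity above, with two hypothetical HTML strings; the result is difflib's similarity ratio between the two tag sequences, between 0 and 1:

    a = '<html><body><div><p>hello</p></div></body></html>'
    b = '<html><body><div><p>bye</p><span>extra</span></div></body></html>'
    score = structural_similarity(a, b)   # closer to 1.0 means more similar markup structure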
def search(self, Queue=None, order=None, raw_query=None, Format='l', **kwargs):
    """ Search arbitrary needles in given fields and queue.

    Example::

        >>> tracker = Rt('http://tracker.example.com/REST/1.0/', 'rt-username', 'top-secret')
        >>> tracker.login()
        >>> tickets = tracker.search(CF_Domain='example.com', Subject__like='warning')
        >>> tickets = tracker.search(Queue='General', order='Status', raw_query="id='1'+OR+id='2'+OR+id='3'")

    :keyword Queue:      Queue where to search. If you wish to search across
                         all of your queues, pass the ALL_QUEUES object as the
                         argument.
    :keyword order:      Name of field sorting result list, for descending
                         order put - before the field name. E.g. -Created
                         will put the newest tickets at the beginning
    :keyword raw_query:  A raw query to provide to RT if you know what
                         you are doing. You may still pass Queue and order
                         kwargs, so use these instead of including them in
                         the raw query. You can refer to the RT query builder.
                         If passing raw_query, all other **kwargs will be ignored.
    :keyword Format:     Format of the query:
                           - i: only `id' fields are populated
                           - s: only `id' and `subject' fields are populated
                           - l: multi-line format, all fields are populated
    :keyword kwargs:     Other arguments possible to set if not passing raw_query:

                         Requestors, Subject, Cc, AdminCc, Owner, Status,
                         Priority, InitialPriority, FinalPriority,
                         TimeEstimated, Starts, Due, Text,... (according to RT
                         fields)

                         Custom fields CF.{<CustomFieldName>} could be set
                         with keywords CF_CustomFieldName.

                         To alter lookup operators you can append one of the
                         following endings to each keyword:

                         __exact    for operator = (default)
                         __notexact for operator !=
                         __gt       for operator >
                         __lt       for operator <
                         __like     for operator LIKE
                         __notlike  for operator NOT LIKE

                         Setting values to keywords constrain search
                         result to the tickets satisfying all of them.

    :returns: List of matching tickets. Each ticket is the same dictionary
              as in :py:meth:`~Rt.get_ticket`.
    :raises:  UnexpectedMessageFormat: Unexpected format of returned message.
              InvalidQueryError: If raw query is malformed
    """
    get_params = {}
    query = []
    url = 'search/ticket'
    if Queue is not ALL_QUEUES:
        query.append("Queue=\'{}\'".format(Queue or self.default_queue))
    if not raw_query:
        operators_map = {
            'gt': '>',
            'lt': '<',
            'exact': '=',
            'notexact': '!=',
            'like': ' LIKE ',
            'notlike': ' NOT LIKE '
        }

        for key, value in iteritems(kwargs):
            op = '='
            key_parts = key.split('__')
            if len(key_parts) > 1:
                key = '__'.join(key_parts[:-1])
                op = operators_map.get(key_parts[-1], '=')
            if key[:3] != 'CF_':
                query.append("{}{}\'{}\'".format(key, op, value))
            else:
                query.append("'CF.{{{}}}'{}\'{}\'".format(key[3:], op, value))
    else:
        query.append(raw_query)
    get_params['query'] = ' AND '.join('(' + part + ')' for part in query)
    if order:
        get_params['orderby'] = order
    get_params['format'] = Format

    msg = self.__request(url, get_params=get_params)
    lines = msg.split('\n')
    if len(lines) > 2:
        if self.__get_status_code(lines[0]) != 200 and lines[2].startswith('Invalid query: '):
            raise InvalidQueryError(lines[2])
        if lines[2].startswith('No matching results.'):
            return []

    if Format == 'l':
        msgs = map(lambda x: x.split('\n'), msg.split('\n--\n'))
        items = []
        for msg in msgs:
            pairs = {}
            req_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['requestors_pattern'].match(m)]
            req_id = req_matching[0] if req_matching else None
            if not req_id:
                raise UnexpectedMessageFormat('Missing line starting with `Requestors:`.')
            for i in range(req_id):
                if ': ' in msg[i]:
                    header, content = self.split_header(msg[i])
                    pairs[header.strip()] = content.strip()
            requestors = [msg[req_id][12:]]
            req_id += 1
            while (req_id < len(msg)) and (msg[req_id][:12] == ' ' * 12):
                requestors.append(msg[req_id][12:])
                req_id += 1
            pairs['Requestors'] = self.__normalize_list(requestors)
            for i in range(req_id, len(msg)):
                if ': ' in msg[i]:
                    header, content = self.split_header(msg[i])
                    pairs[header.strip()] = content.strip()
            if pairs:
                items.append(pairs)

            if 'Cc' in pairs:
                pairs['Cc'] = self.__normalize_list(pairs['Cc'])
            if 'AdminCc' in pairs:
                pairs['AdminCc'] = self.__normalize_list(pairs['AdminCc'])

            if 'id' not in pairs or not pairs['id'].startswith('ticket/'):
                raise UnexpectedMessageFormat('Response from RT didn\'t contain a valid ticket_id')
            else:
                pairs['numerical_id'] = pairs['id'].split('ticket/')[1]
        return items
    elif Format == 's':
        items = []
        msgs = lines[2:]
        for msg in msgs:
            if "" == msg:  # Ignore blank line at the end
                continue
            ticket_id, subject = self.split_header(msg)
            items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id, 'Subject': subject})
        return items
    elif Format == 'i':
        items = []
        msgs = lines[2:]
        for msg in msgs:
            if "" == msg:  # Ignore blank line at the end
                continue
            _, ticket_id = msg.split('/', 1)
            items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id})
        return items
def function[search, parameter[self, Queue, order, raw_query, Format]]: constant[ Search arbitrary needles in given fields and queue. Example:: >>> tracker = Rt('http://tracker.example.com/REST/1.0/', 'rt-username', 'top-secret') >>> tracker.login() >>> tickets = tracker.search(CF_Domain='example.com', Subject__like='warning') >>> tickets = tracker.search(Queue='General', order='Status', raw_query="id='1'+OR+id='2'+OR+id='3'") :keyword Queue: Queue where to search. If you wish to search across all of your queues, pass the ALL_QUEUES object as the argument. :keyword order: Name of field sorting result list, for descending order put - before the field name. E.g. -Created will put the newest tickets at the beginning :keyword raw_query: A raw query to provide to RT if you know what you are doing. You may still pass Queue and order kwargs, so use these instead of including them in the raw query. You can refer to the RT query builder. If passing raw_query, all other **kwargs will be ignored. :keyword Format: Format of the query: - i: only `id' fields are populated - s: only `id' and `subject' fields are populated - l: multi-line format, all fields are populated :keyword kwargs: Other arguments possible to set if not passing raw_query: Requestors, Subject, Cc, AdminCc, Owner, Status, Priority, InitialPriority, FinalPriority, TimeEstimated, Starts, Due, Text,... (according to RT fields) Custom fields CF.{<CustomFieldName>} could be set with keywords CF_CustomFieldName. To alter lookup operators you can append one of the following endings to each keyword: __exact for operator = (default) __notexact for operator != __gt for operator > __lt for operator < __like for operator LIKE __notlike for operator NOT LIKE Setting values to keywords constrain search result to the tickets satisfying all of them. :returns: List of matching tickets. Each ticket is the same dictionary as in :py:meth:`~Rt.get_ticket`. :raises: UnexpectedMessageFormat: Unexpected format of returned message. 
InvalidQueryError: If raw query is malformed ] variable[get_params] assign[=] dictionary[[], []] variable[query] assign[=] list[[]] variable[url] assign[=] constant[search/ticket] if compare[name[Queue] is_not name[ALL_QUEUES]] begin[:] call[name[query].append, parameter[call[constant[Queue='{}'].format, parameter[<ast.BoolOp object at 0x7da20e960f40>]]]] if <ast.UnaryOp object at 0x7da20e962fe0> begin[:] variable[operators_map] assign[=] dictionary[[<ast.Constant object at 0x7da20e9608b0>, <ast.Constant object at 0x7da20e9626b0>, <ast.Constant object at 0x7da20e963430>, <ast.Constant object at 0x7da20e9626e0>, <ast.Constant object at 0x7da20e961510>, <ast.Constant object at 0x7da20e960610>], [<ast.Constant object at 0x7da20e963dc0>, <ast.Constant object at 0x7da20e9619c0>, <ast.Constant object at 0x7da20e9631c0>, <ast.Constant object at 0x7da20e963700>, <ast.Constant object at 0x7da20e960550>, <ast.Constant object at 0x7da20e961a50>]] for taget[tuple[[<ast.Name object at 0x7da20e9627a0>, <ast.Name object at 0x7da20e960d00>]]] in starred[call[name[iteritems], parameter[name[kwargs]]]] begin[:] variable[op] assign[=] constant[=] variable[key_parts] assign[=] call[name[key].split, parameter[constant[__]]] if compare[call[name[len], parameter[name[key_parts]]] greater[>] constant[1]] begin[:] variable[key] assign[=] call[constant[__].join, parameter[call[name[key_parts]][<ast.Slice object at 0x7da20e960b50>]]] variable[op] assign[=] call[name[operators_map].get, parameter[call[name[key_parts]][<ast.UnaryOp object at 0x7da212db5030>], constant[=]]] if compare[call[name[key]][<ast.Slice object at 0x7da18bc71300>] not_equal[!=] constant[CF_]] begin[:] call[name[query].append, parameter[call[constant[{}{}'{}'].format, parameter[name[key], name[op], name[value]]]]] call[name[get_params]][constant[query]] assign[=] call[constant[ AND ].join, parameter[<ast.GeneratorExp object at 0x7da18bc72020>]] if name[order] begin[:] call[name[get_params]][constant[orderby]] assign[=] name[order] call[name[get_params]][constant[format]] assign[=] name[Format] variable[msg] assign[=] call[name[self].__request, parameter[name[url]]] variable[lines] assign[=] call[name[msg].split, parameter[constant[ ]]] if compare[call[name[len], parameter[name[lines]]] greater[>] constant[2]] begin[:] if <ast.BoolOp object at 0x7da18f58f9d0> begin[:] <ast.Raise object at 0x7da18f58c100> if call[call[name[lines]][constant[2]].startswith, parameter[constant[No matching results.]]] begin[:] return[list[[]]] if compare[name[Format] equal[==] constant[l]] begin[:] variable[msgs] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da20e960ee0>, call[name[msg].split, parameter[constant[ -- ]]]]] variable[items] assign[=] list[[]] for taget[name[msg]] in starred[name[msgs]] begin[:] variable[pairs] assign[=] dictionary[[], []] variable[req_matching] assign[=] <ast.ListComp object at 0x7da20e961cc0> variable[req_id] assign[=] <ast.IfExp object at 0x7da20e963550> if <ast.UnaryOp object at 0x7da20e962020> begin[:] <ast.Raise object at 0x7da20e960c40> for taget[name[i]] in starred[call[name[range], parameter[name[req_id]]]] begin[:] if compare[constant[: ] in call[name[msg]][name[i]]] begin[:] <ast.Tuple object at 0x7da20e962680> assign[=] call[name[self].split_header, parameter[call[name[msg]][name[i]]]] call[name[pairs]][call[name[header].strip, parameter[]]] assign[=] call[name[content].strip, parameter[]] variable[requestors] assign[=] list[[<ast.Subscript object at 0x7da204621000>]] <ast.AugAssign object at 0x7da204623cd0> 
while <ast.BoolOp object at 0x7da204622d40> begin[:] call[name[requestors].append, parameter[call[call[name[msg]][name[req_id]]][<ast.Slice object at 0x7da204621db0>]]] <ast.AugAssign object at 0x7da2046232b0> call[name[pairs]][constant[Requestors]] assign[=] call[name[self].__normalize_list, parameter[name[requestors]]] for taget[name[i]] in starred[call[name[range], parameter[name[req_id], call[name[len], parameter[name[msg]]]]]] begin[:] if compare[constant[: ] in call[name[msg]][name[i]]] begin[:] <ast.Tuple object at 0x7da204620ca0> assign[=] call[name[self].split_header, parameter[call[name[msg]][name[i]]]] call[name[pairs]][call[name[header].strip, parameter[]]] assign[=] call[name[content].strip, parameter[]] if name[pairs] begin[:] call[name[items].append, parameter[name[pairs]]] if compare[constant[Cc] in name[pairs]] begin[:] call[name[pairs]][constant[Cc]] assign[=] call[name[self].__normalize_list, parameter[call[name[pairs]][constant[Cc]]]] if compare[constant[AdminCc] in name[pairs]] begin[:] call[name[pairs]][constant[AdminCc]] assign[=] call[name[self].__normalize_list, parameter[call[name[pairs]][constant[AdminCc]]]] if <ast.BoolOp object at 0x7da204620e80> begin[:] <ast.Raise object at 0x7da204623790> return[name[items]]
keyword[def] identifier[search] ( identifier[self] , identifier[Queue] = keyword[None] , identifier[order] = keyword[None] , identifier[raw_query] = keyword[None] , identifier[Format] = literal[string] ,** identifier[kwargs] ): literal[string] identifier[get_params] ={} identifier[query] =[] identifier[url] = literal[string] keyword[if] identifier[Queue] keyword[is] keyword[not] identifier[ALL_QUEUES] : identifier[query] . identifier[append] ( literal[string] . identifier[format] ( identifier[Queue] keyword[or] identifier[self] . identifier[default_queue] )) keyword[if] keyword[not] identifier[raw_query] : identifier[operators_map] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] } keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[kwargs] ): identifier[op] = literal[string] identifier[key_parts] = identifier[key] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[key_parts] )> literal[int] : identifier[key] = literal[string] . identifier[join] ( identifier[key_parts] [:- literal[int] ]) identifier[op] = identifier[operators_map] . identifier[get] ( identifier[key_parts] [- literal[int] ], literal[string] ) keyword[if] identifier[key] [: literal[int] ]!= literal[string] : identifier[query] . identifier[append] ( literal[string] . identifier[format] ( identifier[key] , identifier[op] , identifier[value] )) keyword[else] : identifier[query] . identifier[append] ( literal[string] . identifier[format] ( identifier[key] [ literal[int] :], identifier[op] , identifier[value] )) keyword[else] : identifier[query] . identifier[append] ( identifier[raw_query] ) identifier[get_params] [ literal[string] ]= literal[string] . identifier[join] ( literal[string] + identifier[part] + literal[string] keyword[for] identifier[part] keyword[in] identifier[query] ) keyword[if] identifier[order] : identifier[get_params] [ literal[string] ]= identifier[order] identifier[get_params] [ literal[string] ]= identifier[Format] identifier[msg] = identifier[self] . identifier[__request] ( identifier[url] , identifier[get_params] = identifier[get_params] ) identifier[lines] = identifier[msg] . identifier[split] ( literal[string] ) keyword[if] identifier[len] ( identifier[lines] )> literal[int] : keyword[if] identifier[self] . identifier[__get_status_code] ( identifier[lines] [ literal[int] ])!= literal[int] keyword[and] identifier[lines] [ literal[int] ]. identifier[startswith] ( literal[string] ): keyword[raise] identifier[InvalidQueryError] ( identifier[lines] [ literal[int] ]) keyword[if] identifier[lines] [ literal[int] ]. identifier[startswith] ( literal[string] ): keyword[return] [] keyword[if] identifier[Format] == literal[string] : identifier[msgs] = identifier[map] ( keyword[lambda] identifier[x] : identifier[x] . identifier[split] ( literal[string] ), identifier[msg] . identifier[split] ( literal[string] )) identifier[items] =[] keyword[for] identifier[msg] keyword[in] identifier[msgs] : identifier[pairs] ={} identifier[req_matching] =[ identifier[i] keyword[for] identifier[i] , identifier[m] keyword[in] identifier[enumerate] ( identifier[msg] ) keyword[if] identifier[self] . identifier[RE_PATTERNS] [ literal[string] ]. 
identifier[match] ( identifier[m] )] identifier[req_id] = identifier[req_matching] [ literal[int] ] keyword[if] identifier[req_matching] keyword[else] keyword[None] keyword[if] keyword[not] identifier[req_id] : keyword[raise] identifier[UnexpectedMessageFormat] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[req_id] ): keyword[if] literal[string] keyword[in] identifier[msg] [ identifier[i] ]: identifier[header] , identifier[content] = identifier[self] . identifier[split_header] ( identifier[msg] [ identifier[i] ]) identifier[pairs] [ identifier[header] . identifier[strip] ()]= identifier[content] . identifier[strip] () identifier[requestors] =[ identifier[msg] [ identifier[req_id] ][ literal[int] :]] identifier[req_id] += literal[int] keyword[while] ( identifier[req_id] < identifier[len] ( identifier[msg] )) keyword[and] ( identifier[msg] [ identifier[req_id] ][: literal[int] ]== literal[string] * literal[int] ): identifier[requestors] . identifier[append] ( identifier[msg] [ identifier[req_id] ][ literal[int] :]) identifier[req_id] += literal[int] identifier[pairs] [ literal[string] ]= identifier[self] . identifier[__normalize_list] ( identifier[requestors] ) keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[req_id] , identifier[len] ( identifier[msg] )): keyword[if] literal[string] keyword[in] identifier[msg] [ identifier[i] ]: identifier[header] , identifier[content] = identifier[self] . identifier[split_header] ( identifier[msg] [ identifier[i] ]) identifier[pairs] [ identifier[header] . identifier[strip] ()]= identifier[content] . identifier[strip] () keyword[if] identifier[pairs] : identifier[items] . identifier[append] ( identifier[pairs] ) keyword[if] literal[string] keyword[in] identifier[pairs] : identifier[pairs] [ literal[string] ]= identifier[self] . identifier[__normalize_list] ( identifier[pairs] [ literal[string] ]) keyword[if] literal[string] keyword[in] identifier[pairs] : identifier[pairs] [ literal[string] ]= identifier[self] . identifier[__normalize_list] ( identifier[pairs] [ literal[string] ]) keyword[if] literal[string] keyword[not] keyword[in] identifier[pairs] keyword[and] keyword[not] identifier[pairs] [ literal[string] ]. identifier[startswitch] ( literal[string] ): keyword[raise] identifier[UnexpectedMessageFormat] ( literal[string] ) keyword[else] : identifier[pairs] [ literal[string] ]= identifier[pairs] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ] keyword[return] identifier[items] keyword[elif] identifier[Format] == literal[string] : identifier[items] =[] identifier[msgs] = identifier[lines] [ literal[int] :] keyword[for] identifier[msg] keyword[in] identifier[msgs] : keyword[if] literal[string] == identifier[msg] : keyword[continue] identifier[ticket_id] , identifier[subject] = identifier[self] . identifier[split_header] ( identifier[msg] ) identifier[items] . identifier[append] ({ literal[string] : literal[string] + identifier[ticket_id] , literal[string] : identifier[ticket_id] , literal[string] : identifier[subject] }) keyword[return] identifier[items] keyword[elif] identifier[Format] == literal[string] : identifier[items] =[] identifier[msgs] = identifier[lines] [ literal[int] :] keyword[for] identifier[msg] keyword[in] identifier[msgs] : keyword[if] literal[string] == identifier[msg] : keyword[continue] identifier[_] , identifier[ticket_id] = identifier[msg] . identifier[split] ( literal[string] , literal[int] ) identifier[items] . 
identifier[append] ({ literal[string] : literal[string] + identifier[ticket_id] , literal[string] : identifier[ticket_id] }) keyword[return] identifier[items]
def search(self, Queue=None, order=None, raw_query=None, Format='l', **kwargs): """ Search arbitrary needles in given fields and queue. Example:: >>> tracker = Rt('http://tracker.example.com/REST/1.0/', 'rt-username', 'top-secret') >>> tracker.login() >>> tickets = tracker.search(CF_Domain='example.com', Subject__like='warning') >>> tickets = tracker.search(Queue='General', order='Status', raw_query="id='1'+OR+id='2'+OR+id='3'") :keyword Queue: Queue where to search. If you wish to search across all of your queues, pass the ALL_QUEUES object as the argument. :keyword order: Name of field sorting result list, for descending order put - before the field name. E.g. -Created will put the newest tickets at the beginning :keyword raw_query: A raw query to provide to RT if you know what you are doing. You may still pass Queue and order kwargs, so use these instead of including them in the raw query. You can refer to the RT query builder. If passing raw_query, all other **kwargs will be ignored. :keyword Format: Format of the query: - i: only `id' fields are populated - s: only `id' and `subject' fields are populated - l: multi-line format, all fields are populated :keyword kwargs: Other arguments possible to set if not passing raw_query: Requestors, Subject, Cc, AdminCc, Owner, Status, Priority, InitialPriority, FinalPriority, TimeEstimated, Starts, Due, Text,... (according to RT fields) Custom fields CF.{<CustomFieldName>} could be set with keywords CF_CustomFieldName. To alter lookup operators you can append one of the following endings to each keyword: __exact for operator = (default) __notexact for operator != __gt for operator > __lt for operator < __like for operator LIKE __notlike for operator NOT LIKE Setting values to keywords constrain search result to the tickets satisfying all of them. :returns: List of matching tickets. Each ticket is the same dictionary as in :py:meth:`~Rt.get_ticket`. :raises: UnexpectedMessageFormat: Unexpected format of returned message. 
InvalidQueryError: If raw query is malformed """ get_params = {} query = [] url = 'search/ticket' if Queue is not ALL_QUEUES: query.append("Queue='{}'".format(Queue or self.default_queue)) # depends on [control=['if'], data=['Queue']] if not raw_query: operators_map = {'gt': '>', 'lt': '<', 'exact': '=', 'notexact': '!=', 'like': ' LIKE ', 'notlike': ' NOT LIKE '} for (key, value) in iteritems(kwargs): op = '=' key_parts = key.split('__') if len(key_parts) > 1: key = '__'.join(key_parts[:-1]) op = operators_map.get(key_parts[-1], '=') # depends on [control=['if'], data=[]] if key[:3] != 'CF_': query.append("{}{}'{}'".format(key, op, value)) # depends on [control=['if'], data=[]] else: query.append("'CF.{{{}}}'{}'{}'".format(key[3:], op, value)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] else: query.append(raw_query) get_params['query'] = ' AND '.join(('(' + part + ')' for part in query)) if order: get_params['orderby'] = order # depends on [control=['if'], data=[]] get_params['format'] = Format msg = self.__request(url, get_params=get_params) lines = msg.split('\n') if len(lines) > 2: if self.__get_status_code(lines[0]) != 200 and lines[2].startswith('Invalid query: '): raise InvalidQueryError(lines[2]) # depends on [control=['if'], data=[]] if lines[2].startswith('No matching results.'): return [] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] if Format == 'l': msgs = map(lambda x: x.split('\n'), msg.split('\n--\n')) items = [] for msg in msgs: pairs = {} req_matching = [i for (i, m) in enumerate(msg) if self.RE_PATTERNS['requestors_pattern'].match(m)] req_id = req_matching[0] if req_matching else None if not req_id: raise UnexpectedMessageFormat('Missing line starting with `Requestors:`.') # depends on [control=['if'], data=[]] for i in range(req_id): if ': ' in msg[i]: (header, content) = self.split_header(msg[i]) pairs[header.strip()] = content.strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] requestors = [msg[req_id][12:]] req_id += 1 while req_id < len(msg) and msg[req_id][:12] == ' ' * 12: requestors.append(msg[req_id][12:]) req_id += 1 # depends on [control=['while'], data=[]] pairs['Requestors'] = self.__normalize_list(requestors) for i in range(req_id, len(msg)): if ': ' in msg[i]: (header, content) = self.split_header(msg[i]) pairs[header.strip()] = content.strip() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] if pairs: items.append(pairs) # depends on [control=['if'], data=[]] if 'Cc' in pairs: pairs['Cc'] = self.__normalize_list(pairs['Cc']) # depends on [control=['if'], data=['pairs']] if 'AdminCc' in pairs: pairs['AdminCc'] = self.__normalize_list(pairs['AdminCc']) # depends on [control=['if'], data=['pairs']] if 'id' not in pairs and (not pairs['id'].startswitch('ticket/')): raise UnexpectedMessageFormat("Response from RT didn't contain a valid ticket_id") # depends on [control=['if'], data=[]] else: pairs['numerical_id'] = pairs['id'].split('ticket/')[1] # depends on [control=['for'], data=['msg']] return items # depends on [control=['if'], data=[]] elif Format == 's': items = [] msgs = lines[2:] for msg in msgs: if '' == msg: # Ignore blank line at the end continue # depends on [control=['if'], data=[]] (ticket_id, subject) = self.split_header(msg) items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id, 'Subject': subject}) # depends on [control=['for'], data=['msg']] return items # depends on [control=['if'], 
data=[]] elif Format == 'i': items = [] msgs = lines[2:] for msg in msgs: if '' == msg: # Ignore blank line at the end continue # depends on [control=['if'], data=[]] (_, ticket_id) = msg.split('/', 1) items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id}) # depends on [control=['for'], data=['msg']] return items # depends on [control=['if'], data=[]]
def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable): """ Attaches an 'AnomalyClassifier' region to the network. Will remove current 'AnomalyClassifier' region if it exists. Parameters ----------- network - network to add the AnomalyClassifier region params - parameters to pass to the region spEnable - True if network has an SP region tmEnable - True if network has a TM region; Currently requires True """ allParams = copy.deepcopy(params) knnParams = dict(k=1, distanceMethod='rawOverlap', distanceNorm=1, doBinarization=1, replaceDuplicates=0, maxStoredPatterns=1000) allParams.update(knnParams) # Set defaults if not set if allParams['trainRecords'] is None: allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS if allParams['cacheSize'] is None: allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE # Remove current instance if already created (used for deserializing) if self._netInfo is not None and self._netInfo.net is not None \ and self._getAnomalyClassifier() is not None: self._netInfo.net.removeRegion('AnomalyClassifier') network.addRegion("AnomalyClassifier", "py.KNNAnomalyClassifierRegion", json.dumps(allParams)) # Attach link to SP if spEnable: network.link("SP", "AnomalyClassifier", "UniformLink", "", srcOutput="bottomUpOut", destInput="spBottomUpOut") else: network.link("sensor", "AnomalyClassifier", "UniformLink", "", srcOutput="dataOut", destInput="spBottomUpOut") # Attach link to TM if tmEnable: network.link("TM", "AnomalyClassifier", "UniformLink", "", srcOutput="topDownOut", destInput="tpTopDownOut") network.link("TM", "AnomalyClassifier", "UniformLink", "", srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT") else: raise RuntimeError("TemporalAnomaly models require a TM region.")
def function[_addAnomalyClassifierRegion, parameter[self, network, params, spEnable, tmEnable]]: constant[ Attaches an 'AnomalyClassifier' region to the network. Will remove current 'AnomalyClassifier' region if it exists. Parameters ----------- network - network to add the AnomalyClassifier region params - parameters to pass to the region spEnable - True if network has an SP region tmEnable - True if network has a TM region; Currently requires True ] variable[allParams] assign[=] call[name[copy].deepcopy, parameter[name[params]]] variable[knnParams] assign[=] call[name[dict], parameter[]] call[name[allParams].update, parameter[name[knnParams]]] if compare[call[name[allParams]][constant[trainRecords]] is constant[None]] begin[:] call[name[allParams]][constant[trainRecords]] assign[=] name[DEFAULT_ANOMALY_TRAINRECORDS] if compare[call[name[allParams]][constant[cacheSize]] is constant[None]] begin[:] call[name[allParams]][constant[cacheSize]] assign[=] name[DEFAULT_ANOMALY_CACHESIZE] if <ast.BoolOp object at 0x7da18dc05780> begin[:] call[name[self]._netInfo.net.removeRegion, parameter[constant[AnomalyClassifier]]] call[name[network].addRegion, parameter[constant[AnomalyClassifier], constant[py.KNNAnomalyClassifierRegion], call[name[json].dumps, parameter[name[allParams]]]]] if name[spEnable] begin[:] call[name[network].link, parameter[constant[SP], constant[AnomalyClassifier], constant[UniformLink], constant[]]] if name[tmEnable] begin[:] call[name[network].link, parameter[constant[TM], constant[AnomalyClassifier], constant[UniformLink], constant[]]] call[name[network].link, parameter[constant[TM], constant[AnomalyClassifier], constant[UniformLink], constant[]]]
keyword[def] identifier[_addAnomalyClassifierRegion] ( identifier[self] , identifier[network] , identifier[params] , identifier[spEnable] , identifier[tmEnable] ): literal[string] identifier[allParams] = identifier[copy] . identifier[deepcopy] ( identifier[params] ) identifier[knnParams] = identifier[dict] ( identifier[k] = literal[int] , identifier[distanceMethod] = literal[string] , identifier[distanceNorm] = literal[int] , identifier[doBinarization] = literal[int] , identifier[replaceDuplicates] = literal[int] , identifier[maxStoredPatterns] = literal[int] ) identifier[allParams] . identifier[update] ( identifier[knnParams] ) keyword[if] identifier[allParams] [ literal[string] ] keyword[is] keyword[None] : identifier[allParams] [ literal[string] ]= identifier[DEFAULT_ANOMALY_TRAINRECORDS] keyword[if] identifier[allParams] [ literal[string] ] keyword[is] keyword[None] : identifier[allParams] [ literal[string] ]= identifier[DEFAULT_ANOMALY_CACHESIZE] keyword[if] identifier[self] . identifier[_netInfo] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_netInfo] . identifier[net] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[_getAnomalyClassifier] () keyword[is] keyword[not] keyword[None] : identifier[self] . identifier[_netInfo] . identifier[net] . identifier[removeRegion] ( literal[string] ) identifier[network] . identifier[addRegion] ( literal[string] , literal[string] , identifier[json] . identifier[dumps] ( identifier[allParams] )) keyword[if] identifier[spEnable] : identifier[network] . identifier[link] ( literal[string] , literal[string] , literal[string] , literal[string] , identifier[srcOutput] = literal[string] , identifier[destInput] = literal[string] ) keyword[else] : identifier[network] . identifier[link] ( literal[string] , literal[string] , literal[string] , literal[string] , identifier[srcOutput] = literal[string] , identifier[destInput] = literal[string] ) keyword[if] identifier[tmEnable] : identifier[network] . identifier[link] ( literal[string] , literal[string] , literal[string] , literal[string] , identifier[srcOutput] = literal[string] , identifier[destInput] = literal[string] ) identifier[network] . identifier[link] ( literal[string] , literal[string] , literal[string] , literal[string] , identifier[srcOutput] = literal[string] , identifier[destInput] = literal[string] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] )
def _addAnomalyClassifierRegion(self, network, params, spEnable, tmEnable): """ Attaches an 'AnomalyClassifier' region to the network. Will remove current 'AnomalyClassifier' region if it exists. Parameters ----------- network - network to add the AnomalyClassifier region params - parameters to pass to the region spEnable - True if network has an SP region tmEnable - True if network has a TM region; Currently requires True """ allParams = copy.deepcopy(params) knnParams = dict(k=1, distanceMethod='rawOverlap', distanceNorm=1, doBinarization=1, replaceDuplicates=0, maxStoredPatterns=1000) allParams.update(knnParams) # Set defaults if not set if allParams['trainRecords'] is None: allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS # depends on [control=['if'], data=[]] if allParams['cacheSize'] is None: allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE # depends on [control=['if'], data=[]] # Remove current instance if already created (used for deserializing) if self._netInfo is not None and self._netInfo.net is not None and (self._getAnomalyClassifier() is not None): self._netInfo.net.removeRegion('AnomalyClassifier') # depends on [control=['if'], data=[]] network.addRegion('AnomalyClassifier', 'py.KNNAnomalyClassifierRegion', json.dumps(allParams)) # Attach link to SP if spEnable: network.link('SP', 'AnomalyClassifier', 'UniformLink', '', srcOutput='bottomUpOut', destInput='spBottomUpOut') # depends on [control=['if'], data=[]] else: network.link('sensor', 'AnomalyClassifier', 'UniformLink', '', srcOutput='dataOut', destInput='spBottomUpOut') # Attach link to TM if tmEnable: network.link('TM', 'AnomalyClassifier', 'UniformLink', '', srcOutput='topDownOut', destInput='tpTopDownOut') network.link('TM', 'AnomalyClassifier', 'UniformLink', '', srcOutput='lrnActiveStateT', destInput='tpLrnActiveStateT') # depends on [control=['if'], data=[]] else: raise RuntimeError('TemporalAnomaly models require a TM region.')
def day_postfix(day): """Returns day's correct postfix (2nd, 3rd, 61st, etc).""" if day != 11 and day % 10 == 1: postfix = "st" elif day != 12 and day % 10 == 2: postfix = "nd" elif day != 13 and day % 10 == 3: postfix = "rd" else: postfix = "th" return postfix
def function[day_postfix, parameter[day]]: constant[Returns day's correct postfix (2nd, 3rd, 61st, etc).] if <ast.BoolOp object at 0x7da20e954250> begin[:] variable[postfix] assign[=] constant[st] return[name[postfix]]
keyword[def] identifier[day_postfix] ( identifier[day] ): literal[string] keyword[if] identifier[day] != literal[int] keyword[and] identifier[day] % literal[int] == literal[int] : identifier[postfix] = literal[string] keyword[elif] identifier[day] != literal[int] keyword[and] identifier[day] % literal[int] == literal[int] : identifier[postfix] = literal[string] keyword[elif] identifier[day] != literal[int] keyword[and] identifier[day] % literal[int] == literal[int] : identifier[postfix] = literal[string] keyword[else] : identifier[postfix] = literal[string] keyword[return] identifier[postfix]
def day_postfix(day): """Returns day's correct postfix (2nd, 3rd, 61st, etc).""" if day != 11 and day % 10 == 1: postfix = 'st' # depends on [control=['if'], data=[]] elif day != 12 and day % 10 == 2: postfix = 'nd' # depends on [control=['if'], data=[]] elif day != 13 and day % 10 == 3: postfix = 'rd' # depends on [control=['if'], data=[]] else: postfix = 'th' return postfix
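For readability, here is the day_postfix row restated with its original line breaks, followed by a quick check of the postfix rules (11, 12 and 13 are special-cased to "th"); the sample days are illustrative only.

def day_postfix(day):
    """Returns day's correct postfix (2nd, 3rd, 61st, etc)."""
    if day != 11 and day % 10 == 1:
        postfix = "st"
    elif day != 12 and day % 10 == 2:
        postfix = "nd"
    elif day != 13 and day % 10 == 3:
        postfix = "rd"
    else:
        postfix = "th"
    return postfix

for day in (1, 2, 3, 4, 11, 12, 13, 21, 22, 23, 31):
    print(day, day_postfix(day))
# 1 st, 2 nd, 3 rd, 4 th, 11 th, 12 th, 13 th, 21 st, 22 nd, 23 rd, 31 st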
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Replace Val File Read from File Method """ # Set file extension property self.fileExtension = extension # Open file and parse into a data structure with open(path, 'r') as f: for line in f: valLine = ReplaceValLine() valLine.contents = line valLine.replaceValFile = self
def function[_read, parameter[self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile]]: constant[ Replace Val File Read from File Method ] name[self].fileExtension assign[=] name[extension] with call[name[open], parameter[name[path], constant[r]]] begin[:] for taget[name[line]] in starred[name[f]] begin[:] variable[valLine] assign[=] call[name[ReplaceValLine], parameter[]] name[valLine].contents assign[=] name[line] name[valLine].replaceValFile assign[=] name[self]
keyword[def] identifier[_read] ( identifier[self] , identifier[directory] , identifier[filename] , identifier[session] , identifier[path] , identifier[name] , identifier[extension] , identifier[spatial] , identifier[spatialReferenceID] , identifier[replaceParamFile] ): literal[string] identifier[self] . identifier[fileExtension] = identifier[extension] keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] : keyword[for] identifier[line] keyword[in] identifier[f] : identifier[valLine] = identifier[ReplaceValLine] () identifier[valLine] . identifier[contents] = identifier[line] identifier[valLine] . identifier[replaceValFile] = identifier[self]
def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile): """ Replace Val File Read from File Method """ # Set file extension property self.fileExtension = extension # Open file and parse into a data structure with open(path, 'r') as f: for line in f: valLine = ReplaceValLine() valLine.contents = line valLine.replaceValFile = self # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
def get_unicodes(codepoint): """ Return list of unicodes for <scanning-codepoints> """ result = re.sub('\s', '', codepoint.text) return Extension.convert_to_list_of_unicodes(result)
def function[get_unicodes, parameter[codepoint]]: constant[ Return list of unicodes for <scanning-codepoints> ] variable[result] assign[=] call[name[re].sub, parameter[constant[\s], constant[], name[codepoint].text]] return[call[name[Extension].convert_to_list_of_unicodes, parameter[name[result]]]]
keyword[def] identifier[get_unicodes] ( identifier[codepoint] ): literal[string] identifier[result] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[codepoint] . identifier[text] ) keyword[return] identifier[Extension] . identifier[convert_to_list_of_unicodes] ( identifier[result] )
def get_unicodes(codepoint): """ Return list of unicodes for <scanning-codepoints> """ result = re.sub('\\s', '', codepoint.text) return Extension.convert_to_list_of_unicodes(result)
def zipsafe(dist): """Returns whether or not we determine a distribution is zip-safe.""" # zip-safety is only an attribute of eggs. wheels are considered never # zip safe per implications of PEP 427. if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'): egg_metadata = dist.metadata_listdir('') return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata else: return False
def function[zipsafe, parameter[dist]]: constant[Returns whether or not we determine a distribution is zip-safe.] if <ast.BoolOp object at 0x7da2041d9a50> begin[:] variable[egg_metadata] assign[=] call[name[dist].metadata_listdir, parameter[constant[]]] return[<ast.BoolOp object at 0x7da2047ebbe0>]
keyword[def] identifier[zipsafe] ( identifier[dist] ): literal[string] keyword[if] identifier[hasattr] ( identifier[dist] , literal[string] ) keyword[and] identifier[dist] . identifier[egg_info] . identifier[endswith] ( literal[string] ): identifier[egg_metadata] = identifier[dist] . identifier[metadata_listdir] ( literal[string] ) keyword[return] literal[string] keyword[in] identifier[egg_metadata] keyword[and] literal[string] keyword[not] keyword[in] identifier[egg_metadata] keyword[else] : keyword[return] keyword[False]
def zipsafe(dist): """Returns whether or not we determine a distribution is zip-safe.""" # zip-safety is only an attribute of eggs. wheels are considered never # zip safe per implications of PEP 427. if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'): egg_metadata = dist.metadata_listdir('') return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata # depends on [control=['if'], data=[]] else: return False
def fibonacci() -> Iterator[int]: """Generate the sequence of Fibonacci. https://oeis.org/A000045 """ a, b = 1, 2 while True: yield a a, b = b, a + b
def function[fibonacci, parameter[]]: constant[Generate the sequence of Fibonacci. https://oeis.org/A000045 ] <ast.Tuple object at 0x7da204620eb0> assign[=] tuple[[<ast.Constant object at 0x7da1b07615a0>, <ast.Constant object at 0x7da1b0762020>]] while constant[True] begin[:] <ast.Yield object at 0x7da1b07603a0> <ast.Tuple object at 0x7da1b0763520> assign[=] tuple[[<ast.Name object at 0x7da1b0761a20>, <ast.BinOp object at 0x7da1b0762f50>]]
keyword[def] identifier[fibonacci] ()-> identifier[Iterator] [ identifier[int] ]: literal[string] identifier[a] , identifier[b] = literal[int] , literal[int] keyword[while] keyword[True] : keyword[yield] identifier[a] identifier[a] , identifier[b] = identifier[b] , identifier[a] + identifier[b]
def fibonacci() -> Iterator[int]: """Generate the sequence of Fibonacci. https://oeis.org/A000045 """ (a, b) = (1, 2) while True: yield a (a, b) = (b, a + b) # depends on [control=['while'], data=[]]
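The fibonacci row is short enough to restate with its line breaks, plus a small driver that takes the first few terms. One caveat: the docstring points at OEIS A000045, but with a, b seeded as 1, 2 the leading 0 and 1 terms of that sequence are never yielded.

from itertools import islice
from typing import Iterator

def fibonacci() -> Iterator[int]:
    """Generate the sequence of Fibonacci.

    https://oeis.org/A000045
    """
    a, b = 1, 2
    while True:
        yield a
        a, b = b, a + b

print(list(islice(fibonacci(), 6)))  # [1, 2, 3, 5, 8, 13]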
def get_output_from_input(self): """Populate output form with default output path based on input layer. """ input_path = self.layer.currentLayer().source() output_path = ( os.path.splitext(input_path)[0] + '_multi_buffer' + os.path.splitext(input_path)[1]) self.output_form.setText(output_path)
def function[get_output_from_input, parameter[self]]: constant[Populate output form with default output path based on input layer. ] variable[input_path] assign[=] call[call[name[self].layer.currentLayer, parameter[]].source, parameter[]] variable[output_path] assign[=] binary_operation[binary_operation[call[call[name[os].path.splitext, parameter[name[input_path]]]][constant[0]] + constant[_multi_buffer]] + call[call[name[os].path.splitext, parameter[name[input_path]]]][constant[1]]] call[name[self].output_form.setText, parameter[name[output_path]]]
keyword[def] identifier[get_output_from_input] ( identifier[self] ): literal[string] identifier[input_path] = identifier[self] . identifier[layer] . identifier[currentLayer] (). identifier[source] () identifier[output_path] =( identifier[os] . identifier[path] . identifier[splitext] ( identifier[input_path] )[ literal[int] ]+ literal[string] + identifier[os] . identifier[path] . identifier[splitext] ( identifier[input_path] )[ literal[int] ]) identifier[self] . identifier[output_form] . identifier[setText] ( identifier[output_path] )
def get_output_from_input(self): """Populate output form with default output path based on input layer. """ input_path = self.layer.currentLayer().source() output_path = os.path.splitext(input_path)[0] + '_multi_buffer' + os.path.splitext(input_path)[1] self.output_form.setText(output_path)
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint): """Preprocess data locally.""" import apache_beam as beam from google.datalab.utils import LambdaJob from . import _preprocess if checkpoint is None: checkpoint = _util._DEFAULT_CHECKPOINT_GSURL job_id = ('preprocess-image-classification-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')) # Project is needed for bigquery data source, even in local run. options = { 'project': _util.default_project(), } opts = beam.pipeline.PipelineOptions(flags=[], **options) p = beam.Pipeline('DirectRunner', options=opts) _preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint, output_dir, job_id) job = LambdaJob(lambda: p.run().wait_until_finish(), job_id) return job
def function[preprocess, parameter[train_dataset, output_dir, eval_dataset, checkpoint]]: constant[Preprocess data locally.] import module[apache_beam] as alias[beam] from relative_module[google.datalab.utils] import module[LambdaJob] from relative_module[None] import module[_preprocess] if compare[name[checkpoint] is constant[None]] begin[:] variable[checkpoint] assign[=] name[_util]._DEFAULT_CHECKPOINT_GSURL variable[job_id] assign[=] binary_operation[constant[preprocess-image-classification-] + call[call[name[datetime].datetime.now, parameter[]].strftime, parameter[constant[%y%m%d-%H%M%S]]]] variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da20c9911e0>], [<ast.Call object at 0x7da20c990ca0>]] variable[opts] assign[=] call[name[beam].pipeline.PipelineOptions, parameter[]] variable[p] assign[=] call[name[beam].Pipeline, parameter[constant[DirectRunner]]] call[name[_preprocess].configure_pipeline, parameter[name[p], name[train_dataset], name[eval_dataset], name[checkpoint], name[output_dir], name[job_id]]] variable[job] assign[=] call[name[LambdaJob], parameter[<ast.Lambda object at 0x7da20c990880>, name[job_id]]] return[name[job]]
keyword[def] identifier[preprocess] ( identifier[train_dataset] , identifier[output_dir] , identifier[eval_dataset] , identifier[checkpoint] ): literal[string] keyword[import] identifier[apache_beam] keyword[as] identifier[beam] keyword[from] identifier[google] . identifier[datalab] . identifier[utils] keyword[import] identifier[LambdaJob] keyword[from] . keyword[import] identifier[_preprocess] keyword[if] identifier[checkpoint] keyword[is] keyword[None] : identifier[checkpoint] = identifier[_util] . identifier[_DEFAULT_CHECKPOINT_GSURL] identifier[job_id] =( literal[string] + identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[strftime] ( literal[string] )) identifier[options] ={ literal[string] : identifier[_util] . identifier[default_project] (), } identifier[opts] = identifier[beam] . identifier[pipeline] . identifier[PipelineOptions] ( identifier[flags] =[],** identifier[options] ) identifier[p] = identifier[beam] . identifier[Pipeline] ( literal[string] , identifier[options] = identifier[opts] ) identifier[_preprocess] . identifier[configure_pipeline] ( identifier[p] , identifier[train_dataset] , identifier[eval_dataset] , identifier[checkpoint] , identifier[output_dir] , identifier[job_id] ) identifier[job] = identifier[LambdaJob] ( keyword[lambda] : identifier[p] . identifier[run] (). identifier[wait_until_finish] (), identifier[job_id] ) keyword[return] identifier[job]
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint): """Preprocess data locally.""" import apache_beam as beam from google.datalab.utils import LambdaJob from . import _preprocess if checkpoint is None: checkpoint = _util._DEFAULT_CHECKPOINT_GSURL # depends on [control=['if'], data=['checkpoint']] job_id = 'preprocess-image-classification-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S') # Project is needed for bigquery data source, even in local run. options = {'project': _util.default_project()} opts = beam.pipeline.PipelineOptions(flags=[], **options) p = beam.Pipeline('DirectRunner', options=opts) _preprocess.configure_pipeline(p, train_dataset, eval_dataset, checkpoint, output_dir, job_id) job = LambdaJob(lambda : p.run().wait_until_finish(), job_id) return job
def agent_from_entity(self, relation, entity_id): """Create a (potentially grounded) INDRA Agent object from a given Medscan entity describing the subject or object. Uses helper functions to convert a Medscan URN to an INDRA db_refs grounding dictionary. If the entity has properties indicating that it is a protein with a mutation or modification, then constructs the needed ModCondition or MutCondition. Parameters ---------- relation : MedscanRelation The current relation being processed entity_id : str The ID of the entity to process Returns ------- agent : indra.statements.Agent A potentially grounded INDRA agent representing this entity """ # Extract sentence tags mapping ids to the text. We refer to this # mapping only if the entity doesn't appear in the grounded entity # list tags = _extract_sentence_tags(relation.tagged_sentence) if entity_id is None: return None self.num_entities += 1 entity_id = _extract_id(entity_id) if entity_id not in relation.entities and \ entity_id not in tags: # Could not find the entity in either the list of grounded # entities of the items tagged in the sentence. Happens for # a very small percentage of the dataset. self.num_entities_not_found += 1 return None if entity_id not in relation.entities: # The entity is not in the grounded entity list # Instead, make an ungrounded entity, with TEXT corresponding to # the words with the given entity id tagged in the sentence. entity_data = tags[entity_id] db_refs = {'TEXT': entity_data['text']} ag = Agent(normalize_medscan_name(db_refs['TEXT']), db_refs=db_refs) return ag, entity_data['bounds'] else: entity = relation.entities[entity_id] bounds = (entity.ch_start, entity.ch_end) prop = entity.properties if len(prop.keys()) == 2 and 'Protein' in prop \ and 'Mutation' in prop: # Handle the special case where the entity is a protein # with a mutation or modification, with those details # described in the entity properties protein = prop['Protein'] assert(len(protein) == 1) protein = protein[0] mutation = prop['Mutation'] assert(len(mutation) == 1) mutation = mutation[0] db_refs, db_name = _urn_to_db_refs(protein.urn) if db_refs is None: return None db_refs['TEXT'] = protein.name if db_name is None: agent_name = db_refs['TEXT'] else: agent_name = db_name # Check mutation.type. Only some types correspond to situations # that can be represented in INDRA; return None if we cannot # map to an INDRA statement (which will block processing of # the statement in process_relation). 
if mutation.type == 'AASite': # Do not handle this # Example: # MedscanEntity(name='D1', urn='urn:agi-aa:D1', # type='AASite', properties=None) return None elif mutation.type == 'Mutation': # Convert mutation properties to an INDRA MutCondition r_old, pos, r_new = _parse_mut_string(mutation.name) if r_old is None: logger.warning('Could not parse mutation string: ' + mutation.name) # Don't create an agent return None else: try: cond = MutCondition(pos, r_old, r_new) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mutations=[cond]) return ag, bounds except BaseException: logger.warning('Could not parse mutation ' + 'string: ' + mutation.name) return None elif mutation.type == 'MethSite': # Convert methylation site information to an INDRA # ModCondition res, pos = _parse_mod_string(mutation.name) if res is None: return None cond = ModCondition('methylation', res, pos) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mods=[cond]) return ag, bounds # Example: # MedscanEntity(name='R457', # urn='urn:agi-s-llid:R457-2185', type='MethSite', # properties=None) elif mutation.type == 'PhosphoSite': # Convert phosphorylation site information to an INDRA # ModCondition res, pos = _parse_mod_string(mutation.name) if res is None: return None cond = ModCondition('phosphorylation', res, pos) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mods=[cond]) return ag, bounds # Example: # MedscanEntity(name='S455', # urn='urn:agi-s-llid:S455-47', type='PhosphoSite', # properties=None) pass elif mutation.type == 'Lysine': # Ambiguous whether this is a methylation or # demethylation; skip # Example: # MedscanEntity(name='K150', # urn='urn:agi-s-llid:K150-5624', type='Lysine', # properties=None) return None else: logger.warning('Processor currently cannot process ' + 'mutations of type ' + mutation.type) else: # Handle the more common case where we just ground the entity # without mutation or modification information db_refs, db_name = _urn_to_db_refs(entity.urn) if db_refs is None: return None db_refs['TEXT'] = entity.name if db_name is None: agent_name = db_refs['TEXT'] else: agent_name = db_name ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs) return ag, bounds
def function[agent_from_entity, parameter[self, relation, entity_id]]: constant[Create a (potentially grounded) INDRA Agent object from a given Medscan entity describing the subject or object. Uses helper functions to convert a Medscan URN to an INDRA db_refs grounding dictionary. If the entity has properties indicating that it is a protein with a mutation or modification, then constructs the needed ModCondition or MutCondition. Parameters ---------- relation : MedscanRelation The current relation being processed entity_id : str The ID of the entity to process Returns ------- agent : indra.statements.Agent A potentially grounded INDRA agent representing this entity ] variable[tags] assign[=] call[name[_extract_sentence_tags], parameter[name[relation].tagged_sentence]] if compare[name[entity_id] is constant[None]] begin[:] return[constant[None]] <ast.AugAssign object at 0x7da1b0f3a1d0> variable[entity_id] assign[=] call[name[_extract_id], parameter[name[entity_id]]] if <ast.BoolOp object at 0x7da1b0f38d60> begin[:] <ast.AugAssign object at 0x7da1b0f395a0> return[constant[None]] if compare[name[entity_id] <ast.NotIn object at 0x7da2590d7190> name[relation].entities] begin[:] variable[entity_data] assign[=] call[name[tags]][name[entity_id]] variable[db_refs] assign[=] dictionary[[<ast.Constant object at 0x7da18c4cfaf0>], [<ast.Subscript object at 0x7da18c4cf910>]] variable[ag] assign[=] call[name[Agent], parameter[call[name[normalize_medscan_name], parameter[call[name[db_refs]][constant[TEXT]]]]]] return[tuple[[<ast.Name object at 0x7da18c4cdb10>, <ast.Subscript object at 0x7da18c4ccd00>]]]
keyword[def] identifier[agent_from_entity] ( identifier[self] , identifier[relation] , identifier[entity_id] ): literal[string] identifier[tags] = identifier[_extract_sentence_tags] ( identifier[relation] . identifier[tagged_sentence] ) keyword[if] identifier[entity_id] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[self] . identifier[num_entities] += literal[int] identifier[entity_id] = identifier[_extract_id] ( identifier[entity_id] ) keyword[if] identifier[entity_id] keyword[not] keyword[in] identifier[relation] . identifier[entities] keyword[and] identifier[entity_id] keyword[not] keyword[in] identifier[tags] : identifier[self] . identifier[num_entities_not_found] += literal[int] keyword[return] keyword[None] keyword[if] identifier[entity_id] keyword[not] keyword[in] identifier[relation] . identifier[entities] : identifier[entity_data] = identifier[tags] [ identifier[entity_id] ] identifier[db_refs] ={ literal[string] : identifier[entity_data] [ literal[string] ]} identifier[ag] = identifier[Agent] ( identifier[normalize_medscan_name] ( identifier[db_refs] [ literal[string] ]), identifier[db_refs] = identifier[db_refs] ) keyword[return] identifier[ag] , identifier[entity_data] [ literal[string] ] keyword[else] : identifier[entity] = identifier[relation] . identifier[entities] [ identifier[entity_id] ] identifier[bounds] =( identifier[entity] . identifier[ch_start] , identifier[entity] . identifier[ch_end] ) identifier[prop] = identifier[entity] . identifier[properties] keyword[if] identifier[len] ( identifier[prop] . identifier[keys] ())== literal[int] keyword[and] literal[string] keyword[in] identifier[prop] keyword[and] literal[string] keyword[in] identifier[prop] : identifier[protein] = identifier[prop] [ literal[string] ] keyword[assert] ( identifier[len] ( identifier[protein] )== literal[int] ) identifier[protein] = identifier[protein] [ literal[int] ] identifier[mutation] = identifier[prop] [ literal[string] ] keyword[assert] ( identifier[len] ( identifier[mutation] )== literal[int] ) identifier[mutation] = identifier[mutation] [ literal[int] ] identifier[db_refs] , identifier[db_name] = identifier[_urn_to_db_refs] ( identifier[protein] . identifier[urn] ) keyword[if] identifier[db_refs] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[db_refs] [ literal[string] ]= identifier[protein] . identifier[name] keyword[if] identifier[db_name] keyword[is] keyword[None] : identifier[agent_name] = identifier[db_refs] [ literal[string] ] keyword[else] : identifier[agent_name] = identifier[db_name] keyword[if] identifier[mutation] . identifier[type] == literal[string] : keyword[return] keyword[None] keyword[elif] identifier[mutation] . identifier[type] == literal[string] : identifier[r_old] , identifier[pos] , identifier[r_new] = identifier[_parse_mut_string] ( identifier[mutation] . identifier[name] ) keyword[if] identifier[r_old] keyword[is] keyword[None] : identifier[logger] . identifier[warning] ( literal[string] + identifier[mutation] . identifier[name] ) keyword[return] keyword[None] keyword[else] : keyword[try] : identifier[cond] = identifier[MutCondition] ( identifier[pos] , identifier[r_old] , identifier[r_new] ) identifier[ag] = identifier[Agent] ( identifier[normalize_medscan_name] ( identifier[agent_name] ), identifier[db_refs] = identifier[db_refs] , identifier[mutations] =[ identifier[cond] ]) keyword[return] identifier[ag] , identifier[bounds] keyword[except] identifier[BaseException] : identifier[logger] . 
identifier[warning] ( literal[string] + literal[string] + identifier[mutation] . identifier[name] ) keyword[return] keyword[None] keyword[elif] identifier[mutation] . identifier[type] == literal[string] : identifier[res] , identifier[pos] = identifier[_parse_mod_string] ( identifier[mutation] . identifier[name] ) keyword[if] identifier[res] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[cond] = identifier[ModCondition] ( literal[string] , identifier[res] , identifier[pos] ) identifier[ag] = identifier[Agent] ( identifier[normalize_medscan_name] ( identifier[agent_name] ), identifier[db_refs] = identifier[db_refs] , identifier[mods] =[ identifier[cond] ]) keyword[return] identifier[ag] , identifier[bounds] keyword[elif] identifier[mutation] . identifier[type] == literal[string] : identifier[res] , identifier[pos] = identifier[_parse_mod_string] ( identifier[mutation] . identifier[name] ) keyword[if] identifier[res] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[cond] = identifier[ModCondition] ( literal[string] , identifier[res] , identifier[pos] ) identifier[ag] = identifier[Agent] ( identifier[normalize_medscan_name] ( identifier[agent_name] ), identifier[db_refs] = identifier[db_refs] , identifier[mods] =[ identifier[cond] ]) keyword[return] identifier[ag] , identifier[bounds] keyword[pass] keyword[elif] identifier[mutation] . identifier[type] == literal[string] : keyword[return] keyword[None] keyword[else] : identifier[logger] . identifier[warning] ( literal[string] + literal[string] + identifier[mutation] . identifier[type] ) keyword[else] : identifier[db_refs] , identifier[db_name] = identifier[_urn_to_db_refs] ( identifier[entity] . identifier[urn] ) keyword[if] identifier[db_refs] keyword[is] keyword[None] : keyword[return] keyword[None] identifier[db_refs] [ literal[string] ]= identifier[entity] . identifier[name] keyword[if] identifier[db_name] keyword[is] keyword[None] : identifier[agent_name] = identifier[db_refs] [ literal[string] ] keyword[else] : identifier[agent_name] = identifier[db_name] identifier[ag] = identifier[Agent] ( identifier[normalize_medscan_name] ( identifier[agent_name] ), identifier[db_refs] = identifier[db_refs] ) keyword[return] identifier[ag] , identifier[bounds]
def agent_from_entity(self, relation, entity_id): """Create a (potentially grounded) INDRA Agent object from a given Medscan entity describing the subject or object. Uses helper functions to convert a Medscan URN to an INDRA db_refs grounding dictionary. If the entity has properties indicating that it is a protein with a mutation or modification, then constructs the needed ModCondition or MutCondition. Parameters ---------- relation : MedscanRelation The current relation being processed entity_id : str The ID of the entity to process Returns ------- agent : indra.statements.Agent A potentially grounded INDRA agent representing this entity """ # Extract sentence tags mapping ids to the text. We refer to this # mapping only if the entity doesn't appear in the grounded entity # list tags = _extract_sentence_tags(relation.tagged_sentence) if entity_id is None: return None # depends on [control=['if'], data=[]] self.num_entities += 1 entity_id = _extract_id(entity_id) if entity_id not in relation.entities and entity_id not in tags: # Could not find the entity in either the list of grounded # entities of the items tagged in the sentence. Happens for # a very small percentage of the dataset. self.num_entities_not_found += 1 return None # depends on [control=['if'], data=[]] if entity_id not in relation.entities: # The entity is not in the grounded entity list # Instead, make an ungrounded entity, with TEXT corresponding to # the words with the given entity id tagged in the sentence. entity_data = tags[entity_id] db_refs = {'TEXT': entity_data['text']} ag = Agent(normalize_medscan_name(db_refs['TEXT']), db_refs=db_refs) return (ag, entity_data['bounds']) # depends on [control=['if'], data=['entity_id']] else: entity = relation.entities[entity_id] bounds = (entity.ch_start, entity.ch_end) prop = entity.properties if len(prop.keys()) == 2 and 'Protein' in prop and ('Mutation' in prop): # Handle the special case where the entity is a protein # with a mutation or modification, with those details # described in the entity properties protein = prop['Protein'] assert len(protein) == 1 protein = protein[0] mutation = prop['Mutation'] assert len(mutation) == 1 mutation = mutation[0] (db_refs, db_name) = _urn_to_db_refs(protein.urn) if db_refs is None: return None # depends on [control=['if'], data=[]] db_refs['TEXT'] = protein.name if db_name is None: agent_name = db_refs['TEXT'] # depends on [control=['if'], data=[]] else: agent_name = db_name # Check mutation.type. Only some types correspond to situations # that can be represented in INDRA; return None if we cannot # map to an INDRA statement (which will block processing of # the statement in process_relation). 
if mutation.type == 'AASite': # Do not handle this # Example: # MedscanEntity(name='D1', urn='urn:agi-aa:D1', # type='AASite', properties=None) return None # depends on [control=['if'], data=[]] elif mutation.type == 'Mutation': # Convert mutation properties to an INDRA MutCondition (r_old, pos, r_new) = _parse_mut_string(mutation.name) if r_old is None: logger.warning('Could not parse mutation string: ' + mutation.name) # Don't create an agent return None # depends on [control=['if'], data=[]] else: try: cond = MutCondition(pos, r_old, r_new) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mutations=[cond]) return (ag, bounds) # depends on [control=['try'], data=[]] except BaseException: logger.warning('Could not parse mutation ' + 'string: ' + mutation.name) return None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] elif mutation.type == 'MethSite': # Convert methylation site information to an INDRA # ModCondition (res, pos) = _parse_mod_string(mutation.name) if res is None: return None # depends on [control=['if'], data=[]] cond = ModCondition('methylation', res, pos) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mods=[cond]) return (ag, bounds) # depends on [control=['if'], data=[]] # Example: # MedscanEntity(name='R457', # urn='urn:agi-s-llid:R457-2185', type='MethSite', # properties=None) elif mutation.type == 'PhosphoSite': # Convert phosphorylation site information to an INDRA # ModCondition (res, pos) = _parse_mod_string(mutation.name) if res is None: return None # depends on [control=['if'], data=[]] cond = ModCondition('phosphorylation', res, pos) ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs, mods=[cond]) return (ag, bounds) # Example: # MedscanEntity(name='S455', # urn='urn:agi-s-llid:S455-47', type='PhosphoSite', # properties=None) pass # depends on [control=['if'], data=[]] elif mutation.type == 'Lysine': # Ambiguous whether this is a methylation or # demethylation; skip # Example: # MedscanEntity(name='K150', # urn='urn:agi-s-llid:K150-5624', type='Lysine', # properties=None) return None # depends on [control=['if'], data=[]] else: logger.warning('Processor currently cannot process ' + 'mutations of type ' + mutation.type) # depends on [control=['if'], data=[]] else: # Handle the more common case where we just ground the entity # without mutation or modification information (db_refs, db_name) = _urn_to_db_refs(entity.urn) if db_refs is None: return None # depends on [control=['if'], data=[]] db_refs['TEXT'] = entity.name if db_name is None: agent_name = db_refs['TEXT'] # depends on [control=['if'], data=[]] else: agent_name = db_name ag = Agent(normalize_medscan_name(agent_name), db_refs=db_refs) return (ag, bounds)
def from_json(cls, json): """Inherit doc.""" obj = cls(property_range.PropertyRange.from_json(json["property_range"]), namespace_range.NamespaceRange.from_json_object(json["ns_range"]), model.QuerySpec.from_json(json["query_spec"])) cursor = json["cursor"] # lint bug. Class method can access protected fields. # pylint: disable=protected-access if cursor and json["cursor_object"]: obj._cursor = datastore_query.Cursor.from_websafe_string(cursor) else: obj._cursor = cursor return obj
def function[from_json, parameter[cls, json]]: constant[Inherit doc.] variable[obj] assign[=] call[name[cls], parameter[call[name[property_range].PropertyRange.from_json, parameter[call[name[json]][constant[property_range]]]], call[name[namespace_range].NamespaceRange.from_json_object, parameter[call[name[json]][constant[ns_range]]]], call[name[model].QuerySpec.from_json, parameter[call[name[json]][constant[query_spec]]]]]] variable[cursor] assign[=] call[name[json]][constant[cursor]] if <ast.BoolOp object at 0x7da20c991cf0> begin[:] name[obj]._cursor assign[=] call[name[datastore_query].Cursor.from_websafe_string, parameter[name[cursor]]] return[name[obj]]
keyword[def] identifier[from_json] ( identifier[cls] , identifier[json] ): literal[string] identifier[obj] = identifier[cls] ( identifier[property_range] . identifier[PropertyRange] . identifier[from_json] ( identifier[json] [ literal[string] ]), identifier[namespace_range] . identifier[NamespaceRange] . identifier[from_json_object] ( identifier[json] [ literal[string] ]), identifier[model] . identifier[QuerySpec] . identifier[from_json] ( identifier[json] [ literal[string] ])) identifier[cursor] = identifier[json] [ literal[string] ] keyword[if] identifier[cursor] keyword[and] identifier[json] [ literal[string] ]: identifier[obj] . identifier[_cursor] = identifier[datastore_query] . identifier[Cursor] . identifier[from_websafe_string] ( identifier[cursor] ) keyword[else] : identifier[obj] . identifier[_cursor] = identifier[cursor] keyword[return] identifier[obj]
def from_json(cls, json): """Inherit doc.""" obj = cls(property_range.PropertyRange.from_json(json['property_range']), namespace_range.NamespaceRange.from_json_object(json['ns_range']), model.QuerySpec.from_json(json['query_spec'])) cursor = json['cursor'] # lint bug. Class method can access protected fields. # pylint: disable=protected-access if cursor and json['cursor_object']: obj._cursor = datastore_query.Cursor.from_websafe_string(cursor) # depends on [control=['if'], data=[]] else: obj._cursor = cursor return obj
def get_subdomains_count(self, accepted=True, cur=None): """ Fetch subdomain names """ if accepted: accepted_filter = 'WHERE accepted=1' else: accepted_filter = '' get_cmd = "SELECT COUNT(DISTINCT fully_qualified_subdomain) as count FROM {} {};".format( self.subdomain_table, accepted_filter) cursor = cur if cursor is None: cursor = self.conn.cursor() db_query_execute(cursor, get_cmd, ()) try: rowdata = cursor.fetchone() return rowdata['count'] except Exception as e: if BLOCKSTACK_DEBUG: log.exception(e) return 0
def function[get_subdomains_count, parameter[self, accepted, cur]]: constant[ Fetch subdomain names ] if name[accepted] begin[:] variable[accepted_filter] assign[=] constant[WHERE accepted=1] variable[get_cmd] assign[=] call[constant[SELECT COUNT(DISTINCT fully_qualified_subdomain) as count FROM {} {};].format, parameter[name[self].subdomain_table, name[accepted_filter]]] variable[cursor] assign[=] name[cur] if compare[name[cursor] is constant[None]] begin[:] variable[cursor] assign[=] call[name[self].conn.cursor, parameter[]] call[name[db_query_execute], parameter[name[cursor], name[get_cmd], tuple[[]]]] <ast.Try object at 0x7da1b17d4d30>
keyword[def] identifier[get_subdomains_count] ( identifier[self] , identifier[accepted] = keyword[True] , identifier[cur] = keyword[None] ): literal[string] keyword[if] identifier[accepted] : identifier[accepted_filter] = literal[string] keyword[else] : identifier[accepted_filter] = literal[string] identifier[get_cmd] = literal[string] . identifier[format] ( identifier[self] . identifier[subdomain_table] , identifier[accepted_filter] ) identifier[cursor] = identifier[cur] keyword[if] identifier[cursor] keyword[is] keyword[None] : identifier[cursor] = identifier[self] . identifier[conn] . identifier[cursor] () identifier[db_query_execute] ( identifier[cursor] , identifier[get_cmd] ,()) keyword[try] : identifier[rowdata] = identifier[cursor] . identifier[fetchone] () keyword[return] identifier[rowdata] [ literal[string] ] keyword[except] identifier[Exception] keyword[as] identifier[e] : keyword[if] identifier[BLOCKSTACK_DEBUG] : identifier[log] . identifier[exception] ( identifier[e] ) keyword[return] literal[int]
def get_subdomains_count(self, accepted=True, cur=None): """ Fetch subdomain names """ if accepted: accepted_filter = 'WHERE accepted=1' # depends on [control=['if'], data=[]] else: accepted_filter = '' get_cmd = 'SELECT COUNT(DISTINCT fully_qualified_subdomain) as count FROM {} {};'.format(self.subdomain_table, accepted_filter) cursor = cur if cursor is None: cursor = self.conn.cursor() # depends on [control=['if'], data=['cursor']] db_query_execute(cursor, get_cmd, ()) try: rowdata = cursor.fetchone() return rowdata['count'] # depends on [control=['try'], data=[]] except Exception as e: if BLOCKSTACK_DEBUG: log.exception(e) # depends on [control=['if'], data=[]] return 0 # depends on [control=['except'], data=['e']]
def to_message(self, keywords=None, show_header=True): """Format keywords as a message object. .. versionadded:: 3.2 .. versionchanged:: 3.3 - default keywords to None The message object can then be rendered to html, plain text etc. :param keywords: Keywords to be converted to a message. Optional. If not passed then we will attempt to get keywords from self.layer if it is not None. :type keywords: dict :param show_header: Flag indicating if InaSAFE logo etc. should be added above the keywords table. Default is True. :type show_header: bool :returns: A safe message object containing a table. :rtype: safe.messaging.Message """ if keywords is None and self.layer is not None: keywords = self.read_keywords(self.layer) # This order was determined in issue #2313 preferred_order = [ 'title', 'layer_purpose', 'exposure', 'hazard', 'hazard_category', 'layer_geometry', 'layer_mode', 'classification', 'exposure_unit', 'continuous_hazard_unit', 'value_map', # attribute values 'thresholds', # attribute values 'value_maps', # attribute values 'inasafe_fields', 'inasafe_default_values', 'resample', 'source', 'url', 'scale', 'license', 'date', 'extra_keywords', 'keyword_version' ] # everything else in arbitrary order report = m.Message() if show_header: logo_element = m.Brand() report.add(logo_element) report.add(m.Heading(tr( 'Layer keywords:'), **styles.BLUE_LEVEL_4_STYLE)) report.add(m.Text(tr( 'The following keywords are defined for the active layer:'))) table = m.Table(style_class='table table-condensed table-striped') # First render out the preferred order keywords for keyword in preferred_order: if keyword in keywords: value = keywords[keyword] row = self._keyword_to_row(keyword, value) keywords.pop(keyword) table.add(row) # now render out any remaining keywords in arbitrary order for keyword in keywords: value = keywords[keyword] row = self._keyword_to_row(keyword, value) table.add(row) # If the keywords class was instantiated with a layer object # we can add some context info not stored in the keywords themselves # but that is still useful to see... if self.layer: # First the CRS keyword = tr('Reference system') value = self.layer.crs().authid() row = self._keyword_to_row(keyword, value) table.add(row) # Next the data source keyword = tr('Layer source') value = self.layer.publicSource() # Hide password row = self._keyword_to_row(keyword, value, wrap_slash=True) table.add(row) # Finalise the report report.add(table) return report
def function[to_message, parameter[self, keywords, show_header]]: constant[Format keywords as a message object. .. versionadded:: 3.2 .. versionchanged:: 3.3 - default keywords to None The message object can then be rendered to html, plain text etc. :param keywords: Keywords to be converted to a message. Optional. If not passed then we will attempt to get keywords from self.layer if it is not None. :type keywords: dict :param show_header: Flag indicating if InaSAFE logo etc. should be added above the keywords table. Default is True. :type show_header: bool :returns: A safe message object containing a table. :rtype: safe.messaging.Message ] if <ast.BoolOp object at 0x7da20c992650> begin[:] variable[keywords] assign[=] call[name[self].read_keywords, parameter[name[self].layer]] variable[preferred_order] assign[=] list[[<ast.Constant object at 0x7da20c990610>, <ast.Constant object at 0x7da20c992f50>, <ast.Constant object at 0x7da20c991de0>, <ast.Constant object at 0x7da20c991c60>, <ast.Constant object at 0x7da20c9923b0>, <ast.Constant object at 0x7da20c992890>, <ast.Constant object at 0x7da20c991d50>, <ast.Constant object at 0x7da20c991bd0>, <ast.Constant object at 0x7da20c993fd0>, <ast.Constant object at 0x7da20c992f20>, <ast.Constant object at 0x7da20c990670>, <ast.Constant object at 0x7da20c993ee0>, <ast.Constant object at 0x7da20c991690>, <ast.Constant object at 0x7da20c993e20>, <ast.Constant object at 0x7da20c993cd0>, <ast.Constant object at 0x7da20c991e10>, <ast.Constant object at 0x7da20c9927d0>, <ast.Constant object at 0x7da20c9934f0>, <ast.Constant object at 0x7da20c990130>, <ast.Constant object at 0x7da20c991d80>, <ast.Constant object at 0x7da20c990e50>, <ast.Constant object at 0x7da20c990550>, <ast.Constant object at 0x7da20c992e90>]] variable[report] assign[=] call[name[m].Message, parameter[]] if name[show_header] begin[:] variable[logo_element] assign[=] call[name[m].Brand, parameter[]] call[name[report].add, parameter[name[logo_element]]] call[name[report].add, parameter[call[name[m].Heading, parameter[call[name[tr], parameter[constant[Layer keywords:]]]]]]] call[name[report].add, parameter[call[name[m].Text, parameter[call[name[tr], parameter[constant[The following keywords are defined for the active layer:]]]]]]] variable[table] assign[=] call[name[m].Table, parameter[]] for taget[name[keyword]] in starred[name[preferred_order]] begin[:] if compare[name[keyword] in name[keywords]] begin[:] variable[value] assign[=] call[name[keywords]][name[keyword]] variable[row] assign[=] call[name[self]._keyword_to_row, parameter[name[keyword], name[value]]] call[name[keywords].pop, parameter[name[keyword]]] call[name[table].add, parameter[name[row]]] for taget[name[keyword]] in starred[name[keywords]] begin[:] variable[value] assign[=] call[name[keywords]][name[keyword]] variable[row] assign[=] call[name[self]._keyword_to_row, parameter[name[keyword], name[value]]] call[name[table].add, parameter[name[row]]] if name[self].layer begin[:] variable[keyword] assign[=] call[name[tr], parameter[constant[Reference system]]] variable[value] assign[=] call[call[name[self].layer.crs, parameter[]].authid, parameter[]] variable[row] assign[=] call[name[self]._keyword_to_row, parameter[name[keyword], name[value]]] call[name[table].add, parameter[name[row]]] variable[keyword] assign[=] call[name[tr], parameter[constant[Layer source]]] variable[value] assign[=] call[name[self].layer.publicSource, parameter[]] variable[row] assign[=] call[name[self]._keyword_to_row, parameter[name[keyword], name[value]]] 
call[name[table].add, parameter[name[row]]] call[name[report].add, parameter[name[table]]] return[name[report]]
keyword[def] identifier[to_message] ( identifier[self] , identifier[keywords] = keyword[None] , identifier[show_header] = keyword[True] ): literal[string] keyword[if] identifier[keywords] keyword[is] keyword[None] keyword[and] identifier[self] . identifier[layer] keyword[is] keyword[not] keyword[None] : identifier[keywords] = identifier[self] . identifier[read_keywords] ( identifier[self] . identifier[layer] ) identifier[preferred_order] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ] identifier[report] = identifier[m] . identifier[Message] () keyword[if] identifier[show_header] : identifier[logo_element] = identifier[m] . identifier[Brand] () identifier[report] . identifier[add] ( identifier[logo_element] ) identifier[report] . identifier[add] ( identifier[m] . identifier[Heading] ( identifier[tr] ( literal[string] ),** identifier[styles] . identifier[BLUE_LEVEL_4_STYLE] )) identifier[report] . identifier[add] ( identifier[m] . identifier[Text] ( identifier[tr] ( literal[string] ))) identifier[table] = identifier[m] . identifier[Table] ( identifier[style_class] = literal[string] ) keyword[for] identifier[keyword] keyword[in] identifier[preferred_order] : keyword[if] identifier[keyword] keyword[in] identifier[keywords] : identifier[value] = identifier[keywords] [ identifier[keyword] ] identifier[row] = identifier[self] . identifier[_keyword_to_row] ( identifier[keyword] , identifier[value] ) identifier[keywords] . identifier[pop] ( identifier[keyword] ) identifier[table] . identifier[add] ( identifier[row] ) keyword[for] identifier[keyword] keyword[in] identifier[keywords] : identifier[value] = identifier[keywords] [ identifier[keyword] ] identifier[row] = identifier[self] . identifier[_keyword_to_row] ( identifier[keyword] , identifier[value] ) identifier[table] . identifier[add] ( identifier[row] ) keyword[if] identifier[self] . identifier[layer] : identifier[keyword] = identifier[tr] ( literal[string] ) identifier[value] = identifier[self] . identifier[layer] . identifier[crs] (). identifier[authid] () identifier[row] = identifier[self] . identifier[_keyword_to_row] ( identifier[keyword] , identifier[value] ) identifier[table] . identifier[add] ( identifier[row] ) identifier[keyword] = identifier[tr] ( literal[string] ) identifier[value] = identifier[self] . identifier[layer] . identifier[publicSource] () identifier[row] = identifier[self] . identifier[_keyword_to_row] ( identifier[keyword] , identifier[value] , identifier[wrap_slash] = keyword[True] ) identifier[table] . identifier[add] ( identifier[row] ) identifier[report] . identifier[add] ( identifier[table] ) keyword[return] identifier[report]
def to_message(self, keywords=None, show_header=True): """Format keywords as a message object. .. versionadded:: 3.2 .. versionchanged:: 3.3 - default keywords to None The message object can then be rendered to html, plain text etc. :param keywords: Keywords to be converted to a message. Optional. If not passed then we will attempt to get keywords from self.layer if it is not None. :type keywords: dict :param show_header: Flag indicating if InaSAFE logo etc. should be added above the keywords table. Default is True. :type show_header: bool :returns: A safe message object containing a table. :rtype: safe.messaging.Message """ if keywords is None and self.layer is not None: keywords = self.read_keywords(self.layer) # depends on [control=['if'], data=[]] # This order was determined in issue #2313 # attribute values # attribute values # attribute values preferred_order = ['title', 'layer_purpose', 'exposure', 'hazard', 'hazard_category', 'layer_geometry', 'layer_mode', 'classification', 'exposure_unit', 'continuous_hazard_unit', 'value_map', 'thresholds', 'value_maps', 'inasafe_fields', 'inasafe_default_values', 'resample', 'source', 'url', 'scale', 'license', 'date', 'extra_keywords', 'keyword_version'] # everything else in arbitrary order report = m.Message() if show_header: logo_element = m.Brand() report.add(logo_element) report.add(m.Heading(tr('Layer keywords:'), **styles.BLUE_LEVEL_4_STYLE)) report.add(m.Text(tr('The following keywords are defined for the active layer:'))) # depends on [control=['if'], data=[]] table = m.Table(style_class='table table-condensed table-striped') # First render out the preferred order keywords for keyword in preferred_order: if keyword in keywords: value = keywords[keyword] row = self._keyword_to_row(keyword, value) keywords.pop(keyword) table.add(row) # depends on [control=['if'], data=['keyword', 'keywords']] # depends on [control=['for'], data=['keyword']] # now render out any remaining keywords in arbitrary order for keyword in keywords: value = keywords[keyword] row = self._keyword_to_row(keyword, value) table.add(row) # depends on [control=['for'], data=['keyword']] # If the keywords class was instantiated with a layer object # we can add some context info not stored in the keywords themselves # but that is still useful to see... if self.layer: # First the CRS keyword = tr('Reference system') value = self.layer.crs().authid() row = self._keyword_to_row(keyword, value) table.add(row) # Next the data source keyword = tr('Layer source') value = self.layer.publicSource() # Hide password row = self._keyword_to_row(keyword, value, wrap_slash=True) table.add(row) # depends on [control=['if'], data=[]] # Finalise the report report.add(table) return report
def camera_feedback_send(self, time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags, force_mavlink1=False): ''' Camera Capture Feedback time_usec : Image timestamp (microseconds since UNIX epoch), as passed in by CAMERA_STATUS message (or autopilot if no CCB) (uint64_t) target_system : System ID (uint8_t) cam_idx : Camera ID (uint8_t) img_idx : Image index (uint16_t) lat : Latitude in (deg * 1E7) (int32_t) lng : Longitude in (deg * 1E7) (int32_t) alt_msl : Altitude Absolute (meters AMSL) (float) alt_rel : Altitude Relative (meters above HOME location) (float) roll : Camera Roll angle (earth frame, degrees, +-180) (float) pitch : Camera Pitch angle (earth frame, degrees, +-180) (float) yaw : Camera Yaw (earth frame, degrees, 0-360, true) (float) foc_len : Focal Length (mm) (float) flags : See CAMERA_FEEDBACK_FLAGS enum for definition of the bitmask (uint8_t) ''' return self.send(self.camera_feedback_encode(time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags), force_mavlink1=force_mavlink1)
def function[camera_feedback_send, parameter[self, time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags, force_mavlink1]]: constant[ Camera Capture Feedback time_usec : Image timestamp (microseconds since UNIX epoch), as passed in by CAMERA_STATUS message (or autopilot if no CCB) (uint64_t) target_system : System ID (uint8_t) cam_idx : Camera ID (uint8_t) img_idx : Image index (uint16_t) lat : Latitude in (deg * 1E7) (int32_t) lng : Longitude in (deg * 1E7) (int32_t) alt_msl : Altitude Absolute (meters AMSL) (float) alt_rel : Altitude Relative (meters above HOME location) (float) roll : Camera Roll angle (earth frame, degrees, +-180) (float) pitch : Camera Pitch angle (earth frame, degrees, +-180) (float) yaw : Camera Yaw (earth frame, degrees, 0-360, true) (float) foc_len : Focal Length (mm) (float) flags : See CAMERA_FEEDBACK_FLAGS enum for definition of the bitmask (uint8_t) ] return[call[name[self].send, parameter[call[name[self].camera_feedback_encode, parameter[name[time_usec], name[target_system], name[cam_idx], name[img_idx], name[lat], name[lng], name[alt_msl], name[alt_rel], name[roll], name[pitch], name[yaw], name[foc_len], name[flags]]]]]]
keyword[def] identifier[camera_feedback_send] ( identifier[self] , identifier[time_usec] , identifier[target_system] , identifier[cam_idx] , identifier[img_idx] , identifier[lat] , identifier[lng] , identifier[alt_msl] , identifier[alt_rel] , identifier[roll] , identifier[pitch] , identifier[yaw] , identifier[foc_len] , identifier[flags] , identifier[force_mavlink1] = keyword[False] ): literal[string] keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[camera_feedback_encode] ( identifier[time_usec] , identifier[target_system] , identifier[cam_idx] , identifier[img_idx] , identifier[lat] , identifier[lng] , identifier[alt_msl] , identifier[alt_rel] , identifier[roll] , identifier[pitch] , identifier[yaw] , identifier[foc_len] , identifier[flags] ), identifier[force_mavlink1] = identifier[force_mavlink1] )
def camera_feedback_send(self, time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags, force_mavlink1=False): """ Camera Capture Feedback time_usec : Image timestamp (microseconds since UNIX epoch), as passed in by CAMERA_STATUS message (or autopilot if no CCB) (uint64_t) target_system : System ID (uint8_t) cam_idx : Camera ID (uint8_t) img_idx : Image index (uint16_t) lat : Latitude in (deg * 1E7) (int32_t) lng : Longitude in (deg * 1E7) (int32_t) alt_msl : Altitude Absolute (meters AMSL) (float) alt_rel : Altitude Relative (meters above HOME location) (float) roll : Camera Roll angle (earth frame, degrees, +-180) (float) pitch : Camera Pitch angle (earth frame, degrees, +-180) (float) yaw : Camera Yaw (earth frame, degrees, 0-360, true) (float) foc_len : Focal Length (mm) (float) flags : See CAMERA_FEEDBACK_FLAGS enum for definition of the bitmask (uint8_t) """ return self.send(self.camera_feedback_encode(time_usec, target_system, cam_idx, img_idx, lat, lng, alt_msl, alt_rel, roll, pitch, yaw, foc_len, flags), force_mavlink1=force_mavlink1)
def dumps(data, ac_parser=None, **options): """ Return string representation of 'data' in forced type format. :param data: Config data object to dump :param ac_parser: Forced parser type or ID or parser object :param options: see :func:`dump` :return: Backend-specific string representation for the given data :raises: ValueError, UnknownProcessorTypeError """ psr = find(None, forced_type=ac_parser) return psr.dumps(data, **options)
def function[dumps, parameter[data, ac_parser]]: constant[ Return string representation of 'data' in forced type format. :param data: Config data object to dump :param ac_parser: Forced parser type or ID or parser object :param options: see :func:`dump` :return: Backend-specific string representation for the given data :raises: ValueError, UnknownProcessorTypeError ] variable[psr] assign[=] call[name[find], parameter[constant[None]]] return[call[name[psr].dumps, parameter[name[data]]]]
keyword[def] identifier[dumps] ( identifier[data] , identifier[ac_parser] = keyword[None] ,** identifier[options] ): literal[string] identifier[psr] = identifier[find] ( keyword[None] , identifier[forced_type] = identifier[ac_parser] ) keyword[return] identifier[psr] . identifier[dumps] ( identifier[data] ,** identifier[options] )
def dumps(data, ac_parser=None, **options): """ Return string representation of 'data' in forced type format. :param data: Config data object to dump :param ac_parser: Forced parser type or ID or parser object :param options: see :func:`dump` :return: Backend-specific string representation for the given data :raises: ValueError, UnknownProcessorTypeError """ psr = find(None, forced_type=ac_parser) return psr.dumps(data, **options)
def _locateConvergencePoint(stats, minOverlap, maxOverlap): """ Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point. """ for i, v in enumerate(stats[::-1]): if not (v >= minOverlap and v <= maxOverlap): return len(stats) - i + 1 # Never differs - converged in one iteration return 1
def function[_locateConvergencePoint, parameter[stats, minOverlap, maxOverlap]]: constant[ Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point. ] for taget[tuple[[<ast.Name object at 0x7da1b08325f0>, <ast.Name object at 0x7da1b0832980>]]] in starred[call[name[enumerate], parameter[call[name[stats]][<ast.Slice object at 0x7da1b0832f80>]]]] begin[:] if <ast.UnaryOp object at 0x7da1b0830ca0> begin[:] return[binary_operation[binary_operation[call[name[len], parameter[name[stats]]] - name[i]] + constant[1]]] return[constant[1]]
keyword[def] identifier[_locateConvergencePoint] ( identifier[stats] , identifier[minOverlap] , identifier[maxOverlap] ): literal[string] keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[stats] [::- literal[int] ]): keyword[if] keyword[not] ( identifier[v] >= identifier[minOverlap] keyword[and] identifier[v] <= identifier[maxOverlap] ): keyword[return] identifier[len] ( identifier[stats] )- identifier[i] + literal[int] keyword[return] literal[int]
def _locateConvergencePoint(stats, minOverlap, maxOverlap): """ Walk backwards through stats until you locate the first point that diverges from target overlap values. We need this to handle cases where it might get to target values, diverge, and then get back again. We want the last convergence point. """ for (i, v) in enumerate(stats[::-1]): if not (v >= minOverlap and v <= maxOverlap): return len(stats) - i + 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # Never differs - converged in one iteration return 1
def load_snippet(self, name, package): """Starts the snippet apk with the given package name and connects. Examples: .. code-block:: python ad.load_snippet( name='maps', package='com.google.maps.snippets') ad.maps.activateZoom('3') Args: name: string, the attribute name to which to attach the snippet client. E.g. `name='maps'` attaches the snippet client to `ad.maps`. package: string, the package name of the snippet apk to connect to. Raises: SnippetError: Illegal load operations are attempted. """ # Should not load snippet with an existing attribute. if hasattr(self, name): raise SnippetError( self, 'Attribute "%s" already exists, please use a different name.' % name) self.services.snippets.add_snippet_client(name, package)
def function[load_snippet, parameter[self, name, package]]: constant[Starts the snippet apk with the given package name and connects. Examples: .. code-block:: python ad.load_snippet( name='maps', package='com.google.maps.snippets') ad.maps.activateZoom('3') Args: name: string, the attribute name to which to attach the snippet client. E.g. `name='maps'` attaches the snippet client to `ad.maps`. package: string, the package name of the snippet apk to connect to. Raises: SnippetError: Illegal load operations are attempted. ] if call[name[hasattr], parameter[name[self], name[name]]] begin[:] <ast.Raise object at 0x7da1b086aef0> call[name[self].services.snippets.add_snippet_client, parameter[name[name], name[package]]]
keyword[def] identifier[load_snippet] ( identifier[self] , identifier[name] , identifier[package] ): literal[string] keyword[if] identifier[hasattr] ( identifier[self] , identifier[name] ): keyword[raise] identifier[SnippetError] ( identifier[self] , literal[string] % identifier[name] ) identifier[self] . identifier[services] . identifier[snippets] . identifier[add_snippet_client] ( identifier[name] , identifier[package] )
def load_snippet(self, name, package): """Starts the snippet apk with the given package name and connects. Examples: .. code-block:: python ad.load_snippet( name='maps', package='com.google.maps.snippets') ad.maps.activateZoom('3') Args: name: string, the attribute name to which to attach the snippet client. E.g. `name='maps'` attaches the snippet client to `ad.maps`. package: string, the package name of the snippet apk to connect to. Raises: SnippetError: Illegal load operations are attempted. """ # Should not load snippet with an existing attribute. if hasattr(self, name): raise SnippetError(self, 'Attribute "%s" already exists, please use a different name.' % name) # depends on [control=['if'], data=[]] self.services.snippets.add_snippet_client(name, package)
def _process_event(self, event, tagged_data): """Processes a single tf.Event and records it in tagged_data.""" event_type = event.WhichOneof('what') # Handle the most common case first. if event_type == 'summary': for value in event.summary.value: value = data_compat.migrate_value(value) tag, metadata, values = tagged_data.get(value.tag, (None, None, [])) values.append((event.step, event.wall_time, value.tensor)) if tag is None: # Store metadata only from the first event. tagged_data[value.tag] = sqlite_writer.TagData( value.tag, value.metadata, values) elif event_type == 'file_version': pass # TODO: reject file version < 2 (at loader level) elif event_type == 'session_log': if event.session_log.status == event_pb2.SessionLog.START: pass # TODO: implement purging via sqlite writer truncation method elif event_type in ('graph_def', 'meta_graph_def'): pass # TODO: support graphs elif event_type == 'tagged_run_metadata': pass
def function[_process_event, parameter[self, event, tagged_data]]: constant[Processes a single tf.Event and records it in tagged_data.] variable[event_type] assign[=] call[name[event].WhichOneof, parameter[constant[what]]] if compare[name[event_type] equal[==] constant[summary]] begin[:] for taget[name[value]] in starred[name[event].summary.value] begin[:] variable[value] assign[=] call[name[data_compat].migrate_value, parameter[name[value]]] <ast.Tuple object at 0x7da18c4ccd00> assign[=] call[name[tagged_data].get, parameter[name[value].tag, tuple[[<ast.Constant object at 0x7da18c4cf1c0>, <ast.Constant object at 0x7da18c4cead0>, <ast.List object at 0x7da18c4cf5b0>]]]] call[name[values].append, parameter[tuple[[<ast.Attribute object at 0x7da18c4cdfc0>, <ast.Attribute object at 0x7da18c4cd450>, <ast.Attribute object at 0x7da18c4ccbe0>]]]] if compare[name[tag] is constant[None]] begin[:] call[name[tagged_data]][name[value].tag] assign[=] call[name[sqlite_writer].TagData, parameter[name[value].tag, name[value].metadata, name[values]]]
keyword[def] identifier[_process_event] ( identifier[self] , identifier[event] , identifier[tagged_data] ): literal[string] identifier[event_type] = identifier[event] . identifier[WhichOneof] ( literal[string] ) keyword[if] identifier[event_type] == literal[string] : keyword[for] identifier[value] keyword[in] identifier[event] . identifier[summary] . identifier[value] : identifier[value] = identifier[data_compat] . identifier[migrate_value] ( identifier[value] ) identifier[tag] , identifier[metadata] , identifier[values] = identifier[tagged_data] . identifier[get] ( identifier[value] . identifier[tag] ,( keyword[None] , keyword[None] ,[])) identifier[values] . identifier[append] (( identifier[event] . identifier[step] , identifier[event] . identifier[wall_time] , identifier[value] . identifier[tensor] )) keyword[if] identifier[tag] keyword[is] keyword[None] : identifier[tagged_data] [ identifier[value] . identifier[tag] ]= identifier[sqlite_writer] . identifier[TagData] ( identifier[value] . identifier[tag] , identifier[value] . identifier[metadata] , identifier[values] ) keyword[elif] identifier[event_type] == literal[string] : keyword[pass] keyword[elif] identifier[event_type] == literal[string] : keyword[if] identifier[event] . identifier[session_log] . identifier[status] == identifier[event_pb2] . identifier[SessionLog] . identifier[START] : keyword[pass] keyword[elif] identifier[event_type] keyword[in] ( literal[string] , literal[string] ): keyword[pass] keyword[elif] identifier[event_type] == literal[string] : keyword[pass]
def _process_event(self, event, tagged_data): """Processes a single tf.Event and records it in tagged_data.""" event_type = event.WhichOneof('what') # Handle the most common case first. if event_type == 'summary': for value in event.summary.value: value = data_compat.migrate_value(value) (tag, metadata, values) = tagged_data.get(value.tag, (None, None, [])) values.append((event.step, event.wall_time, value.tensor)) if tag is None: # Store metadata only from the first event. tagged_data[value.tag] = sqlite_writer.TagData(value.tag, value.metadata, values) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['value']] # depends on [control=['if'], data=[]] elif event_type == 'file_version': pass # TODO: reject file version < 2 (at loader level) # depends on [control=['if'], data=[]] elif event_type == 'session_log': if event.session_log.status == event_pb2.SessionLog.START: pass # TODO: implement purging via sqlite writer truncation method # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] elif event_type in ('graph_def', 'meta_graph_def'): pass # TODO: support graphs # depends on [control=['if'], data=[]] elif event_type == 'tagged_run_metadata': pass # depends on [control=['if'], data=[]]
def parse_xml_data(self): """ Parses `xml_data` and loads it into object properties. """ self.raw_text = self.xml_data.find('raw_text').text self.station = WeatherStation(self.xml_data.find('station_id').text) self.station.latitude = float(self.xml_data.find('latitude').text) self.station.longitude = float(self.xml_data.find('longitude').text) self.station.elevation = float(self.xml_data.find('elevation_m').text) * 3.28084
def function[parse_xml_data, parameter[self]]: constant[ Parses `xml_data` and loads it into object properties. ] name[self].raw_text assign[=] call[name[self].xml_data.find, parameter[constant[raw_text]]].text name[self].station assign[=] call[name[WeatherStation], parameter[call[name[self].xml_data.find, parameter[constant[station_id]]].text]] name[self].station.latitude assign[=] call[name[float], parameter[call[name[self].xml_data.find, parameter[constant[latitude]]].text]] name[self].station.longitude assign[=] call[name[float], parameter[call[name[self].xml_data.find, parameter[constant[longitude]]].text]] name[self].station.elevation assign[=] binary_operation[call[name[float], parameter[call[name[self].xml_data.find, parameter[constant[elevation_m]]].text]] * constant[3.28084]]
keyword[def] identifier[parse_xml_data] ( identifier[self] ): literal[string] identifier[self] . identifier[raw_text] = identifier[self] . identifier[xml_data] . identifier[find] ( literal[string] ). identifier[text] identifier[self] . identifier[station] = identifier[WeatherStation] ( identifier[self] . identifier[xml_data] . identifier[find] ( literal[string] ). identifier[text] ) identifier[self] . identifier[station] . identifier[latitude] = identifier[float] ( identifier[self] . identifier[xml_data] . identifier[find] ( literal[string] ). identifier[text] ) identifier[self] . identifier[station] . identifier[longitude] = identifier[float] ( identifier[self] . identifier[xml_data] . identifier[find] ( literal[string] ). identifier[text] ) identifier[self] . identifier[station] . identifier[elevation] = identifier[float] ( identifier[self] . identifier[xml_data] . identifier[find] ( literal[string] ). identifier[text] )* literal[int]
def parse_xml_data(self): """ Parses `xml_data` and loads it into object properties. """ self.raw_text = self.xml_data.find('raw_text').text self.station = WeatherStation(self.xml_data.find('station_id').text) self.station.latitude = float(self.xml_data.find('latitude').text) self.station.longitude = float(self.xml_data.find('longitude').text) self.station.elevation = float(self.xml_data.find('elevation_m').text) * 3.28084
def min_distance_single(self, mesh, transform=None, return_name=False, return_data=False): """ Get the minimum distance between a single object and any object in the manager. Parameters --------------- mesh : Trimesh object The geometry of the collision object transform : (4,4) float Homogenous transform matrix for the object return_names : bool If true, return name of the closest object return_data : bool If true, a DistanceData object is returned as well Returns ------------- distance : float Min distance between mesh and any object in the manager name : str The name of the object in the manager that was closest data : DistanceData Extra data about the distance query """ if transform is None: transform = np.eye(4) # Create FCL data b = self._get_BVH(mesh) t = fcl.Transform(transform[:3, :3], transform[:3, 3]) o = fcl.CollisionObject(b, t) # Collide with manager's objects ddata = fcl.DistanceData() if return_data: ddata = fcl.DistanceData( fcl.DistanceRequest(enable_nearest_points=True), fcl.DistanceResult() ) self._manager.distance(o, ddata, fcl.defaultDistanceCallback) distance = ddata.result.min_distance # If we want to return the objects that were collision, collect them. name, data = None, None if return_name or return_data: cg = ddata.result.o1 if cg == b: cg = ddata.result.o2 name = self._extract_name(cg) names = (name, '__external') if cg == ddata.result.o2: names = reversed(names) data = DistanceData(names, ddata.result) if return_name and return_data: return distance, name, data elif return_name: return distance, name elif return_data: return distance, data else: return distance
def function[min_distance_single, parameter[self, mesh, transform, return_name, return_data]]: constant[ Get the minimum distance between a single object and any object in the manager. Parameters --------------- mesh : Trimesh object The geometry of the collision object transform : (4,4) float Homogenous transform matrix for the object return_names : bool If true, return name of the closest object return_data : bool If true, a DistanceData object is returned as well Returns ------------- distance : float Min distance between mesh and any object in the manager name : str The name of the object in the manager that was closest data : DistanceData Extra data about the distance query ] if compare[name[transform] is constant[None]] begin[:] variable[transform] assign[=] call[name[np].eye, parameter[constant[4]]] variable[b] assign[=] call[name[self]._get_BVH, parameter[name[mesh]]] variable[t] assign[=] call[name[fcl].Transform, parameter[call[name[transform]][tuple[[<ast.Slice object at 0x7da20c9902e0>, <ast.Slice object at 0x7da20c993d30>]]], call[name[transform]][tuple[[<ast.Slice object at 0x7da20c9920e0>, <ast.Constant object at 0x7da20c9939a0>]]]]] variable[o] assign[=] call[name[fcl].CollisionObject, parameter[name[b], name[t]]] variable[ddata] assign[=] call[name[fcl].DistanceData, parameter[]] if name[return_data] begin[:] variable[ddata] assign[=] call[name[fcl].DistanceData, parameter[call[name[fcl].DistanceRequest, parameter[]], call[name[fcl].DistanceResult, parameter[]]]] call[name[self]._manager.distance, parameter[name[o], name[ddata], name[fcl].defaultDistanceCallback]] variable[distance] assign[=] name[ddata].result.min_distance <ast.Tuple object at 0x7da20c993790> assign[=] tuple[[<ast.Constant object at 0x7da20c992d40>, <ast.Constant object at 0x7da20c992500>]] if <ast.BoolOp object at 0x7da20c990fa0> begin[:] variable[cg] assign[=] name[ddata].result.o1 if compare[name[cg] equal[==] name[b]] begin[:] variable[cg] assign[=] name[ddata].result.o2 variable[name] assign[=] call[name[self]._extract_name, parameter[name[cg]]] variable[names] assign[=] tuple[[<ast.Name object at 0x7da20c9924d0>, <ast.Constant object at 0x7da20c9900d0>]] if compare[name[cg] equal[==] name[ddata].result.o2] begin[:] variable[names] assign[=] call[name[reversed], parameter[name[names]]] variable[data] assign[=] call[name[DistanceData], parameter[name[names], name[ddata].result]] if <ast.BoolOp object at 0x7da1b23e5ae0> begin[:] return[tuple[[<ast.Name object at 0x7da1b23e5fc0>, <ast.Name object at 0x7da1b23e7640>, <ast.Name object at 0x7da1b23e6170>]]]
keyword[def] identifier[min_distance_single] ( identifier[self] , identifier[mesh] , identifier[transform] = keyword[None] , identifier[return_name] = keyword[False] , identifier[return_data] = keyword[False] ): literal[string] keyword[if] identifier[transform] keyword[is] keyword[None] : identifier[transform] = identifier[np] . identifier[eye] ( literal[int] ) identifier[b] = identifier[self] . identifier[_get_BVH] ( identifier[mesh] ) identifier[t] = identifier[fcl] . identifier[Transform] ( identifier[transform] [: literal[int] ,: literal[int] ], identifier[transform] [: literal[int] , literal[int] ]) identifier[o] = identifier[fcl] . identifier[CollisionObject] ( identifier[b] , identifier[t] ) identifier[ddata] = identifier[fcl] . identifier[DistanceData] () keyword[if] identifier[return_data] : identifier[ddata] = identifier[fcl] . identifier[DistanceData] ( identifier[fcl] . identifier[DistanceRequest] ( identifier[enable_nearest_points] = keyword[True] ), identifier[fcl] . identifier[DistanceResult] () ) identifier[self] . identifier[_manager] . identifier[distance] ( identifier[o] , identifier[ddata] , identifier[fcl] . identifier[defaultDistanceCallback] ) identifier[distance] = identifier[ddata] . identifier[result] . identifier[min_distance] identifier[name] , identifier[data] = keyword[None] , keyword[None] keyword[if] identifier[return_name] keyword[or] identifier[return_data] : identifier[cg] = identifier[ddata] . identifier[result] . identifier[o1] keyword[if] identifier[cg] == identifier[b] : identifier[cg] = identifier[ddata] . identifier[result] . identifier[o2] identifier[name] = identifier[self] . identifier[_extract_name] ( identifier[cg] ) identifier[names] =( identifier[name] , literal[string] ) keyword[if] identifier[cg] == identifier[ddata] . identifier[result] . identifier[o2] : identifier[names] = identifier[reversed] ( identifier[names] ) identifier[data] = identifier[DistanceData] ( identifier[names] , identifier[ddata] . identifier[result] ) keyword[if] identifier[return_name] keyword[and] identifier[return_data] : keyword[return] identifier[distance] , identifier[name] , identifier[data] keyword[elif] identifier[return_name] : keyword[return] identifier[distance] , identifier[name] keyword[elif] identifier[return_data] : keyword[return] identifier[distance] , identifier[data] keyword[else] : keyword[return] identifier[distance]
def min_distance_single(self, mesh, transform=None, return_name=False, return_data=False): """ Get the minimum distance between a single object and any object in the manager. Parameters --------------- mesh : Trimesh object The geometry of the collision object transform : (4,4) float Homogenous transform matrix for the object return_names : bool If true, return name of the closest object return_data : bool If true, a DistanceData object is returned as well Returns ------------- distance : float Min distance between mesh and any object in the manager name : str The name of the object in the manager that was closest data : DistanceData Extra data about the distance query """ if transform is None: transform = np.eye(4) # depends on [control=['if'], data=['transform']] # Create FCL data b = self._get_BVH(mesh) t = fcl.Transform(transform[:3, :3], transform[:3, 3]) o = fcl.CollisionObject(b, t) # Collide with manager's objects ddata = fcl.DistanceData() if return_data: ddata = fcl.DistanceData(fcl.DistanceRequest(enable_nearest_points=True), fcl.DistanceResult()) # depends on [control=['if'], data=[]] self._manager.distance(o, ddata, fcl.defaultDistanceCallback) distance = ddata.result.min_distance # If we want to return the objects that were collision, collect them. (name, data) = (None, None) if return_name or return_data: cg = ddata.result.o1 if cg == b: cg = ddata.result.o2 # depends on [control=['if'], data=['cg']] name = self._extract_name(cg) names = (name, '__external') if cg == ddata.result.o2: names = reversed(names) # depends on [control=['if'], data=[]] data = DistanceData(names, ddata.result) # depends on [control=['if'], data=[]] if return_name and return_data: return (distance, name, data) # depends on [control=['if'], data=[]] elif return_name: return (distance, name) # depends on [control=['if'], data=[]] elif return_data: return (distance, data) # depends on [control=['if'], data=[]] else: return distance
def fit_transform(self, X, y=None): """ Fit the imputer and then transform input `X` Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. """ X_original, missing_mask = self.prepare_input_data(X) observed_mask = ~missing_mask X = X_original.copy() if self.normalizer is not None: X = self.normalizer.fit_transform(X) X_filled = self.fill(X, missing_mask, inplace=True) if not isinstance(X_filled, np.ndarray): raise TypeError( "Expected %s.fill() to return NumPy array but got %s" % ( self.__class__.__name__, type(X_filled))) X_result = self.solve(X_filled, missing_mask) if not isinstance(X_result, np.ndarray): raise TypeError( "Expected %s.solve() to return NumPy array but got %s" % ( self.__class__.__name__, type(X_result))) X_result = self.project_result(X=X_result) X_result[observed_mask] = X_original[observed_mask] return X_result
def function[fit_transform, parameter[self, X, y]]: constant[ Fit the imputer and then transform input `X` Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. ] <ast.Tuple object at 0x7da2041d8fa0> assign[=] call[name[self].prepare_input_data, parameter[name[X]]] variable[observed_mask] assign[=] <ast.UnaryOp object at 0x7da2041dabf0> variable[X] assign[=] call[name[X_original].copy, parameter[]] if compare[name[self].normalizer is_not constant[None]] begin[:] variable[X] assign[=] call[name[self].normalizer.fit_transform, parameter[name[X]]] variable[X_filled] assign[=] call[name[self].fill, parameter[name[X], name[missing_mask]]] if <ast.UnaryOp object at 0x7da2041db700> begin[:] <ast.Raise object at 0x7da2041da860> variable[X_result] assign[=] call[name[self].solve, parameter[name[X_filled], name[missing_mask]]] if <ast.UnaryOp object at 0x7da2041dbf40> begin[:] <ast.Raise object at 0x7da2041d9840> variable[X_result] assign[=] call[name[self].project_result, parameter[]] call[name[X_result]][name[observed_mask]] assign[=] call[name[X_original]][name[observed_mask]] return[name[X_result]]
keyword[def] identifier[fit_transform] ( identifier[self] , identifier[X] , identifier[y] = keyword[None] ): literal[string] identifier[X_original] , identifier[missing_mask] = identifier[self] . identifier[prepare_input_data] ( identifier[X] ) identifier[observed_mask] =~ identifier[missing_mask] identifier[X] = identifier[X_original] . identifier[copy] () keyword[if] identifier[self] . identifier[normalizer] keyword[is] keyword[not] keyword[None] : identifier[X] = identifier[self] . identifier[normalizer] . identifier[fit_transform] ( identifier[X] ) identifier[X_filled] = identifier[self] . identifier[fill] ( identifier[X] , identifier[missing_mask] , identifier[inplace] = keyword[True] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[X_filled] , identifier[np] . identifier[ndarray] ): keyword[raise] identifier[TypeError] ( literal[string] %( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[type] ( identifier[X_filled] ))) identifier[X_result] = identifier[self] . identifier[solve] ( identifier[X_filled] , identifier[missing_mask] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[X_result] , identifier[np] . identifier[ndarray] ): keyword[raise] identifier[TypeError] ( literal[string] %( identifier[self] . identifier[__class__] . identifier[__name__] , identifier[type] ( identifier[X_result] ))) identifier[X_result] = identifier[self] . identifier[project_result] ( identifier[X] = identifier[X_result] ) identifier[X_result] [ identifier[observed_mask] ]= identifier[X_original] [ identifier[observed_mask] ] keyword[return] identifier[X_result]
def fit_transform(self, X, y=None): """ Fit the imputer and then transform input `X` Note: all imputations should have a `fit_transform` method, but only some (like IterativeImputer) also support inductive mode using `fit` or `fit_transform` on `X_train` and then `transform` on new `X_test`. """ (X_original, missing_mask) = self.prepare_input_data(X) observed_mask = ~missing_mask X = X_original.copy() if self.normalizer is not None: X = self.normalizer.fit_transform(X) # depends on [control=['if'], data=[]] X_filled = self.fill(X, missing_mask, inplace=True) if not isinstance(X_filled, np.ndarray): raise TypeError('Expected %s.fill() to return NumPy array but got %s' % (self.__class__.__name__, type(X_filled))) # depends on [control=['if'], data=[]] X_result = self.solve(X_filled, missing_mask) if not isinstance(X_result, np.ndarray): raise TypeError('Expected %s.solve() to return NumPy array but got %s' % (self.__class__.__name__, type(X_result))) # depends on [control=['if'], data=[]] X_result = self.project_result(X=X_result) X_result[observed_mask] = X_original[observed_mask] return X_result
def __check_config_key(self, key): """Check whether the key is valid. A valid key has the schema <section>.<option>. Keys supported are listed in CONFIG_OPTIONS dict. :param key: <section>.<option> key """ try: section, option = key.split('.') except (AttributeError, ValueError): return False if not section or not option: return False return section in Config.CONFIG_OPTIONS and\ option in Config.CONFIG_OPTIONS[section]
def function[__check_config_key, parameter[self, key]]: constant[Check whether the key is valid. A valid key has the schema <section>.<option>. Keys supported are listed in CONFIG_OPTIONS dict. :param key: <section>.<option> key ] <ast.Try object at 0x7da1b0e25030> if <ast.BoolOp object at 0x7da1b0e27280> begin[:] return[constant[False]] return[<ast.BoolOp object at 0x7da1b0e25060>]
keyword[def] identifier[__check_config_key] ( identifier[self] , identifier[key] ): literal[string] keyword[try] : identifier[section] , identifier[option] = identifier[key] . identifier[split] ( literal[string] ) keyword[except] ( identifier[AttributeError] , identifier[ValueError] ): keyword[return] keyword[False] keyword[if] keyword[not] identifier[section] keyword[or] keyword[not] identifier[option] : keyword[return] keyword[False] keyword[return] identifier[section] keyword[in] identifier[Config] . identifier[CONFIG_OPTIONS] keyword[and] identifier[option] keyword[in] identifier[Config] . identifier[CONFIG_OPTIONS] [ identifier[section] ]
def __check_config_key(self, key): """Check whether the key is valid. A valid key has the schema <section>.<option>. Keys supported are listed in CONFIG_OPTIONS dict. :param key: <section>.<option> key """ try: (section, option) = key.split('.') # depends on [control=['try'], data=[]] except (AttributeError, ValueError): return False # depends on [control=['except'], data=[]] if not section or not option: return False # depends on [control=['if'], data=[]] return section in Config.CONFIG_OPTIONS and option in Config.CONFIG_OPTIONS[section]
def _serial_sanitizer(instr): '''Replaces the last 1/4 of a string with X's''' length = len(instr) index = int(math.floor(length * .75)) return '{0}{1}'.format(instr[:index], 'X' * (length - index))
def function[_serial_sanitizer, parameter[instr]]: constant[Replaces the last 1/4 of a string with X's] variable[length] assign[=] call[name[len], parameter[name[instr]]] variable[index] assign[=] call[name[int], parameter[call[name[math].floor, parameter[binary_operation[name[length] * constant[0.75]]]]]] return[call[constant[{0}{1}].format, parameter[call[name[instr]][<ast.Slice object at 0x7da1b20240a0>], binary_operation[constant[X] * binary_operation[name[length] - name[index]]]]]]
keyword[def] identifier[_serial_sanitizer] ( identifier[instr] ): literal[string] identifier[length] = identifier[len] ( identifier[instr] ) identifier[index] = identifier[int] ( identifier[math] . identifier[floor] ( identifier[length] * literal[int] )) keyword[return] literal[string] . identifier[format] ( identifier[instr] [: identifier[index] ], literal[string] *( identifier[length] - identifier[index] ))
def _serial_sanitizer(instr): """Replaces the last 1/4 of a string with X's""" length = len(instr) index = int(math.floor(length * 0.75)) return '{0}{1}'.format(instr[:index], 'X' * (length - index))
def read_value_from_path(value): """Enables translators to read values from files. The value can be referred to with the `file://` prefix. ie: conf_key: ${kms file://kms_value.txt} """ if value.startswith('file://'): path = value.split('file://', 1)[1] config_directory = get_config_directory() relative_path = os.path.join(config_directory, path) with open(relative_path) as read_file: value = read_file.read() return value
def function[read_value_from_path, parameter[value]]: constant[Enables translators to read values from files. The value can be referred to with the `file://` prefix. ie: conf_key: ${kms file://kms_value.txt} ] if call[name[value].startswith, parameter[constant[file://]]] begin[:] variable[path] assign[=] call[call[name[value].split, parameter[constant[file://], constant[1]]]][constant[1]] variable[config_directory] assign[=] call[name[get_config_directory], parameter[]] variable[relative_path] assign[=] call[name[os].path.join, parameter[name[config_directory], name[path]]] with call[name[open], parameter[name[relative_path]]] begin[:] variable[value] assign[=] call[name[read_file].read, parameter[]] return[name[value]]
keyword[def] identifier[read_value_from_path] ( identifier[value] ): literal[string] keyword[if] identifier[value] . identifier[startswith] ( literal[string] ): identifier[path] = identifier[value] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ] identifier[config_directory] = identifier[get_config_directory] () identifier[relative_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[config_directory] , identifier[path] ) keyword[with] identifier[open] ( identifier[relative_path] ) keyword[as] identifier[read_file] : identifier[value] = identifier[read_file] . identifier[read] () keyword[return] identifier[value]
def read_value_from_path(value): """Enables translators to read values from files. The value can be referred to with the `file://` prefix. ie: conf_key: ${kms file://kms_value.txt} """ if value.startswith('file://'): path = value.split('file://', 1)[1] config_directory = get_config_directory() relative_path = os.path.join(config_directory, path) with open(relative_path) as read_file: value = read_file.read() # depends on [control=['with'], data=['read_file']] # depends on [control=['if'], data=[]] return value
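A rough standalone sketch of the same idea; get_config_directory belongs to the surrounding module, so this stand-in takes the directory as an argument instead, and kms_value.txt is a fabricated file name.

import os

def read_value_from_path_local(value, config_directory='.'):
    # Simplified stand-in: the real helper resolves the directory via
    # get_config_directory() instead of taking it as an argument.
    if value.startswith('file://'):
        path = value.split('file://', 1)[1]
        with open(os.path.join(config_directory, path)) as read_file:
            value = read_file.read()
    return value

with open('kms_value.txt', 'w') as fh:
    fh.write('decrypted-secret')

print(read_value_from_path_local('plain-value'))           # -> 'plain-value'
print(read_value_from_path_local('file://kms_value.txt'))  # -> 'decrypted-secret'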
def unique_id(self): """Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id) """ chain = self.parent.parent.id residue = self.parent.id return chain, residue, self.id
def function[unique_id, parameter[self]]: constant[Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id) ] variable[chain] assign[=] name[self].parent.parent.id variable[residue] assign[=] name[self].parent.id return[tuple[[<ast.Name object at 0x7da1b092dea0>, <ast.Name object at 0x7da1b092e470>, <ast.Attribute object at 0x7da1b092e0e0>]]]
keyword[def] identifier[unique_id] ( identifier[self] ): literal[string] identifier[chain] = identifier[self] . identifier[parent] . identifier[parent] . identifier[id] identifier[residue] = identifier[self] . identifier[parent] . identifier[id] keyword[return] identifier[chain] , identifier[residue] , identifier[self] . identifier[id]
def unique_id(self): """Creates a unique ID for the `Atom` based on its parents. Returns ------- unique_id : (str, str, str) (polymer.id, residue.id, atom.id) """ chain = self.parent.parent.id residue = self.parent.id return (chain, residue, self.id)
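The property above assumes an atom whose parent is a residue and whose grandparent is the polymer; a hypothetical minimal object graph shows the tuple it yields.

class _Node:
    # Bare-bones stand-in for the polymer/residue/atom objects the property expects.
    def __init__(self, id, parent=None):
        self.id = id
        self.parent = parent

polymer = _Node('A')
residue = _Node('42', parent=polymer)
atom = _Node('CA', parent=residue)

# Mirrors the body of unique_id: (polymer.id, residue.id, atom.id).
print((atom.parent.parent.id, atom.parent.id, atom.id))  # -> ('A', '42', 'CA')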
def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
    """Open group using metadata previously consolidated into a single key.

    This is an optimised method for opening a Zarr group, where instead of
    traversing the group/array hierarchy by accessing the metadata keys at
    each level, a single key contains all of the metadata for everything.
    This is particularly useful for remote data sources, where the overhead
    of accessing a key is large compared to the time to read data.

    The group accessed must have already had its metadata consolidated into a
    single key using the function :func:`consolidate_metadata`.

    This optimised method only works in modes which do not change the
    metadata, although the data may still be written/updated.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    metadata_key : str
        Key to read the consolidated metadata from. The default (.zmetadata)
        corresponds to the default used by :func:`consolidate_metadata`.
    mode : {'r', 'r+'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist) although only writes to data are allowed,
        changes to metadata including creation of new arrays or group
        are not allowed.
    **kwargs
        Additional parameters are passed through to
        :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`.

    Returns
    -------
    g : :class:`zarr.hierarchy.Group`
        Group instance, opened with the consolidated metadata.

    See Also
    --------
    consolidate_metadata

    """
    from .storage import ConsolidatedMetadataStore

    # normalize parameters
    store = normalize_store_arg(store)
    if mode not in {'r', 'r+'}:
        raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}"
                         .format(mode))

    # set up metadata store
    meta_store = ConsolidatedMetadataStore(store, metadata_key=metadata_key)

    # pass through
    return open(store=meta_store, chunk_store=store, mode=mode, **kwargs)
def function[open_consolidated, parameter[store, metadata_key, mode]]: constant[Open group using metadata previously consolidated into a single key. This is an optimised method for opening a Zarr group, where instead of traversing the group/array hierarchy by accessing the metadata keys at each level, a single key contains all of the metadata for everything. For remote data sources where the overhead of accessing a key is large compared to the time to read data. The group accessed must have already had its metadata consolidated into a single key using the function :func:`consolidate_metadata`. This optimised method only works in modes which do not change the metadata, although the data may still be written/updated. Parameters ---------- store : MutableMapping or string Store or path to directory in file system or name of zip file. metadata_key : str Key to read the consolidated metadata from. The default (.zmetadata) corresponds to the default used by :func:`consolidate_metadata`. mode : {'r', 'r+'}, optional Persistence mode: 'r' means read only (must exist); 'r+' means read/write (must exist) although only writes to data are allowed, changes to metadata including creation of new arrays or group are not allowed. **kwargs Additional parameters are passed through to :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`. Returns ------- g : :class:`zarr.hierarchy.Group` Group instance, opened with the consolidated metadata. See Also -------- consolidate_metadata ] from relative_module[storage] import module[ConsolidatedMetadataStore] variable[store] assign[=] call[name[normalize_store_arg], parameter[name[store]]] if compare[name[mode] <ast.NotIn object at 0x7da2590d7190> <ast.Set object at 0x7da1b18a35e0>] begin[:] <ast.Raise object at 0x7da1b18a2860> variable[meta_store] assign[=] call[name[ConsolidatedMetadataStore], parameter[name[store]]] return[call[name[open], parameter[]]]
keyword[def] identifier[open_consolidated] ( identifier[store] , identifier[metadata_key] = literal[string] , identifier[mode] = literal[string] ,** identifier[kwargs] ): literal[string] keyword[from] . identifier[storage] keyword[import] identifier[ConsolidatedMetadataStore] identifier[store] = identifier[normalize_store_arg] ( identifier[store] ) keyword[if] identifier[mode] keyword[not] keyword[in] { literal[string] , literal[string] }: keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mode] )) identifier[meta_store] = identifier[ConsolidatedMetadataStore] ( identifier[store] , identifier[metadata_key] = identifier[metadata_key] ) keyword[return] identifier[open] ( identifier[store] = identifier[meta_store] , identifier[chunk_store] = identifier[store] , identifier[mode] = identifier[mode] ,** identifier[kwargs] )
def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
    """Open group using metadata previously consolidated into a single key.

    This is an optimised method for opening a Zarr group, where instead of
    traversing the group/array hierarchy by accessing the metadata keys at
    each level, a single key contains all of the metadata for everything.
    This is particularly useful for remote data sources, where the overhead
    of accessing a key is large compared to the time to read data.

    The group accessed must have already had its metadata consolidated into a
    single key using the function :func:`consolidate_metadata`.

    This optimised method only works in modes which do not change the
    metadata, although the data may still be written/updated.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    metadata_key : str
        Key to read the consolidated metadata from. The default (.zmetadata)
        corresponds to the default used by :func:`consolidate_metadata`.
    mode : {'r', 'r+'}, optional
        Persistence mode: 'r' means read only (must exist); 'r+' means
        read/write (must exist) although only writes to data are allowed,
        changes to metadata including creation of new arrays or group
        are not allowed.
    **kwargs
        Additional parameters are passed through to
        :func:`zarr.creation.open_array` or :func:`zarr.hierarchy.open_group`.

    Returns
    -------
    g : :class:`zarr.hierarchy.Group`
        Group instance, opened with the consolidated metadata.

    See Also
    --------
    consolidate_metadata

    """
    from .storage import ConsolidatedMetadataStore

    # normalize parameters
    store = normalize_store_arg(store)
    if mode not in {'r', 'r+'}:
        raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}".format(mode)) # depends on [control=['if'], data=['mode']]

    # set up metadata store
    meta_store = ConsolidatedMetadataStore(store, metadata_key=metadata_key)

    # pass through
    return open(store=meta_store, chunk_store=store, mode=mode, **kwargs)
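A short end-to-end sketch of the consolidated-metadata round trip, assuming a zarr 2.x install; the store path is illustrative.

import zarr

# Build a tiny hierarchy, consolidate its metadata, then reopen it through the
# single consolidated key.
store = zarr.DirectoryStore('example.zarr')
root = zarr.group(store=store, overwrite=True)
root.create_dataset('x', shape=(10,), chunks=(5,), dtype='i4')
zarr.consolidate_metadata(store)

g = zarr.open_consolidated(store, mode='r')
print(g['x'].shape)  # -> (10,)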
def load(self, elem): """ Converts the inputted string tag to Python. :param elem | <xml.etree.ElementTree> :return <str> """ self.testTag(elem, 'str') return elem.text if elem.text is not None else ''
def function[load, parameter[self, elem]]: constant[ Converts the inputted string tag to Python. :param elem | <xml.etree.ElementTree> :return <str> ] call[name[self].testTag, parameter[name[elem], constant[str]]] return[<ast.IfExp object at 0x7da1b2774580>]
keyword[def] identifier[load] ( identifier[self] , identifier[elem] ): literal[string] identifier[self] . identifier[testTag] ( identifier[elem] , literal[string] ) keyword[return] identifier[elem] . identifier[text] keyword[if] identifier[elem] . identifier[text] keyword[is] keyword[not] keyword[None] keyword[else] literal[string]
def load(self, elem): """ Converts the inputted string tag to Python. :param elem | <xml.etree.ElementTree> :return <str> """ self.testTag(elem, 'str') return elem.text if elem.text is not None else ''
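A standalone sketch of the element handling, assuming the testTag check passes for a <str> element.

from xml.etree import ElementTree

elem = ElementTree.fromstring('<str>hello</str>')
empty = ElementTree.fromstring('<str/>')

# Same fallback as the loader body: missing text becomes an empty string.
print(elem.text if elem.text is not None else '')    # -> 'hello'
print(empty.text if empty.text is not None else '')  # -> ''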
def expand_source_paths(paths): """ Convert pyc files into their source equivalents.""" for src_path in paths: # only track the source path if we can find it to avoid double-reloads # when the source and the compiled path change because on some # platforms they are not changed at the same time if src_path.endswith(('.pyc', '.pyo')): py_path = get_py_path(src_path) if os.path.exists(py_path): src_path = py_path yield src_path
def function[expand_source_paths, parameter[paths]]: constant[ Convert pyc files into their source equivalents.] for taget[name[src_path]] in starred[name[paths]] begin[:] if call[name[src_path].endswith, parameter[tuple[[<ast.Constant object at 0x7da1b11a8c10>, <ast.Constant object at 0x7da1b11a8970>]]]] begin[:] variable[py_path] assign[=] call[name[get_py_path], parameter[name[src_path]]] if call[name[os].path.exists, parameter[name[py_path]]] begin[:] variable[src_path] assign[=] name[py_path] <ast.Yield object at 0x7da1b11a87c0>
keyword[def] identifier[expand_source_paths] ( identifier[paths] ): literal[string] keyword[for] identifier[src_path] keyword[in] identifier[paths] : keyword[if] identifier[src_path] . identifier[endswith] (( literal[string] , literal[string] )): identifier[py_path] = identifier[get_py_path] ( identifier[src_path] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[py_path] ): identifier[src_path] = identifier[py_path] keyword[yield] identifier[src_path]
def expand_source_paths(paths): """ Convert pyc files into their source equivalents.""" for src_path in paths: # only track the source path if we can find it to avoid double-reloads # when the source and the compiled path change because on some # platforms they are not changed at the same time if src_path.endswith(('.pyc', '.pyo')): py_path = get_py_path(src_path) if os.path.exists(py_path): src_path = py_path # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] yield src_path # depends on [control=['for'], data=['src_path']]
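A hedged, self-contained sketch: the real get_py_path helper lives elsewhere in the module, so the stand-in below simply drops the trailing 'c'/'o', and the temporary files are fabricated.

import os
import tempfile

def _get_py_path(path):
    # Stand-in for the module's get_py_path helper: drop the trailing 'c'/'o'.
    return path[:-1]

def _expand(paths):
    # Same shape as expand_source_paths, wired to the stand-in helper above.
    for src_path in paths:
        if src_path.endswith(('.pyc', '.pyo')):
            py_path = _get_py_path(src_path)
            if os.path.exists(py_path):
                src_path = py_path
        yield src_path

with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, 'mod.py')
    open(src, 'w').close()
    # The compiled path maps back to the existing source; the missing one passes through.
    print(list(_expand([src + 'c', os.path.join(tmp, 'missing.pyc')])))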