Dataset schema, one row per function:

| column | type |
|---|---|
| repo | string (7-55 chars) |
| path | string (4-127 chars) |
| func_name | string (1-88 chars) |
| original_string | string (75-19.8k chars) |
| language | string (1 class: python) |
| code | string (75-19.8k chars; repeats original_string) |
| code_tokens | sequence (tokenized code) |
| docstring | string (3-17.3k chars) |
| docstring_tokens | sequence (tokenized docstring) |
| sha | string (40 chars) |
| url | string (87-242 chars) |
| partition | string (1 class: train) |
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant.py | VariantMixin._format_variant | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant.py#L237-L323

```python
def _format_variant(self, case_id, gemini_variant, individual_objs,
                    index=0, add_all_info=False):
    """Make a puzzle variant from a gemini variant

    Args:
        case_id (str): related case id
        gemini_variant (GeminiQueryRow): The gemini variant
        individual_objs (list(dict)): A list of Individuals
        index (int): The index of the variant

    Returns:
        variant (dict): A Variant object
    """
    chrom = gemini_variant['chrom']
    if chrom.startswith('chr') or chrom.startswith('CHR'):
        chrom = chrom[3:]

    variant_dict = {
        'CHROM': chrom,
        'POS': str(gemini_variant['start']),
        'ID': gemini_variant['rs_ids'],
        'REF': gemini_variant['ref'],
        'ALT': gemini_variant['alt'],
        'QUAL': gemini_variant['qual'],
        'FILTER': gemini_variant['filter'],
    }

    variant = Variant(**variant_dict)
    # Use the gemini id for fast search
    variant.update_variant_id(gemini_variant['variant_id'])
    logger.debug("Creating a variant object of variant {0}".format(
        variant.variant_id))
    variant['index'] = index

    # Add the most severe consequence
    self._add_most_severe_consequence(variant, gemini_variant)
    # Add the impact severity
    self._add_impact_severity(variant, gemini_variant)

    ### POSITION ANNOTATIONS ###
    variant.start = int(gemini_variant['start'])
    variant.stop = int(gemini_variant['end'])

    # Add the sv specific coordinates
    if self.variant_type == 'sv':
        variant.sv_type = gemini_variant['sub_type']
        variant.stop = int(gemini_variant['end'])
        self._add_sv_coordinates(variant)
    else:
        ### Consequence and region annotations ###
        # Add the transcript information
        self._add_transcripts(variant, gemini_variant)
        self._add_thousand_g(variant, gemini_variant)
        self._add_exac(variant, gemini_variant)
        self._add_gmaf(variant, gemini_variant)

        #### Check the impact annotations ####
        if gemini_variant['cadd_scaled']:
            variant.cadd_score = gemini_variant['cadd_scaled']
        # We use the prediction in text
        polyphen = gemini_variant['polyphen_pred']
        if polyphen:
            variant.add_severity('Polyphen', polyphen)
        # We use the prediction in text
        sift = gemini_variant['sift_pred']
        if sift:
            variant.add_severity('SIFT', sift)

    # Add the genes based on the hgnc symbols
    self._add_hgnc_symbols(variant)
    if self.variant_type == 'snv':
        self._add_genes(variant)
    self._add_consequences(variant)

    ### GENOTYPE ANNOTATIONS ###
    # Get the genotype info
    if add_all_info:
        self._add_genotypes(variant, gemini_variant, case_id, individual_objs)
    if self.variant_type == 'sv':
        self._add_genes(variant)

    return variant
```
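The chromosome-prefix normalisation above is the only step with no puzzle dependencies; a minimal self-contained sketch of it (the row values are hypothetical, and a plain dict stands in for a GeminiQueryRow):

```python
gemini_variant = {'chrom': 'chr17', 'start': 41244435, 'rs_ids': 'rs80357906',
                  'ref': 'G', 'alt': 'A', 'qual': 99.0, 'filter': None}

chrom = gemini_variant['chrom']
if chrom.startswith('chr') or chrom.startswith('CHR'):
    chrom = chrom[3:]          # 'chr17' -> '17'

variant_dict = {
    'CHROM': chrom,
    'POS': str(gemini_variant['start']),
    'ID': gemini_variant['rs_ids'],
    'REF': gemini_variant['ref'],
    'ALT': gemini_variant['alt'],
    'QUAL': gemini_variant['qual'],
    'FILTER': gemini_variant['filter'],
}
print(variant_dict['CHROM'], variant_dict['POS'])   # 17 41244435
```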
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant.py | VariantMixin._is_variant | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant.py#L325-L343

```python
def _is_variant(self, gemini_variant, ind_objs):
    """Check if the variant is a variation in any of the individuals

    Args:
        gemini_variant (GeminiQueryRow): The gemini variant
        ind_objs (list(puzzle.models.individual)): A list of individuals to check

    Returns:
        bool: True if any of the individuals has the variant
    """
    indexes = (ind.ind_index for ind in ind_objs)
    # Check if any individual has a heterozygous or homozygous variant call
    for index in indexes:
        gt_call = gemini_variant['gt_types'][index]
        if gt_call == 1 or gt_call == 3:
            return True
    return False
```
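GEMINI encodes gt_types as 0 = HOM_REF, 1 = HET, 2 = UNKNOWN, 3 = HOM_ALT, so the `gt_call == 1 or gt_call == 3` test above means "carries at least one ALT allele". A self-contained illustration with hypothetical calls:

```python
gt_types = [0, 1, 3, 2]        # per-individual calls for one variant

def is_variant(indexes):
    # mirrors _is_variant: HET (1) or HOM_ALT (3) counts as carrying it
    return any(gt_types[i] in (1, 3) for i in indexes)

print(is_variant([0, 3]))      # False: a HOM_REF and an UNKNOWN call
print(is_variant([1]))         # True: a HET call
```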
robinandeer/puzzle | puzzle/models/mixins.py | PedigreeHumanMixin.is_affected | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/mixins.py#L17-L25

```python
def is_affected(self):
    """Boolean for telling if the sample is affected."""
    phenotype = self.phenotype
    if phenotype == '1':
        return False
    elif phenotype == '2':
        return True
    else:
        return False
```
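The string codes follow the PED pedigree format: '1' = unaffected, '2' = affected, and anything else (for example '0' or '-9' for missing) falls through to False. A one-line equivalent for illustration:

```python
for phenotype in ('1', '2', '0'):
    print(phenotype, phenotype == '2')   # only '2' counts as affected
```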
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/genelist.py | GeneListActions.gene_list | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/genelist.py#L6-L8

```python
def gene_list(self, list_id):
    """Get a gene list from the database."""
    return self.query(GeneList).filter_by(list_id=list_id).first()
```
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/genelist.py | GeneListActions.add_genelist | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/genelist.py#L14-L23

```python
def add_genelist(self, list_id, gene_ids, case_obj=None):
    """Create a new gene list and optionally link to cases."""
    new_genelist = GeneList(list_id=list_id)
    new_genelist.gene_ids = gene_ids
    if case_obj:
        new_genelist.cases.append(case_obj)
    self.session.add(new_genelist)
    self.save()
    return new_genelist
```
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/genelist.py | GeneListActions.remove_genelist | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/genelist.py#L25-L44

```python
def remove_genelist(self, list_id, case_obj=None):
    """Remove a gene list and links to cases."""
    gene_list = self.gene_list(list_id)
    if case_obj:
        # remove a single link between case and gene list
        case_ids = [case_obj.id]
    else:
        # remove all links and the list itself
        case_ids = [case.id for case in gene_list.cases]
        self.session.delete(gene_list)
    case_links = self.query(CaseGenelistLink).filter(
        CaseGenelistLink.case_id.in_(case_ids),
        CaseGenelistLink.genelist_id == gene_list.id
    )
    for case_link in case_links:
        self.session.delete(case_link)
    self.save()
```
robinandeer/puzzle | puzzle/plugins/sql/mixins/actions/genelist.py | GeneListActions.case_genelist | python | train
sha 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/genelist.py#L46-L56

```python
def case_genelist(self, case_obj):
    """Get or create a new case specific gene list record."""
    list_id = "{}-HPO".format(case_obj.case_id)
    gene_list = self.gene_list(list_id)
    if gene_list is None:
        gene_list = GeneList(list_id=list_id)
        case_obj.gene_lists.append(gene_list)
        self.session.add(gene_list)
    return gene_list
```
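Taken together, the four GeneListActions methods above form a small CRUD API. A hedged usage sketch, where `store` stands for an object mixing in GeneListActions and `case` for a loaded case record (both assumptions, not shown in this dump):

```python
gl = store.add_genelist('panel-1', ['BRCA1', 'BRCA2'], case_obj=case)
assert store.gene_list('panel-1').list_id == 'panel-1'

hpo_list = store.case_genelist(case)              # gets/creates "<case_id>-HPO"

store.remove_genelist('panel-1', case_obj=case)   # unlink from this case only
store.remove_genelist('panel-1')                  # delete the list and all links
```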
ldomic/lintools | lintools/figure.py | Figure.add_bigger_box | python | train
sha d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/figure.py#L57-L80

```python
def add_bigger_box(self):
    """
    Sets the size of the figure by expanding the space of the molecule.svg
    file. These dimensions have been previously determined. Also makes the
    lines of the molecule thicker.
    """
    start1 = "width='" + str(int(self.molecule.molsize1)) + "px' height='" + str(int(self.molecule.molsize2)) + "px' >"
    start2 = "<rect style='opacity:1.0;fill:#FFFFFF;stroke:none' width='" + str(int(self.molecule.molsize1)) + "' height='" + str(int(self.molecule.molsize2)) + "' x='0' y='0'> </rect>"
    bigger_box = "width='100%' height='100%' viewbox='0 0 " + str(int(self.molecule.x_dim)) + " " + str(int(self.molecule.y_dim)) + "' > "
    big_box2 = ("<rect style='opacity:1.0;fill:white;stroke:none' width='" + str(int(self.molecule.x_dim)) + "px' height='" + str(int(self.molecule.y_dim)) + "px' x='0' y='0'> </rect> <g id='molecularDrawing' transform='translate(" + str((self.molecule.x_dim - self.molecule.molsize1) / 2) + "," + str((self.molecule.y_dim - self.molecule.molsize2) / 2) + ")'>'<rect style='opacity:1.0;fill:#ffffff;stroke:none' width='" + str(self.molecule.molsize1) + "' height='" + str(self.molecule.molsize2) + "' x='0' y='0' /> ")
    self.end_symbol = "</svg>"
    no_end_symbol = "</g>"
    # Make the lines in the molecule drawing thicker to look better with the large plots
    linewidth1 = "stroke-width:2px"
    linewidth2 = "stroke-width:5px"
    self.change_lines_in_svg("molecule.svg", linewidth1, linewidth2)
    self.change_lines_in_svg("molecule.svg", start1, bigger_box)
    self.change_lines_in_svg("molecule.svg", start2, big_box2)
    self.change_lines_in_svg("molecule.svg", self.end_symbol, no_end_symbol)
    with open("molecule.svg", "r") as f:
        lines = f.readlines()
        self.filestart = " ".join(map(str, lines[0:8]))
        self.draw_molecule = "".join(map(str, lines[8:]))
```
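`change_lines_in_svg` is called four times above but not included in this dump. Judging from the call sites (filename, old substring, new substring), a plausible implementation is a whole-file string replacement; this is a sketch under that assumption, not the library's actual code:

```python
def change_lines_in_svg(self, filename, string1, string2):
    """Replace every occurrence of string1 with string2 in an SVG file."""
    with open(filename, "r") as f:
        text = f.read()
    with open(filename, "w") as f:
        f.write(text.replace(string1, string2))
```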
ellethee/argparseinator | argparseinator/__init__.py | extend_with | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L599-L602

```python
def extend_with(func):
    """Register a class or function as an ArgParseInator plugin."""
    if func.__name__ not in ArgParseInator._plugins:
        ArgParseInator._plugins[func.__name__] = func
```
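A minimal registration example for the plugin hook above (assuming `extend_with` is imported from argparseinator; the `shout` function is hypothetical):

```python
def shout(text):
    return text.upper()

extend_with(shout)
# ArgParseInator._plugins now maps 'shout' -> shout; registering another
# function with the same __name__ later would be a silent no-op.
```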
ellethee/argparseinator | argparseinator/__init__.py | arg | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L605-L648

```python
def arg(*args, **kwargs):
    """
    Decorates a function or a class method to add it to the argument parser.
    """
    def decorate(func):
        """
        Decorate.
        """
        # we'll set the command name from the passed cmd_name argument, if
        # it exists, else the command name will be the function name
        func.__cmd_name__ = kwargs.pop(
            'cmd_name', getattr(func, '__cmd_name__', func.__name__))
        # retrieve the class (SillyClass)
        func.__cls__ = utils.check_class()
        if not hasattr(func, '__arguments__'):
            # if the function doesn't have __arguments__ yet, we'll set
            # them up using get_functarguments.
            func.__arguments__ = utils.get_functarguments(func)
        if len(args) or len(kwargs):
            # if we have some argument or keyword argument,
            # we'll try to get the destination name from the kwargs ('dest'),
            # else we'll use the last arg name as destination
            arg_name = kwargs.get(
                'dest', args[-1].lstrip('-').replace('-', '_'))
            try:
                # we try to get the command index.
                idx = func.__named__.index(arg_name)
                # and delete it from the named list
                del func.__named__[idx]
                # and delete it from the arguments list
                del func.__arguments__[idx]
            except ValueError:
                pass
            # append the args and kwargs to the function arguments list
            func.__arguments__.append((args, kwargs,))
        if func.__cls__ is None and isinstance(func, types.FunctionType):
            # if the function doesn't have a class and is a FunctionType,
            # we'll add it directly to the commands list.
            ap_ = ArgParseInator(skip_init=True)
            if func.__cmd_name__ not in ap_.commands:
                # we'll add it if it doesn't exist
                ap_.commands[func.__cmd_name__] = func
        return func
    return decorate
```
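A hedged usage sketch for the decorator above: each @arg call passes its args/kwargs through to argparse-style add_argument, and a bare module-level function becomes a command named after itself (assuming `arg` is imported from argparseinator; the flag names are illustrative):

```python
@arg('--shout', action='store_true', help='upper-case the greeting')
@arg('name', help='who to greet')
def greet(name, shout=False):
    """Greet someone."""
    msg = "Hello {0}".format(name)
    print(msg.upper() if shout else msg)

# `script.py greet World --shout` would print "HELLO WORLD"
```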
ellethee/argparseinator | argparseinator/__init__.py | class_args | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L651-L687

```python
def class_args(cls):
    """
    Decorates a class to handle the arguments parser.
    """
    # get the Singleton
    ap_ = ArgParseInator(skip_init=True)
    # collect special vars (really needed?)
    utils.collect_appendvars(ap_, cls)
    # set class reference
    cls.__cls__ = cls
    cmds = {}
    # get eventual class arguments
    cls.__arguments__ = getattr(cls, '__arguments__', [])
    # cycle through class functions
    for func in [f for f in cls.__dict__.values()
                 if hasattr(f, '__cmd_name__') and not inspect.isclass(f)]:
        # clear subcommands
        func.__subcommands__ = None
        # set the parent class
        func.__cls__ = cls
        # assign to commands dict
        cmds[func.__cmd_name__] = func
    if hasattr(cls, '__cmd_name__') and cls.__cmd_name__ not in ap_.commands:
        # if the class has the __cmd_name__ attribute and is not already
        # present in the ArgParseInator commands,
        # set the class subcommands
        cls.__subcommands__ = cmds
        # and add the class as an ArgParseInator command
        ap_.commands[cls.__cmd_name__] = cls
    else:
        # else, if we don't have a __cmd_name__,
        # we add all the functions directly to the ArgParseInator commands,
        # if they don't already exist.
        for name, func in cmds.items():
            if name not in ap_.commands:
                ap_.commands[name] = func
    return cls
```
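A hedged sketch of the class form: with __cmd_name__ set, the class becomes a command and its @arg-decorated methods become subcommands (the class and method names are illustrative):

```python
@class_args
class Database(object):
    """Database maintenance commands."""
    __cmd_name__ = 'db'

    @arg('name', help='database name')
    def create(self, name):
        print("creating", name)

    @arg('name', help='database name')
    def drop(self, name):
        print("dropping", name)

# `script.py db create mydb` would dispatch to Database.create
```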
ellethee/argparseinator | argparseinator/__init__.py | cmd_auth | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L697-L718

```python
def cmd_auth(auth_phrase=None):
    """
    Set authorization for a command or subcommand.
    """
    def decorate(func):
        """
        Decorates the function.
        """
        # get the Singleton
        ap_ = ArgParseInator(skip_init=True)
        # set the authorization name
        auth_name = id(func)
        if auth_phrase is None:
            # if we don't have a specific auth_phrase, we set the
            # **authorization needed** flag to True
            ap_.auths[auth_name] = True
        else:
            # else, if we have a specific auth_phrase, we set it as the
            # command authorization
            ap_.auths[auth_name] = str(auth_phrase)
        return func
    return decorate
```
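A hedged example of gating a command; the check_auth method further down compares self.args.auth against the stored phrase, which implies an --auth command-line option (an assumption about the CLI spelling, and the command itself is illustrative):

```python
@cmd_auth('s3cret')
@arg()
def wipe():
    """Destructive command; run as: script.py wipe --auth s3cret"""
    print("wiped")

# with no phrase, @cmd_auth() requires the global auth_phrase instead
```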
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator.parse_args | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L318-L362

```python
def parse_args(self):
    """
    Parse our arguments.
    """
    # compile the parser
    self._compile()
    # clear the args
    self.args = None
    self._self_event('before_parse', 'parse', *sys.argv[1:], **{})
    # list commands/subcommands in argv
    cmds = [cmd for cmd in sys.argv[1:] if not cmd.startswith("-")]
    if (len(cmds) > 0 and not utils.check_help() and self.default_cmd
            and cmds[0] not in self.commands):
        # if we have at least one command which is not a help command,
        # we have a default command, and the first command in the arguments
        # is not in commands, we insert the default command as second
        # argument (actually the first command)
        sys.argv.insert(1, self.default_cmd)
    # let's parse the arguments
    self.args = self.parser.parse_args()
    # set up the output.
    if self.args:
        # if we have some arguments
        if self.add_output and self.args.output is not None:
            # If add_output is True and we have an output file,
            # set up the encoding
            self.encoding = self.args.encoding
            if self.args.encoding.lower() == 'raw':
                # if we passed a raw encoding we will write directly
                # to the output file.
                self._output = open(self.args.output, self.args.write_mode)
            else:
                # else we will use the codecs module to write to the
                # output file.
                import codecs
                self._output = codecs.open(
                    self.args.output, self.args.write_mode,
                    encoding=self.args.encoding)
        if self._cfg_factory:
            # if we have a config factory, set up the config file with the
            # right param
            self.cfg_file = self.args.config
    # now it is parsed.
    self._is_parsed = True
    return self
```
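The raw-vs-codecs branch above is the whole of the output wiring; the same logic as a standalone sketch (a hypothetical helper; the CLI flag spellings behind self.args.output / write_mode / encoding are not shown in this dump):

```python
import codecs

def open_output(path, write_mode='w', encoding='utf-8'):
    # mirrors parse_args: 'raw' bypasses codecs, anything else goes through it
    if encoding.lower() == 'raw':
        return open(path, write_mode)
    return codecs.open(path, write_mode, encoding=encoding)
```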
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator.check_auth | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L364-L380

```python
def check_auth(self, name):
    """
    Check the authorization for the command.
    """
    if name in self.auths:
        # if the command name is in the **needs authorization** list,
        # get the authorization for the command
        auth = self.auths[name]
        if self.args.auth is None:
            # if we didn't pass the authorization phrase, raise the
            # appropriate exception
            raise exceptions.ArgParseInatorAuthorizationRequired
        elif ((auth is True and self.args.auth != self.auth_phrase) or
              (auth is not True and self.args.auth != auth)):
            # else, if the authorization phrase is wrong
            raise exceptions.ArgParseInatorNotValidAuthorization
    return True
```
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator.check_command | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L382-L417

```python
def check_command(self, **new_attributes):
    """
    Check if a valid action was passed on the command line and, if so,
    execute it by passing parameters and returning the result.

    :return: (Any) Return the result of the called function, if provided,
        or None.
    """
    # let's parse the arguments if we didn't before.
    if not self._is_parsed:
        self.parse_args()
    if not self.commands:
        # if we don't have commands, raise an Exception
        raise exceptions.ArgParseInatorNoCommandsFound
    elif self._single:
        # if we have a single function, we get it directly
        func = self._single
    else:
        if not self.args.command:
            self.parser.error("too few arguments")
        # get the right command
        func = self.commands[self.args.command]
    if hasattr(func, '__subcommands__') and func.__subcommands__:
        # if we have subcommands, get the command from them
        command = func.__subcommands__[self.args.subcommand]
    else:
        # else the command IS the function
        command = func
    # get the command name
    self.cmd_name = command.__cmd_name__
    # check authorization
    if not self.check_auth(id(command)):
        return 0
    # let's execute the command.
    return self._execute(func, command, **new_attributes)
```
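check_command is the usual entry point; a hedged end-to-end sketch using only names visible in this dump (imports from argparseinator assumed):

```python
@arg('name', help='who to greet')
def hello(name):
    print("Hello", name)

if __name__ == '__main__':
    # parse_args runs implicitly because _is_parsed is still False
    ArgParseInator().check_command()   # e.g. `script.py hello World`
```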
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator._call_event | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L531-L550

```python
def _call_event(self, event_name, cmd, pargs, kwargs, **kws):
    """
    Try to call events for cmd.
    """
    def get_result_params(res):
        """Return the right list of params."""
        if not isinstance(res, (list, tuple)):
            return res, pargs, kwargs
        elif len(res) == 2:
            return res, pargs, kwargs
        return res[0], (pargs[0],) + tuple(res[1]), kwargs

    if hasattr(cmd, event_name):
        return get_result_params(
            getattr(cmd, event_name)(pargs[0], *pargs[1:], **kwargs))
    elif hasattr(cmd.__cls__, event_name):
        return get_result_params(
            getattr(cmd.__cls__, event_name)(
                pargs[0], cmd.__cmd_name__ or cmd.__name__, *pargs[1:],
                **kwargs))
    return None, pargs, kwargs
```
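The tuple convention handled above lets an event hook either return a plain result or additionally rewrite the positional arguments after the first one. A standalone re-implementation for illustration (not the module's code, though it mirrors the closure above):

```python
def get_result_params(res, pargs, kwargs):
    # scalar result: keep the original arguments
    if not isinstance(res, (list, tuple)):
        return res, pargs, kwargs
    # a 2-element sequence: also keep the original arguments
    if len(res) == 2:
        return res, pargs, kwargs
    # longer sequence: res[1] replaces everything after pargs[0]
    return res[0], (pargs[0],) + tuple(res[1]), kwargs

print(get_result_params(42, ('self', 1, 2), {}))
# (42, ('self', 1, 2), {})
print(get_result_params((0, [9, 9], None), ('self', 1, 2), {}))
# (0, ('self', 9, 9), {})
```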
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator._self_event | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L552-L555

```python
def _self_event(self, event_name, cmd, *pargs, **kwargs):
    """Call self event."""
    if hasattr(self, event_name):
        getattr(self, event_name)(cmd, *pargs, **kwargs)
```
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator.write | python | train
sha 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L565-L570

```python
def write(self, *string):
    """
    Writes to the output.
    """
    self._output.write(' '.join([six.text_type(s) for s in string]))
    return self
```
ellethee/argparseinator | argparseinator/__init__.py | ArgParseInator.exit | def exit(self, status=EXIT_OK, message=None):
"""
Terminate the script.
"""
if not self.parser:
self.parser = argparse.ArgumentParser()
if self.msg_on_error_only:
# if msg_on_error_only is True
if status != EXIT_OK:
# if we have an error we'll exit with the message also.
self.parser.exit(status, message)
else:
# else we'll exit with the status ongly
self.parser.exit(status, None)
else:
# else if msg_on_error_only is not True
# we'll exit with the status and the message
self.parser.exit(status, message) | python | def exit(self, status=EXIT_OK, message=None):
"""
Terminate the script.
"""
if not self.parser:
self.parser = argparse.ArgumentParser()
if self.msg_on_error_only:
# if msg_on_error_only is True
if status != EXIT_OK:
# if we have an error we'll exit with the message also.
self.parser.exit(status, message)
else:
# else we'll exit with the status only
self.parser.exit(status, None)
else:
# else if msg_on_error_only is not True
# we'll exit with the status and the message
self.parser.exit(status, message) | [
"def",
"exit",
"(",
"self",
",",
"status",
"=",
"EXIT_OK",
",",
"message",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"parser",
":",
"self",
".",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"if",
"self",
".",
"msg_on_error_only",
":",
"# if msg_on_error_only is True",
"if",
"status",
"!=",
"EXIT_OK",
":",
"# if we have an error we'll exit with the message also.",
"self",
".",
"parser",
".",
"exit",
"(",
"status",
",",
"message",
")",
"else",
":",
"# else we'll exit with the status ongly",
"self",
".",
"parser",
".",
"exit",
"(",
"status",
",",
"None",
")",
"else",
":",
"# else if msg_on_error_only is not True",
"# we'll exit with the status and the message",
"self",
".",
"parser",
".",
"exit",
"(",
"status",
",",
"message",
")"
] | Terminate the script. | [
"Terminate",
"the",
"script",
"."
] | 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__init__.py#L580-L597 | train |
ldomic/lintools | lintools/analysis/sasa.py | SASA.analyse_ligand_sasa | def analyse_ligand_sasa(self):
"""Analysis of ligand SASA."""
i=0
start = timer()
if self.trajectory == []:
self.trajectory = [self.topology_data.universe.filename]
try:
for traj in self.trajectory:
new_traj = mdtraj.load(traj,top=self.topology_data.universe.filename)
#Analyse only non-H ligand
ligand_slice = new_traj.atom_slice(atom_indices=self.topology_data.universe.ligand_noH.ids)
self.sasa = mdtraj.shrake_rupley(ligand_slice)
self.atom_sasa[i]=self.assign_per_atom_sasa()
i+=1
self.total_sasa = self.get_total_per_atom_sasa()
except KeyError as e:
print "WARNING: SASA analysis cannot be performed due to incorrect atom names in"
print "the topology ", e
print "SASA: "+str(timer()-start) | python | def analyse_ligand_sasa(self):
"""Analysis of ligand SASA."""
i=0
start = timer()
if self.trajectory == []:
self.trajectory = [self.topology_data.universe.filename]
try:
for traj in self.trajectory:
new_traj = mdtraj.load(traj,top=self.topology_data.universe.filename)
#Analyse only non-H ligand
ligand_slice = new_traj.atom_slice(atom_indices=self.topology_data.universe.ligand_noH.ids)
self.sasa = mdtraj.shrake_rupley(ligand_slice)
self.atom_sasa[i]=self.assign_per_atom_sasa()
i+=1
self.total_sasa = self.get_total_per_atom_sasa()
except KeyError as e:
print "WARNING: SASA analysis cannot be performed due to incorrect atom names in"
print "the topology ", e
print "SASA: "+str(timer()-start) | [
"def",
"analyse_ligand_sasa",
"(",
"self",
")",
":",
"i",
"=",
"0",
"start",
"=",
"timer",
"(",
")",
"if",
"self",
".",
"trajectory",
"==",
"[",
"]",
":",
"self",
".",
"trajectory",
"=",
"[",
"self",
".",
"topology_data",
".",
"universe",
".",
"filename",
"]",
"try",
":",
"for",
"traj",
"in",
"self",
".",
"trajectory",
":",
"new_traj",
"=",
"mdtraj",
".",
"load",
"(",
"traj",
",",
"top",
"=",
"self",
".",
"topology_data",
".",
"universe",
".",
"filename",
")",
"#Analyse only non-H ligand",
"ligand_slice",
"=",
"new_traj",
".",
"atom_slice",
"(",
"atom_indices",
"=",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand_noH",
".",
"ids",
")",
"self",
".",
"sasa",
"=",
"mdtraj",
".",
"shrake_rupley",
"(",
"ligand_slice",
")",
"self",
".",
"atom_sasa",
"[",
"i",
"]",
"=",
"self",
".",
"assign_per_atom_sasa",
"(",
")",
"i",
"+=",
"1",
"self",
".",
"total_sasa",
"=",
"self",
".",
"get_total_per_atom_sasa",
"(",
")",
"except",
"KeyError",
"as",
"e",
":",
"print",
"\"WARNING: SASA analysis cannot be performed due to incorrect atom names in\"",
"print",
"\"the topology \"",
",",
"e",
"print",
"\"SASA: \"",
"+",
"str",
"(",
"timer",
"(",
")",
"-",
"start",
")"
] | Analysis of ligand SASA. | [
"Analysis",
"of",
"ligand",
"SASA",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/sasa.py#L23-L43 | train |
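The mdtraj calls above can be exercised outside the class. A hedged sketch follows; the file names and the ligand selection string are placeholders (lintools instead slices by the precomputed non-hydrogen ligand atom ids).

```python
import mdtraj

# Placeholder paths; any topology/trajectory pair mdtraj can read works.
traj = mdtraj.load('traj.xtc', top='structure.pdb')
# Hypothetical ligand residue name, hydrogens excluded as in the method above.
ligand = traj.atom_slice(traj.top.select('resname LIG and not element H'))
sasa = mdtraj.shrake_rupley(ligand)  # array of shape (n_frames, n_atoms), in nm^2
print(sasa.shape)
```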
ldomic/lintools | lintools/analysis/sasa.py | SASA.assign_per_atom_sasa | def assign_per_atom_sasa(self):
"""Make a dictionary with SASA assigned to each ligand atom, stored as list of SASA values over
the simulation time."""
atom_names= [atom.name for atom in self.topology_data.universe.ligand_noH.atoms]
sasa_dict = {}
for atom in range(0,self.topology_data.universe.ligand_noH.n_atoms):
sasa_dict[atom_names[atom]]=[self.sasa[i][atom] for i in range(len(self.sasa))]
return sasa_dict | python | def assign_per_atom_sasa(self):
"""Make a dictionary with SASA assigned to each ligand atom, stored as list of SASA values over
the simulation time."""
atom_names= [atom.name for atom in self.topology_data.universe.ligand_noH.atoms]
sasa_dict = {}
for atom in range(0,self.topology_data.universe.ligand_noH.n_atoms):
sasa_dict[atom_names[atom]]=[self.sasa[i][atom] for i in range(len(self.sasa))]
return sasa_dict | [
"def",
"assign_per_atom_sasa",
"(",
"self",
")",
":",
"atom_names",
"=",
"[",
"atom",
".",
"name",
"for",
"atom",
"in",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand_noH",
".",
"atoms",
"]",
"sasa_dict",
"=",
"{",
"}",
"for",
"atom",
"in",
"range",
"(",
"0",
",",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand_noH",
".",
"n_atoms",
")",
":",
"sasa_dict",
"[",
"atom_names",
"[",
"atom",
"]",
"]",
"=",
"[",
"self",
".",
"sasa",
"[",
"i",
"]",
"[",
"atom",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"sasa",
")",
")",
"]",
"return",
"sasa_dict"
] | Make a dictionary with SASA assigned to each ligand atom, stored as list of SASA values over
the simulation time. | [
"Make",
"a",
"dictionary",
"with",
"SASA",
"assigned",
"to",
"each",
"ligand",
"atom",
"stored",
"as",
"list",
"of",
"SASA",
"values",
"over",
"the",
"simulation",
"time",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/sasa.py#L46-L53 | train |
ldomic/lintools | lintools/analysis/sasa.py | SASA.get_total_per_atom_sasa | def get_total_per_atom_sasa(self):
"""Return average SASA of the atoms."""
total_sasa = defaultdict(int)
for traj in range(len(self.atom_sasa)):
for atom in self.atom_sasa[traj]:
total_sasa[atom]+=float(sum((self.atom_sasa[traj][atom])))/len(self.atom_sasa[traj][atom])
for atom in total_sasa:
total_sasa[atom]=float(total_sasa[atom])/len(self.atom_sasa)
return total_sasa | python | def get_total_per_atom_sasa(self):
"""Return average SASA of the atoms."""
total_sasa = defaultdict(int)
for traj in range(len(self.atom_sasa)):
for atom in self.atom_sasa[traj]:
total_sasa[atom]+=float(sum((self.atom_sasa[traj][atom])))/len(self.atom_sasa[traj][atom])
for atom in total_sasa:
total_sasa[atom]=float(total_sasa[atom])/len(self.atom_sasa)
return total_sasa | [
"def",
"get_total_per_atom_sasa",
"(",
"self",
")",
":",
"total_sasa",
"=",
"defaultdict",
"(",
"int",
")",
"for",
"traj",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"atom_sasa",
")",
")",
":",
"for",
"atom",
"in",
"self",
".",
"atom_sasa",
"[",
"traj",
"]",
":",
"total_sasa",
"[",
"atom",
"]",
"+=",
"float",
"(",
"sum",
"(",
"(",
"self",
".",
"atom_sasa",
"[",
"traj",
"]",
"[",
"atom",
"]",
")",
")",
")",
"/",
"len",
"(",
"self",
".",
"atom_sasa",
"[",
"traj",
"]",
"[",
"atom",
"]",
")",
"for",
"atom",
"in",
"total_sasa",
":",
"total_sasa",
"[",
"atom",
"]",
"=",
"float",
"(",
"total_sasa",
"[",
"atom",
"]",
")",
"/",
"len",
"(",
"self",
".",
"atom_sasa",
")",
"return",
"total_sasa"
] | Return average SASA of the atoms. | [
"Return",
"average",
"SASA",
"of",
"the",
"atoms",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/sasa.py#L55-L63 | train |
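The double loop above computes an average of per-trajectory averages. A compact numpy check of that arithmetic for a single atom, with made-up SASA values:

```python
import numpy as np

# SASA values for one atom across frames, for two hypothetical trajectories.
per_traj = [np.array([0.10, 0.12, 0.14]), np.array([0.20, 0.22])]
total = np.mean([vals.mean() for vals in per_traj])
print(round(total, 3))  # (0.12 + 0.21) / 2 = 0.165
```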
thautwarm/Redy | Redy/Async/Accompany.py | Accompany.run | def run(self, *args):
"""
You can choose whether to use a lock method when running threads.
"""
if self.running:
return self
self._mut_finished(False) # in case of recovery from a disaster.
self._mut_running(True)
stream = self.target(*args)
# noinspection SpellCheckingInspection
def subr():
self._mut_running(True)
try:
for each in stream:
self._product = each
desc = self.descriptor_mapping(each)
event = self.events.get(desc)
if event:
event(self, each, globals)
self._mut_finished(True)
except ThreadExit:
pass
finally:
self._mut_running(False)
self._thread = thread = threading.Thread(target=subr, args=())
thread.start()
return self | python | def run(self, *args):
"""
You can choose whether to use a lock method when running threads.
"""
if self.running:
return self
self._mut_finished(False) # in case of recovery from a disaster.
self._mut_running(True)
stream = self.target(*args)
# noinspection SpellCheckingInspection
def subr():
self._mut_running(True)
try:
for each in stream:
self._product = each
desc = self.descriptor_mapping(each)
event = self.events.get(desc)
if event:
event(self, each, globals)
self._mut_finished(True)
except ThreadExit:
pass
finally:
self._mut_running(False)
self._thread = thread = threading.Thread(target=subr, args=())
thread.start()
return self | [
"def",
"run",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"running",
":",
"return",
"self",
"self",
".",
"_mut_finished",
"(",
"False",
")",
"# in case of recovery from a disaster.",
"self",
".",
"_mut_running",
"(",
"True",
")",
"stream",
"=",
"self",
".",
"target",
"(",
"*",
"args",
")",
"# noinspection SpellCheckingInspection",
"def",
"subr",
"(",
")",
":",
"self",
".",
"_mut_running",
"(",
"True",
")",
"try",
":",
"for",
"each",
"in",
"stream",
":",
"self",
".",
"_product",
"=",
"each",
"desc",
"=",
"self",
".",
"descriptor_mapping",
"(",
"each",
")",
"event",
"=",
"self",
".",
"events",
".",
"get",
"(",
"desc",
")",
"if",
"event",
":",
"event",
"(",
"self",
",",
"each",
",",
"globals",
")",
"self",
".",
"_mut_finished",
"(",
"True",
")",
"except",
"ThreadExit",
":",
"pass",
"finally",
":",
"self",
".",
"_mut_running",
"(",
"False",
")",
"self",
".",
"_thread",
"=",
"thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"subr",
",",
"args",
"=",
"(",
")",
")",
"thread",
".",
"start",
"(",
")",
"return",
"self"
] | You can choose whether to use a lock method when running threads. | [
"You",
"can",
"choose",
"whether",
"to",
"use",
"lock",
"method",
"when",
"running",
"threads",
"."
] | 8beee5c5f752edfd2754bb1e6b5f4acb016a7770 | https://github.com/thautwarm/Redy/blob/8beee5c5f752edfd2754bb1e6b5f4acb016a7770/Redy/Async/Accompany.py#L114-L144 | train |
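A self-contained toy version of the dispatch loop inside `run()`: a worker thread walks the product stream, maps each product to a descriptor, and calls the registered event handler. The real method additionally tracks running/finished state and swallows `ThreadExit`; the function and names below are illustrative, not the Redy API.

```python
import threading


def dispatch(target, descriptor_mapping, events):
    # Same shape as run(): iterate the stream on a worker thread and fire
    # the handler registered for each product's descriptor.
    def worker():
        for product in target():
            handler = events.get(descriptor_mapping(product))
            if handler:
                handler(product)

    thread = threading.Thread(target=worker)
    thread.start()
    return thread


evens = []
t = dispatch(lambda: range(6),
             lambda x: 'even' if x % 2 == 0 else 'odd',
             {'even': evens.append})
t.join()
print(evens)  # [0, 2, 4]
```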
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant_extras/consequences.py | ConsequenceExtras._add_consequences | def _add_consequences(self, variant_obj):
"""Add the consequences found in all transcripts
Args:
variant_obj (puzzle.models.Variant)
"""
consequences = set()
for transcript in variant_obj.transcripts:
for consequence in transcript.consequence.split('&'):
consequences.add(consequence)
variant_obj.consequences = list(consequences) | python | def _add_consequences(self, variant_obj):
"""Add the consequences found in all transcripts
Args:
variant_obj (puzzle.models.Variant)
"""
consequences = set()
for transcript in variant_obj.transcripts:
for consequence in transcript.consequence.split('&'):
consequences.add(consequence)
variant_obj.consequences = list(consequences) | [
"def",
"_add_consequences",
"(",
"self",
",",
"variant_obj",
")",
":",
"consequences",
"=",
"set",
"(",
")",
"for",
"transcript",
"in",
"variant_obj",
".",
"transcripts",
":",
"for",
"consequence",
"in",
"transcript",
".",
"consequence",
".",
"split",
"(",
"'&'",
")",
":",
"consequences",
".",
"add",
"(",
"consequence",
")",
"variant_obj",
".",
"consequences",
"=",
"list",
"(",
"consequences",
")"
] | Add the consequences found in all transcripts
Args:
variant_obj (puzzle.models.Variant) | [
"Add",
"the",
"consequences",
"found",
"in",
"all",
"transcripts"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant_extras/consequences.py#L8-L20 | train |
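The `'&'`-splitting above deduplicates VEP-style consequence strings across transcripts. A standalone illustration with example annotation values:

```python
transcript_consequences = ['missense_variant&splice_region_variant',
                           'missense_variant']
consequences = set()
for annotation in transcript_consequences:
    for consequence in annotation.split('&'):
        consequences.add(consequence)  # the set removes the duplicate
print(sorted(consequences))
# ['missense_variant', 'splice_region_variant']
```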
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/genes.py | GeneExtras._add_hgnc_symbols | def _add_hgnc_symbols(self, variant_obj):
"""Add hgnc symbols to the variant
If there are transcripts use the symbols found here,
otherwise use phizz to get the gene ids.
"""
hgnc_symbols = set()
if variant_obj.transcripts:
for transcript in variant_obj.transcripts:
if transcript.hgnc_symbol:
hgnc_symbols.add(transcript.hgnc_symbol)
else:
chrom = variant_obj.CHROM
start = variant_obj.start
stop = variant_obj.stop
hgnc_symbols = get_gene_symbols(chrom, start, stop)
#Make unique ids
variant_obj.gene_symbols = list(hgnc_symbols) | python | def _add_hgnc_symbols(self, variant_obj):
"""Add hgnc symbols to the variant
If there are transcripts use the symbols found here,
otherwise use phizz to get the gene ids.
"""
hgnc_symbols = set()
if variant_obj.transcripts:
for transcript in variant_obj.transcripts:
if transcript.hgnc_symbol:
hgnc_symbols.add(transcript.hgnc_symbol)
else:
chrom = variant_obj.CHROM
start = variant_obj.start
stop = variant_obj.stop
hgnc_symbols = get_gene_symbols(chrom, start, stop)
#Make unique ids
variant_obj.gene_symbols = list(hgnc_symbols) | [
"def",
"_add_hgnc_symbols",
"(",
"self",
",",
"variant_obj",
")",
":",
"hgnc_symbols",
"=",
"set",
"(",
")",
"if",
"variant_obj",
".",
"transcripts",
":",
"for",
"transcript",
"in",
"variant_obj",
".",
"transcripts",
":",
"if",
"transcript",
".",
"hgnc_symbol",
":",
"hgnc_symbols",
".",
"add",
"(",
"transcript",
".",
"hgnc_symbol",
")",
"else",
":",
"chrom",
"=",
"variant_obj",
".",
"CHROM",
"start",
"=",
"variant_obj",
".",
"start",
"stop",
"=",
"variant_obj",
".",
"stop",
"hgnc_symbols",
"=",
"get_gene_symbols",
"(",
"chrom",
",",
"start",
",",
"stop",
")",
"#Make unique ids",
"variant_obj",
".",
"gene_symbols",
"=",
"list",
"(",
"hgnc_symbols",
")"
] | Add hgnc symbols to the variant
If there are transcripts use the symbols found here,
otherwise use phizz to get the gene ids. | [
"Add",
"hgnc",
"symbols",
"to",
"the",
"variant",
"If",
"there",
"are",
"transcripts",
"use",
"the",
"symbols",
"found",
"here",
"otherwise",
"use",
"phizz",
"to",
"get",
"the",
"gene",
"ids",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/genes.py#L7-L25 | train |
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_extras/genes.py | GeneExtras._add_genes | def _add_genes(self, variant_obj):
"""Add the Gene objects for a variant"""
genes = []
ensembl_ids = []
hgnc_symbols = []
if variant_obj.transcripts:
for transcript in variant_obj.transcripts:
if transcript.ensembl_id:
ensembl_ids.append(transcript.ensembl_id)
if transcript.hgnc_symbol:
hgnc_symbols.append(transcript.hgnc_symbol)
else:
hgnc_symbols = variant_obj.gene_symbols
genes = get_gene_info(
ensembl_ids=ensembl_ids,
hgnc_symbols=hgnc_symbols
)
for gene in genes:
variant_obj.add_gene(gene) | python | def _add_genes(self, variant_obj):
"""Add the Gene objects for a variant"""
genes = []
ensembl_ids = []
hgnc_symbols = []
if variant_obj.transcripts:
for transcript in variant_obj.transcripts:
if transcript.ensembl_id:
ensembl_ids.append(transcript.ensembl_id)
if transcript.hgnc_symbol:
hgnc_symbols.append(transcript.hgnc_symbol)
else:
hgnc_symbols = variant_obj.gene_symbols
genes = get_gene_info(
ensembl_ids=ensembl_ids,
hgnc_symbols=hgnc_symbols
)
for gene in genes:
variant_obj.add_gene(gene) | [
"def",
"_add_genes",
"(",
"self",
",",
"variant_obj",
")",
":",
"genes",
"=",
"[",
"]",
"ensembl_ids",
"=",
"[",
"]",
"hgnc_symbols",
"=",
"[",
"]",
"if",
"variant_obj",
".",
"transcripts",
":",
"for",
"transcript",
"in",
"variant_obj",
".",
"transcripts",
":",
"if",
"transcript",
".",
"ensembl_id",
":",
"ensembl_ids",
".",
"append",
"(",
"transcript",
".",
"ensembl_id",
")",
"if",
"transcript",
".",
"hgnc_symbol",
":",
"hgnc_symbols",
".",
"append",
"(",
"transcript",
".",
"hgnc_symbol",
")",
"else",
":",
"hgnc_symbols",
"=",
"variant_obj",
".",
"gene_symbols",
"genes",
"=",
"get_gene_info",
"(",
"ensembl_ids",
"=",
"ensembl_ids",
",",
"hgnc_symbols",
"=",
"hgnc_symbols",
")",
"for",
"gene",
"in",
"genes",
":",
"variant_obj",
".",
"add_gene",
"(",
"gene",
")"
] | Add the Gene objects for a variant | [
"Add",
"the",
"Gene",
"objects",
"for",
"a",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/genes.py#L27-L49 | train |
eleme/meepo | meepo/apps/eventsourcing/prepare_commit.py | _redis_strict_pc | def _redis_strict_pc(func):
"""Strict deco for RedisPrepareCommit
The deco will choose whether to silence exceptions or not based on the
strict attr in the RedisPrepareCommit object.
"""
phase = "session_%s" % func.__name__
@functools.wraps(func)
def wrapper(self, session, *args, **kwargs):
try:
func(self, session, *args, **kwargs)
self.logger.debug("%s -> %s" % (session.meepo_unique_id, phase))
return True
except Exception as e:
if self.strict:
raise
if isinstance(e, redis.ConnectionError):
self.logger.warn("redis connection error in %s: %s" % (
phase, session.meepo_unique_id))
else:
self.logger.exception(e)
return False
return wrapper | python | def _redis_strict_pc(func):
"""Strict deco for RedisPrepareCommit
The deco will choose whether to silence exceptions or not based on the
strict attr in the RedisPrepareCommit object.
"""
phase = "session_%s" % func.__name__
@functools.wraps(func)
def wrapper(self, session, *args, **kwargs):
try:
func(self, session, *args, **kwargs)
self.logger.debug("%s -> %s" % (session.meepo_unique_id, phase))
return True
except Exception as e:
if self.strict:
raise
if isinstance(e, redis.ConnectionError):
self.logger.warn("redis connection error in %s: %s" % (
phase, session.meepo_unique_id))
else:
self.logger.exception(e)
return False
return wrapper | [
"def",
"_redis_strict_pc",
"(",
"func",
")",
":",
"phase",
"=",
"\"session_%s\"",
"%",
"func",
".",
"__name__",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"self",
",",
"session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"func",
"(",
"self",
",",
"session",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s -> %s\"",
"%",
"(",
"session",
".",
"meepo_unique_id",
",",
"phase",
")",
")",
"return",
"True",
"except",
"Exception",
"as",
"e",
":",
"if",
"self",
".",
"strict",
":",
"raise",
"if",
"isinstance",
"(",
"e",
",",
"redis",
".",
"ConnectionError",
")",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"\"redis connection error in %s: %s\"",
"%",
"(",
"phase",
",",
"session",
".",
"meepo_unique_id",
")",
")",
"else",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"e",
")",
"return",
"False",
"return",
"wrapper"
] | Strict deco for RedisPrepareCommit
The deco will choose whether to silence exceptions or not based on the
strict attr in the RedisPrepareCommit object. | [
"Strict",
"deco",
"for",
"RedisPrepareCommit"
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/prepare_commit.py#L45-L68 | train |
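A stripped-down version of the strict/silent pattern the decorator implements: the wrapper reports success as a boolean and either re-raises or swallows the error depending on the instance's `strict` flag. Logging and the redis-specific branch are elided; the class below is illustrative.

```python
import functools


def strict_pc(func):
    @functools.wraps(func)
    def wrapper(self, session):
        try:
            func(self, session)
            return True
        except Exception:
            if self.strict:
                raise          # strict mode: propagate the failure
            return False       # lenient mode: report it as a boolean
    return wrapper


class PrepareCommit(object):
    def __init__(self, strict):
        self.strict = strict

    @strict_pc
    def prepare(self, session):
        raise RuntimeError('redis unavailable')


print(PrepareCommit(strict=False).prepare('sess-1'))  # False
# PrepareCommit(strict=True).prepare('sess-1')        # would raise RuntimeError
```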
eleme/meepo | meepo/apps/eventsourcing/prepare_commit.py | RedisPrepareCommit.phase | def phase(self, session):
"""Determine the session phase in prepare commit.
:param session: sqlalchemy session
:return: phase "prepare" or "commit"
"""
sp_key, _ = self._keygen(session)
if self.r.sismember(sp_key, session.meepo_unique_id):
return "prepare"
else:
return "commit" | python | def phase(self, session):
"""Determine the session phase in prepare commit.
:param session: sqlalchemy session
:return: phase "prepare" or "commit"
"""
sp_key, _ = self._keygen(session)
if self.r.sismember(sp_key, session.meepo_unique_id):
return "prepare"
else:
return "commit" | [
"def",
"phase",
"(",
"self",
",",
"session",
")",
":",
"sp_key",
",",
"_",
"=",
"self",
".",
"_keygen",
"(",
"session",
")",
"if",
"self",
".",
"r",
".",
"sismember",
"(",
"sp_key",
",",
"session",
".",
"meepo_unique_id",
")",
":",
"return",
"\"prepare\"",
"else",
":",
"return",
"\"commit\""
] | Determine the session phase in prepare commit.
:param session: sqlalchemy session
:return: phase "prepare" or "commit" | [
"Determine",
"the",
"session",
"phase",
"in",
"prepare",
"commit",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/prepare_commit.py#L113-L123 | train |
eleme/meepo | meepo/apps/eventsourcing/prepare_commit.py | RedisPrepareCommit.prepare | def prepare(self, session, event):
"""Prepare phase for session.
:param session: sqlalchemy session
"""
if not event:
self.logger.warn("event empty!")
return
sp_key, sp_hkey = self._keygen(session)
def _pk(obj):
pk_values = tuple(getattr(obj, c.name)
for c in obj.__mapper__.primary_key)
if len(pk_values) == 1:
return pk_values[0]
return pk_values
def _get_dump_value(value):
if hasattr(value, '__mapper__'):
return _pk(value)
return value
pickled_event = {
k: pickle.dumps({_get_dump_value(obj) for obj in objs})
for k, objs in event.items()}
with self.r.pipeline(transaction=False) as p:
p.sadd(sp_key, session.meepo_unique_id)
p.hmset(sp_hkey, pickled_event)
p.execute() | python | def prepare(self, session, event):
"""Prepare phase for session.
:param session: sqlalchemy session
"""
if not event:
self.logger.warn("event empty!")
return
sp_key, sp_hkey = self._keygen(session)
def _pk(obj):
pk_values = tuple(getattr(obj, c.name)
for c in obj.__mapper__.primary_key)
if len(pk_values) == 1:
return pk_values[0]
return pk_values
def _get_dump_value(value):
if hasattr(value, '__mapper__'):
return _pk(value)
return value
pickled_event = {
k: pickle.dumps({_get_dump_value(obj) for obj in objs})
for k, objs in event.items()}
with self.r.pipeline(transaction=False) as p:
p.sadd(sp_key, session.meepo_unique_id)
p.hmset(sp_hkey, pickled_event)
p.execute() | [
"def",
"prepare",
"(",
"self",
",",
"session",
",",
"event",
")",
":",
"if",
"not",
"event",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"\"event empty!\"",
")",
"return",
"sp_key",
",",
"sp_hkey",
"=",
"self",
".",
"_keygen",
"(",
"session",
")",
"def",
"_pk",
"(",
"obj",
")",
":",
"pk_values",
"=",
"tuple",
"(",
"getattr",
"(",
"obj",
",",
"c",
".",
"name",
")",
"for",
"c",
"in",
"obj",
".",
"__mapper__",
".",
"primary_key",
")",
"if",
"len",
"(",
"pk_values",
")",
"==",
"1",
":",
"return",
"pk_values",
"[",
"0",
"]",
"return",
"pk_values",
"def",
"_get_dump_value",
"(",
"value",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'__mapper__'",
")",
":",
"return",
"_pk",
"(",
"value",
")",
"return",
"value",
"pickled_event",
"=",
"{",
"k",
":",
"pickle",
".",
"dumps",
"(",
"{",
"_get_dump_value",
"(",
"obj",
")",
"for",
"obj",
"in",
"objs",
"}",
")",
"for",
"k",
",",
"objs",
"in",
"event",
".",
"items",
"(",
")",
"}",
"with",
"self",
".",
"r",
".",
"pipeline",
"(",
"transaction",
"=",
"False",
")",
"as",
"p",
":",
"p",
".",
"sadd",
"(",
"sp_key",
",",
"session",
".",
"meepo_unique_id",
")",
"p",
".",
"hmset",
"(",
"sp_hkey",
",",
"pickled_event",
")",
"p",
".",
"execute",
"(",
")"
] | Prepare phase for session.
:param session: sqlalchemy session | [
"Prepare",
"phase",
"for",
"session",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/prepare_commit.py#L126-L154 | train |
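A toy model of the two-phase bookkeeping that `prepare()` and `commit()` maintain, with a plain Python set standing in for the Redis `session_prepare` set (key names, pipelines, and the pickled payloads are elided):

```python
prepared = set()   # stands in for the "<namespace>:session_prepare" set


def prepare(unique_id):
    prepared.add(unique_id)      # SADD, plus HMSET of the pickled event


def commit(unique_id):
    prepared.discard(unique_id)  # SREM, plus EXPIRE on the event hash


def phase(unique_id):
    # Mirrors RedisPrepareCommit.phase(): still in the set means "prepare".
    return 'prepare' if unique_id in prepared else 'commit'


prepare('txn-1')
print(phase('txn-1'))  # prepare
commit('txn-1')
print(phase('txn-1'))  # commit
```

Any id left in the set therefore marks a transaction that prepared but never committed.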
eleme/meepo | meepo/apps/eventsourcing/prepare_commit.py | RedisPrepareCommit.commit | def commit(self, session):
"""Commit phase for session.
:param session: sqlalchemy session
"""
sp_key, sp_hkey = self._keygen(session)
with self.r.pipeline(transaction=False) as p:
p.srem(sp_key, session.meepo_unique_id)
p.expire(sp_hkey, 60 * 60)
p.execute() | python | def commit(self, session):
"""Commit phase for session.
:param session: sqlalchemy session
"""
sp_key, sp_hkey = self._keygen(session)
with self.r.pipeline(transaction=False) as p:
p.srem(sp_key, session.meepo_unique_id)
p.expire(sp_hkey, 60 * 60)
p.execute() | [
"def",
"commit",
"(",
"self",
",",
"session",
")",
":",
"sp_key",
",",
"sp_hkey",
"=",
"self",
".",
"_keygen",
"(",
"session",
")",
"with",
"self",
".",
"r",
".",
"pipeline",
"(",
"transaction",
"=",
"False",
")",
"as",
"p",
":",
"p",
".",
"srem",
"(",
"sp_key",
",",
"session",
".",
"meepo_unique_id",
")",
"p",
".",
"expire",
"(",
"sp_hkey",
",",
"60",
"*",
"60",
")",
"p",
".",
"execute",
"(",
")"
] | Commit phase for session.
:param session: sqlalchemy session | [
"Commit",
"phase",
"for",
"session",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/prepare_commit.py#L157-L166 | train |
eleme/meepo | meepo/apps/eventsourcing/prepare_commit.py | RedisPrepareCommit.clear | def clear(self, ts=None):
"""Clear all session in prepare phase.
:param ts: timestamp used locate the namespace
"""
sp_key = "%s:session_prepare" % self.namespace(ts or int(time.time()))
return self.r.delete(sp_key) | python | def clear(self, ts=None):
"""Clear all session in prepare phase.
:param ts: timestamp used locate the namespace
"""
sp_key = "%s:session_prepare" % self.namespace(ts or int(time.time()))
return self.r.delete(sp_key) | [
"def",
"clear",
"(",
"self",
",",
"ts",
"=",
"None",
")",
":",
"sp_key",
"=",
"\"%s:session_prepare\"",
"%",
"self",
".",
"namespace",
"(",
"ts",
"or",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
"return",
"self",
".",
"r",
".",
"delete",
"(",
"sp_key",
")"
] | Clear all sessions in prepare phase.
:param ts: timestamp used to locate the namespace | [
"Clear",
"all",
"session",
"in",
"prepare",
"phase",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/prepare_commit.py#L190-L196 | train |
robinandeer/puzzle | puzzle/cli/cases.py | cases | def cases(ctx, root):
"""
Show all cases in the database.
If no database was found run puzzle init first.
"""
root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
if os.path.isfile(root):
logger.error("'root' can't be a file")
ctx.abort()
logger.info("Root directory is: {}".format(root))
db_path = os.path.join(root, 'puzzle_db.sqlite3')
logger.info("db path is: {}".format(db_path))
if not os.path.exists(db_path):
logger.warn("database not initialized, run 'puzzle init'")
ctx.abort()
store = SqlStore(db_path)
for case in store.cases():
click.echo(case) | python | def cases(ctx, root):
"""
Show all cases in the database.
If no database was found run puzzle init first.
"""
root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
if os.path.isfile(root):
logger.error("'root' can't be a file")
ctx.abort()
logger.info("Root directory is: {}".format(root))
db_path = os.path.join(root, 'puzzle_db.sqlite3')
logger.info("db path is: {}".format(db_path))
if not os.path.exists(db_path):
logger.warn("database not initialized, run 'puzzle init'")
ctx.abort()
store = SqlStore(db_path)
for case in store.cases():
click.echo(case) | [
"def",
"cases",
"(",
"ctx",
",",
"root",
")",
":",
"root",
"=",
"root",
"or",
"ctx",
".",
"obj",
".",
"get",
"(",
"'root'",
")",
"or",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.puzzle\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"root",
")",
":",
"logger",
".",
"error",
"(",
"\"'root' can't be a file\"",
")",
"ctx",
".",
"abort",
"(",
")",
"logger",
".",
"info",
"(",
"\"Root directory is: {}\"",
".",
"format",
"(",
"root",
")",
")",
"db_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"'puzzle_db.sqlite3'",
")",
"logger",
".",
"info",
"(",
"\"db path is: {}\"",
".",
"format",
"(",
"db_path",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"db_path",
")",
":",
"logger",
".",
"warn",
"(",
"\"database not initialized, run 'puzzle init'\"",
")",
"ctx",
".",
"abort",
"(",
")",
"store",
"=",
"SqlStore",
"(",
"db_path",
")",
"for",
"case",
"in",
"store",
".",
"cases",
"(",
")",
":",
"click",
".",
"echo",
"(",
"case",
")"
] | Show all cases in the database.
If no database was found run puzzle init first. | [
"Show",
"all",
"cases",
"in",
"the",
"database",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/cli/cases.py#L16-L39 | train |
ellethee/argparseinator | argparseinator/__main__.py | init | def init(name, subnames, dest, skeleton, description, project_type, skip_core):
"""Creates a standalone, subprojects or submodules script sctrucure"""
dest = dest or CUR_DIR
skeleton = join(skeleton or SKEL_PATH, project_type)
project = join(dest, name)
script = join(project, name + '.py')
core = join(project, name)
if project_type == 'standalone':
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
ignore=False)
else:
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
exclude_dirs = ['submodule'] + (['project'] if skip_core else [])
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
exclude_dirs=exclude_dirs, ignore=True)
for subname in subnames:
renames = [
(join(project, 'submodule'), join(project, subname))
]
copy_skeleton(
subname, skeleton, project, renames=renames,
description=description, ignore=True,
exclude_dirs=['project'], exclude_files=['project.py'])
return 0, "\n{}\n".format(project) | python | def init(name, subnames, dest, skeleton, description, project_type, skip_core):
"""Creates a standalone, subprojects or submodules script sctrucure"""
dest = dest or CUR_DIR
skeleton = join(skeleton or SKEL_PATH, project_type)
project = join(dest, name)
script = join(project, name + '.py')
core = join(project, name)
if project_type == 'standalone':
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
ignore=False)
else:
renames = [
(join(project, 'project.py'), script),
(join(project, 'project'), core)]
exclude_dirs = ['submodule'] + (['project'] if skip_core else [])
copy_skeleton(
name, skeleton, project, renames=renames, description=description,
exclude_dirs=exclude_dirs, ignore=True)
for subname in subnames:
renames = [
(join(project, 'submodule'), join(project, subname))
]
copy_skeleton(
subname, skeleton, project, renames=renames,
description=description, ignore=True,
exclude_dirs=['project'], exclude_files=['project.py'])
return 0, "\n{}\n".format(project) | [
"def",
"init",
"(",
"name",
",",
"subnames",
",",
"dest",
",",
"skeleton",
",",
"description",
",",
"project_type",
",",
"skip_core",
")",
":",
"dest",
"=",
"dest",
"or",
"CUR_DIR",
"skeleton",
"=",
"join",
"(",
"skeleton",
"or",
"SKEL_PATH",
",",
"project_type",
")",
"project",
"=",
"join",
"(",
"dest",
",",
"name",
")",
"script",
"=",
"join",
"(",
"project",
",",
"name",
"+",
"'.py'",
")",
"core",
"=",
"join",
"(",
"project",
",",
"name",
")",
"if",
"project_type",
"==",
"'standalone'",
":",
"renames",
"=",
"[",
"(",
"join",
"(",
"project",
",",
"'project.py'",
")",
",",
"script",
")",
",",
"(",
"join",
"(",
"project",
",",
"'project'",
")",
",",
"core",
")",
"]",
"copy_skeleton",
"(",
"name",
",",
"skeleton",
",",
"project",
",",
"renames",
"=",
"renames",
",",
"description",
"=",
"description",
",",
"ignore",
"=",
"False",
")",
"else",
":",
"renames",
"=",
"[",
"(",
"join",
"(",
"project",
",",
"'project.py'",
")",
",",
"script",
")",
",",
"(",
"join",
"(",
"project",
",",
"'project'",
")",
",",
"core",
")",
"]",
"exclude_dirs",
"=",
"[",
"'submodule'",
"]",
"+",
"(",
"[",
"'project'",
"]",
"if",
"skip_core",
"else",
"[",
"]",
")",
"copy_skeleton",
"(",
"name",
",",
"skeleton",
",",
"project",
",",
"renames",
"=",
"renames",
",",
"description",
"=",
"description",
",",
"exclude_dirs",
"=",
"exclude_dirs",
",",
"ignore",
"=",
"True",
")",
"for",
"subname",
"in",
"subnames",
":",
"renames",
"=",
"[",
"(",
"join",
"(",
"project",
",",
"'submodule'",
")",
",",
"join",
"(",
"project",
",",
"subname",
")",
")",
"]",
"copy_skeleton",
"(",
"subname",
",",
"skeleton",
",",
"project",
",",
"renames",
"=",
"renames",
",",
"description",
"=",
"description",
",",
"ignore",
"=",
"True",
",",
"exclude_dirs",
"=",
"[",
"'project'",
"]",
",",
"exclude_files",
"=",
"[",
"'project.py'",
"]",
")",
"return",
"0",
",",
"\"\\n{}\\n\"",
".",
"format",
"(",
"project",
")"
] | Creates a standalone, subprojects or submodules script structure | [
"Creates",
"a",
"standalone",
"subprojects",
"or",
"submodules",
"script",
"sctrucure"
] | 05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e | https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/__main__.py#L115-L145 | train |
jwodder/javaproperties | javaproperties/propfile.py | PropertiesFile._check | def _check(self):
"""
Assert the internal consistency of the instance's data structures.
This method is for debugging only.
"""
for k,ix in six.iteritems(self._indices):
assert k is not None, 'null key'
assert ix, 'Key does not map to any indices'
assert ix == sorted(ix), "Key's indices are not in order"
for i in ix:
assert i in self._lines, 'Key index does not map to line'
assert self._lines[i].key is not None, 'Key maps to comment'
assert self._lines[i].key == k, 'Key does not map to itself'
assert self._lines[i].value is not None, 'Key has null value'
prev = None
for i, line in six.iteritems(self._lines):
assert prev is None or prev < i, 'Line indices out of order'
prev = i
if line.key is None:
assert line.value is None, 'Comment/blank has value'
assert line.source is not None, 'Comment source not stored'
assert loads(line.source) == {}, 'Comment source is not comment'
else:
assert line.value is not None, 'Key has null value'
if line.source is not None:
assert loads(line.source) == {line.key: line.value}, \
'Key source does not deserialize to itself'
assert line.key in self._indices, 'Key is missing from map'
assert i in self._indices[line.key], \
'Key does not map to itself' | python | def _check(self):
"""
Assert the internal consistency of the instance's data structures.
This method is for debugging only.
"""
for k,ix in six.iteritems(self._indices):
assert k is not None, 'null key'
assert ix, 'Key does not map to any indices'
assert ix == sorted(ix), "Key's indices are not in order"
for i in ix:
assert i in self._lines, 'Key index does not map to line'
assert self._lines[i].key is not None, 'Key maps to comment'
assert self._lines[i].key == k, 'Key does not map to itself'
assert self._lines[i].value is not None, 'Key has null value'
prev = None
for i, line in six.iteritems(self._lines):
assert prev is None or prev < i, 'Line indices out of order'
prev = i
if line.key is None:
assert line.value is None, 'Comment/blank has value'
assert line.source is not None, 'Comment source not stored'
assert loads(line.source) == {}, 'Comment source is not comment'
else:
assert line.value is not None, 'Key has null value'
if line.source is not None:
assert loads(line.source) == {line.key: line.value}, \
'Key source does not deserialize to itself'
assert line.key in self._indices, 'Key is missing from map'
assert i in self._indices[line.key], \
'Key does not map to itself' | [
"def",
"_check",
"(",
"self",
")",
":",
"for",
"k",
",",
"ix",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_indices",
")",
":",
"assert",
"k",
"is",
"not",
"None",
",",
"'null key'",
"assert",
"ix",
",",
"'Key does not map to any indices'",
"assert",
"ix",
"==",
"sorted",
"(",
"ix",
")",
",",
"\"Key's indices are not in order\"",
"for",
"i",
"in",
"ix",
":",
"assert",
"i",
"in",
"self",
".",
"_lines",
",",
"'Key index does not map to line'",
"assert",
"self",
".",
"_lines",
"[",
"i",
"]",
".",
"key",
"is",
"not",
"None",
",",
"'Key maps to comment'",
"assert",
"self",
".",
"_lines",
"[",
"i",
"]",
".",
"key",
"==",
"k",
",",
"'Key does not map to itself'",
"assert",
"self",
".",
"_lines",
"[",
"i",
"]",
".",
"value",
"is",
"not",
"None",
",",
"'Key has null value'",
"prev",
"=",
"None",
"for",
"i",
",",
"line",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_lines",
")",
":",
"assert",
"prev",
"is",
"None",
"or",
"prev",
"<",
"i",
",",
"'Line indices out of order'",
"prev",
"=",
"i",
"if",
"line",
".",
"key",
"is",
"None",
":",
"assert",
"line",
".",
"value",
"is",
"None",
",",
"'Comment/blank has value'",
"assert",
"line",
".",
"source",
"is",
"not",
"None",
",",
"'Comment source not stored'",
"assert",
"loads",
"(",
"line",
".",
"source",
")",
"==",
"{",
"}",
",",
"'Comment source is not comment'",
"else",
":",
"assert",
"line",
".",
"value",
"is",
"not",
"None",
",",
"'Key has null value'",
"if",
"line",
".",
"source",
"is",
"not",
"None",
":",
"assert",
"loads",
"(",
"line",
".",
"source",
")",
"==",
"{",
"line",
".",
"key",
":",
"line",
".",
"value",
"}",
",",
"'Key source does not deserialize to itself'",
"assert",
"line",
".",
"key",
"in",
"self",
".",
"_indices",
",",
"'Key is missing from map'",
"assert",
"i",
"in",
"self",
".",
"_indices",
"[",
"line",
".",
"key",
"]",
",",
"'Key does not map to itself'"
] | Assert the internal consistency of the instance's data structures.
This method is for debugging only. | [
"Assert",
"the",
"internal",
"consistency",
"of",
"the",
"instance",
"s",
"data",
"structures",
".",
"This",
"method",
"is",
"for",
"debugging",
"only",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propfile.py#L64-L93 | train |
jwodder/javaproperties | javaproperties/propfile.py | PropertiesFile.load | def load(cls, fp):
"""
Parse the contents of the `~io.IOBase.readline`-supporting file-like
object ``fp`` as a simple line-oriented ``.properties`` file and return
a `PropertiesFile` instance.
``fp`` may be either a text or binary filehandle, with or without
universal newlines enabled. If it is a binary filehandle, its contents
are decoded as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:rtype: PropertiesFile
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
obj = cls()
for i, (k, v, src) in enumerate(parse(fp)):
if k is not None:
obj._indices.setdefault(k, []).append(i)
obj._lines[i] = PropertyLine(k, v, src)
return obj | python | def load(cls, fp):
"""
Parse the contents of the `~io.IOBase.readline`-supporting file-like
object ``fp`` as a simple line-oriented ``.properties`` file and return
a `PropertiesFile` instance.
``fp`` may be either a text or binary filehandle, with or without
universal newlines enabled. If it is a binary filehandle, its contents
are decoded as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:rtype: PropertiesFile
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
obj = cls()
for i, (k, v, src) in enumerate(parse(fp)):
if k is not None:
obj._indices.setdefault(k, []).append(i)
obj._lines[i] = PropertyLine(k, v, src)
return obj | [
"def",
"load",
"(",
"cls",
",",
"fp",
")",
":",
"obj",
"=",
"cls",
"(",
")",
"for",
"i",
",",
"(",
"k",
",",
"v",
",",
"src",
")",
"in",
"enumerate",
"(",
"parse",
"(",
"fp",
")",
")",
":",
"if",
"k",
"is",
"not",
"None",
":",
"obj",
".",
"_indices",
".",
"setdefault",
"(",
"k",
",",
"[",
"]",
")",
".",
"append",
"(",
"i",
")",
"obj",
".",
"_lines",
"[",
"i",
"]",
"=",
"PropertyLine",
"(",
"k",
",",
"v",
",",
"src",
")",
"return",
"obj"
] | Parse the contents of the `~io.IOBase.readline`-supporting file-like
object ``fp`` as a simple line-oriented ``.properties`` file and return
a `PropertiesFile` instance.
``fp`` may be either a text or binary filehandle, with or without
universal newlines enabled. If it is a binary filehandle, its contents
are decoded as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param fp: the file from which to read the ``.properties`` document
:type fp: file-like object
:rtype: PropertiesFile
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input | [
"Parse",
"the",
"contents",
"of",
"the",
"~io",
".",
"IOBase",
".",
"readline",
"-",
"supporting",
"file",
"-",
"like",
"object",
"fp",
"as",
"a",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"file",
"and",
"return",
"a",
"PropertiesFile",
"instance",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propfile.py#L170-L195 | train |
jwodder/javaproperties | javaproperties/propfile.py | PropertiesFile.loads | def loads(cls, s):
"""
Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `PropertiesFile` instance.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties``
document
:rtype: PropertiesFile
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
if isinstance(s, six.binary_type):
fp = six.BytesIO(s)
else:
fp = six.StringIO(s)
return cls.load(fp) | python | def loads(cls, s):
"""
Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `PropertiesFile` instance.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties``
document
:rtype: PropertiesFile
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input
"""
if isinstance(s, six.binary_type):
fp = six.BytesIO(s)
else:
fp = six.StringIO(s)
return cls.load(fp) | [
"def",
"loads",
"(",
"cls",
",",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"six",
".",
"binary_type",
")",
":",
"fp",
"=",
"six",
".",
"BytesIO",
"(",
"s",
")",
"else",
":",
"fp",
"=",
"six",
".",
"StringIO",
"(",
"s",
")",
"return",
"cls",
".",
"load",
"(",
"fp",
")"
] | Parse the contents of the string ``s`` as a simple line-oriented
``.properties`` file and return a `PropertiesFile` instance.
``s`` may be either a text string or bytes string. If it is a bytes
string, its contents are decoded as Latin-1.
.. versionchanged:: 0.5.0
Invalid ``\\uXXXX`` escape sequences will now cause an
`InvalidUEscapeError` to be raised
:param string s: the string from which to read the ``.properties``
document
:rtype: PropertiesFile
:raises InvalidUEscapeError: if an invalid ``\\uXXXX`` escape sequence
occurs in the input | [
"Parse",
"the",
"contents",
"of",
"the",
"string",
"s",
"as",
"a",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"file",
"and",
"return",
"a",
"PropertiesFile",
"instance",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propfile.py#L198-L220 | train |
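A quick sketch of `loads()` in use: comments and blank lines survive parsing, and `PropertiesFile` behaves as a mutable mapping, so only reassigned keys lose their original formatting on output.

```python
from javaproperties import PropertiesFile

text = '# colours\nfoo: bar\n\nbaz = quux\n'
pf = PropertiesFile.loads(text)
print(pf['foo'])   # bar
pf['baz'] = 'new'  # this key will be re-serialized; everything else is kept
```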
jwodder/javaproperties | javaproperties/propfile.py | PropertiesFile.dump | def dump(self, fp, separator='='):
"""
Write the mapping to a file in simple line-oriented ``.properties``
format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dump()`
function instead will cause all formatting information to be
ignored, as :func:`dump()` will treat the instance like a normal
mapping.
:param fp: A file-like object to write the mapping to. It must have
been opened as a text file with a Latin-1-compatible encoding.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:return: `None`
"""
### TODO: Support setting the timestamp
for line in six.itervalues(self._lines):
if line.source is None:
print(join_key_value(line.key, line.value, separator), file=fp)
else:
fp.write(line.source) | python | def dump(self, fp, separator='='):
"""
Write the mapping to a file in simple line-oriented ``.properties``
format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dump()`
function instead will cause all formatting information to be
ignored, as :func:`dump()` will treat the instance like a normal
mapping.
:param fp: A file-like object to write the mapping to. It must have
been opened as a text file with a Latin-1-compatible encoding.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:return: `None`
"""
### TODO: Support setting the timestamp
for line in six.itervalues(self._lines):
if line.source is None:
print(join_key_value(line.key, line.value, separator), file=fp)
else:
fp.write(line.source) | [
"def",
"dump",
"(",
"self",
",",
"fp",
",",
"separator",
"=",
"'='",
")",
":",
"### TODO: Support setting the timestamp",
"for",
"line",
"in",
"six",
".",
"itervalues",
"(",
"self",
".",
"_lines",
")",
":",
"if",
"line",
".",
"source",
"is",
"None",
":",
"print",
"(",
"join_key_value",
"(",
"line",
".",
"key",
",",
"line",
".",
"value",
",",
"separator",
")",
",",
"file",
"=",
"fp",
")",
"else",
":",
"fp",
".",
"write",
"(",
"line",
".",
"source",
")"
] | Write the mapping to a file in simple line-oriented ``.properties``
format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dump()`
function instead will cause all formatting information to be
ignored, as :func:`dump()` will treat the instance like a normal
mapping.
:param fp: A file-like object to write the mapping to. It must have
been opened as a text file with a Latin-1-compatible encoding.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:return: `None` | [
"Write",
"the",
"mapping",
"to",
"a",
"file",
"in",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"format",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propfile.py#L222-L256 | train |
jwodder/javaproperties | javaproperties/propfile.py | PropertiesFile.dumps | def dumps(self, separator='='):
"""
Convert the mapping to a text string in simple line-oriented
``.properties`` format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dumps()`
function instead will cause all formatting information to be
ignored, as :func:`dumps()` will treat the instance like a normal
mapping.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:rtype: text string
"""
s = six.StringIO()
self.dump(s, separator=separator)
return s.getvalue() | python | def dumps(self, separator='='):
"""
Convert the mapping to a text string in simple line-oriented
``.properties`` format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dumps()`
function instead will cause all formatting information to be
ignored, as :func:`dumps()` will treat the instance like a normal
mapping.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:rtype: text string
"""
s = six.StringIO()
self.dump(s, separator=separator)
return s.getvalue() | [
"def",
"dumps",
"(",
"self",
",",
"separator",
"=",
"'='",
")",
":",
"s",
"=",
"six",
".",
"StringIO",
"(",
")",
"self",
".",
"dump",
"(",
"s",
",",
"separator",
"=",
"separator",
")",
"return",
"s",
".",
"getvalue",
"(",
")"
] | Convert the mapping to a text string in simple line-oriented
``.properties`` format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dumps()`
function instead will cause all formatting information to be
ignored, as :func:`dumps()` will treat the instance like a normal
mapping.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:rtype: text string | [
"Convert",
"the",
"mapping",
"to",
"a",
"text",
"string",
"in",
"simple",
"line",
"-",
"oriented",
".",
"properties",
"format",
"."
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propfile.py#L258-L287 | train |
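Continuing the `loads()` sketch above, `dumps()` with the default `'='` separator emits the unchanged lines verbatim and rewrites only the modified key:

```python
print(pf.dumps())
# Expected output (only the reassigned key is reformatted with '='):
# # colours
# foo: bar
#
# baz=new
```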
jwodder/javaproperties | javaproperties/propfile.py | PropertiesFile.copy | def copy(self):
""" Create a copy of the mapping, including formatting information """
dup = type(self)()
dup._indices = OrderedDict(
(k, list(v)) for k,v in six.iteritems(self._indices)
)
dup._lines = self._lines.copy()
return dup | python | def copy(self):
""" Create a copy of the mapping, including formatting information """
dup = type(self)()
dup._indices = OrderedDict(
(k, list(v)) for k,v in six.iteritems(self._indices)
)
dup._lines = self._lines.copy()
return dup | [
"def",
"copy",
"(",
"self",
")",
":",
"dup",
"=",
"type",
"(",
"self",
")",
"(",
")",
"dup",
".",
"_indices",
"=",
"OrderedDict",
"(",
"(",
"k",
",",
"list",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_indices",
")",
")",
"dup",
".",
"_lines",
"=",
"self",
".",
"_lines",
".",
"copy",
"(",
")",
"return",
"dup"
] | Create a copy of the mapping, including formatting information | [
"Create",
"a",
"copy",
"of",
"the",
"mapping",
"including",
"formatting",
"information"
] | 8b48f040305217ebeb80c98c4354691bbb01429b | https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propfile.py#L289-L296 | train |
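Note that `copy()` duplicates both the index map and the ordered line table, so edits to the copy leave the original's formatting records untouched; a shallow `dict(pf)` would keep the values but discard all of that layout information.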
ldomic/lintools | lintools/analysis/maths_functions.py | prepare_normal_vectors | def prepare_normal_vectors(atomselection):
"""Create and normalize a vector across ring plane."""
ring_atomselection = [atomselection.coordinates()[a] for a in [0,2,4]]
vect1 = self.vector(ring_atomselection[0],ring_atomselection[1])
vect2 = self.vector(ring_atomselection[2],ring_atomselection[0])
return self.normalize_vector(np.cross(vect1,vect2)) | python | def prepare_normal_vectors(atomselection):
"""Create and normalize a vector across ring plane."""
ring_atomselection = [atomselection.coordinates()[a] for a in [0,2,4]]
vect1 = self.vector(ring_atomselection[0],ring_atomselection[1])
vect2 = self.vector(ring_atomselection[2],ring_atomselection[0])
return self.normalize_vector(np.cross(vect1,vect2)) | [
"def",
"prepare_normal_vectors",
"(",
"atomselection",
")",
":",
"ring_atomselection",
"=",
"[",
"atomselection",
".",
"coordinates",
"(",
")",
"[",
"a",
"]",
"for",
"a",
"in",
"[",
"0",
",",
"2",
",",
"4",
"]",
"]",
"vect1",
"=",
"self",
".",
"vector",
"(",
"ring_atomselection",
"[",
"0",
"]",
",",
"ring_atomselection",
"[",
"1",
"]",
")",
"vect2",
"=",
"self",
".",
"vector",
"(",
"ring_atomselection",
"[",
"2",
"]",
",",
"ring_atomselection",
"[",
"0",
"]",
")",
"return",
"self",
".",
"normalize_vector",
"(",
"np",
".",
"cross",
"(",
"vect1",
",",
"vect2",
")",
")"
] | Create and normalize a vector across ring plane. | [
"Create",
"and",
"normalize",
"a",
"vector",
"across",
"ring",
"plane",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/analysis/maths_functions.py#L3-L8 | train |
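As written, `prepare_normal_vectors` references `self.vector` and `self.normalize_vector`, so it only runs bound to an object providing those helpers. A self-contained numpy version of the same geometry, assuming `vector(a, b)` means `b - a`:

```python
import numpy as np

ring = np.array([[0.0, 0.0, 0.0],   # ring atom 0
                 [1.0, 0.0, 0.0],   # ring atom 2
                 [0.0, 1.0, 0.0]])  # ring atom 4

vect1 = ring[1] - ring[0]
vect2 = ring[0] - ring[2]
normal = np.cross(vect1, vect2)
normal /= np.linalg.norm(normal)    # unit normal to the ring plane
print(normal)                       # [ 0.  0. -1.] for this flat ring
```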
rycus86/ghost-client | ghost_client/helpers.py | refresh_session_if_necessary | def refresh_session_if_necessary(f):
"""
Decorator to use on methods that are allowed
to retry the request after reauthenticating the client.
:param f: The original function
:return: The decorated function
"""
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
try:
result = f(self, *args, **kwargs)
except Exception as ex:
if hasattr(ex, 'code') and ex.code in (401, 403):
self.refresh_session()
# retry now
result = f(self, *args, **kwargs)
else:
raise ex
return result
return wrapped | python | def refresh_session_if_necessary(f):
"""
Decorator to use on methods that are allowed
to retry the request after reauthenticating the client.
:param f: The original function
:return: The decorated function
"""
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
try:
result = f(self, *args, **kwargs)
except Exception as ex:
if hasattr(ex, 'code') and ex.code in (401, 403):
self.refresh_session()
# retry now
result = f(self, *args, **kwargs)
else:
raise ex
return result
return wrapped | [
"def",
"refresh_session_if_necessary",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrapped",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"result",
"=",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"ex",
":",
"if",
"hasattr",
"(",
"ex",
",",
"'code'",
")",
"and",
"ex",
".",
"code",
"in",
"(",
"401",
",",
"403",
")",
":",
"self",
".",
"refresh_session",
"(",
")",
"# retry now",
"result",
"=",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"raise",
"ex",
"return",
"result",
"return",
"wrapped"
] | Decorator to use on methods that are allowed
to retry the request after reauthenticating the client.
:param f: The original function
:return: The decorated function | [
"Decorator",
"to",
"use",
"on",
"methods",
"that",
"are",
"allowed",
"to",
"retry",
"the",
"request",
"after",
"reauthenticating",
"the",
"client",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/helpers.py#L4-L27 | train |
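A minimal usage sketch for the decorator above. The client class and error type are hypothetical stand-ins; all the decorator actually needs is a refresh_session() method on the instance and exceptions carrying a numeric code attribute:

class ApiError(Exception):
    def __init__(self, code):
        super(ApiError, self).__init__(code)
        self.code = code

class DemoClient(object):
    def __init__(self):
        self.authenticated = False

    def refresh_session(self):
        self.authenticated = True

    @refresh_session_if_necessary
    def list_posts(self):
        if not self.authenticated:
            raise ApiError(401)  # first call fails, triggering re-auth and a retry
        return ['post-1', 'post-2']

print(DemoClient().list_posts())  # -> ['post-1', 'post-2']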
robinandeer/puzzle | puzzle/utils/puzzle_database.py | init_db | def init_db(db_path):
"""Build the sqlite database"""
logger.info("Creating database")
with closing(connect_database(db_path)) as db:
with open(SCHEMA, 'r') as f:
db.cursor().executescript(f.read())
db.commit()
return | python | def init_db(db_path):
"""Build the sqlite database"""
logger.info("Creating database")
with closing(connect_database(db_path)) as db:
with open(SCHEMA, 'r') as f:
db.cursor().executescript(f.read())
db.commit()
return | [
"def",
"init_db",
"(",
"db_path",
")",
":",
"logger",
".",
"info",
"(",
"\"Creating database\"",
")",
"with",
"closing",
"(",
"connect_database",
"(",
"db_path",
")",
")",
"as",
"db",
":",
"with",
"open",
"(",
"SCHEMA",
",",
"'r'",
")",
"as",
"f",
":",
"db",
".",
"cursor",
"(",
")",
".",
"executescript",
"(",
"f",
".",
"read",
"(",
")",
")",
"db",
".",
"commit",
"(",
")",
"return"
] | Build the sqlite database | [
"Build",
"the",
"sqlite",
"database"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/puzzle_database.py#L15-L22 | train |
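The function above is a compact pattern: open a connection, run a bundled SQL schema via executescript(), commit, and let closing() tear the handle down. A hedged stand-alone version, with an inline schema string in place of the project's SCHEMA file and sqlite3.connect standing in for connect_database (assumed to be a thin wrapper):

import sqlite3
from contextlib import closing

SCHEMA_SQL = """
CREATE TABLE IF NOT EXISTS individual (
    ind_id TEXT PRIMARY KEY,
    case_id TEXT
);
"""

def init_db(db_path):
    with closing(sqlite3.connect(db_path)) as db:
        # executescript() runs the whole multi-statement script in one call.
        db.cursor().executescript(SCHEMA_SQL)
        db.commit()

init_db(':memory:')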
inveniosoftware-contrib/json-merger | json_merger/merger.py | Merger.merge | def merge(self):
"""Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occured during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
strategies set for the merger instance.
"""
self.merged_root = self._recursive_merge(self.root, self.head,
self.update)
if self.conflicts:
raise MergeError('Conflicts Occurred in Merge Process',
self.conflicts) | python | def merge(self):
"""Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occurred during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
strategies set for the merger instance.
"""
self.merged_root = self._recursive_merge(self.root, self.head,
self.update)
if self.conflicts:
raise MergeError('Conflicts Occurred in Merge Process',
self.conflicts) | [
"def",
"merge",
"(",
"self",
")",
":",
"self",
".",
"merged_root",
"=",
"self",
".",
"_recursive_merge",
"(",
"self",
".",
"root",
",",
"self",
".",
"head",
",",
"self",
".",
"update",
")",
"if",
"self",
".",
"conflicts",
":",
"raise",
"MergeError",
"(",
"'Conflicts Occurred in Merge Process'",
",",
"self",
".",
"conflicts",
")"
] | Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occurred during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
startegies set for the merger instance. | [
"Populates",
"result",
"members",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/merger.py#L135-L224 | train |
robinandeer/puzzle | puzzle/utils/phenomizer.py | hpo_genes | def hpo_genes(phenotype_ids, username, password):
"""Return list of HGNC symbols matching HPO phenotype ids.
Args:
phenotype_ids (list): list of phenotype ids
username (str): username to connect to phenomizer
password (str): password to connect to phenomizer
Returns:
query_result: a list of dictionaries of the form
{
'p_value': float,
'gene_id': str,
'omim_id': int,
'orphanet_id': int,
'decipher_id': int,
'any_id': int,
'mode_of_inheritance': str,
'description': str,
'raw_line': str
}
"""
if phenotype_ids:
try:
results = query_phenomizer.query(username, password, phenotype_ids)
return [result for result in results
if result['p_value'] is not None]
except (SystemExit, RuntimeError):
pass
return None | python | def hpo_genes(phenotype_ids, username, password):
"""Return list of HGNC symbols matching HPO phenotype ids.
Args:
phenotype_ids (list): list of phenotype ids
username (str): username to connect to phenomizer
password (str): password to connect to phenomizer
Returns:
query_result: a list of dictionaries of the form
{
'p_value': float,
'gene_id': str,
'omim_id': int,
'orphanet_id': int,
'decipher_id': int,
'any_id': int,
'mode_of_inheritance': str,
'description': str,
'raw_line': str
}
"""
if phenotype_ids:
try:
results = query_phenomizer.query(username, password, phenotype_ids)
return [result for result in results
if result['p_value'] is not None]
except (SystemExit, RuntimeError):
pass
return None | [
"def",
"hpo_genes",
"(",
"phenotype_ids",
",",
"username",
",",
"password",
")",
":",
"if",
"phenotype_ids",
":",
"try",
":",
"results",
"=",
"query_phenomizer",
".",
"query",
"(",
"username",
",",
"password",
",",
"phenotype_ids",
")",
"return",
"[",
"result",
"for",
"result",
"in",
"results",
"if",
"result",
"[",
"'p_value'",
"]",
"is",
"not",
"None",
"]",
"except",
"SystemExit",
",",
"RuntimeError",
":",
"pass",
"return",
"None"
] | Return list of HGNC symbols matching HPO phenotype ids.
Args:
phenotype_ids (list): list of phenotype ids
username (str): username to connect to phenomizer
password (str): password to connect to phenomizer
Returns:
query_result: a list of dictionaries of the form
{
'p_value': float,
'gene_id': str,
'omim_id': int,
'orphanet_id': int,
'decipher_id': int,
'any_id': int,
'mode_of_inheritance': str,
'description': str,
'raw_line': str
} | [
"Return",
"list",
"of",
"HGNC",
"symbols",
"matching",
"HPO",
"phenotype",
"ids",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/utils/phenomizer.py#L5-L34 | train |
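A hedged call sketch for the function above — the HPO term ids are illustrative and the credentials are placeholders; the query_phenomizer behaviour is taken on trust from the entry:

phenotypes = ['HP:0001250', 'HP:0001251']  # example phenotype ids
hits = hpo_genes(phenotypes, username='demo-user', password='demo-pass')

# None means no phenotype ids were given or the remote query failed.
if hits is not None:
    for hit in hits:
        print(hit['gene_id'], hit['p_value'])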
Colorless-Green-Ideas/MaterialDjango | materialdjango/forms.py | mangle_form | def mangle_form(form):
"Utility to monkeypatch forms into paperinputs, untested"
for field, widget in form.fields.iteritems():
if type(widget.widget) is forms.widgets.TextInput:
form.fields[field].widget = PaperTextInput()
form.fields[field].label = ''
if type(widget.widget) is forms.widgets.PasswordInput:
form.fields[field].widget = PaperPasswordInput()
form.fields[field].label = ''
return form | python | def mangle_form(form):
"Utility to monkeypatch forms into paperinputs, untested"
for field, widget in form.fields.iteritems():
if type(widget.widget) is forms.widgets.TextInput:
form.fields[field].widget = PaperTextInput()
form.fields[field].label = ''
if type(widget.widget) is forms.widgets.PasswordInput:
form.fields[field].widget = PaperPasswordInput()
form.fields[field].label = ''
return form | [
"def",
"mangle_form",
"(",
"form",
")",
":",
"for",
"field",
",",
"widget",
"in",
"form",
".",
"fields",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"widget",
")",
"is",
"forms",
".",
"widgets",
".",
"TextInput",
":",
"form",
".",
"fields",
"[",
"field",
"]",
".",
"widget",
"=",
"PaperTextInput",
"(",
")",
"form",
".",
"fields",
"[",
"field",
"]",
".",
"label",
"=",
"''",
"if",
"type",
"(",
"widget",
")",
"is",
"forms",
".",
"widgets",
".",
"PasswordInput",
":",
"field",
".",
"widget",
"=",
"PaperPasswordInput",
"(",
")",
"field",
".",
"label",
"=",
"''",
"return",
"form"
] | Utility to monkeypatch forms into paperinputs, untested | [
"Utility",
"to",
"monkeypatch",
"forms",
"into",
"paperinputs",
"untested"
] | e7a69e968965d25198d90318623a828cff67f5dc | https://github.com/Colorless-Green-Ideas/MaterialDjango/blob/e7a69e968965d25198d90318623a828cff67f5dc/materialdjango/forms.py#L14-L23 | train |
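Usage is a straight wrap of a form instance. A sketch with a made-up form class (it needs a configured Django environment, so treat it as illustration only):

from django import forms

class LoginForm(forms.Form):
    username = forms.CharField()                            # default widget: TextInput
    password = forms.CharField(widget=forms.PasswordInput)

form = mangle_form(LoginForm())
# form.fields['username'].widget is now a PaperTextInput and
# form.fields['password'].widget a PaperPasswordInput, both with blank labels.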
eleme/meepo | meepo/apps/eventsourcing/event_store.py | RedisEventStore._keygen | def _keygen(self, event, ts=None):
"""Generate redis key for event at timestamp.
:param event: event name
:param ts: timestamp, default to current timestamp if left as None
"""
return "%s:%s" % (self.namespace(ts or time.time()), event) | python | def _keygen(self, event, ts=None):
"""Generate redis key for event at timestamp.
:param event: event name
:param ts: timestamp, default to current timestamp if left as None
"""
return "%s:%s" % (self.namespace(ts or time.time()), event) | [
"def",
"_keygen",
"(",
"self",
",",
"event",
",",
"ts",
"=",
"None",
")",
":",
"return",
"\"%s:%s\"",
"%",
"(",
"self",
".",
"namespace",
"(",
"ts",
"or",
"time",
".",
"time",
"(",
")",
")",
",",
"event",
")"
] | Generate redis key for event at timestamp.
:param event: event name
:param ts: timestamp, default to current timestamp if left as None | [
"Generate",
"redis",
"key",
"for",
"event",
"at",
"timestamp",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L140-L146 | train |
eleme/meepo | meepo/apps/eventsourcing/event_store.py | RedisEventStore._zadd | def _zadd(self, key, pk, ts=None, ttl=None):
"""Redis lua func to add an event to the corresponding sorted set.
:param key: the key to be stored in redis server
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
"""
return self.r.eval(self.LUA_ZADD, 1, key, ts or self._time(), pk) | python | def _zadd(self, key, pk, ts=None, ttl=None):
"""Redis lua func to add an event to the corresponding sorted set.
:param key: the key to be stored in redis server
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
"""
return self.r.eval(self.LUA_ZADD, 1, key, ts or self._time(), pk) | [
"def",
"_zadd",
"(",
"self",
",",
"key",
",",
"pk",
",",
"ts",
"=",
"None",
",",
"ttl",
"=",
"None",
")",
":",
"return",
"self",
".",
"r",
".",
"eval",
"(",
"self",
".",
"LUA_ZADD",
",",
"1",
",",
"key",
",",
"ts",
"or",
"self",
".",
"_time",
"(",
")",
",",
"pk",
")"
] | Redis lua func to add an event to the corresponding sorted set.
:param key: the key to be stored in redis server
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update | [
"Redis",
"lua",
"func",
"to",
"add",
"an",
"event",
"to",
"the",
"corresponding",
"sorted",
"set",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L154-L163 | train |
eleme/meepo | meepo/apps/eventsourcing/event_store.py | RedisEventStore.add | def add(self, event, pk, ts=None, ttl=None):
"""Add an event to event store.
All events are stored in a sorted set in redis with timestamp as
rank score.
:param event: the event to be added, format should be ``table_action``
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
:return: bool
"""
key = self._keygen(event, ts)
try:
self._zadd(key, pk, ts, ttl)
return True
except redis.ConnectionError as e:
# connection error typically happens when redis server can't be
# reached or timed out, the error will be silent with an error
# log and return False.
self.logger.error(
"redis event store failed with connection error %r" % e)
return False | python | def add(self, event, pk, ts=None, ttl=None):
"""Add an event to event store.
All events are stored in a sorted set in redis with timestamp as
rank score.
:param event: the event to be added, format should be ``table_action``
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
:return: bool
"""
key = self._keygen(event, ts)
try:
self._zadd(key, pk, ts, ttl)
return True
except redis.ConnectionError as e:
# connection error typically happens when redis server can't be
# reached or timed out, the error will be silent with an error
# log and return False.
self.logger.error(
"redis event store failed with connection error %r" % e)
return False | [
"def",
"add",
"(",
"self",
",",
"event",
",",
"pk",
",",
"ts",
"=",
"None",
",",
"ttl",
"=",
"None",
")",
":",
"key",
"=",
"self",
".",
"_keygen",
"(",
"event",
",",
"ts",
")",
"try",
":",
"self",
".",
"_zadd",
"(",
"key",
",",
"pk",
",",
"ts",
",",
"ttl",
")",
"return",
"True",
"except",
"redis",
".",
"ConnectionError",
"as",
"e",
":",
"# connection error typically happens when redis server can't be",
"# reached or timed out, the error will be silent with an error",
"# log and return None.",
"self",
".",
"logger",
".",
"error",
"(",
"\"redis event store failed with connection error %r\"",
"%",
"e",
")",
"return",
"False"
] | Add an event to event store.
All events are stored in a sorted set in redis with timestamp as
rank score.
:param event: the event to be added, format should be ``table_action``
:param pk: the primary key of event
:param ts: timestamp of the event, default to redis_server's
current timestamp
:param ttl: the expiration time of event since the last update
:return: bool | [
"Add",
"an",
"event",
"to",
"event",
"store",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L165-L188 | train |
eleme/meepo | meepo/apps/eventsourcing/event_store.py | RedisEventStore.replay | def replay(self, event, ts=0, end_ts=None, with_ts=False):
"""Replay events based on timestamp.
If you split namespace with ts, the replay will only return events
within the same namespace.
:param event: event name
:param ts: replay events after ts, defaults to 0.
:param end_ts: replay events up to end_ts, defaults to "+inf".
:param with_ts: return timestamps with events, defaults to False.
:return: list of pks when with_ts set to False, list of (pk, ts) tuples
when with_ts is True.
"""
key = self._keygen(event, ts)
end_ts = end_ts if end_ts else "+inf"
elements = self.r.zrangebyscore(key, ts, end_ts, withscores=with_ts)
if not with_ts:
return [s(e) for e in elements]
else:
return [(s(e[0]), int(e[1])) for e in elements] | python | def replay(self, event, ts=0, end_ts=None, with_ts=False):
"""Replay events based on timestamp.
If you split namespace with ts, the replay will only return events
within the same namespace.
:param event: event name
:param ts: replay events after ts, defaults to 0.
:param end_ts: replay events up to end_ts, defaults to "+inf".
:param with_ts: return timestamps with events, defaults to False.
:return: list of pks when with_ts set to False, list of (pk, ts) tuples
when with_ts is True.
"""
key = self._keygen(event, ts)
end_ts = end_ts if end_ts else "+inf"
elements = self.r.zrangebyscore(key, ts, end_ts, withscores=with_ts)
if not with_ts:
return [s(e) for e in elements]
else:
return [(s(e[0]), int(e[1])) for e in elements] | [
"def",
"replay",
"(",
"self",
",",
"event",
",",
"ts",
"=",
"0",
",",
"end_ts",
"=",
"None",
",",
"with_ts",
"=",
"False",
")",
":",
"key",
"=",
"self",
".",
"_keygen",
"(",
"event",
",",
"ts",
")",
"end_ts",
"=",
"end_ts",
"if",
"end_ts",
"else",
"\"+inf\"",
"elements",
"=",
"self",
".",
"r",
".",
"zrangebyscore",
"(",
"key",
",",
"ts",
",",
"end_ts",
",",
"withscores",
"=",
"with_ts",
")",
"if",
"not",
"with_ts",
":",
"return",
"[",
"s",
"(",
"e",
")",
"for",
"e",
"in",
"elements",
"]",
"else",
":",
"return",
"[",
"(",
"s",
"(",
"e",
"[",
"0",
"]",
")",
",",
"int",
"(",
"e",
"[",
"1",
"]",
")",
")",
"for",
"e",
"in",
"elements",
"]"
] | Replay events based on timestamp.
If you split namespace with ts, the replay will only return events
within the same namespace.
:param event: event name
:param ts: replay events after ts, defaults to 0.
:param end_ts: replay events up to end_ts, defaults to "+inf".
:param with_ts: return timestamps with events, defaults to False.
:return: list of pks when with_ts set to False, list of (pk, ts) tuples
when with_ts is True. | [
"Replay",
"events",
"based",
"on",
"timestamp",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L190-L210 | train |
eleme/meepo | meepo/apps/eventsourcing/event_store.py | RedisEventStore.query | def query(self, event, pk, ts=None):
"""Query the last update timestamp of an event pk.
You can pass a timestamp to only look for events later than that
within the same namespace.
:param event: the event name.
:param pk: the pk value for query.
:param ts: query event pk after ts, defaults to None, which queries
the whole span of the current namespace.
"""
key = self._keygen(event, ts)
pk_ts = self.r.zscore(key, pk)
return int(pk_ts) if pk_ts else None | python | def query(self, event, pk, ts=None):
"""Query the last update timestamp of an event pk.
You can pass a timestamp to only look for events later than that
within the same namespace.
:param event: the event name.
:param pk: the pk value for query.
:param ts: query event pk after ts, defaults to None, which queries
the whole span of the current namespace.
"""
key = self._keygen(event, ts)
pk_ts = self.r.zscore(key, pk)
return int(pk_ts) if pk_ts else None | [
"def",
"query",
"(",
"self",
",",
"event",
",",
"pk",
",",
"ts",
"=",
"None",
")",
":",
"key",
"=",
"self",
".",
"_keygen",
"(",
"event",
",",
"ts",
")",
"pk_ts",
"=",
"self",
".",
"r",
".",
"zscore",
"(",
"key",
",",
"pk",
")",
"return",
"int",
"(",
"pk_ts",
")",
"if",
"pk_ts",
"else",
"None"
] | Query the last update timestamp of an event pk.
You can pass a timestamp to only look for events later than that
within the same namespace.
:param event: the event name.
:param pk: the pk value for query.
:param ts: query event pk after ts, defaults to None, which queries
the whole span of the current namespace. | [
"Query",
"the",
"last",
"update",
"timestamp",
"of",
"an",
"event",
"pk",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L212-L225 | train |
eleme/meepo | meepo/apps/eventsourcing/event_store.py | RedisEventStore.clear | def clear(self, event, ts=None):
"""Clear all stored record of event.
:param event: event name to be cleared.
:param ts: timestamp used locate the namespace
"""
return self.r.delete(self._keygen(event, ts)) | python | def clear(self, event, ts=None):
"""Clear all stored record of event.
:param event: event name to be cleared.
:param ts: timestamp used locate the namespace
"""
return self.r.delete(self._keygen(event, ts)) | [
"def",
"clear",
"(",
"self",
",",
"event",
",",
"ts",
"=",
"None",
")",
":",
"return",
"self",
".",
"r",
".",
"delete",
"(",
"self",
".",
"_keygen",
"(",
"event",
",",
"ts",
")",
")"
] | Clear all stored records of event.
:param event: event name to be cleared.
:param ts: timestamp used to locate the namespace | [
"Clear",
"all",
"stored",
"record",
"of",
"event",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/event_store.py#L227-L233 | train |
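The four RedisEventStore entries above (add, replay, query, clear) form one small API. A hedged end-to-end sketch against a local Redis — the constructor signature is assumed, since it is not shown in these entries, and a fixed base timestamp keeps every call inside the same namespace bucket:

store = RedisEventStore('redis://localhost:6379/0')  # constructor args assumed

TS = 1400000000  # fixed base timestamp

store.add('order_update', '1001', ts=TS)
store.add('order_update', '1002', ts=TS + 60)

print(store.replay('order_update', ts=TS))                # -> ['1001', '1002']
print(store.replay('order_update', ts=TS, with_ts=True))  # -> [('1001', TS), ('1002', TS + 60)]
print(store.query('order_update', '1002', ts=TS))         # -> TS + 60

store.clear('order_update', ts=TS)  # drop the whole sorted set for this event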
alunduil/crumbs | crumbs/__init__.py | Parameters.add_configuration_file | def add_configuration_file(self, file_name):
'''Register a file path from which to read parameter values.
This method can be called multiple times to register multiple files for
querying. Files are expected to be ``ini`` formatted.
No assumptions should be made about the order that the registered files
are read and values defined in multiple files may have unpredictable
results.
**Arguments**
:``file_name``: Name of the file to add to the parameter search.
'''
logger.info('adding %s to configuration files', file_name)
if file_name not in self.configuration_files and self._inotify:
self._watch_manager.add_watch(file_name, pyinotify.IN_MODIFY)
if os.access(file_name, os.R_OK):
self.configuration_files[file_name] = SafeConfigParser()
self.configuration_files[file_name].read(file_name)
else:
logger.warn('could not read %s', file_name)
warnings.warn('could not read {}'.format(file_name), ResourceWarning) | python | def add_configuration_file(self, file_name):
'''Register a file path from which to read parameter values.
This method can be called multiple times to register multiple files for
querying. Files are expected to be ``ini`` formatted.
No assumptions should be made about the order that the registered files
are read and values defined in multiple files may have unpredictable
results.
**Arguments**
:``file_name``: Name of the file to add to the parameter search.
'''
logger.info('adding %s to configuration files', file_name)
if file_name not in self.configuration_files and self._inotify:
self._watch_manager.add_watch(file_name, pyinotify.IN_MODIFY)
if os.access(file_name, os.R_OK):
self.configuration_files[file_name] = SafeConfigParser()
self.configuration_files[file_name].read(file_name)
else:
logger.warn('could not read %s', file_name)
warnings.warn('could not read {}'.format(file_name), ResourceWarning) | [
"def",
"add_configuration_file",
"(",
"self",
",",
"file_name",
")",
":",
"logger",
".",
"info",
"(",
"'adding %s to configuration files'",
",",
"file_name",
")",
"if",
"file_name",
"not",
"in",
"self",
".",
"configuration_files",
"and",
"self",
".",
"_inotify",
":",
"self",
".",
"_watch_manager",
".",
"add_watch",
"(",
"file_name",
",",
"pyinotify",
".",
"IN_MODIFY",
")",
"if",
"os",
".",
"access",
"(",
"file_name",
",",
"os",
".",
"R_OK",
")",
":",
"self",
".",
"configuration_files",
"[",
"file_name",
"]",
"=",
"SafeConfigParser",
"(",
")",
"self",
".",
"configuration_files",
"[",
"file_name",
"]",
".",
"read",
"(",
"file_name",
")",
"else",
":",
"logger",
".",
"warn",
"(",
"'could not read %s'",
",",
"file_name",
")",
"warnings",
".",
"warn",
"(",
"'could not read {}'",
".",
"format",
"(",
"file_name",
")",
",",
"ResourceWarning",
")"
] | Register a file path from which to read parameter values.
This method can be called multiple times to register multiple files for
querying. Files are expected to be ``ini`` formatted.
No assumptions should be made about the order that the registered files
are read and values defined in multiple files may have unpredictable
results.
**Arguments**
:``file_name``: Name of the file to add to the parameter search. | [
"Register",
"a",
"file",
"path",
"from",
"which",
"to",
"read",
"parameter",
"values",
"."
] | 94b23f45db3054000d16968a44400780c6cff5ba | https://github.com/alunduil/crumbs/blob/94b23f45db3054000d16968a44400780c6cff5ba/crumbs/__init__.py#L328-L354 | train |
alunduil/crumbs | crumbs/__init__.py | Parameters.add_parameter | def add_parameter(self, **kwargs):
'''Add the parameter to ``Parameters``.
**Arguments**
The arguments are lumped into two groups: ``Parameters.add_parameter``
and ``argparse.ArgumentParser.add_argument``. Parameters that are only
used by ``Parameters.add_parameter`` are removed before ``kwargs`` is
passed directly to ``argparse.ArgumentParser.add_argument``.
.. note::
Once ``parse`` has been called ``Parameters.parsed`` will be True
and it is inadvisable to add more parameters to the ``Parameters``.
*``Parameters.add_parameter`` Arguments*
:``environment_prefix``: Prefix to add when searching the environment
for this parameter. Default:
os.path.basename(sys.argv[0]).
:``group``: Group (namespace or prefix) for parameter
(corresponds to section name in configuration
files). Default: 'default'.
:``options``: REQUIRED. The list of options to match for
this parameter in argv.
:``only``: Iterable containing the components that this
parameter applies to (i.e. 'environment',
'configuration', 'argument'). Default:
('environment', 'configuration', 'argument').
*``argparse.ArgumentParser.add_argument`` Arguments*
:``name or flags``: Positional argument filled in by options keyword
argument.
:``action``: The basic type of action to be taken when this
argument is encountered at the command line.
:``nargs``: The number of command-line arguments that should be
consumed.
:``const``: A constant value required by some action and nargs
selections.
:``default``: The value produced if the argument is absent from
the command line.
:``type``: The type to which the command-line argument should
be converted.
:``choices``: A container of the allowable values for the
argument.
:``required``: Whether or not the command-line option may be
omitted (optionals only).
:``help``: A brief description of what the argument does.
:``metavar``: A name for the argument in usage messages.
:``dest``: The name of the attribute to be added to the object
returned by parse_args().
'''
parameter_name = max(kwargs['options'], key = len).lstrip('-')
if 'dest' in kwargs:
parameter_name = kwargs['dest']
group = kwargs.pop('group', 'default')
self.groups.add(group)
parameter_name = '.'.join([ group, parameter_name ]).lstrip('.').replace('-', '_')
logger.info('adding parameter %s', parameter_name)
if self.parsed:
logger.warn('adding parameter %s after parse', parameter_name)
warnings.warn('adding parameter {} after parse'.format(parameter_name), RuntimeWarning)
self.parameters[parameter_name] = copy.copy(kwargs)
self.parameters[parameter_name]['group'] = group
self.parameters[parameter_name]['type'] = kwargs.get('type', str)
self.parameters[parameter_name]['environment_prefix'] = kwargs.pop('environment_prefix', os.path.basename(sys.argv[0]))
if self.parameters[parameter_name]['environment_prefix'] is not None:
self.parameters[parameter_name]['environment_prefix'] = self.parameters[parameter_name]['environment_prefix'].upper().replace('-', '_')
logger.info('group: %s', group)
self.grouped_parameters.setdefault(group, {}).setdefault(parameter_name.replace(group + '.', ''), self.parameters[parameter_name])
action_defaults = {
'store': kwargs.get('default'),
'store_const': kwargs.get('const'),
'store_true': False,
'store_false': True,
'append': [],
'append_const': [],
'count': 0,
}
self.defaults[parameter_name] = action_defaults[kwargs.get('action', 'store')]
logger.info('default value: %s', kwargs.get('default'))
if 'argument' in kwargs.pop('only', [ 'argument' ]):
if group not in self._group_parsers:
self._group_parsers[group] = self._group_parsers['default'].add_argument_group(group)
if self._group_prefix and group != 'default':
long_option = max(kwargs['options'], key = len)
kwargs['options'].remove(long_option)
kwargs['options'].append(long_option.replace('--', '--' + group.replace('_', '-') + '-'))
logger.debug('options: %s', kwargs['options'])
self._group_parsers[group].add_argument(*kwargs.pop('options'), **kwargs) | python | def add_parameter(self, **kwargs):
'''Add the parameter to ``Parameters``.
**Arguments**
The arguments are lumped into two groups: ``Parameters.add_parameter``
and ``argparse.ArgumentParser.add_argument``. Parameters that are only
used by ``Parameters.add_parameter`` are removed before ``kwargs`` is
passed directly to ``argparse.ArgumentParser.add_argument``.
.. note::
Once ``parse`` has been called ``Parameters.parsed`` will be True
and it is inadvisable to add more parameters to the ``Parameters``.
*``Parameters.add_parameter`` Arguments*
:``environment_prefix``: Prefix to add when searching the environment
for this parameter. Default:
os.path.basename(sys.argv[0]).
:``group``: Group (namespace or prefix) for parameter
(corresponds to section name in configuration
files). Default: 'default'.
:``options``: REQUIRED. The list of options to match for
this parameter in argv.
:``only``: Iterable containing the components that this
parameter applies to (i.e. 'environment',
'configuration', 'argument'). Default:
('environment', 'configuration', 'argument').
*``argparse.ArgumentParser.add_argument`` Arguments*
:``name or flags``: Positional argument filled in by options keyword
argument.
:``action``: The basic type of action to be taken when this
argument is encountered at the command line.
:``nargs``: The number of command-line arguments that should be
consumed.
:``const``: A constant value required by some action and nargs
selections.
:``default``: The value produced if the argument is absent from
the command line.
:``type``: The type to which the command-line argument should
be converted.
:``choices``: A container of the allowable values for the
argument.
:``required``: Whether or not the command-line option may be
omitted (optionals only).
:``help``: A brief description of what the argument does.
:``metavar``: A name for the argument in usage messages.
:``dest``: The name of the attribute to be added to the object
returned by parse_args().
'''
parameter_name = max(kwargs['options'], key = len).lstrip('-')
if 'dest' in kwargs:
parameter_name = kwargs['dest']
group = kwargs.pop('group', 'default')
self.groups.add(group)
parameter_name = '.'.join([ group, parameter_name ]).lstrip('.').replace('-', '_')
logger.info('adding parameter %s', parameter_name)
if self.parsed:
logger.warn('adding parameter %s after parse', parameter_name)
warnings.warn('adding parameter {} after parse'.format(parameter_name), RuntimeWarning)
self.parameters[parameter_name] = copy.copy(kwargs)
self.parameters[parameter_name]['group'] = group
self.parameters[parameter_name]['type'] = kwargs.get('type', str)
self.parameters[parameter_name]['environment_prefix'] = kwargs.pop('environment_prefix', os.path.basename(sys.argv[0]))
if self.parameters[parameter_name]['environment_prefix'] is not None:
self.parameters[parameter_name]['environment_prefix'] = self.parameters[parameter_name]['environment_prefix'].upper().replace('-', '_')
logger.info('group: %s', group)
self.grouped_parameters.setdefault(group, {}).setdefault(parameter_name.replace(group + '.', ''), self.parameters[parameter_name])
action_defaults = {
'store': kwargs.get('default'),
'store_const': kwargs.get('const'),
'store_true': False,
'store_false': True,
'append': [],
'append_const': [],
'count': 0,
}
self.defaults[parameter_name] = action_defaults[kwargs.get('action', 'store')]
logger.info('default value: %s', kwargs.get('default'))
if 'argument' in kwargs.pop('only', [ 'argument' ]):
if group not in self._group_parsers:
self._group_parsers[group] = self._group_parsers['default'].add_argument_group(group)
if self._group_prefix and group != 'default':
long_option = max(kwargs['options'], key = len)
kwargs['options'].remove(long_option)
kwargs['options'].append(long_option.replace('--', '--' + group.replace('_', '-') + '-'))
logger.debug('options: %s', kwargs['options'])
self._group_parsers[group].add_argument(*kwargs.pop('options'), **kwargs) | [
"def",
"add_parameter",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"parameter_name",
"=",
"max",
"(",
"kwargs",
"[",
"'options'",
"]",
",",
"key",
"=",
"len",
")",
".",
"lstrip",
"(",
"'-'",
")",
"if",
"'dest'",
"in",
"kwargs",
":",
"parameter_name",
"=",
"kwargs",
"[",
"'dest'",
"]",
"group",
"=",
"kwargs",
".",
"pop",
"(",
"'group'",
",",
"'default'",
")",
"self",
".",
"groups",
".",
"add",
"(",
"group",
")",
"parameter_name",
"=",
"'.'",
".",
"join",
"(",
"[",
"group",
",",
"parameter_name",
"]",
")",
".",
"lstrip",
"(",
"'.'",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"logger",
".",
"info",
"(",
"'adding parameter %s'",
",",
"parameter_name",
")",
"if",
"self",
".",
"parsed",
":",
"logger",
".",
"warn",
"(",
"'adding parameter %s after parse'",
",",
"parameter_name",
")",
"warnings",
".",
"warn",
"(",
"'adding parameter {} after parse'",
".",
"format",
"(",
"parameter_name",
")",
",",
"RuntimeWarning",
")",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"=",
"copy",
".",
"copy",
"(",
"kwargs",
")",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"[",
"'group'",
"]",
"=",
"group",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"[",
"'type'",
"]",
"=",
"kwargs",
".",
"get",
"(",
"'type'",
",",
"str",
")",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"[",
"'environment_prefix'",
"]",
"=",
"kwargs",
".",
"pop",
"(",
"'environment_prefix'",
",",
"os",
".",
"path",
".",
"basename",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
")",
")",
"if",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"[",
"'environment_prefix'",
"]",
"is",
"not",
"None",
":",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"[",
"'environment_prefix'",
"]",
"=",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
"[",
"'environment_prefix'",
"]",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"'_'",
")",
"logger",
".",
"info",
"(",
"'group: %s'",
",",
"group",
")",
"self",
".",
"grouped_parameters",
".",
"setdefault",
"(",
"group",
",",
"{",
"}",
")",
".",
"setdefault",
"(",
"parameter_name",
".",
"replace",
"(",
"group",
"+",
"'.'",
",",
"''",
")",
",",
"self",
".",
"parameters",
"[",
"parameter_name",
"]",
")",
"action_defaults",
"=",
"{",
"'store'",
":",
"kwargs",
".",
"get",
"(",
"'default'",
")",
",",
"'store_const'",
":",
"kwargs",
".",
"get",
"(",
"'const'",
")",
",",
"'store_true'",
":",
"False",
",",
"'store_false'",
":",
"True",
",",
"'append'",
":",
"[",
"]",
",",
"'append_const'",
":",
"[",
"]",
",",
"'count'",
":",
"0",
",",
"}",
"self",
".",
"defaults",
"[",
"parameter_name",
"]",
"=",
"action_defaults",
"[",
"kwargs",
".",
"get",
"(",
"'action'",
",",
"'store'",
")",
"]",
"logger",
".",
"info",
"(",
"'default value: %s'",
",",
"kwargs",
".",
"get",
"(",
"'default'",
")",
")",
"if",
"'argument'",
"in",
"kwargs",
".",
"pop",
"(",
"'only'",
",",
"[",
"'argument'",
"]",
")",
":",
"if",
"group",
"not",
"in",
"self",
".",
"_group_parsers",
":",
"self",
".",
"_group_parsers",
"[",
"group",
"]",
"=",
"self",
".",
"_group_parsers",
"[",
"'default'",
"]",
".",
"add_argument_group",
"(",
"group",
")",
"if",
"self",
".",
"_group_prefix",
"and",
"group",
"!=",
"'default'",
":",
"long_option",
"=",
"max",
"(",
"kwargs",
"[",
"'options'",
"]",
",",
"key",
"=",
"len",
")",
"kwargs",
"[",
"'options'",
"]",
".",
"remove",
"(",
"long_option",
")",
"kwargs",
"[",
"'options'",
"]",
".",
"append",
"(",
"long_option",
".",
"replace",
"(",
"'--'",
",",
"'--'",
"+",
"group",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"+",
"'-'",
")",
")",
"logger",
".",
"debug",
"(",
"'options: %s'",
",",
"kwargs",
"[",
"'options'",
"]",
")",
"self",
".",
"_group_parsers",
"[",
"group",
"]",
".",
"add_argument",
"(",
"*",
"kwargs",
".",
"pop",
"(",
"'options'",
")",
",",
"*",
"*",
"kwargs",
")"
] | Add the parameter to ``Parameters``.
**Arguments**
The arguments are lumped into two groups: ``Parameters.add_parameter``
and ``argparse.ArgumentParser.add_argument``. Parameters that are only
used by ``Parameters.add_parameter`` are removed before ``kwargs`` is
passed directly to ``argparse.ArgumentParser.add_argument``.
.. note::
Once ``parse`` has been called ``Parameters.parsed`` will be True
and it is inadvisable to add more parameters to the ``Parameters``.
*``Parameters.add_parameter`` Arguments*
:``environment_prefix``: Prefix to add when searching the environment
for this parameter. Default:
os.path.basename(sys.argv[0]).
:``group``: Group (namespace or prefix) for parameter
(corresponds to section name in configuration
files). Default: 'default'.
:``options``: REQUIRED. The list of options to match for
this parameter in argv.
:``only``: Iterable containing the components that this
parameter applies to (i.e. 'environment',
'configuration', 'argument'). Default:
('environment', 'configuration', 'argument').
*``argparse.ArgumentParser.add_argument`` Arguments*
:``name or flags``: Positional argument filled in by options keyword
argument.
:``action``: The basic type of action to be taken when this
argument is encountered at the command line.
:``nargs``: The number of command-line arguments that should be
consumed.
:``const``: A constant value required by some action and nargs
selections.
:``default``: The value produced if the argument is absent from
the command line.
:``type``: The type to which the command-line argument should
be converted.
:``choices``: A container of the allowable values for the
argument.
:``required``: Whether or not the command-line option may be
omitted (optionals only).
:``help``: A brief description of what the argument does.
:``metavar``: A name for the argument in usage messages.
:``dest``: The name of the attribute to be added to the object
returned by parse_args(). | [
"Add",
"the",
"parameter",
"to",
"Parameters",
"."
] | 94b23f45db3054000d16968a44400780c6cff5ba | https://github.com/alunduil/crumbs/blob/94b23f45db3054000d16968a44400780c6cff5ba/crumbs/__init__.py#L356-L464 | train |
alunduil/crumbs | crumbs/__init__.py | Parameters.parse | def parse(self, only_known = False):
'''Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an unparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution.
'''
self.parsed = not only_known or self.parsed
logger.info('parsing parameters')
logger.debug('sys.argv: %s', sys.argv)
if only_known:
args = [ _ for _ in copy.copy(sys.argv) if not re.match('-h|--help', _) ]
self._group_parsers['default'].parse_known_args(args = args, namespace = self._argument_namespace)
else:
self._group_parsers['default'].parse_args(namespace = self._argument_namespace) | python | def parse(self, only_known = False):
'''Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an unparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution.
'''
self.parsed = not only_known or self.parsed
logger.info('parsing parameters')
logger.debug('sys.argv: %s', sys.argv)
if only_known:
args = [ _ for _ in copy.copy(sys.argv) if not re.match('-h|--help', _) ]
self._group_parsers['default'].parse_known_args(args = args, namespace = self._argument_namespace)
else:
self._group_parsers['default'].parse_args(namespace = self._argument_namespace) | [
"def",
"parse",
"(",
"self",
",",
"only_known",
"=",
"False",
")",
":",
"self",
".",
"parsed",
"=",
"not",
"only_known",
"or",
"self",
".",
"parsed",
"logger",
".",
"info",
"(",
"'parsing parameters'",
")",
"logger",
".",
"debug",
"(",
"'sys.argv: %s'",
",",
"sys",
".",
"argv",
")",
"if",
"only_known",
":",
"args",
"=",
"[",
"_",
"for",
"_",
"in",
"copy",
".",
"copy",
"(",
"sys",
".",
"argv",
")",
"if",
"not",
"re",
".",
"match",
"(",
"'-h|--help'",
",",
"_",
")",
"]",
"self",
".",
"_group_parsers",
"[",
"'default'",
"]",
".",
"parse_known_args",
"(",
"args",
"=",
"args",
",",
"namespace",
"=",
"self",
".",
"_argument_namespace",
")",
"else",
":",
"self",
".",
"_group_parsers",
"[",
"'default'",
"]",
".",
"parse_args",
"(",
"namespace",
"=",
"self",
".",
"_argument_namespace",
")"
] | Ensure all sources are ready to be queried.
Parses ``sys.argv`` with the contained ``argparse.ArgumentParser`` and
sets ``parsed`` to True if ``only_known`` is False. Once ``parsed`` is
set to True, it is inadvisable to add more parameters (cf.
``add_parameter``). Also, if ``parsed`` is not set to True, retrieving
items (cf. ``__getitem__``) will result in a warning that values are
being retrieved from an unparsed Parameters.
**Arguments**
:``only_known``: If True, do not error or fail when unknown parameters
are encountered.
.. note::
If ``only_known`` is True, the ``--help`` and
``-h`` options on the command line (``sys.argv``)
will be ignored during parsing as it is unexpected
that these parameters' default behavior would be
desired at this stage of execution. | [
"Ensure",
"all",
"sources",
"are",
"ready",
"to",
"be",
"queried",
"."
] | 94b23f45db3054000d16968a44400780c6cff5ba | https://github.com/alunduil/crumbs/blob/94b23f45db3054000d16968a44400780c6cff5ba/crumbs/__init__.py#L466-L501 | train |
alunduil/crumbs | crumbs/__init__.py | Parameters.read_configuration_files | def read_configuration_files(self):
'''Explicitly read the configuration files.
Reads all configuration files in this Parameters object, even if
inotify is watching or a read has already occurred.
.. note::
The order that the configuration files are read is not guaranteed.
'''
for file_name, configuration_parser in self.configuration_files.items():
if os.access(file_name, os.R_OK):
configuration_parser.read(file_name)
else:
logger.warn('could not read %s', file_name)
warnings.warn('could not read {}'.format(file_name), ResourceWarning) | python | def read_configuration_files(self):
'''Explicitly read the configuration files.
Reads all configuration files in this Parameters object, even if
inotify is watching or a read has already occurred.
.. note::
The order that the configuration files are read is not guaranteed.
'''
for file_name, configuration_parser in self.configuration_files.items():
if os.access(file_name, os.R_OK):
configuration_parser.read(file_name)
else:
logger.warn('could not read %s', file_name)
warnings.warn('could not read {}'.format(file_name), ResourceWarning) | [
"def",
"read_configuration_files",
"(",
"self",
")",
":",
"for",
"file_name",
",",
"configuration_parser",
"in",
"self",
".",
"configuration_files",
".",
"items",
"(",
")",
":",
"if",
"os",
".",
"access",
"(",
"file_name",
",",
"os",
".",
"R_OK",
")",
":",
"configuration_parser",
".",
"read",
"(",
"file_name",
")",
"else",
":",
"logger",
".",
"warn",
"(",
"'could not read %s'",
",",
"file_name",
")",
"warnings",
".",
"warn",
"(",
"'could not read {}'",
".",
"format",
"(",
"file_name",
")",
",",
"ResourceWarning",
")"
] | Explicitly read the configuration files.
Reads all configuration files in this Parameters object, even if
inotify is watching or a read has already occurred.
.. note::
The order that the configuration files are read is not guaranteed. | [
"Explicitly",
"read",
"the",
"configuration",
"files",
"."
] | 94b23f45db3054000d16968a44400780c6cff5ba | https://github.com/alunduil/crumbs/blob/94b23f45db3054000d16968a44400780c6cff5ba/crumbs/__init__.py#L503-L520 | train |
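Taken together, the four Parameters methods above describe one lifecycle: declare parameters, register configuration files, parse, then read values. A hedged sketch — the constructor arguments and the exact lookup keys are assumptions; everything else follows the docstrings:

import sys

params = Parameters()  # constructor defaults assumed

params.add_parameter(options=['--log-level'], default='info',
                     help='logging verbosity')
params.add_parameter(group='db', options=['--host'], default='localhost')

params.add_configuration_file('/etc/myapp.ini')  # warns if the file is unreadable

sys.argv = ['myapp', '--log-level', 'debug']
params.parse()

# Values resolve across arguments, configuration files and the environment;
# the key names used here ('log_level', 'db.host') are assumptions.
print(params['log_level'])
print(params['db.host'])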
robinandeer/puzzle | puzzle/models/variant.py | Variant.nr_genes | def nr_genes(self):
"""Return the number of genes"""
if self['genes']:
nr_genes = len(self['genes'])
else:
nr_genes = len(self['gene_symbols'])
return nr_genes | python | def nr_genes(self):
"""Return the number of genes"""
if self['genes']:
nr_genes = len(self['genes'])
else:
nr_genes = len(self['gene_symbols'])
return nr_genes | [
"def",
"nr_genes",
"(",
"self",
")",
":",
"if",
"self",
"[",
"'genes'",
"]",
":",
"nr_genes",
"=",
"len",
"(",
"self",
"[",
"'genes'",
"]",
")",
"else",
":",
"nr_genes",
"=",
"len",
"(",
"self",
"[",
"'gene_symbols'",
"]",
")",
"return",
"nr_genes"
] | Return the number of genes | [
"Return",
"the",
"number",
"of",
"genes"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L44-L50 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.display_name | def display_name(self):
"""Readable name for the variant."""
if self.is_snv:
gene_ids = self.gene_symbols[:2]
return ', '.join(gene_ids)
else:
return "{this.cytoband_start} ({this.sv_len})".format(this=self) | python | def display_name(self):
"""Readable name for the variant."""
if self.is_snv:
gene_ids = self.gene_symbols[:2]
return ', '.join(gene_ids)
else:
return "{this.cytoband_start} ({this.sv_len})".format(this=self) | [
"def",
"display_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_snv",
":",
"gene_ids",
"=",
"self",
".",
"gene_symbols",
"[",
":",
"2",
"]",
"return",
"', '",
".",
"join",
"(",
"gene_ids",
")",
"else",
":",
"return",
"\"{this.cytoband_start} ({this.sv_len})\"",
".",
"format",
"(",
"this",
"=",
"self",
")"
] | Readable name for the variant. | [
"Readable",
"name",
"for",
"the",
"variant",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L58-L64 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.md5 | def md5(self):
"""Return a md5 key string based on position, ref and alt"""
return hashlib.md5('_'.join([self.CHROM, str(self.POS), self.REF,
self.ALT])).hexdigest() | python | def md5(self):
"""Return a md5 key string based on position, ref and alt"""
return hashlib.md5('_'.join([self.CHROM, str(self.POS), self.REF,
self.ALT])).hexdigest() | [
"def",
"md5",
"(",
"self",
")",
":",
"return",
"hashlib",
".",
"md5",
"(",
"'_'",
".",
"join",
"(",
"[",
"self",
".",
"CHROM",
",",
"str",
"(",
"self",
".",
"POS",
")",
",",
"self",
".",
"REF",
",",
"self",
".",
"ALT",
"]",
")",
")",
".",
"hexdigest",
"(",
")"
] | Return an md5 key string based on position, ref and alt | [
"Return",
"a",
"md5",
"key",
"string",
"based",
"on",
"position",
"ref",
"and",
"alt"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L67-L70 | train |
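One caveat on the key scheme above: hashlib.md5() accepts str on Python 2 but requires bytes on Python 3, so the joined key needs an explicit encode there. A small Python 3-safe equivalent of the same scheme:

import hashlib

def variant_md5(chrom, pos, ref, alt):
    # Same position/ref/alt key as Variant.md5, with an explicit encode.
    key = '_'.join([chrom, str(pos), ref, alt])
    return hashlib.md5(key.encode('utf-8')).hexdigest()

print(variant_md5('1', 880086, 'T', 'C'))  # deterministic key for this variant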
robinandeer/puzzle | puzzle/models/variant.py | Variant.add_frequency | def add_frequency(self, name, value):
"""Add a frequency that will be displayed on the variant level
Args:
name (str): The name of the frequency field
value: The value of the frequency
"""
logger.debug("Adding frequency {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['frequencies'].append({'label': name, 'value': value}) | python | def add_frequency(self, name, value):
"""Add a frequency that will be displayed on the variant level
Args:
name (str): The name of the frequency field
value: The value of the frequency
"""
logger.debug("Adding frequency {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['frequencies'].append({'label': name, 'value': value}) | [
"def",
"add_frequency",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding frequency {0} with value {1} to variant {2}\"",
".",
"format",
"(",
"name",
",",
"value",
",",
"self",
"[",
"'variant_id'",
"]",
")",
")",
"self",
"[",
"'frequencies'",
"]",
".",
"append",
"(",
"{",
"'label'",
":",
"name",
",",
"'value'",
":",
"value",
"}",
")"
] | Add a frequency that will be displayed on the variant level
Args:
name (str): The name of the frequency field
value: The value of the frequency | [
"Add",
"a",
"frequency",
"that",
"will",
"be",
"displayed",
"on",
"the",
"variant",
"level"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L81-L89 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.set_max_freq | def set_max_freq(self, max_freq=None):
"""Set the max frequency for the variant
If max_freq is given, use it; otherwise go through all frequencies and
set the highest as self['max_freq']
Args:
max_freq (float): The max frequency
"""
if max_freq:
self['max_freq'] = max_freq
else:
for frequency in self['frequencies']:
if self['max_freq']:
if frequency['value'] > self['max_freq']:
self['max_freq'] = frequency['value']
else:
self['max_freq'] = frequency['value']
return | python | def set_max_freq(self, max_freq=None):
"""Set the max frequency for the variant
If max_freq is given, use it; otherwise go through all frequencies and
set the highest as self['max_freq']
Args:
max_freq (float): The max frequency
"""
if max_freq:
self['max_freq'] = max_freq
else:
for frequency in self['frequencies']:
if self['max_freq']:
if frequency['value'] > self['max_freq']:
self['max_freq'] = frequency['value']
else:
self['max_freq'] = frequency['value']
return | [
"def",
"set_max_freq",
"(",
"self",
",",
"max_freq",
"=",
"None",
")",
":",
"if",
"max_freq",
":",
"self",
"[",
"'max_freq'",
"]",
"=",
"max_freq",
"else",
":",
"for",
"frequency",
"in",
"self",
"[",
"'frequencies'",
"]",
":",
"if",
"self",
"[",
"'max_freq'",
"]",
":",
"if",
"frequency",
"[",
"'value'",
"]",
">",
"self",
"[",
"'max_freq'",
"]",
":",
"self",
"[",
"'max_freq'",
"]",
"=",
"frequency",
"[",
"'value'",
"]",
"else",
":",
"self",
"[",
"'max_freq'",
"]",
"=",
"frequency",
"[",
"'value'",
"]",
"return"
] | Set the max frequency for the variant
If max_freq is given, use it; otherwise go through all frequencies and
set the highest as self['max_freq']
Args:
max_freq (float): The max frequency | [
"Set",
"the",
"max",
"frequency",
"for",
"the",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L91-L109 | train |
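The fallback branch of set_max_freq is a running maximum over the frequencies list. A self-contained sketch of that logic (the sample frequencies are hypothetical); note that the original guards with truthiness (if self['max_freq']:), so a stored value of 0.0 would be treated as unset, whereas an explicit None check avoids that:

frequencies = [{'label': '1000G', 'value': 0.01},
               {'label': 'ExAC', 'value': 0.05}]

max_freq = None
for frequency in frequencies:
    # Explicit None check: 0.0 is a valid, already-set maximum.
    if max_freq is None or frequency['value'] > max_freq:
        max_freq = frequency['value']

print(max_freq)  # 0.05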
robinandeer/puzzle | puzzle/models/variant.py | Variant.add_severity | def add_severity(self, name, value):
"""Add a severity to the variant
Args:
name (str): The name of the severity
value : The value of the severity
"""
logger.debug("Adding severity {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['severities'].append({name: value}) | python | def add_severity(self, name, value):
"""Add a severity to the variant
Args:
name (str): The name of the severity
value : The value of the severity
"""
logger.debug("Adding severity {0} with value {1} to variant {2}".format(
name, value, self['variant_id']))
self['severities'].append({name: value}) | [
"def",
"add_severity",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding severity {0} with value {1} to variant {2}\"",
".",
"format",
"(",
"name",
",",
"value",
",",
"self",
"[",
"'variant_id'",
"]",
")",
")",
"self",
"[",
"'severities'",
"]",
".",
"append",
"(",
"{",
"name",
":",
"value",
"}",
")"
] | Add a severity to the variant
Args:
name (str): The name of the severity
value : The value of the severity | [
"Add",
"a",
"severity",
"to",
"the",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L111-L120 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.add_individual | def add_individual(self, genotype):
"""Add the information for a individual
This adds a genotype dict to variant['individuals']
Args:
genotype (dict): A genotype dictionary
"""
logger.debug("Adding genotype {0} to variant {1}".format(
genotype, self['variant_id']))
self['individuals'].append(genotype) | python | def add_individual(self, genotype):
"""Add the information for a individual
This adds a genotype dict to variant['individuals']
Args:
genotype (dict): A genotype dictionary
"""
logger.debug("Adding genotype {0} to variant {1}".format(
genotype, self['variant_id']))
self['individuals'].append(genotype) | [
"def",
"add_individual",
"(",
"self",
",",
"genotype",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding genotype {0} to variant {1}\"",
".",
"format",
"(",
"genotype",
",",
"self",
"[",
"'variant_id'",
"]",
")",
")",
"self",
"[",
"'individuals'",
"]",
".",
"append",
"(",
"genotype",
")"
] | Add the information for an individual
This adds a genotype dict to variant['individuals']
Args:
genotype (dict): A genotype dictionary | [
"Add",
"the",
"information",
"for",
"a",
"individual"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L122-L132 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.add_transcript | def add_transcript(self, transcript):
"""Add the information transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary
"""
logger.debug("Adding transcript {0} to variant {1}".format(
transcript, self['variant_id']))
self['transcripts'].append(transcript) | python | def add_transcript(self, transcript):
"""Add the information transcript
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary
"""
logger.debug("Adding transcript {0} to variant {1}".format(
transcript, self['variant_id']))
self['transcripts'].append(transcript) | [
"def",
"add_transcript",
"(",
"self",
",",
"transcript",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding transcript {0} to variant {1}\"",
".",
"format",
"(",
"transcript",
",",
"self",
"[",
"'variant_id'",
"]",
")",
")",
"self",
"[",
"'transcripts'",
"]",
".",
"append",
"(",
"transcript",
")"
] | Add the transcript information
This adds a transcript dict to variant['transcripts']
Args:
transcript (dict): A transcript dictionary | [
"Add",
"the",
"information",
"transcript"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L134-L144 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.add_gene | def add_gene(self, gene):
"""Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
"""
logger.debug("Adding gene {0} to variant {1}".format(
gene, self['variant_id']))
self['genes'].append(gene) | python | def add_gene(self, gene):
"""Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary
"""
logger.debug("Adding gene {0} to variant {1}".format(
gene, self['variant_id']))
self['genes'].append(gene) | [
"def",
"add_gene",
"(",
"self",
",",
"gene",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding gene {0} to variant {1}\"",
".",
"format",
"(",
"gene",
",",
"self",
"[",
"'variant_id'",
"]",
")",
")",
"self",
"[",
"'genes'",
"]",
".",
"append",
"(",
"gene",
")"
] | Add the information of a gene
This adds a gene dict to variant['genes']
Args:
gene (dict): A gene dictionary | [
"Add",
"the",
"information",
"of",
"a",
"gene"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L146-L157 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant.add_compound | def add_compound(self, compound):
"""Add the information of a compound variant
This adds a compound dict to variant['compounds']
Args:
compound (dict): A compound dictionary
"""
logger.debug("Adding compound {0} to variant {1}".format(
compound, self['variant_id']))
self['compounds'].append(compound) | python | def add_compound(self, compound):
"""Add the information of a compound variant
This adds a compound dict to variant['compounds']
Args:
compound (dict): A compound dictionary
"""
logger.debug("Adding compound {0} to variant {1}".format(
compound, self['variant_id']))
self['compounds'].append(compound) | [
"def",
"add_compound",
"(",
"self",
",",
"compound",
")",
":",
"logger",
".",
"debug",
"(",
"\"Adding compound {0} to variant {1}\"",
".",
"format",
"(",
"compound",
",",
"self",
"[",
"'variant_id'",
"]",
")",
")",
"self",
"[",
"'compounds'",
"]",
".",
"append",
"(",
"compound",
")"
] | Add the information of a compound variant
This adds a compound dict to variant['compounds']
Args:
compound (dict): A compound dictionary | [
"Add",
"the",
"information",
"of",
"a",
"compound",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L159-L170 | train |
robinandeer/puzzle | puzzle/models/variant.py | Variant._set_variant_id | def _set_variant_id(self, variant_id=None):
"""Set the variant id for this variant"""
if not variant_id:
variant_id = '_'.join([
self.CHROM,
str(self.POS),
self.REF,
self.ALT
])
logger.debug("Updating variant id to {0}".format(
variant_id))
self['variant_id'] = variant_id | python | def _set_variant_id(self, variant_id=None):
"""Set the variant id for this variant"""
if not variant_id:
variant_id = '_'.join([
self.CHROM,
str(self.POS),
self.REF,
self.ALT
])
logger.debug("Updating variant id to {0}".format(
variant_id))
self['variant_id'] = variant_id | [
"def",
"_set_variant_id",
"(",
"self",
",",
"variant_id",
"=",
"None",
")",
":",
"if",
"not",
"variant_id",
":",
"variant_id",
"=",
"'_'",
".",
"join",
"(",
"[",
"self",
".",
"CHROM",
",",
"str",
"(",
"self",
".",
"POS",
")",
",",
"self",
".",
"REF",
",",
"self",
".",
"ALT",
"]",
")",
"logger",
".",
"debug",
"(",
"\"Updating variant id to {0}\"",
".",
"format",
"(",
"variant_id",
")",
")",
"self",
"[",
"'variant_id'",
"]",
"=",
"variant_id"
] | Set the variant id for this variant | [
"Set",
"the",
"variant",
"id",
"for",
"this",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L181-L194 | train |
inveniosoftware-contrib/json-merger | json_merger/stats.py | ListMatchStats.move_to_result | def move_to_result(self, lst_idx):
"""Moves element from lst available at lst_idx."""
self.in_result_idx.add(lst_idx)
if lst_idx in self.not_in_result_root_match_idx:
self.not_in_result_root_match_idx.remove(lst_idx) | python | def move_to_result(self, lst_idx):
"""Moves element from lst available at lst_idx."""
self.in_result_idx.add(lst_idx)
if lst_idx in self.not_in_result_root_match_idx:
self.not_in_result_root_match_idx.remove(lst_idx) | [
"def",
"move_to_result",
"(",
"self",
",",
"lst_idx",
")",
":",
"self",
".",
"in_result_idx",
".",
"add",
"(",
"lst_idx",
")",
"if",
"lst_idx",
"in",
"self",
".",
"not_in_result_root_match_idx",
":",
"self",
".",
"not_in_result_root_match_idx",
".",
"remove",
"(",
"lst_idx",
")"
] | Moves element from lst available at lst_idx. | [
"Moves",
"element",
"from",
"lst",
"available",
"at",
"lst_idx",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/stats.py#L72-L77 | train |
inveniosoftware-contrib/json-merger | json_merger/stats.py | ListMatchStats.add_root_match | def add_root_match(self, lst_idx, root_idx):
"""Adds a match for the elements avaialble at lst_idx and root_idx."""
self.root_matches[lst_idx] = root_idx
if lst_idx in self.in_result_idx:
return
self.not_in_result_root_match_idx.add(lst_idx) | python | def add_root_match(self, lst_idx, root_idx):
"""Adds a match for the elements avaialble at lst_idx and root_idx."""
self.root_matches[lst_idx] = root_idx
if lst_idx in self.in_result_idx:
return
self.not_in_result_root_match_idx.add(lst_idx) | [
"def",
"add_root_match",
"(",
"self",
",",
"lst_idx",
",",
"root_idx",
")",
":",
"self",
".",
"root_matches",
"[",
"lst_idx",
"]",
"=",
"root_idx",
"if",
"lst_idx",
"in",
"self",
".",
"in_result_idx",
":",
"return",
"self",
".",
"not_in_result_root_match_idx",
".",
"add",
"(",
"lst_idx",
")"
] | Adds a match for the elements available at lst_idx and root_idx. | [
"Adds",
"a",
"match",
"for",
"the",
"elements",
"avaialble",
"at",
"lst_idx",
"and",
"root_idx",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/stats.py#L79-L85 | train |
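How the two ListMatchStats methods interact: add_root_match records a root match and, when the element is not yet in the result, flags it in not_in_result_root_match_idx; move_to_result clears that flag. A hypothetical usage sketch (assumes a constructed ListMatchStats instance named stats with empty index sets):

stats.add_root_match(lst_idx=3, root_idx=7)
assert stats.root_matches[3] == 7
assert 3 in stats.not_in_result_root_match_idx   # matched but not in result yet

stats.move_to_result(lst_idx=3)
assert 3 in stats.in_result_idx
assert 3 not in stats.not_in_result_root_match_idx   # flag cleared on move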
robinandeer/puzzle | puzzle/plugins/gemini/mixins/variant_extras/transcripts.py | TranscriptExtras._add_transcripts | def _add_transcripts(self, variant_obj, gemini_variant):
"""
Add all transcripts for a variant
Go through all transcripts found for the variant
Args:
gemini_variant (GeminiQueryRow): The gemini variant
Yields:
transcript (puzzle.models.Transcript)
"""
query = "SELECT * from variant_impacts WHERE variant_id = {0}".format(
gemini_variant['variant_id']
)
gq = GeminiQuery(self.db)
gq.run(query)
for gemini_transcript in gq:
transcript = Transcript(
hgnc_symbol=gemini_transcript['gene'],
transcript_id=gemini_transcript['transcript'],
consequence=gemini_transcript['impact_so'],
biotype=gemini_transcript['biotype'],
polyphen=gemini_transcript['polyphen_pred'],
sift=gemini_transcript['sift_pred'],
HGVSc=gemini_transcript['codon_change'],
HGVSp=', '.join([gemini_transcript['aa_change'] or '', gemini_transcript['aa_length'] or ''])
)
variant_obj.add_transcript(transcript) | python | def _add_transcripts(self, variant_obj, gemini_variant):
"""
Add all transcripts for a variant
Go through all transcripts found for the variant
Args:
gemini_variant (GeminiQueryRow): The gemini variant
Yields:
transcript (puzzle.models.Transcript)
"""
query = "SELECT * from variant_impacts WHERE variant_id = {0}".format(
gemini_variant['variant_id']
)
gq = GeminiQuery(self.db)
gq.run(query)
for gemini_transcript in gq:
transcript = Transcript(
hgnc_symbol=gemini_transcript['gene'],
transcript_id=gemini_transcript['transcript'],
consequence=gemini_transcript['impact_so'],
biotype=gemini_transcript['biotype'],
polyphen=gemini_transcript['polyphen_pred'],
sift=gemini_transcript['sift_pred'],
HGVSc=gemini_transcript['codon_change'],
HGVSp=', '.join([gemini_transcript['aa_change'] or '', gemini_transcript['aa_length'] or ''])
)
variant_obj.add_transcript(transcript) | [
"def",
"_add_transcripts",
"(",
"self",
",",
"variant_obj",
",",
"gemini_variant",
")",
":",
"query",
"=",
"\"SELECT * from variant_impacts WHERE variant_id = {0}\"",
".",
"format",
"(",
"gemini_variant",
"[",
"'variant_id'",
"]",
")",
"gq",
"=",
"GeminiQuery",
"(",
"self",
".",
"db",
")",
"gq",
".",
"run",
"(",
"query",
")",
"for",
"gemini_transcript",
"in",
"gq",
":",
"transcript",
"=",
"Transcript",
"(",
"hgnc_symbol",
"=",
"gemini_transcript",
"[",
"'gene'",
"]",
",",
"transcript_id",
"=",
"gemini_transcript",
"[",
"'transcript'",
"]",
",",
"consequence",
"=",
"gemini_transcript",
"[",
"'impact_so'",
"]",
",",
"biotype",
"=",
"gemini_transcript",
"[",
"'biotype'",
"]",
",",
"polyphen",
"=",
"gemini_transcript",
"[",
"'polyphen_pred'",
"]",
",",
"sift",
"=",
"gemini_transcript",
"[",
"'sift_pred'",
"]",
",",
"HGVSc",
"=",
"gemini_transcript",
"[",
"'codon_change'",
"]",
",",
"HGVSp",
"=",
"', '",
".",
"join",
"(",
"[",
"gemini_transcript",
"[",
"'aa_change'",
"]",
"or",
"''",
",",
"gemini_transcript",
"[",
"'aa_length'",
"]",
"or",
"''",
"]",
")",
")",
"variant_obj",
".",
"add_transcript",
"(",
"transcript",
")"
] | Add all transcripts for a variant
Go through all transcripts found for the variant
Args:
gemini_variant (GeminiQueryRow): The gemini variant
Yields:
transcript (puzzle.models.Transcript) | [
"Add",
"all",
"transcripts",
"for",
"a",
"variant"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/gemini/mixins/variant_extras/transcripts.py#L8-L39 | train |
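_add_transcripts interpolates variant_id into the SQL with str.format, which is workable here because gemini's variant_id is an integer, but the general idiom for untrusted values is a parameterized query. A sketch of that idiom with the stdlib sqlite3 driver (the file name and id are placeholders; whether GeminiQuery itself accepts bound parameters is not shown in this snippet):

import sqlite3

conn = sqlite3.connect('gemini.db')  # placeholder database file
rows = conn.execute(
    "SELECT * FROM variant_impacts WHERE variant_id = ?",  # bound parameter
    (42,),
).fetchall()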
eleme/meepo | meepo/pub/mysql.py | mysql_pub | def mysql_pub(mysql_dsn, tables=None, blocking=False, **kwargs):
"""MySQL row-based binlog events pub.
**General Usage**
Listen and pub all tables events::
mysql_pub(mysql_dsn)
Listen and pub only some tables events::
mysql_pub(mysql_dsn, tables=["test"])
By default ``mysql_pub`` will process and pub all existing
row-based binlog (starting from the current binlog file with pos 0) and
quit; you may set blocking to True to block and wait for new binlog.
Enable this option if you're running the script as a daemon::
mysql_pub(mysql_dsn, blocking=True)
The binlog stream acts as a mysql slave and reads binlog from the master,
so the server_id matters; if it conflicts with other slaves or scripts,
strange bugs may happen. By default, the server_id is randomized by
``randint(1000000000, 4294967295)``, you may set it to a specific value
by server_id arg::
mysql_pub(mysql_dsn, blocking=True, server_id=1024)
**Signals Illustrate**
Sometimes you want more info than the pk value; mysql_pub exposes
a raw signal which will send the original binlog stream events.
For example, the following sql::
INSERT INTO test (data) VALUES ('a');
The row-based binlog generated from this sql, read by the binlog stream,
generates signals equal to::
signal("test_write").send(1)
signal("test_write_raw").send({'values': {'data': 'a', 'id': 1}})
**Binlog Pos Signal**
The mysql_pub has a unique signal ``mysql_binlog_pos`` which contains
the binlog file and binlog pos, you can record the signal and resume
binlog stream from last position with it.
:param mysql_dsn: mysql dsn with row-based binlog enabled.
:param tables: which tables to enable mysql_pub.
:param blocking: whether mysql_pub should wait for more binlog when all
existing binlog has been processed.
:param kwargs: more kwargs to be passed to binlog stream.
"""
# parse mysql settings
parsed = urlparse(mysql_dsn)
mysql_settings = {
"host": parsed.hostname,
"port": parsed.port or 3306,
"user": parsed.username,
"passwd": parsed.password
}
# connect to binlog stream
stream = pymysqlreplication.BinLogStreamReader(
mysql_settings,
server_id=random.randint(1000000000, 4294967295),
blocking=blocking,
only_events=[DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent],
**kwargs
)
def _pk(values):
if isinstance(event.primary_key, str):
return values[event.primary_key]
return tuple(values[k] for k in event.primary_key)
for event in stream:
if not event.primary_key:
continue
if tables and event.table not in tables:
continue
try:
rows = event.rows
except (UnicodeDecodeError, ValueError) as e:
logger.exception(e)
continue
timestamp = datetime.datetime.fromtimestamp(event.timestamp)
if isinstance(event, WriteRowsEvent):
sg_name = "%s_write" % event.table
sg = signal(sg_name)
sg_raw = signal("%s_raw" % sg_name)
for row in rows:
pk = _pk(row["values"])
sg.send(pk)
sg_raw.send(row)
logger.debug("%s -> %s, %s" % (sg_name, pk, timestamp))
elif isinstance(event, UpdateRowsEvent):
sg_name = "%s_update" % event.table
sg = signal(sg_name)
sg_raw = signal("%s_raw" % sg_name)
for row in rows:
pk = _pk(row["after_values"])
sg.send(pk)
sg_raw.send(row)
logger.debug("%s -> %s, %s" % (sg_name, pk, timestamp))
elif isinstance(event, DeleteRowsEvent):
sg_name = "%s_delete" % event.table
sg = signal(sg_name)
sg_raw = signal("%s_raw" % sg_name)
for row in rows:
pk = _pk(row["values"])
sg.send(pk)
sg_raw.send(row)
logger.debug("%s -> %s, %s" % (sg_name, pk, timestamp))
signal("mysql_binlog_pos").send(
"%s:%s" % (stream.log_file, stream.log_pos)) | python | def mysql_pub(mysql_dsn, tables=None, blocking=False, **kwargs):
"""MySQL row-based binlog events pub.
**General Usage**
Listen and pub all tables events::
mysql_pub(mysql_dsn)
Listen and pub only some tables events::
mysql_pub(mysql_dsn, tables=["test"])
By default ``mysql_pub`` will process and pub all existing
row-based binlog (starting from the current binlog file with pos 0) and
quit; you may set blocking to True to block and wait for new binlog.
Enable this option if you're running the script as a daemon::
mysql_pub(mysql_dsn, blocking=True)
The binlog stream acts as a mysql slave and reads binlog from the master,
so the server_id matters; if it conflicts with other slaves or scripts,
strange bugs may happen. By default, the server_id is randomized by
``randint(1000000000, 4294967295)``, you may set it to a specific value
by server_id arg::
mysql_pub(mysql_dsn, blocking=True, server_id=1024)
**Signals Illustrate**
Sometimes you want more info than the pk value; mysql_pub exposes
a raw signal which will send the original binlog stream events.
For example, the following sql::
INSERT INTO test (data) VALUES ('a');
The row-based binlog generated from this sql, read by the binlog stream,
generates signals equal to::
signal("test_write").send(1)
signal("test_write_raw").send({'values': {'data': 'a', 'id': 1}})
**Binlog Pos Signal**
The mysql_pub has a unique signal ``mysql_binlog_pos`` which contains
the binlog file and binlog pos, you can record the signal and resume
binlog stream from last position with it.
:param mysql_dsn: mysql dsn with row-based binlog enabled.
:param tables: which tables to enable mysql_pub.
:param blocking: whether mysql_pub should wait for more binlog when all
existing binlog has been processed.
:param kwargs: more kwargs to be passed to binlog stream.
"""
# parse mysql settings
parsed = urlparse(mysql_dsn)
mysql_settings = {
"host": parsed.hostname,
"port": parsed.port or 3306,
"user": parsed.username,
"passwd": parsed.password
}
# connect to binlog stream
stream = pymysqlreplication.BinLogStreamReader(
mysql_settings,
server_id=random.randint(1000000000, 4294967295),
blocking=blocking,
only_events=[DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent],
**kwargs
)
def _pk(values):
if isinstance(event.primary_key, str):
return values[event.primary_key]
return tuple(values[k] for k in event.primary_key)
for event in stream:
if not event.primary_key:
continue
if tables and event.table not in tables:
continue
try:
rows = event.rows
except (UnicodeDecodeError, ValueError) as e:
logger.exception(e)
continue
timestamp = datetime.datetime.fromtimestamp(event.timestamp)
if isinstance(event, WriteRowsEvent):
sg_name = "%s_write" % event.table
sg = signal(sg_name)
sg_raw = signal("%s_raw" % sg_name)
for row in rows:
pk = _pk(row["values"])
sg.send(pk)
sg_raw.send(row)
logger.debug("%s -> %s, %s" % (sg_name, pk, timestamp))
elif isinstance(event, UpdateRowsEvent):
sg_name = "%s_update" % event.table
sg = signal(sg_name)
sg_raw = signal("%s_raw" % sg_name)
for row in rows:
pk = _pk(row["after_values"])
sg.send(pk)
sg_raw.send(row)
logger.debug("%s -> %s, %s" % (sg_name, pk, timestamp))
elif isinstance(event, DeleteRowsEvent):
sg_name = "%s_delete" % event.table
sg = signal(sg_name)
sg_raw = signal("%s_raw" % sg_name)
for row in rows:
pk = _pk(row["values"])
sg.send(pk)
sg_raw.send(row)
logger.debug("%s -> %s, %s" % (sg_name, pk, timestamp))
signal("mysql_binlog_pos").send(
"%s:%s" % (stream.log_file, stream.log_pos)) | [
"def",
"mysql_pub",
"(",
"mysql_dsn",
",",
"tables",
"=",
"None",
",",
"blocking",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"# parse mysql settings",
"parsed",
"=",
"urlparse",
"(",
"mysql_dsn",
")",
"mysql_settings",
"=",
"{",
"\"host\"",
":",
"parsed",
".",
"hostname",
",",
"\"port\"",
":",
"parsed",
".",
"port",
"or",
"3306",
",",
"\"user\"",
":",
"parsed",
".",
"username",
",",
"\"passwd\"",
":",
"parsed",
".",
"password",
"}",
"# connect to binlog stream",
"stream",
"=",
"pymysqlreplication",
".",
"BinLogStreamReader",
"(",
"mysql_settings",
",",
"server_id",
"=",
"random",
".",
"randint",
"(",
"1000000000",
",",
"4294967295",
")",
",",
"blocking",
"=",
"blocking",
",",
"only_events",
"=",
"[",
"DeleteRowsEvent",
",",
"UpdateRowsEvent",
",",
"WriteRowsEvent",
"]",
",",
"*",
"*",
"kwargs",
")",
"def",
"_pk",
"(",
"values",
")",
":",
"if",
"isinstance",
"(",
"event",
".",
"primary_key",
",",
"str",
")",
":",
"return",
"values",
"[",
"event",
".",
"primary_key",
"]",
"return",
"tuple",
"(",
"values",
"[",
"k",
"]",
"for",
"k",
"in",
"event",
".",
"primary_key",
")",
"for",
"event",
"in",
"stream",
":",
"if",
"not",
"event",
".",
"primary_key",
":",
"continue",
"if",
"tables",
"and",
"event",
".",
"table",
"not",
"in",
"tables",
":",
"continue",
"try",
":",
"rows",
"=",
"event",
".",
"rows",
"except",
"(",
"UnicodeDecodeError",
",",
"ValueError",
")",
"as",
"e",
":",
"logger",
".",
"exception",
"(",
"e",
")",
"continue",
"timestamp",
"=",
"datetime",
".",
"datetime",
".",
"fromtimestamp",
"(",
"event",
".",
"timestamp",
")",
"if",
"isinstance",
"(",
"event",
",",
"WriteRowsEvent",
")",
":",
"sg_name",
"=",
"\"%s_write\"",
"%",
"event",
".",
"table",
"sg",
"=",
"signal",
"(",
"sg_name",
")",
"sg_raw",
"=",
"signal",
"(",
"\"%s_raw\"",
"%",
"sg_name",
")",
"for",
"row",
"in",
"rows",
":",
"pk",
"=",
"_pk",
"(",
"row",
"[",
"\"values\"",
"]",
")",
"sg",
".",
"send",
"(",
"pk",
")",
"sg_raw",
".",
"send",
"(",
"row",
")",
"logger",
".",
"debug",
"(",
"\"%s -> %s, %s\"",
"%",
"(",
"sg_name",
",",
"pk",
",",
"timestamp",
")",
")",
"elif",
"isinstance",
"(",
"event",
",",
"UpdateRowsEvent",
")",
":",
"sg_name",
"=",
"\"%s_update\"",
"%",
"event",
".",
"table",
"sg",
"=",
"signal",
"(",
"sg_name",
")",
"sg_raw",
"=",
"signal",
"(",
"\"%s_raw\"",
"%",
"sg_name",
")",
"for",
"row",
"in",
"rows",
":",
"pk",
"=",
"_pk",
"(",
"row",
"[",
"\"after_values\"",
"]",
")",
"sg",
".",
"send",
"(",
"pk",
")",
"sg_raw",
".",
"send",
"(",
"row",
")",
"logger",
".",
"debug",
"(",
"\"%s -> %s, %s\"",
"%",
"(",
"sg_name",
",",
"pk",
",",
"timestamp",
")",
")",
"elif",
"isinstance",
"(",
"event",
",",
"DeleteRowsEvent",
")",
":",
"sg_name",
"=",
"\"%s_delete\"",
"%",
"event",
".",
"table",
"sg",
"=",
"signal",
"(",
"sg_name",
")",
"sg_raw",
"=",
"signal",
"(",
"\"%s_raw\"",
"%",
"sg_name",
")",
"for",
"row",
"in",
"rows",
":",
"pk",
"=",
"_pk",
"(",
"row",
"[",
"\"values\"",
"]",
")",
"sg",
".",
"send",
"(",
"pk",
")",
"sg_raw",
".",
"send",
"(",
"row",
")",
"logger",
".",
"debug",
"(",
"\"%s -> %s, %s\"",
"%",
"(",
"sg_name",
",",
"pk",
",",
"timestamp",
")",
")",
"signal",
"(",
"\"mysql_binlog_pos\"",
")",
".",
"send",
"(",
"\"%s:%s\"",
"%",
"(",
"stream",
".",
"log_file",
",",
"stream",
".",
"log_pos",
")",
")"
] | MySQL row-based binlog events pub.
**General Usage**
Listen and pub all tables events::
mysql_pub(mysql_dsn)
Listen and pub only some tables events::
mysql_pub(mysql_dsn, tables=["test"])
By default ``mysql_pub`` will process and pub all existing
row-based binlog (starting from the current binlog file with pos 0) and
quit; you may set blocking to True to block and wait for new binlog.
Enable this option if you're running the script as a daemon::
mysql_pub(mysql_dsn, blocking=True)
The binlog stream acts as a mysql slave and reads binlog from the master,
so the server_id matters; if it conflicts with other slaves or scripts,
strange bugs may happen. By default, the server_id is randomized by
``randint(1000000000, 4294967295)``, you may set it to a specific value
by server_id arg::
mysql_pub(mysql_dsn, blocking=True, server_id=1024)
**Signals Illustrate**
Sometimes you want more info than the pk value; mysql_pub exposes
a raw signal which will send the original binlog stream events.
For example, the following sql::
INSERT INTO test (data) VALUES ('a');
The row-based binlog generated from this sql, read by the binlog stream,
generates signals equal to::
signal("test_write").send(1)
signal("test_write_raw").send({'values': {'data': 'a', 'id': 1}})
**Binlog Pos Signal**
The mysql_pub has a unique signal ``mysql_binlog_pos`` which contains
the binlog file and binlog pos, you can record the signal and resume
binlog stream from last position with it.
:param mysql_dsn: mysql dsn with row-based binlog enabled.
:param tables: which tables to enable mysql_pub.
:param blocking: whether mysql_pub should wait for more binlog when all
existing binlog has been processed.
:param kwargs: more kwargs to be passed to binlog stream. | [
"MySQL",
"row",
"-",
"based",
"binlog",
"events",
"pub",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/pub/mysql.py#L50-L180 | train |
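A minimal consumer sketch for the signals mysql_pub sends, using blinker's signal (the same signal() used in the code above). The DSN and table name are placeholders; handlers must be connected before mysql_pub starts iterating the stream:

from blinker import signal

def on_test_write(pk):
    # pk is the primary-key value sent by signal("test_write").send(pk)
    print("row written, pk =", pk)

def on_binlog_pos(pos):
    print("resume point:", pos)  # formatted as "<log_file>:<log_pos>"

signal("test_write").connect(on_test_write)
signal("mysql_binlog_pos").connect(on_binlog_pos)

# mysql_pub("mysql://user:pass@localhost/db", tables=["test"], blocking=True)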
ldomic/lintools | lintools/molecule.py | Molecule.load_molecule_in_rdkit_smiles | def load_molecule_in_rdkit_smiles(self, molSize,kekulize=True,bonds=[],bond_color=None,atom_color = {}, size= {} ):
"""
Loads mol file in rdkit without the hydrogens - they do not have to appear in the final
figure. Once loaded, the molecule is converted to SMILES format which RDKit appears to
draw best - since we do not care about the actual coordinates of the original molecule, it
is sufficient to have just 2D information.
Some molecules can be problematic to import and steps such as stopping sanitize function can
be taken. This is done automatically if problems are observed. However, better solutions can
also be implemented and need more research.
The molecule is then drawn from SMILES in 2D representation without hydrogens. The drawing is
saved as an SVG file.
"""
mol_in_rdkit = self.topology_data.mol #need to reload without hydrogens
try:
mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit)
self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit))
except ValueError:
mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit, sanitize = False)
self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit), sanitize=False)
self.atom_identities = {}
i=0
for atom in self.topology_data.smiles.GetAtoms():
self.atom_identities[mol_in_rdkit.GetProp('_smilesAtomOutputOrder')[1:].rsplit(",")[i]] = atom.GetIdx()
i+=1
mc = Chem.Mol(self.topology_data.smiles.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(self.topology_data.smiles.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
atoms=[]
colors={}
for i in range(mol_in_rdkit.GetNumAtoms()):
atoms.append(i)
if len(atom_color)==0:
colors[i]=(1,1,1)
else:
colors = atom_color
drawer = rdMolDraw2D.MolDraw2DSVG(int(molSize[0]),int(molSize[1]))
drawer.DrawMolecule(mc,highlightAtoms=atoms,highlightBonds=bonds, highlightAtomColors=colors,highlightAtomRadii=size,highlightBondColors=bond_color)
drawer.FinishDrawing()
self.svg = drawer.GetDrawingText().replace('svg:','')
filesvg = open("molecule.svg", "w+")
filesvg.write(self.svg) | python | def load_molecule_in_rdkit_smiles(self, molSize,kekulize=True,bonds=[],bond_color=None,atom_color = {}, size= {} ):
"""
Loads mol file in rdkit without the hydrogens - they do not have to appear in the final
figure. Once loaded, the molecule is converted to SMILES format which RDKit appears to
draw best - since we do not care about the actual coordinates of the original molecule, it
is sufficient to have just 2D information.
Some molecules can be problematic to import and steps such as stopping sanitize function can
be taken. This is done automatically if problems are observed. However, better solutions can
also be implemented and need more research.
The molecule is then drawn from SMILES in 2D representation without hydrogens. The drawing is
saved as an SVG file.
"""
mol_in_rdkit = self.topology_data.mol #need to reload without hydrogens
try:
mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit)
self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit))
except ValueError:
mol_in_rdkit = Chem.RemoveHs(mol_in_rdkit, sanitize = False)
self.topology_data.smiles = Chem.MolFromSmiles(Chem.MolToSmiles(mol_in_rdkit), sanitize=False)
self.atom_identities = {}
i=0
for atom in self.topology_data.smiles.GetAtoms():
self.atom_identities[mol_in_rdkit.GetProp('_smilesAtomOutputOrder')[1:].rsplit(",")[i]] = atom.GetIdx()
i+=1
mc = Chem.Mol(self.topology_data.smiles.ToBinary())
if kekulize:
try:
Chem.Kekulize(mc)
except:
mc = Chem.Mol(self.topology_data.smiles.ToBinary())
if not mc.GetNumConformers():
rdDepictor.Compute2DCoords(mc)
atoms=[]
colors={}
for i in range(mol_in_rdkit.GetNumAtoms()):
atoms.append(i)
if len(atom_color)==0:
colors[i]=(1,1,1)
else:
colors = atom_color
drawer = rdMolDraw2D.MolDraw2DSVG(int(molSize[0]),int(molSize[1]))
drawer.DrawMolecule(mc,highlightAtoms=atoms,highlightBonds=bonds, highlightAtomColors=colors,highlightAtomRadii=size,highlightBondColors=bond_color)
drawer.FinishDrawing()
self.svg = drawer.GetDrawingText().replace('svg:','')
filesvg = open("molecule.svg", "w+")
filesvg.write(self.svg) | [
"def",
"load_molecule_in_rdkit_smiles",
"(",
"self",
",",
"molSize",
",",
"kekulize",
"=",
"True",
",",
"bonds",
"=",
"[",
"]",
",",
"bond_color",
"=",
"None",
",",
"atom_color",
"=",
"{",
"}",
",",
"size",
"=",
"{",
"}",
")",
":",
"mol_in_rdkit",
"=",
"self",
".",
"topology_data",
".",
"mol",
"#need to reload without hydrogens",
"try",
":",
"mol_in_rdkit",
"=",
"Chem",
".",
"RemoveHs",
"(",
"mol_in_rdkit",
")",
"self",
".",
"topology_data",
".",
"smiles",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"Chem",
".",
"MolToSmiles",
"(",
"mol_in_rdkit",
")",
")",
"except",
"ValueError",
":",
"mol_in_rdkit",
"=",
"Chem",
".",
"RemoveHs",
"(",
"mol_in_rdkit",
",",
"sanitize",
"=",
"False",
")",
"self",
".",
"topology_data",
".",
"smiles",
"=",
"Chem",
".",
"MolFromSmiles",
"(",
"Chem",
".",
"MolToSmiles",
"(",
"mol_in_rdkit",
")",
",",
"sanitize",
"=",
"False",
")",
"self",
".",
"atom_identities",
"=",
"{",
"}",
"i",
"=",
"0",
"for",
"atom",
"in",
"self",
".",
"topology_data",
".",
"smiles",
".",
"GetAtoms",
"(",
")",
":",
"self",
".",
"atom_identities",
"[",
"mol_in_rdkit",
".",
"GetProp",
"(",
"'_smilesAtomOutputOrder'",
")",
"[",
"1",
":",
"]",
".",
"rsplit",
"(",
"\",\"",
")",
"[",
"i",
"]",
"]",
"=",
"atom",
".",
"GetIdx",
"(",
")",
"i",
"+=",
"1",
"mc",
"=",
"Chem",
".",
"Mol",
"(",
"self",
".",
"topology_data",
".",
"smiles",
".",
"ToBinary",
"(",
")",
")",
"if",
"kekulize",
":",
"try",
":",
"Chem",
".",
"Kekulize",
"(",
"mc",
")",
"except",
":",
"mc",
"=",
"Chem",
".",
"Mol",
"(",
"self",
".",
"topology_data",
".",
"smiles",
".",
"ToBinary",
"(",
")",
")",
"if",
"not",
"mc",
".",
"GetNumConformers",
"(",
")",
":",
"rdDepictor",
".",
"Compute2DCoords",
"(",
"mc",
")",
"atoms",
"=",
"[",
"]",
"colors",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"mol_in_rdkit",
".",
"GetNumAtoms",
"(",
")",
")",
":",
"atoms",
".",
"append",
"(",
"i",
")",
"if",
"len",
"(",
"atom_color",
")",
"==",
"0",
":",
"colors",
"[",
"i",
"]",
"=",
"(",
"1",
",",
"1",
",",
"1",
")",
"else",
":",
"colors",
"=",
"atom_color",
"drawer",
"=",
"rdMolDraw2D",
".",
"MolDraw2DSVG",
"(",
"int",
"(",
"molSize",
"[",
"0",
"]",
")",
",",
"int",
"(",
"molSize",
"[",
"1",
"]",
")",
")",
"drawer",
".",
"DrawMolecule",
"(",
"mc",
",",
"highlightAtoms",
"=",
"atoms",
",",
"highlightBonds",
"=",
"bonds",
",",
"highlightAtomColors",
"=",
"colors",
",",
"highlightAtomRadii",
"=",
"size",
",",
"highlightBondColors",
"=",
"bond_color",
")",
"drawer",
".",
"FinishDrawing",
"(",
")",
"self",
".",
"svg",
"=",
"drawer",
".",
"GetDrawingText",
"(",
")",
".",
"replace",
"(",
"'svg:'",
",",
"''",
")",
"filesvg",
"=",
"open",
"(",
"\"molecule.svg\"",
",",
"\"w+\"",
")",
"filesvg",
".",
"write",
"(",
"self",
".",
"svg",
")"
] | Loads mol file in rdkit without the hydrogens - they do not have to appear in the final
figure. Once loaded, the molecule is converted to SMILES format which RDKit appears to
draw best - since we do not care about the actual coordinates of the original molecule, it
is sufficient to have just 2D information.
Some molecules can be problematic to import and steps such as stopping sanitize function can
be taken. This is done automatically if problems are observed. However, better solutions can
also be implemented and need more research.
The molecule is then drawn from SMILES in 2D representation without hydrogens. The drawing is
saved as an SVG file. | [
"Loads",
"mol",
"file",
"in",
"rdkit",
"without",
"the",
"hydrogens",
"-",
"they",
"do",
"not",
"have",
"to",
"appear",
"in",
"the",
"final",
"figure",
".",
"Once",
"loaded",
"the",
"molecule",
"is",
"converted",
"to",
"SMILES",
"format",
"which",
"RDKit",
"appears",
"to",
"draw",
"best",
"-",
"since",
"we",
"do",
"not",
"care",
"about",
"the",
"actual",
"coordinates",
"of",
"the",
"original",
"molecule",
"it",
"is",
"sufficient",
"to",
"have",
"just",
"2D",
"information",
".",
"Some",
"molecules",
"can",
"be",
"problematic",
"to",
"import",
"and",
"steps",
"such",
"as",
"stopping",
"sanitize",
"function",
"can",
"be",
"taken",
".",
"This",
"is",
"done",
"automatically",
"if",
"problems",
"are",
"observed",
".",
"However",
"better",
"solutions",
"can",
"also",
"be",
"implemented",
"and",
"need",
"more",
"research",
".",
"The",
"molecule",
"is",
"then",
"drawn",
"from",
"SMILES",
"in",
"2D",
"representation",
"without",
"hydrogens",
".",
"The",
"drawing",
"is",
"saved",
"as",
"an",
"SVG",
"file",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/molecule.py#L48-L94 | train |
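The same draw-to-SVG flow, reduced to a standalone RDKit sketch (the ethanol SMILES is a stand-in ligand; the highlighting arguments used above are omitted):

from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D

mol = Chem.MolFromSmiles('CCO')      # stand-in ligand
rdDepictor.Compute2DCoords(mol)      # only 2D coordinates are needed
drawer = rdMolDraw2D.MolDraw2DSVG(300, 300)
drawer.DrawMolecule(mol)
drawer.FinishDrawing()
svg = drawer.GetDrawingText().replace('svg:', '')  # same namespace strip as above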
ldomic/lintools | lintools/molecule.py | Molecule.calc_2d_forces | def calc_2d_forces(self,x1,y1,x2,y2,width):
"""Calculate overlap in 2D space"""
#calculate a
if x1>x2:
a = x1-x2
else:
a = x2-x1
a_sq=a*a
#calculate b
if y1>y2:
b = y1-y2
else:
b = y2-y1
b_sq=b*b
#calculate c
from math import sqrt
c_sq = a_sq+b_sq
c = sqrt(c_sq)
if c > width:
return 0,0
else:
overlap = width-c
return -overlap/2, overlap/2 | python | def calc_2d_forces(self,x1,y1,x2,y2,width):
"""Calculate overlap in 2D space"""
#calculate a
if x1>x2:
a = x1-x2
else:
a = x2-x1
a_sq=a*a
#calculate b
if y1>y2:
b = y1-y2
else:
b = y2-y1
b_sq=b*b
#calculate c
from math import sqrt
c_sq = a_sq+b_sq
c = sqrt(c_sq)
if c > width:
return 0,0
else:
overlap = width-c
return -overlap/2, overlap/2 | [
"def",
"calc_2d_forces",
"(",
"self",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"width",
")",
":",
"#calculate a",
"if",
"x1",
">",
"x2",
":",
"a",
"=",
"x1",
"-",
"x2",
"else",
":",
"a",
"=",
"x2",
"-",
"x1",
"a_sq",
"=",
"a",
"*",
"a",
"#calculate b",
"if",
"y1",
">",
"y2",
":",
"b",
"=",
"y1",
"-",
"y2",
"else",
":",
"b",
"=",
"y2",
"-",
"y1",
"b_sq",
"=",
"b",
"*",
"b",
"#calculate c",
"from",
"math",
"import",
"sqrt",
"c_sq",
"=",
"a_sq",
"+",
"b_sq",
"c",
"=",
"sqrt",
"(",
"c_sq",
")",
"if",
"c",
">",
"width",
":",
"return",
"0",
",",
"0",
"else",
":",
"overlap",
"=",
"width",
"-",
"c",
"return",
"-",
"overlap",
"/",
"2",
",",
"overlap",
"/",
"2"
] | Calculate overlap in 2D space | [
"Calculate",
"overlap",
"in",
"2D",
"space"
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/molecule.py#L162-L189 | train |
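A worked check of the overlap arithmetic in calc_2d_forces: two points 5 units apart with width 8 overlap by 3, so each diagram is pushed 1.5 units in opposite directions; with width 4 there is no overlap:

from math import sqrt

c = sqrt((3 - 0) ** 2 + (4 - 0) ** 2)   # distance between (0, 0) and (3, 4) -> 5.0
overlap = 8 - c                         # width 8 -> overlap 3.0
print(-overlap / 2, overlap / 2)        # -1.5 1.5, matching calc_2d_forces
# with width 4, c > width, so calc_2d_forces returns (0, 0)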
ldomic/lintools | lintools/molecule.py | Molecule.do_step | def do_step(self, values, xy_values,coeff, width):
"""Calculates forces between two diagrams and pushes them apart by tenth of width"""
forces = {k:[] for k,i in enumerate(xy_values)}
for (index1, value1), (index2,value2) in combinations(enumerate(xy_values),2):
f = self.calc_2d_forces(value1[0],value1[1],value2[0],value2[1],width)
if coeff[index1] < coeff[index2]:
if self.b_lenght-coeff[index2]<self.b_lenght/10: #a quick and dirty solution, but works
forces[index1].append(f[1]) # push to left (smaller projection value)
forces[index2].append(f[0])
else:
#all is normal
forces[index1].append(f[0]) # push to left (smaller projection value)
forces[index2].append(f[1])
else:
if self.b_lenght-coeff[index1]<self.b_lenght/10: #a quick and dirty solution, but works
forces[index1].append(f[0]) # push to left (smaller projection value)
forces[index2].append(f[1])
else:
#if all is normal
forces[index1].append(f[1]) # push to left (smaller projection value)
forces[index2].append(f[0])
forces = {k:sum(v) for k,v in forces.items()}
energy = sum([abs(x) for x in forces.values()])
return [(forces[k]/10+v) for k, v in enumerate(values)], energy | python | def do_step(self, values, xy_values,coeff, width):
"""Calculates forces between two diagrams and pushes them apart by tenth of width"""
forces = {k:[] for k,i in enumerate(xy_values)}
for (index1, value1), (index2,value2) in combinations(enumerate(xy_values),2):
f = self.calc_2d_forces(value1[0],value1[1],value2[0],value2[1],width)
if coeff[index1] < coeff[index2]:
if self.b_lenght-coeff[index2]<self.b_lenght/10: #a quick and dirty solution, but works
forces[index1].append(f[1]) # push to left (smaller projection value)
forces[index2].append(f[0])
else:
#all is normal
forces[index1].append(f[0]) # push to left (smaller projection value)
forces[index2].append(f[1])
else:
if self.b_lenght-coeff[index1]<self.b_lenght/10: #a quick and dirty solution, but works
forces[index1].append(f[0]) # push to left (smaller projection value)
forces[index2].append(f[1])
else:
#if all is normal
forces[index1].append(f[1]) # push to left (smaller projection value)
forces[index2].append(f[0])
forces = {k:sum(v) for k,v in forces.items()}
energy = sum([abs(x) for x in forces.values()])
return [(forces[k]/10+v) for k, v in enumerate(values)], energy | [
"def",
"do_step",
"(",
"self",
",",
"values",
",",
"xy_values",
",",
"coeff",
",",
"width",
")",
":",
"forces",
"=",
"{",
"k",
":",
"[",
"]",
"for",
"k",
",",
"i",
"in",
"enumerate",
"(",
"xy_values",
")",
"}",
"for",
"(",
"index1",
",",
"value1",
")",
",",
"(",
"index2",
",",
"value2",
")",
"in",
"combinations",
"(",
"enumerate",
"(",
"xy_values",
")",
",",
"2",
")",
":",
"f",
"=",
"self",
".",
"calc_2d_forces",
"(",
"value1",
"[",
"0",
"]",
",",
"value1",
"[",
"1",
"]",
",",
"value2",
"[",
"0",
"]",
",",
"value2",
"[",
"1",
"]",
",",
"width",
")",
"if",
"coeff",
"[",
"index1",
"]",
"<",
"coeff",
"[",
"index2",
"]",
":",
"if",
"self",
".",
"b_lenght",
"-",
"coeff",
"[",
"index2",
"]",
"<",
"self",
".",
"b_lenght",
"/",
"10",
":",
"#a quick and dirty solution, but works",
"forces",
"[",
"index1",
"]",
".",
"append",
"(",
"f",
"[",
"1",
"]",
")",
"# push to left (smaller projection value)",
"forces",
"[",
"index2",
"]",
".",
"append",
"(",
"f",
"[",
"0",
"]",
")",
"else",
":",
"#all is normal",
"forces",
"[",
"index1",
"]",
".",
"append",
"(",
"f",
"[",
"0",
"]",
")",
"# push to left (smaller projection value)",
"forces",
"[",
"index2",
"]",
".",
"append",
"(",
"f",
"[",
"1",
"]",
")",
"else",
":",
"if",
"self",
".",
"b_lenght",
"-",
"coeff",
"[",
"index1",
"]",
"<",
"self",
".",
"b_lenght",
"/",
"10",
":",
"#a quick and dirty solution, but works",
"forces",
"[",
"index1",
"]",
".",
"append",
"(",
"f",
"[",
"0",
"]",
")",
"# push to left (smaller projection value)",
"forces",
"[",
"index2",
"]",
".",
"append",
"(",
"f",
"[",
"1",
"]",
")",
"else",
":",
"#if all is normal",
"forces",
"[",
"index1",
"]",
".",
"append",
"(",
"f",
"[",
"1",
"]",
")",
"# push to left (smaller projection value)",
"forces",
"[",
"index2",
"]",
".",
"append",
"(",
"f",
"[",
"0",
"]",
")",
"forces",
"=",
"{",
"k",
":",
"sum",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"forces",
".",
"items",
"(",
")",
"}",
"energy",
"=",
"sum",
"(",
"[",
"abs",
"(",
"x",
")",
"for",
"x",
"in",
"forces",
".",
"values",
"(",
")",
"]",
")",
"return",
"[",
"(",
"forces",
"[",
"k",
"]",
"/",
"10",
"+",
"v",
")",
"for",
"k",
",",
"v",
"in",
"enumerate",
"(",
"values",
")",
"]",
",",
"energy"
] | Calculates forces between two diagrams and pushes them apart by a tenth of the width | [
"Calculates",
"forces",
"between",
"two",
"diagrams",
"and",
"pushes",
"them",
"apart",
"by",
"tenth",
"of",
"width"
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/molecule.py#L192-L217 | train |
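do_step is meant to be called repeatedly: it nudges the projection values by a tenth of each summed force and reports the remaining energy, so a driver loop runs until the energy reaches zero. A hypothetical relaxation loop (the names are assumed, and xy_values would need to be recomputed from the updated values each iteration):

for _ in range(1000):                 # cap iterations so the loop always terminates
    values, energy = molecule.do_step(values, xy_values, coeff, width)
    # xy_values = recompute_projection(values)   # assumed helper, not in the repo
    if energy == 0:                   # no overlaps left
        break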
robinandeer/puzzle | puzzle/plugins/sql/mixins/variant.py | VariantMixin.variants | def variants(self, case_id, skip=0, count=1000, filters=None):
"""Fetch variants for a case."""
filters = filters or {}
logger.debug("Fetching case with case_id: {0}".format(case_id))
case_obj = self.case(case_id)
plugin, case_id = self.select_plugin(case_obj)
self.filters = plugin.filters
gene_lists = (self.gene_list(list_id) for list_id
in filters.get('gene_lists', []))
nested_geneids = (gene_list.gene_ids for gene_list in gene_lists)
gene_ids = set(itertools.chain.from_iterable(nested_geneids))
if filters.get('gene_ids'):
filters['gene_ids'].extend(gene_ids)
else:
filters['gene_ids'] = gene_ids
variants = plugin.variants(case_id, skip, count, filters)
return variants | python | def variants(self, case_id, skip=0, count=1000, filters=None):
"""Fetch variants for a case."""
filters = filters or {}
logger.debug("Fetching case with case_id: {0}".format(case_id))
case_obj = self.case(case_id)
plugin, case_id = self.select_plugin(case_obj)
self.filters = plugin.filters
gene_lists = (self.gene_list(list_id) for list_id
in filters.get('gene_lists', []))
nested_geneids = (gene_list.gene_ids for gene_list in gene_lists)
gene_ids = set(itertools.chain.from_iterable(nested_geneids))
if filters.get('gene_ids'):
filters['gene_ids'].extend(gene_ids)
else:
filters['gene_ids'] = gene_ids
variants = plugin.variants(case_id, skip, count, filters)
return variants | [
"def",
"variants",
"(",
"self",
",",
"case_id",
",",
"skip",
"=",
"0",
",",
"count",
"=",
"1000",
",",
"filters",
"=",
"None",
")",
":",
"filters",
"=",
"filters",
"or",
"{",
"}",
"logger",
".",
"debug",
"(",
"\"Fetching case with case_id: {0}\"",
".",
"format",
"(",
"case_id",
")",
")",
"case_obj",
"=",
"self",
".",
"case",
"(",
"case_id",
")",
"plugin",
",",
"case_id",
"=",
"self",
".",
"select_plugin",
"(",
"case_obj",
")",
"self",
".",
"filters",
"=",
"plugin",
".",
"filters",
"gene_lists",
"=",
"(",
"self",
".",
"gene_list",
"(",
"list_id",
")",
"for",
"list_id",
"in",
"filters",
".",
"get",
"(",
"'gene_lists'",
",",
"[",
"]",
")",
")",
"nested_geneids",
"=",
"(",
"gene_list",
".",
"gene_ids",
"for",
"gene_list",
"in",
"gene_lists",
")",
"gene_ids",
"=",
"set",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"nested_geneids",
")",
")",
"if",
"filters",
".",
"get",
"(",
"'gene_ids'",
")",
":",
"filters",
"[",
"'gene_ids'",
"]",
".",
"extend",
"(",
"gene_ids",
")",
"else",
":",
"filters",
"[",
"'gene_ids'",
"]",
"=",
"gene_ids",
"variants",
"=",
"plugin",
".",
"variants",
"(",
"case_id",
",",
"skip",
",",
"count",
",",
"filters",
")",
"return",
"variants"
] | Fetch variants for a case. | [
"Fetch",
"variants",
"for",
"a",
"case",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/variant.py#L11-L29 | train |
robinandeer/puzzle | puzzle/plugins/sql/mixins/variant.py | VariantMixin.variant | def variant(self, case_id, variant_id):
"""Fetch a single variant from variant source."""
case_obj = self.case(case_id)
plugin, case_id = self.select_plugin(case_obj)
variant = plugin.variant(case_id, variant_id)
return variant | python | def variant(self, case_id, variant_id):
"""Fetch a single variant from variant source."""
case_obj = self.case(case_id)
plugin, case_id = self.select_plugin(case_obj)
variant = plugin.variant(case_id, variant_id)
return variant | [
"def",
"variant",
"(",
"self",
",",
"case_id",
",",
"variant_id",
")",
":",
"case_obj",
"=",
"self",
".",
"case",
"(",
"case_id",
")",
"plugin",
",",
"case_id",
"=",
"self",
".",
"select_plugin",
"(",
"case_obj",
")",
"variant",
"=",
"plugin",
".",
"variant",
"(",
"case_id",
",",
"variant_id",
")",
"return",
"variant"
] | Fetch a single variant from variant source. | [
"Fetch",
"a",
"single",
"variant",
"from",
"variant",
"source",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/variant.py#L31-L36 | train |
eleme/meepo | meepo/apps/eventsourcing/sub.py | redis_es_sub | def redis_es_sub(session, tables, redis_dsn, strict=False,
namespace=None, ttl=3600*24*3, socket_timeout=1):
"""Redis EventSourcing sub.
This sub should be used together with sqlalchemy_es_pub; it will
use RedisEventStore as the events storage layer and use the prepare-commit
pattern in :func:`sqlalchemy_es_pub` to ensure 100% reliable events
recording.
:param session: the sqlalchemy session to bind the signals to
:param tables: tables to be event sourced.
:param redis_dsn: the redis server to store event sourcing events.
:param strict: arg to be passed to RedisPrepareCommit. If set to True,
the exception will not be silent and may cause the failure of the
sqlalchemy transaction; the user should handle the exception on the app
side in this case.
:param namespace: namespace string or func. If func passed, it should
accept timestamp as arg and return a string namespace.
:param ttl: expiration time for events stored, default to 3 days.
:param socket_timeout: redis socket timeout.
"""
logger = logging.getLogger("meepo.sub.redis_es_sub")
if not isinstance(tables, (list, set)):
raise ValueError("tables should be list or set")
# install event store hook for tables
event_store = RedisEventStore(
redis_dsn, namespace=namespace, ttl=ttl, socket_timeout=socket_timeout)
def _es_event_sub(pk, event):
if event_store.add(event, str(pk)):
logger.info("%s: %s -> %s" % (
event, pk, datetime.datetime.now()))
else:
logger.error("event sourcing failed: %s" % pk)
events = ("%s_%s" % (tb, action) for tb, action in
itertools.product(*[tables, ["write", "update", "delete"]]))
for event in events:
sub_func = functools.partial(_es_event_sub, event=event)
signal(event).connect(sub_func, weak=False)
# install prepare-commit hook
prepare_commit = RedisPrepareCommit(
redis_dsn, strict=strict, namespace=namespace,
socket_timeout=socket_timeout)
signal("session_prepare").connect(
prepare_commit.prepare, sender=session, weak=False)
signal("session_commit").connect(
prepare_commit.commit, sender=session, weak=False)
signal("session_rollback").connect(
prepare_commit.rollback, sender=session, weak=False)
return event_store, prepare_commit | python | def redis_es_sub(session, tables, redis_dsn, strict=False,
namespace=None, ttl=3600*24*3, socket_timeout=1):
"""Redis EventSourcing sub.
This sub should be used together with sqlalchemy_es_pub; it will
use RedisEventStore as the events storage layer and use the prepare-commit
pattern in :func:`sqlalchemy_es_pub` to ensure 100% reliable events
recording.
:param session: the sqlalchemy session to bind the signals to
:param tables: tables to be event sourced.
:param redis_dsn: the redis server to store event sourcing events.
:param strict: arg to be passed to RedisPrepareCommit. If set to True,
the exception will not be silent and may cause the failure of the
sqlalchemy transaction; the user should handle the exception on the app
side in this case.
:param namespace: namespace string or func. If func passed, it should
accept timestamp as arg and return a string namespace.
:param ttl: expiration time for events stored, default to 3 days.
:param socket_timeout: redis socket timeout.
"""
logger = logging.getLogger("meepo.sub.redis_es_sub")
if not isinstance(tables, (list, set)):
raise ValueError("tables should be list or set")
# install event store hook for tables
event_store = RedisEventStore(
redis_dsn, namespace=namespace, ttl=ttl, socket_timeout=socket_timeout)
def _es_event_sub(pk, event):
if event_store.add(event, str(pk)):
logger.info("%s: %s -> %s" % (
event, pk, datetime.datetime.now()))
else:
logger.error("event sourcing failed: %s" % pk)
events = ("%s_%s" % (tb, action) for tb, action in
itertools.product(*[tables, ["write", "update", "delete"]]))
for event in events:
sub_func = functools.partial(_es_event_sub, event=event)
signal(event).connect(sub_func, weak=False)
# install prepare-commit hook
prepare_commit = RedisPrepareCommit(
redis_dsn, strict=strict, namespace=namespace,
socket_timeout=socket_timeout)
signal("session_prepare").connect(
prepare_commit.prepare, sender=session, weak=False)
signal("session_commit").connect(
prepare_commit.commit, sender=session, weak=False)
signal("session_rollback").connect(
prepare_commit.rollback, sender=session, weak=False)
return event_store, prepare_commit | [
"def",
"redis_es_sub",
"(",
"session",
",",
"tables",
",",
"redis_dsn",
",",
"strict",
"=",
"False",
",",
"namespace",
"=",
"None",
",",
"ttl",
"=",
"3600",
"*",
"24",
"*",
"3",
",",
"socket_timeout",
"=",
"1",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"\"meepo.sub.redis_es_sub\"",
")",
"if",
"not",
"isinstance",
"(",
"tables",
",",
"(",
"list",
",",
"set",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"tables should be list or set\"",
")",
"# install event store hook for tables",
"event_store",
"=",
"RedisEventStore",
"(",
"redis_dsn",
",",
"namespace",
"=",
"namespace",
",",
"ttl",
"=",
"ttl",
",",
"socket_timeout",
"=",
"socket_timeout",
")",
"def",
"_es_event_sub",
"(",
"pk",
",",
"event",
")",
":",
"if",
"event_store",
".",
"add",
"(",
"event",
",",
"str",
"(",
"pk",
")",
")",
":",
"logger",
".",
"info",
"(",
"\"%s: %s -> %s\"",
"%",
"(",
"event",
",",
"pk",
",",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
")",
")",
"else",
":",
"logger",
".",
"error",
"(",
"\"event sourcing failed: %s\"",
"%",
"pk",
")",
"events",
"=",
"(",
"\"%s_%s\"",
"%",
"(",
"tb",
",",
"action",
")",
"for",
"tb",
",",
"action",
"in",
"itertools",
".",
"product",
"(",
"*",
"[",
"tables",
",",
"[",
"\"write\"",
",",
"\"update\"",
",",
"\"delete\"",
"]",
"]",
")",
")",
"for",
"event",
"in",
"events",
":",
"sub_func",
"=",
"functools",
".",
"partial",
"(",
"_es_event_sub",
",",
"event",
"=",
"event",
")",
"signal",
"(",
"event",
")",
".",
"connect",
"(",
"sub_func",
",",
"weak",
"=",
"False",
")",
"# install prepare-commit hook",
"prepare_commit",
"=",
"RedisPrepareCommit",
"(",
"redis_dsn",
",",
"strict",
"=",
"strict",
",",
"namespace",
"=",
"namespace",
",",
"socket_timeout",
"=",
"socket_timeout",
")",
"signal",
"(",
"\"session_prepare\"",
")",
".",
"connect",
"(",
"prepare_commit",
".",
"prepare",
",",
"sender",
"=",
"session",
",",
"weak",
"=",
"False",
")",
"signal",
"(",
"\"session_commit\"",
")",
".",
"connect",
"(",
"prepare_commit",
".",
"commit",
",",
"sender",
"=",
"session",
",",
"weak",
"=",
"False",
")",
"signal",
"(",
"\"session_rollback\"",
")",
".",
"connect",
"(",
"prepare_commit",
".",
"rollback",
",",
"sender",
"=",
"session",
",",
"weak",
"=",
"False",
")",
"return",
"event_store",
",",
"prepare_commit"
] | Redis EventSourcing sub.
This sub should be used together with sqlalchemy_es_pub; it will
use RedisEventStore as the events storage layer and use the prepare-commit
pattern in :func:`sqlalchemy_es_pub` to ensure 100% reliable events
recording.
:param session: the sqlalchemy session to bind the signals to
:param tables: tables to be event sourced.
:param redis_dsn: the redis server to store event sourcing events.
:param strict: arg to be passed to RedisPrepareCommit. If set to True,
the exception will not be silent and may cause the failure of the
sqlalchemy transaction; the user should handle the exception on the app
side in this case.
:param namespace: namespace string or func. If func passed, it should
accept timestamp as arg and return a string namespace.
:param ttl: expiration time for events stored, default to 3 days.
:param socket_timeout: redis socket timeout. | [
"Redis",
"EventSourcing",
"sub",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/sub.py#L16-L71 | train |
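Hypothetical wiring of redis_es_sub (the DSN and table names are placeholders); it assumes the matching sqlalchemy_es_pub has already been installed on the same session so the session_prepare/commit/rollback signals actually fire:

event_store, prepare_commit = redis_es_sub(
    session,                              # the sqlalchemy session being published
    tables={"order", "payment"},          # placeholder table names
    redis_dsn="redis://localhost:6379/0",
    strict=False,
    ttl=3600 * 24 * 3,
)
# event_store is the RedisEventStore; prepare_commit tracks transaction phases.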
jalmeroth/pymusiccast | musiccast.py | setup_parser | def setup_parser():
"""Setup an ArgumentParser."""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=5005)
parser.add_argument('-i', '--interval', type=int, default=480)
parser.add_argument('host', type=str, help='hostname')
return parser | python | def setup_parser():
"""Setup an ArgumentParser."""
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', type=int, default=5005)
parser.add_argument('-i', '--interval', type=int, default=480)
parser.add_argument('host', type=str, help='hostname')
return parser | [
"def",
"setup_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--port'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"5005",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--interval'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"480",
")",
"parser",
".",
"add_argument",
"(",
"'host'",
",",
"type",
"=",
"str",
",",
"help",
"=",
"'hostname'",
")",
"return",
"parser"
] | Set up an ArgumentParser. | [
"Setup",
"an",
"ArgumentParser",
"."
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/musiccast.py#L13-L19 | train |
jalmeroth/pymusiccast | musiccast.py | main | def main():
"""Connect to a McDevice"""
args = setup_parser().parse_args()
host = getattr(args, "host")
port = getattr(args, "port")
ipv4 = socket.gethostbyname(host)
interval = getattr(args, "interval")
receiver = McDevice(ipv4, udp_port=port, mc_interval=interval)
receiver.handle_status()
# wait for UDP messages
while True:
time.sleep(0.2) | python | def main():
"""Connect to a McDevice"""
args = setup_parser().parse_args()
host = getattr(args, "host")
port = getattr(args, "port")
ipv4 = socket.gethostbyname(host)
interval = getattr(args, "interval")
receiver = McDevice(ipv4, udp_port=port, mc_interval=interval)
receiver.handle_status()
# wait for UDP messages
while True:
time.sleep(0.2) | [
"def",
"main",
"(",
")",
":",
"args",
"=",
"setup_parser",
"(",
")",
".",
"parse_args",
"(",
")",
"host",
"=",
"getattr",
"(",
"args",
",",
"\"host\"",
")",
"port",
"=",
"getattr",
"(",
"args",
",",
"\"port\"",
")",
"ipv4",
"=",
"socket",
".",
"gethostbyname",
"(",
"host",
")",
"interval",
"=",
"getattr",
"(",
"args",
",",
"\"interval\"",
")",
"receiver",
"=",
"McDevice",
"(",
"ipv4",
",",
"udp_port",
"=",
"port",
",",
"mc_interval",
"=",
"interval",
")",
"receiver",
".",
"handle_status",
"(",
")",
"# wait for UDP messages",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"0.2",
")"
] | Connect to a McDevice | [
"Connect",
"to",
"a",
"McDevice"
] | 616379ae22d6b518c61042d58be6d18a46242168 | https://github.com/jalmeroth/pymusiccast/blob/616379ae22d6b518c61042d58be6d18a46242168/musiccast.py#L22-L35 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.from_sqlite | def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
        reading the client ID and secret from the SQLite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close() | python | def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
"""
Initialize a new Ghost API client,
        reading the client ID and secret from the SQLite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
"""
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close() | [
"def",
"from_sqlite",
"(",
"cls",
",",
"database_path",
",",
"base_url",
",",
"version",
"=",
"'auto'",
",",
"client_id",
"=",
"'ghost-admin'",
")",
":",
"import",
"os",
"import",
"sqlite3",
"fd",
"=",
"os",
".",
"open",
"(",
"database_path",
",",
"os",
".",
"O_RDONLY",
")",
"connection",
"=",
"sqlite3",
".",
"connect",
"(",
"'/dev/fd/%d'",
"%",
"fd",
")",
"os",
".",
"close",
"(",
"fd",
")",
"try",
":",
"row",
"=",
"connection",
".",
"execute",
"(",
"'SELECT secret FROM clients WHERE slug = ?'",
",",
"(",
"client_id",
",",
")",
")",
".",
"fetchone",
"(",
")",
"if",
"row",
":",
"return",
"cls",
"(",
"base_url",
",",
"version",
"=",
"version",
",",
"client_id",
"=",
"client_id",
",",
"client_secret",
"=",
"row",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"GhostException",
"(",
"401",
",",
"[",
"{",
"'errorType'",
":",
"'InternalError'",
",",
"'message'",
":",
"'No client_secret found for client_id: %s'",
"%",
"client_id",
"}",
"]",
")",
"finally",
":",
"connection",
".",
"close",
"(",
")"
] | Initialize a new Ghost API client,
reading the client ID and secret from the SQLite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance | [
"Initialize",
"a",
"new",
"Ghost",
"API",
"client",
"reading",
"the",
"client",
"ID",
"and",
"secret",
"from",
"the",
"SQlite",
"database",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L142-L180 | train |
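A hedged usage sketch for Ghost.from_sqlite() above; the database path and base URL are placeholders, not Ghost defaults:

# Sketch only: path and URL are placeholders.
client = Ghost.from_sqlite(
    database_path='/var/lib/ghost/content/data/ghost.db',
    base_url='https://blog.example.com',
    client_id='ghost-admin',
)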
rycus86/ghost-client | ghost_client/api.py | Ghost.login | def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data | python | def login(self, username, password):
"""
Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint
"""
data = self._authenticate(
grant_type='password',
username=username,
password=password,
client_id=self._client_id,
client_secret=self._client_secret
)
self._username = username
self._password = password
return data | [
"def",
"login",
"(",
"self",
",",
"username",
",",
"password",
")",
":",
"data",
"=",
"self",
".",
"_authenticate",
"(",
"grant_type",
"=",
"'password'",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"client_id",
"=",
"self",
".",
"_client_id",
",",
"client_secret",
"=",
"self",
".",
"_client_secret",
")",
"self",
".",
"_username",
"=",
"username",
"self",
".",
"_password",
"=",
"password",
"return",
"data"
] | Authenticate with the server.
:param username: The username of an existing user
:param password: The password for the user
:return: The authentication response from the REST endpoint | [
"Authenticate",
"with",
"the",
"server",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L201-L221 | train |
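A sketch of the authentication lifecycle built from login(), refresh_session() and logout() (the latter two appear in the records below); credentials are placeholders:

# Sketch only: credentials are placeholders.
client.login('editor@example.com', 's3cret')  # stores username/password for re-login
client.refresh_session()  # reuses the refresh token, else falls back to login()
client.logout()           # revokes both tokens and forgets the credentials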
rycus86/ghost-client | ghost_client/api.py | Ghost.refresh_session | def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
        if they were used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
) | python | def refresh_session(self):
"""
Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
        if they were used to authenticate initially.
:return: The authentication response or `None` if not available
"""
if not self._refresh_token:
if self._username and self._password:
return self.login(self._username, self._password)
return
return self._authenticate(
grant_type='refresh_token',
refresh_token=self._refresh_token,
client_id=self._client_id,
client_secret=self._client_secret
) | [
"def",
"refresh_session",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_refresh_token",
":",
"if",
"self",
".",
"_username",
"and",
"self",
".",
"_password",
":",
"return",
"self",
".",
"login",
"(",
"self",
".",
"_username",
",",
"self",
".",
"_password",
")",
"return",
"return",
"self",
".",
"_authenticate",
"(",
"grant_type",
"=",
"'refresh_token'",
",",
"refresh_token",
"=",
"self",
".",
"_refresh_token",
",",
"client_id",
"=",
"self",
".",
"_client_id",
",",
"client_secret",
"=",
"self",
".",
"_client_secret",
")"
] | Re-authenticate using the refresh token if available.
Otherwise log in using the username and password
if they were used to authenticate initially.
:return: The authentication response or `None` if not available | [
"Re",
"-",
"authenticate",
"using",
"the",
"refresh",
"token",
"if",
"available",
".",
"Otherwise",
"log",
"in",
"using",
"the",
"username",
"and",
"password",
"if",
"it",
"was",
"used",
"to",
"authenticate",
"initially",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L223-L243 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.revoke_access_token | def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None | python | def revoke_access_token(self):
"""
Revoke the access token currently in use.
"""
if not self._access_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='access_token',
token=self._access_token
))
self._access_token = None | [
"def",
"revoke_access_token",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_access_token",
":",
"return",
"self",
".",
"execute_post",
"(",
"'authentication/revoke'",
",",
"json",
"=",
"dict",
"(",
"token_type_hint",
"=",
"'access_token'",
",",
"token",
"=",
"self",
".",
"_access_token",
")",
")",
"self",
".",
"_access_token",
"=",
"None"
] | Revoke the access token currently in use. | [
"Revoke",
"the",
"access",
"token",
"currently",
"in",
"use",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L260-L273 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.revoke_refresh_token | def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None | python | def revoke_refresh_token(self):
"""
Revoke the refresh token currently active.
"""
if not self._refresh_token:
return
self.execute_post('authentication/revoke', json=dict(
token_type_hint='refresh_token',
token=self._refresh_token
))
self._refresh_token = None | [
"def",
"revoke_refresh_token",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_refresh_token",
":",
"return",
"self",
".",
"execute_post",
"(",
"'authentication/revoke'",
",",
"json",
"=",
"dict",
"(",
"token_type_hint",
"=",
"'refresh_token'",
",",
"token",
"=",
"self",
".",
"_refresh_token",
")",
")",
"self",
".",
"_refresh_token",
"=",
"None"
] | Revoke the refresh token currently active. | [
"Revoke",
"the",
"refresh",
"token",
"currently",
"active",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L275-L288 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.logout | def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None | python | def logout(self):
"""
Log out, revoking the access tokens
and forgetting the login details if they were given.
"""
self.revoke_refresh_token()
self.revoke_access_token()
self._username, self._password = None, None | [
"def",
"logout",
"(",
"self",
")",
":",
"self",
".",
"revoke_refresh_token",
"(",
")",
"self",
".",
"revoke_access_token",
"(",
")",
"self",
".",
"_username",
",",
"self",
".",
"_password",
"=",
"None",
",",
"None"
] | Log out, revoking the access tokens
and forgetting the login details if they were given. | [
"Log",
"out",
"revoking",
"the",
"access",
"tokens",
"and",
"forgetting",
"the",
"login",
"details",
"if",
"they",
"were",
"given",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L290-L299 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.upload | def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close() | python | def upload(self, file_obj=None, file_path=None, name=None, data=None):
"""
Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server
"""
close = False
if file_obj:
file_name, content = os.path.basename(file_obj.name), file_obj
elif file_path:
file_name, content = os.path.basename(file_path), open(file_path, 'rb')
close = True
elif name and data:
file_name, content = name, data
else:
raise GhostException(
400,
'Either `file_obj` or `file_path` or '
'`name` and `data` needs to be specified'
)
try:
content_type, _ = mimetypes.guess_type(file_name)
file_arg = (file_name, content, content_type)
response = self.execute_post('uploads/', files={'uploadimage': file_arg})
return response
finally:
if close:
content.close() | [
"def",
"upload",
"(",
"self",
",",
"file_obj",
"=",
"None",
",",
"file_path",
"=",
"None",
",",
"name",
"=",
"None",
",",
"data",
"=",
"None",
")",
":",
"close",
"=",
"False",
"if",
"file_obj",
":",
"file_name",
",",
"content",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file_obj",
".",
"name",
")",
",",
"file_obj",
"elif",
"file_path",
":",
"file_name",
",",
"content",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file_path",
")",
",",
"open",
"(",
"file_path",
",",
"'rb'",
")",
"close",
"=",
"True",
"elif",
"name",
"and",
"data",
":",
"file_name",
",",
"content",
"=",
"name",
",",
"data",
"else",
":",
"raise",
"GhostException",
"(",
"400",
",",
"'Either `file_obj` or `file_path` or '",
"'`name` and `data` needs to be specified'",
")",
"try",
":",
"content_type",
",",
"_",
"=",
"mimetypes",
".",
"guess_type",
"(",
"file_name",
")",
"file_arg",
"=",
"(",
"file_name",
",",
"content",
",",
"content_type",
")",
"response",
"=",
"self",
".",
"execute_post",
"(",
"'uploads/'",
",",
"files",
"=",
"{",
"'uploadimage'",
":",
"file_arg",
"}",
")",
"return",
"response",
"finally",
":",
"if",
"close",
":",
"content",
".",
"close",
"(",
")"
] | Upload an image and return its path on the server.
Either `file_obj` or `file_path` or `name` and `data` has to be specified.
:param file_obj: A file object to upload
:param file_path: A file path to upload from
:param name: A file name for uploading
:param data: The file content to upload
:return: The path of the uploaded file on the server | [
"Upload",
"an",
"image",
"and",
"return",
"its",
"path",
"on",
"the",
"server",
".",
"Either",
"file_obj",
"or",
"file_path",
"or",
"name",
"and",
"data",
"has",
"to",
"be",
"specified",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L301-L343 | train |
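A sketch of the three accepted input styles for upload(); exactly one style must be supplied, otherwise the GhostException above is raised. File names and bytes are placeholders:

# Sketch only.
path = client.upload(file_path='logo.png')          # from a file path
with open('logo.png', 'rb') as fh:
    path = client.upload(file_obj=fh)               # from an open file object
path = client.upload(name='logo.png', data=b'...')  # from a name plus raw bytes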
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_get | def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json() | python | def execute_get(self, resource, **kwargs):
"""
Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
url = '%s/%s' % (self.base_url, resource)
headers = kwargs.pop('headers', dict())
headers['Accept'] = 'application/json'
headers['Content-Type'] = 'application/json'
if kwargs:
separator = '&' if '?' in url else '?'
for key, value in kwargs.items():
if hasattr(value, '__iter__') and type(value) not in six.string_types:
url = '%s%s%s=%s' % (url, separator, key, ','.join(value))
else:
url = '%s%s%s=%s' % (url, separator, key, value)
separator = '&'
if self._access_token:
headers['Authorization'] = 'Bearer %s' % self._access_token
else:
separator = '&' if '?' in url else '?'
url = '%s%sclient_id=%s&client_secret=%s' % (
url, separator, self._client_id, self._client_secret
)
response = requests.get(url, headers=headers)
if response.status_code // 100 != 2:
raise GhostException(response.status_code, response.json().get('errors', []))
return response.json() | [
"def",
"execute_get",
"(",
"self",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"url",
"=",
"'%s/%s'",
"%",
"(",
"self",
".",
"base_url",
",",
"resource",
")",
"headers",
"=",
"kwargs",
".",
"pop",
"(",
"'headers'",
",",
"dict",
"(",
")",
")",
"headers",
"[",
"'Accept'",
"]",
"=",
"'application/json'",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/json'",
"if",
"kwargs",
":",
"separator",
"=",
"'&'",
"if",
"'?'",
"in",
"url",
"else",
"'?'",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"value",
",",
"'__iter__'",
")",
"and",
"type",
"(",
"value",
")",
"not",
"in",
"six",
".",
"string_types",
":",
"url",
"=",
"'%s%s%s=%s'",
"%",
"(",
"url",
",",
"separator",
",",
"key",
",",
"','",
".",
"join",
"(",
"value",
")",
")",
"else",
":",
"url",
"=",
"'%s%s%s=%s'",
"%",
"(",
"url",
",",
"separator",
",",
"key",
",",
"value",
")",
"separator",
"=",
"'&'",
"if",
"self",
".",
"_access_token",
":",
"headers",
"[",
"'Authorization'",
"]",
"=",
"'Bearer %s'",
"%",
"self",
".",
"_access_token",
"else",
":",
"separator",
"=",
"'&'",
"if",
"'?'",
"in",
"url",
"else",
"'?'",
"url",
"=",
"'%s%sclient_id=%s&client_secret=%s'",
"%",
"(",
"url",
",",
"separator",
",",
"self",
".",
"_client_id",
",",
"self",
".",
"_client_secret",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"headers",
")",
"if",
"response",
".",
"status_code",
"//",
"100",
"!=",
"2",
":",
"raise",
"GhostException",
"(",
"response",
".",
"status_code",
",",
"response",
".",
"json",
"(",
")",
".",
"get",
"(",
"'errors'",
",",
"[",
"]",
")",
")",
"return",
"response",
".",
"json",
"(",
")"
] | Execute an HTTP GET request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional query parameters (and optionally headers)
:return: The HTTP response as JSON or `GhostException` if unsuccessful | [
"Execute",
"an",
"HTTP",
"GET",
"request",
"against",
"the",
"API",
"endpoints",
".",
"This",
"method",
"is",
"meant",
"for",
"internal",
"use",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L346-L389 | train |
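A sketch of how execute_get() turns keyword arguments into query parameters; the 'posts/' resource name is illustrative:

# Iterable values are comma-joined, scalars become plain key=value pairs;
# without an access token, client_id/client_secret are appended instead of
# the Authorization header.
data = client.execute_get('posts/', limit=5, include=('tags', 'author'))
# -> GET {base_url}/posts/?limit=5&include=tags,author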
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_post | def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json() | python | def execute_post(self, resource, **kwargs):
"""
Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.post, **kwargs).json() | [
"def",
"execute_post",
"(",
"self",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_request",
"(",
"resource",
",",
"requests",
".",
"post",
",",
"*",
"*",
"kwargs",
")",
".",
"json",
"(",
")"
] | Execute an HTTP POST request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful | [
"Execute",
"an",
"HTTP",
"POST",
"request",
"against",
"the",
"API",
"endpoints",
".",
"This",
"method",
"is",
"meant",
"for",
"internal",
"use",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L391-L401 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_put | def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json() | python | def execute_put(self, resource, **kwargs):
"""
Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful
"""
return self._request(resource, requests.put, **kwargs).json() | [
"def",
"execute_put",
"(",
"self",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_request",
"(",
"resource",
",",
"requests",
".",
"put",
",",
"*",
"*",
"kwargs",
")",
".",
"json",
"(",
")"
] | Execute an HTTP PUT request against the API endpoints.
This method is meant for internal use.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
:return: The HTTP response as JSON or `GhostException` if unsuccessful | [
"Execute",
"an",
"HTTP",
"PUT",
"request",
"against",
"the",
"API",
"endpoints",
".",
"This",
"method",
"is",
"meant",
"for",
"internal",
"use",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L403-L413 | train |
rycus86/ghost-client | ghost_client/api.py | Ghost.execute_delete | def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
        Does not return anything but raises an exception when it fails.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs) | python | def execute_delete(self, resource, **kwargs):
"""
Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
        Does not return anything but raises an exception when it fails.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library)
"""
self._request(resource, requests.delete, **kwargs) | [
"def",
"execute_delete",
"(",
"self",
",",
"resource",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_request",
"(",
"resource",
",",
"requests",
".",
"delete",
",",
"*",
"*",
"kwargs",
")"
] | Execute an HTTP DELETE request against the API endpoints.
This method is meant for internal use.
Does not return anything but raises an exception when it fails.
:param resource: The last part of the URI
:param kwargs: Additional parameters for the HTTP call (`request` library) | [
"Execute",
"an",
"HTTP",
"DELETE",
"request",
"against",
"the",
"API",
"endpoints",
".",
"This",
"method",
"is",
"meant",
"for",
"internal",
"use",
".",
"Does",
"not",
"return",
"anything",
"but",
"raises",
"an",
"exception",
"when",
"failed",
"."
] | 863d332801d2c1b8e7ad4573c7b16db78a7f8c8d | https://github.com/rycus86/ghost-client/blob/863d332801d2c1b8e7ad4573c7b16db78a7f8c8d/ghost_client/api.py#L415-L425 | train |
inveniosoftware-contrib/json-merger | json_merger/contrib/inspirehep/author_util.py | token_distance | def token_distance(t1, t2, initial_match_penalization):
"""Calculates the edit distance between two tokens."""
if isinstance(t1, NameInitial) or isinstance(t2, NameInitial):
if t1.token == t2.token:
return 0
if t1 == t2:
return initial_match_penalization
return 1.0
return _normalized_edit_dist(t1.token, t2.token) | python | def token_distance(t1, t2, initial_match_penalization):
"""Calculates the edit distance between two tokens."""
if isinstance(t1, NameInitial) or isinstance(t2, NameInitial):
if t1.token == t2.token:
return 0
if t1 == t2:
return initial_match_penalization
return 1.0
return _normalized_edit_dist(t1.token, t2.token) | [
"def",
"token_distance",
"(",
"t1",
",",
"t2",
",",
"initial_match_penalization",
")",
":",
"if",
"isinstance",
"(",
"t1",
",",
"NameInitial",
")",
"or",
"isinstance",
"(",
"t2",
",",
"NameInitial",
")",
":",
"if",
"t1",
".",
"token",
"==",
"t2",
".",
"token",
":",
"return",
"0",
"if",
"t1",
"==",
"t2",
":",
"return",
"initial_match_penalization",
"return",
"1.0",
"return",
"_normalized_edit_dist",
"(",
"t1",
".",
"token",
",",
"t2",
".",
"token",
")"
] | Calculates the edit distance between two tokens. | [
"Calculates",
"the",
"edit",
"distance",
"between",
"two",
"tokens",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/contrib/inspirehep/author_util.py#L60-L68 | train |
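An illustration of the three branches of token_distance(), assuming the NameToken/NameInitial classes defined in this module:

# With an initial-match penalization of 0.3:
token_distance(NameToken('johnson'), NameToken('jonson'), 0.3)
# -> normalized edit distance between the two full tokens
token_distance(NameInitial('j'), NameToken('johnson'), 0.3)
# -> 0.0 on exact token equality, 0.3 when the initial matches (t1 == t2),
#    and 1.0 otherwise, per the branches above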
inveniosoftware-contrib/json-merger | json_merger/contrib/inspirehep/author_util.py | simple_tokenize | def simple_tokenize(name):
"""Simple tokenizer function to be used with the normalizers."""
last_names, first_names = name.split(',')
last_names = _RE_NAME_TOKEN_SEPARATOR.split(last_names)
first_names = _RE_NAME_TOKEN_SEPARATOR.split(first_names)
first_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
for n in first_names if n]
last_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
for n in last_names if n]
return {'lastnames': last_names,
'nonlastnames': first_names} | python | def simple_tokenize(name):
"""Simple tokenizer function to be used with the normalizers."""
last_names, first_names = name.split(',')
last_names = _RE_NAME_TOKEN_SEPARATOR.split(last_names)
first_names = _RE_NAME_TOKEN_SEPARATOR.split(first_names)
first_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
for n in first_names if n]
last_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
for n in last_names if n]
return {'lastnames': last_names,
'nonlastnames': first_names} | [
"def",
"simple_tokenize",
"(",
"name",
")",
":",
"last_names",
",",
"first_names",
"=",
"name",
".",
"split",
"(",
"','",
")",
"last_names",
"=",
"_RE_NAME_TOKEN_SEPARATOR",
".",
"split",
"(",
"last_names",
")",
"first_names",
"=",
"_RE_NAME_TOKEN_SEPARATOR",
".",
"split",
"(",
"first_names",
")",
"first_names",
"=",
"[",
"NameToken",
"(",
"n",
")",
"if",
"len",
"(",
"n",
")",
">",
"1",
"else",
"NameInitial",
"(",
"n",
")",
"for",
"n",
"in",
"first_names",
"if",
"n",
"]",
"last_names",
"=",
"[",
"NameToken",
"(",
"n",
")",
"if",
"len",
"(",
"n",
")",
">",
"1",
"else",
"NameInitial",
"(",
"n",
")",
"for",
"n",
"in",
"last_names",
"if",
"n",
"]",
"return",
"{",
"'lastnames'",
":",
"last_names",
",",
"'nonlastnames'",
":",
"first_names",
"}"
] | Simple tokenizer function to be used with the normalizers. | [
"Simple",
"tokenizer",
"function",
"to",
"be",
"used",
"with",
"the",
"normalizers",
"."
] | adc6d372da018427e1db7b92424d3471e01a4118 | https://github.com/inveniosoftware-contrib/json-merger/blob/adc6d372da018427e1db7b92424d3471e01a4118/json_merger/contrib/inspirehep/author_util.py#L71-L82 | train |
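An illustration of the "Last, First" convention simple_tokenize() expects; the exact token boundaries depend on _RE_NAME_TOKEN_SEPARATOR, which is assumed here to split on whitespace:

parts = simple_tokenize('Smith, J Robert')
# parts['lastnames']    -> [NameToken('Smith')]
# parts['nonlastnames'] -> [NameInitial('J'), NameToken('Robert')]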
ldomic/lintools | lintools/ligand_description.py | LigDescr.calculate_descriptors | def calculate_descriptors(self,mol):
"""Calculates descriptors such as logP, charges and MR and saves that in a dictionary."""
#make dictionary
self.ligand_atoms = {index:{"name":x.name} for index,x in enumerate(self.topology_data.universe.ligand_noH.atoms)}
#Calculate logP and MR
contribs = self.calculate_logP(mol)
#Calculate Gasteiger charges
self.calculate_Gasteiger_charges(mol)
#Calculate formal charges
fcharges = self.calculate_formal_charge(mol)
for atom in self.ligand_atoms.keys():
self.ligand_atoms[atom]["logP"]=contribs[atom][0]
self.ligand_atoms[atom]["MR"]=contribs[atom][1]
self.ligand_atoms[atom]["Gasteiger_ch"]=mol.GetAtomWithIdx(atom).GetProp("_GasteigerCharge")
self.ligand_atoms[atom]["Formal charges"]=fcharges[atom]
#Determine rotatable bonds
self.rot_bonds=self.get_rotatable_bonds(mol) | python | def calculate_descriptors(self,mol):
"""Calculates descriptors such as logP, charges and MR and saves that in a dictionary."""
#make dictionary
self.ligand_atoms = {index:{"name":x.name} for index,x in enumerate(self.topology_data.universe.ligand_noH.atoms)}
#Calculate logP and MR
contribs = self.calculate_logP(mol)
#Calculate Gasteiger charges
self.calculate_Gasteiger_charges(mol)
#Calculate formal charges
fcharges = self.calculate_formal_charge(mol)
for atom in self.ligand_atoms.keys():
self.ligand_atoms[atom]["logP"]=contribs[atom][0]
self.ligand_atoms[atom]["MR"]=contribs[atom][1]
self.ligand_atoms[atom]["Gasteiger_ch"]=mol.GetAtomWithIdx(atom).GetProp("_GasteigerCharge")
self.ligand_atoms[atom]["Formal charges"]=fcharges[atom]
#Determine rotatable bonds
self.rot_bonds=self.get_rotatable_bonds(mol) | [
"def",
"calculate_descriptors",
"(",
"self",
",",
"mol",
")",
":",
"#make dictionary",
"self",
".",
"ligand_atoms",
"=",
"{",
"index",
":",
"{",
"\"name\"",
":",
"x",
".",
"name",
"}",
"for",
"index",
",",
"x",
"in",
"enumerate",
"(",
"self",
".",
"topology_data",
".",
"universe",
".",
"ligand_noH",
".",
"atoms",
")",
"}",
"#Calculate logP and MR",
"contribs",
"=",
"self",
".",
"calculate_logP",
"(",
"mol",
")",
"#Calculate Gasteiger charges",
"self",
".",
"calculate_Gasteiger_charges",
"(",
"mol",
")",
"#Calculate formal charges",
"fcharges",
"=",
"self",
".",
"calculate_formal_charge",
"(",
"mol",
")",
"for",
"atom",
"in",
"self",
".",
"ligand_atoms",
".",
"keys",
"(",
")",
":",
"self",
".",
"ligand_atoms",
"[",
"atom",
"]",
"[",
"\"logP\"",
"]",
"=",
"contribs",
"[",
"atom",
"]",
"[",
"0",
"]",
"self",
".",
"ligand_atoms",
"[",
"atom",
"]",
"[",
"\"MR\"",
"]",
"=",
"contribs",
"[",
"atom",
"]",
"[",
"1",
"]",
"self",
".",
"ligand_atoms",
"[",
"atom",
"]",
"[",
"\"Gasteiger_ch\"",
"]",
"=",
"mol",
".",
"GetAtomWithIdx",
"(",
"atom",
")",
".",
"GetProp",
"(",
"\"_GasteigerCharge\"",
")",
"self",
".",
"ligand_atoms",
"[",
"atom",
"]",
"[",
"\"Formal charges\"",
"]",
"=",
"fcharges",
"[",
"atom",
"]",
"#Determine rotatable bonds",
"self",
".",
"rot_bonds",
"=",
"self",
".",
"get_rotatable_bonds",
"(",
"mol",
")"
] | Calculates descriptors such as logP, charges and MR and saves them in a dictionary. | [
"Calculates",
"descriptors",
"such",
"as",
"logP",
"charges",
"and",
"MR",
"and",
"saves",
"that",
"in",
"a",
"dictionary",
"."
] | d825a4a7b35f3f857d3b81b46c9aee72b0ec697a | https://github.com/ldomic/lintools/blob/d825a4a7b35f3f857d3b81b46c9aee72b0ec697a/lintools/ligand_description.py#L23-L44 | train |
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_mixin.py | VariantMixin.variants | def variants(self, case_id, skip=0, count=1000, filters=None):
"""Return all variants in the VCF.
        This function will apply the given filters and return the first 'count'
        variants, after skipping the first 'skip' variants.
Args:
case_id (str): Path to a vcf file (for this adapter)
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
                        gene_ids: [] (list of hgnc ids),
                        frequency: None (float),
                        cadd: None (float),
                        sv_len: None (float),
                        consequence: [] (list of consequences),
                        is_lof: None (Bool),
                        genetic_models: [] (list of genetic models),
                        sv_types: [] (list of sv types),
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants
"""
filters = filters or {}
case_obj = self.case(case_id=case_id)
limit = count + skip
genes = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
frequency = None
if filters.get('frequency'):
frequency = float(filters['frequency'])
cadd = None
if filters.get('cadd'):
cadd = float(filters['cadd'])
genetic_models = None
if filters.get('genetic_models'):
genetic_models = set(filters['genetic_models'])
sv_len = None
if filters.get('sv_len'):
sv_len = float(filters['sv_len'])
impact_severities = None
if filters.get('impact_severities'):
impact_severities = set(filters['impact_severities'])
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
variants = self._get_filtered_variants(vcf_file_path, filters)
result = []
skip_index = 0
for index, variant in enumerate(variants):
index += 1
if skip_index >= skip:
variant_obj = self._format_variants(
variant=variant,
index=index,
case_obj=case_obj,
)
if genes and variant_obj:
if not set(variant_obj['gene_symbols']).intersection(genes):
variant_obj = None
if impact_severities and variant_obj:
if not variant_obj['impact_severity'] in impact_severities:
variant_obj = None
if frequency and variant_obj:
if variant_obj.max_freq > frequency:
variant_obj = None
if cadd and variant_obj:
if variant_obj['cadd_score'] < cadd:
variant_obj = None
if genetic_models and variant_obj:
models = set(variant_obj.genetic_models)
if not models.intersection(genetic_models):
variant_obj = None
if sv_len and variant_obj:
if variant_obj.sv_len < sv_len:
variant_obj = None
if variant_obj:
skip_index += 1
if skip_index <= limit:
result.append(variant_obj)
else:
break
else:
skip_index += 1
return Results(result, len(result)) | python | def variants(self, case_id, skip=0, count=1000, filters=None):
"""Return all variants in the VCF.
        This function will apply the given filters and return the first 'count'
        variants, after skipping the first 'skip' variants.
Args:
case_id (str): Path to a vcf file (for this adapter)
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
                        gene_ids: [] (list of hgnc ids),
                        frequency: None (float),
                        cadd: None (float),
                        sv_len: None (float),
                        consequence: [] (list of consequences),
                        is_lof: None (Bool),
                        genetic_models: [] (list of genetic models),
                        sv_types: [] (list of sv types),
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants
"""
filters = filters or {}
case_obj = self.case(case_id=case_id)
limit = count + skip
genes = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
frequency = None
if filters.get('frequency'):
frequency = float(filters['frequency'])
cadd = None
if filters.get('cadd'):
cadd = float(filters['cadd'])
genetic_models = None
if filters.get('genetic_models'):
genetic_models = set(filters['genetic_models'])
sv_len = None
if filters.get('sv_len'):
sv_len = float(filters['sv_len'])
impact_severities = None
if filters.get('impact_severities'):
impact_severities = set(filters['impact_severities'])
vcf_file_path = case_obj.variant_source
self.head = get_header(vcf_file_path)
self.vep_header = self.head.vep_columns
self.snpeff_header = self.head.snpeff_columns
variants = self._get_filtered_variants(vcf_file_path, filters)
result = []
skip_index = 0
for index, variant in enumerate(variants):
index += 1
if skip_index >= skip:
variant_obj = self._format_variants(
variant=variant,
index=index,
case_obj=case_obj,
)
if genes and variant_obj:
if not set(variant_obj['gene_symbols']).intersection(genes):
variant_obj = None
if impact_severities and variant_obj:
if not variant_obj['impact_severity'] in impact_severities:
variant_obj = None
if frequency and variant_obj:
if variant_obj.max_freq > frequency:
variant_obj = None
if cadd and variant_obj:
if variant_obj['cadd_score'] < cadd:
variant_obj = None
if genetic_models and variant_obj:
models = set(variant_obj.genetic_models)
if not models.intersection(genetic_models):
variant_obj = None
if sv_len and variant_obj:
if variant_obj.sv_len < sv_len:
variant_obj = None
if variant_obj:
skip_index += 1
if skip_index <= limit:
result.append(variant_obj)
else:
break
else:
skip_index += 1
return Results(result, len(result)) | [
"def",
"variants",
"(",
"self",
",",
"case_id",
",",
"skip",
"=",
"0",
",",
"count",
"=",
"1000",
",",
"filters",
"=",
"None",
")",
":",
"filters",
"=",
"filters",
"or",
"{",
"}",
"case_obj",
"=",
"self",
".",
"case",
"(",
"case_id",
"=",
"case_id",
")",
"limit",
"=",
"count",
"+",
"skip",
"genes",
"=",
"set",
"(",
")",
"if",
"filters",
".",
"get",
"(",
"'gene_ids'",
")",
":",
"genes",
"=",
"set",
"(",
"[",
"gene_id",
".",
"strip",
"(",
")",
"for",
"gene_id",
"in",
"filters",
"[",
"'gene_ids'",
"]",
"]",
")",
"frequency",
"=",
"None",
"if",
"filters",
".",
"get",
"(",
"'frequency'",
")",
":",
"frequency",
"=",
"float",
"(",
"filters",
"[",
"'frequency'",
"]",
")",
"cadd",
"=",
"None",
"if",
"filters",
".",
"get",
"(",
"'cadd'",
")",
":",
"cadd",
"=",
"float",
"(",
"filters",
"[",
"'cadd'",
"]",
")",
"genetic_models",
"=",
"None",
"if",
"filters",
".",
"get",
"(",
"'genetic_models'",
")",
":",
"genetic_models",
"=",
"set",
"(",
"filters",
"[",
"'genetic_models'",
"]",
")",
"sv_len",
"=",
"None",
"if",
"filters",
".",
"get",
"(",
"'sv_len'",
")",
":",
"sv_len",
"=",
"float",
"(",
"filters",
"[",
"'sv_len'",
"]",
")",
"impact_severities",
"=",
"None",
"if",
"filters",
".",
"get",
"(",
"'impact_severities'",
")",
":",
"impact_severities",
"=",
"set",
"(",
"filters",
"[",
"'impact_severities'",
"]",
")",
"vcf_file_path",
"=",
"case_obj",
".",
"variant_source",
"self",
".",
"head",
"=",
"get_header",
"(",
"vcf_file_path",
")",
"self",
".",
"vep_header",
"=",
"self",
".",
"head",
".",
"vep_columns",
"self",
".",
"snpeff_header",
"=",
"self",
".",
"head",
".",
"snpeff_columns",
"variants",
"=",
"self",
".",
"_get_filtered_variants",
"(",
"vcf_file_path",
",",
"filters",
")",
"result",
"=",
"[",
"]",
"skip_index",
"=",
"0",
"for",
"index",
",",
"variant",
"in",
"enumerate",
"(",
"variants",
")",
":",
"index",
"+=",
"1",
"if",
"skip_index",
">=",
"skip",
":",
"variant_obj",
"=",
"self",
".",
"_format_variants",
"(",
"variant",
"=",
"variant",
",",
"index",
"=",
"index",
",",
"case_obj",
"=",
"case_obj",
",",
")",
"if",
"genes",
"and",
"variant_obj",
":",
"if",
"not",
"set",
"(",
"variant_obj",
"[",
"'gene_symbols'",
"]",
")",
".",
"intersection",
"(",
"genes",
")",
":",
"variant_obj",
"=",
"None",
"if",
"impact_severities",
"and",
"variant_obj",
":",
"if",
"not",
"variant_obj",
"[",
"'impact_severity'",
"]",
"in",
"impact_severities",
":",
"variant_obj",
"=",
"None",
"if",
"frequency",
"and",
"variant_obj",
":",
"if",
"variant_obj",
".",
"max_freq",
">",
"frequency",
":",
"variant_obj",
"=",
"None",
"if",
"cadd",
"and",
"variant_obj",
":",
"if",
"variant_obj",
"[",
"'cadd_score'",
"]",
"<",
"cadd",
":",
"variant_obj",
"=",
"None",
"if",
"genetic_models",
"and",
"variant_obj",
":",
"models",
"=",
"set",
"(",
"variant_obj",
".",
"genetic_models",
")",
"if",
"not",
"models",
".",
"intersection",
"(",
"genetic_models",
")",
":",
"variant_obj",
"=",
"None",
"if",
"sv_len",
"and",
"variant_obj",
":",
"if",
"variant_obj",
".",
"sv_len",
"<",
"sv_len",
":",
"variant_obj",
"=",
"None",
"if",
"variant_obj",
":",
"skip_index",
"+=",
"1",
"if",
"skip_index",
"<=",
"limit",
":",
"result",
".",
"append",
"(",
"variant_obj",
")",
"else",
":",
"break",
"else",
":",
"skip_index",
"+=",
"1",
"return",
"Results",
"(",
"result",
",",
"len",
"(",
"result",
")",
")"
] | Return all variants in the VCF.
This function will apply the given filters and return the first 'count'
variants, after skipping the first 'skip' variants.
Args:
case_id (str): Path to a vcf file (for this adapter)
skip (int): Skip first variants
count (int): The number of variants to return
filters (dict): A dictionary with filters. Currently this will
look like: {
gene_ids: [] (list of hgnc ids),
frequency: None (float),
cadd: None (float),
sv_len: None (float),
consequence: [] (list of consequences),
is_lof: None (Bool),
genetic_models: [] (list of genetic models),
sv_types: [] (list of sv types),
}
Returns:
puzzle.constants.Results : Named tuple with variants and
nr_of_variants | [
"Return",
"all",
"variants",
"in",
"the",
"VCF",
"."
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_mixin.py#L54-L164 | train |
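A hedged sketch of calling variants() with the documented filters; thresholds and gene symbols are placeholders, and the Results named tuple is assumed to expose a nr_of_variants field:

# Sketch only: 'plugin' is an initialized vcf adapter instance.
filters = {
    'gene_ids': ['ADK', 'BRCA1'],
    'frequency': 0.01,              # keep variants with max_freq <= 0.01
    'cadd': 15,                     # keep variants with cadd_score >= 15
    'impact_severities': ['HIGH'],
}
result = plugin.variants('path/to/family.vcf', skip=0, count=100, filters=filters)
print(result.nr_of_variants)        # assumed field name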
robinandeer/puzzle | puzzle/plugins/vcf/mixins/variant_mixin.py | VariantMixin._get_filtered_variants | def _get_filtered_variants(self, vcf_file_path, filters={}):
"""Check if variants follows the filters
This function will try to make filters faster for the vcf adapter
Args:
vcf_file_path(str): Path to vcf
filters (dict): A dictionary with filters
Yields:
varian_line (str): A vcf variant line
"""
genes = set()
consequences = set()
sv_types = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
if filters.get('consequence'):
consequences = set(filters['consequence'])
if filters.get('sv_types'):
sv_types = set(filters['sv_types'])
logger.info("Get variants from {0}".format(vcf_file_path))
if filters.get('range'):
range_str = "{0}:{1}-{2}".format(
filters['range']['chromosome'],
filters['range']['start'],
filters['range']['end'])
vcf = VCF(vcf_file_path)
handle = vcf(range_str)
else:
handle = VCF(vcf_file_path)
for variant in handle:
variant_line = str(variant)
keep_variant = True
if genes and keep_variant:
keep_variant = False
for gene in genes:
if "{0}".format(gene) in variant_line:
keep_variant = True
break
if consequences and keep_variant:
keep_variant = False
for consequence in consequences:
if consequence in variant_line:
keep_variant = True
break
if sv_types and keep_variant:
keep_variant = False
for sv_type in sv_types:
if sv_type in variant_line:
keep_variant = True
break
if keep_variant:
yield variant | python | def _get_filtered_variants(self, vcf_file_path, filters={}):
"""Check if variants follows the filters
This function will try to make filters faster for the vcf adapter
Args:
vcf_file_path(str): Path to vcf
filters (dict): A dictionary with filters
Yields:
varian_line (str): A vcf variant line
"""
genes = set()
consequences = set()
sv_types = set()
if filters.get('gene_ids'):
genes = set([gene_id.strip() for gene_id in filters['gene_ids']])
if filters.get('consequence'):
consequences = set(filters['consequence'])
if filters.get('sv_types'):
sv_types = set(filters['sv_types'])
logger.info("Get variants from {0}".format(vcf_file_path))
if filters.get('range'):
range_str = "{0}:{1}-{2}".format(
filters['range']['chromosome'],
filters['range']['start'],
filters['range']['end'])
vcf = VCF(vcf_file_path)
handle = vcf(range_str)
else:
handle = VCF(vcf_file_path)
for variant in handle:
variant_line = str(variant)
keep_variant = True
if genes and keep_variant:
keep_variant = False
for gene in genes:
if "{0}".format(gene) in variant_line:
keep_variant = True
break
if consequences and keep_variant:
keep_variant = False
for consequence in consequences:
if consequence in variant_line:
keep_variant = True
break
if sv_types and keep_variant:
keep_variant = False
for sv_type in sv_types:
if sv_type in variant_line:
keep_variant = True
break
if keep_variant:
yield variant | [
"def",
"_get_filtered_variants",
"(",
"self",
",",
"vcf_file_path",
",",
"filters",
"=",
"{",
"}",
")",
":",
"genes",
"=",
"set",
"(",
")",
"consequences",
"=",
"set",
"(",
")",
"sv_types",
"=",
"set",
"(",
")",
"if",
"filters",
".",
"get",
"(",
"'gene_ids'",
")",
":",
"genes",
"=",
"set",
"(",
"[",
"gene_id",
".",
"strip",
"(",
")",
"for",
"gene_id",
"in",
"filters",
"[",
"'gene_ids'",
"]",
"]",
")",
"if",
"filters",
".",
"get",
"(",
"'consequence'",
")",
":",
"consequences",
"=",
"set",
"(",
"filters",
"[",
"'consequence'",
"]",
")",
"if",
"filters",
".",
"get",
"(",
"'sv_types'",
")",
":",
"sv_types",
"=",
"set",
"(",
"filters",
"[",
"'sv_types'",
"]",
")",
"logger",
".",
"info",
"(",
"\"Get variants from {0}\"",
".",
"format",
"(",
"vcf_file_path",
")",
")",
"if",
"filters",
".",
"get",
"(",
"'range'",
")",
":",
"range_str",
"=",
"\"{0}:{1}-{2}\"",
".",
"format",
"(",
"filters",
"[",
"'range'",
"]",
"[",
"'chromosome'",
"]",
",",
"filters",
"[",
"'range'",
"]",
"[",
"'start'",
"]",
",",
"filters",
"[",
"'range'",
"]",
"[",
"'end'",
"]",
")",
"vcf",
"=",
"VCF",
"(",
"vcf_file_path",
")",
"handle",
"=",
"vcf",
"(",
"range_str",
")",
"else",
":",
"handle",
"=",
"VCF",
"(",
"vcf_file_path",
")",
"for",
"variant",
"in",
"handle",
":",
"variant_line",
"=",
"str",
"(",
"variant",
")",
"keep_variant",
"=",
"True",
"if",
"genes",
"and",
"keep_variant",
":",
"keep_variant",
"=",
"False",
"for",
"gene",
"in",
"genes",
":",
"if",
"\"{0}\"",
".",
"format",
"(",
"gene",
")",
"in",
"variant_line",
":",
"keep_variant",
"=",
"True",
"break",
"if",
"consequences",
"and",
"keep_variant",
":",
"keep_variant",
"=",
"False",
"for",
"consequence",
"in",
"consequences",
":",
"if",
"consequence",
"in",
"variant_line",
":",
"keep_variant",
"=",
"True",
"break",
"if",
"sv_types",
"and",
"keep_variant",
":",
"keep_variant",
"=",
"False",
"for",
"sv_type",
"in",
"sv_types",
":",
"if",
"sv_type",
"in",
"variant_line",
":",
"keep_variant",
"=",
"True",
"break",
"if",
"keep_variant",
":",
"yield",
"variant"
] | Check if variants follow the filters
This function uses fast substring matching on raw variant lines to speed up filtering for the vcf adapter
Args:
vcf_file_path(str): Path to vcf
filters (dict): A dictionary with filters
Yields:
variant: a variant object that passes the filters | [
"Check",
"if",
"variants",
"follows",
"the",
"filters"
] | 9476f05b416d3a5135d25492cb31411fdf831c58 | https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_mixin.py#L166-L231 | train |
znerol/py-fnvhash | fnvhash/__init__.py | fnv | def fnv(data, hval_init, fnv_prime, fnv_size):
"""
Core FNV hash algorithm used in FNV0 and FNV1.
"""
assert isinstance(data, bytes)
hval = hval_init
for byte in data:
hval = (hval * fnv_prime) % fnv_size
hval = hval ^ _get_byte(byte)
return hval | python | def fnv(data, hval_init, fnv_prime, fnv_size):
"""
Core FNV hash algorithm used in FNV0 and FNV1.
"""
assert isinstance(data, bytes)
hval = hval_init
for byte in data:
hval = (hval * fnv_prime) % fnv_size
hval = hval ^ _get_byte(byte)
return hval | [
"def",
"fnv",
"(",
"data",
",",
"hval_init",
",",
"fnv_prime",
",",
"fnv_size",
")",
":",
"assert",
"isinstance",
"(",
"data",
",",
"bytes",
")",
"hval",
"=",
"hval_init",
"for",
"byte",
"in",
"data",
":",
"hval",
"=",
"(",
"hval",
"*",
"fnv_prime",
")",
"%",
"fnv_size",
"hval",
"=",
"hval",
"^",
"_get_byte",
"(",
"byte",
")",
"return",
"hval"
] | Core FNV hash algorithm used in FNV0 and FNV1. | [
"Core",
"FNV",
"hash",
"algorithm",
"used",
"in",
"FNV0",
"and",
"FNV1",
"."
] | ea6d6993e1082dee2ca3b9aba7a7eb2b7ab6a52a | https://github.com/znerol/py-fnvhash/blob/ea6d6993e1082dee2ca3b9aba7a7eb2b7ab6a52a/fnvhash/__init__.py#L26-L36 | train |
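The core routine above parameterized with the standard 32-bit FNV-1 constants (the published FNV prime and offset basis); the sample digests follow directly from the algorithm:

FNV_32_PRIME = 0x01000193
FNV1_32_INIT = 0x811c9dc5

fnv(b'', FNV1_32_INIT, FNV_32_PRIME, 2 ** 32)   # -> 0x811c9dc5 (no bytes, returns the basis)
fnv(b'a', FNV1_32_INIT, FNV_32_PRIME, 2 ** 32)  # -> 0x050c5d7e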
eleme/meepo | meepo/apps/eventsourcing/pub.py | sqlalchemy_es_pub.session_prepare | def session_prepare(self, session, _):
"""Send session_prepare signal in session "before_commit".
        The signal carries an additional ``event`` argument, which records the
        full set of changes made in this session, so the signal receiver can
        pick them up and record the event.
"""
if not hasattr(session, 'meepo_unique_id'):
self._session_init(session)
evt = collections.defaultdict(set)
for action in ("write", "update", "delete"):
objs = getattr(session, "pending_%s" % action)
# filter tables if possible
if self.tables:
objs = [o for o in objs
if o.__table__.fullname in self.tables]
for obj in objs:
evt_name = "%s_%s" % (obj.__table__.fullname, action)
evt[evt_name].add(obj)
self.logger.debug("%s - session_prepare: %s -> %s" % (
session.meepo_unique_id, evt_name, evt))
# only trigger signal when event exists
if evt:
signal("session_prepare").send(session, event=evt) | python | def session_prepare(self, session, _):
"""Send session_prepare signal in session "before_commit".
        The signal carries an additional ``event`` argument, which records the
        full set of changes made in this session, so the signal receiver can
        pick them up and record the event.
"""
if not hasattr(session, 'meepo_unique_id'):
self._session_init(session)
evt = collections.defaultdict(set)
for action in ("write", "update", "delete"):
objs = getattr(session, "pending_%s" % action)
# filter tables if possible
if self.tables:
objs = [o for o in objs
if o.__table__.fullname in self.tables]
for obj in objs:
evt_name = "%s_%s" % (obj.__table__.fullname, action)
evt[evt_name].add(obj)
self.logger.debug("%s - session_prepare: %s -> %s" % (
session.meepo_unique_id, evt_name, evt))
# only trigger signal when event exists
if evt:
signal("session_prepare").send(session, event=evt) | [
"def",
"session_prepare",
"(",
"self",
",",
"session",
",",
"_",
")",
":",
"if",
"not",
"hasattr",
"(",
"session",
",",
"'meepo_unique_id'",
")",
":",
"self",
".",
"_session_init",
"(",
"session",
")",
"evt",
"=",
"collections",
".",
"defaultdict",
"(",
"set",
")",
"for",
"action",
"in",
"(",
"\"write\"",
",",
"\"update\"",
",",
"\"delete\"",
")",
":",
"objs",
"=",
"getattr",
"(",
"session",
",",
"\"pending_%s\"",
"%",
"action",
")",
"# filter tables if possible",
"if",
"self",
".",
"tables",
":",
"objs",
"=",
"[",
"o",
"for",
"o",
"in",
"objs",
"if",
"o",
".",
"__table__",
".",
"fullname",
"in",
"self",
".",
"tables",
"]",
"for",
"obj",
"in",
"objs",
":",
"evt_name",
"=",
"\"%s_%s\"",
"%",
"(",
"obj",
".",
"__table__",
".",
"fullname",
",",
"action",
")",
"evt",
"[",
"evt_name",
"]",
".",
"add",
"(",
"obj",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s - session_prepare: %s -> %s\"",
"%",
"(",
"session",
".",
"meepo_unique_id",
",",
"evt_name",
",",
"evt",
")",
")",
"# only trigger signal when event exists",
"if",
"evt",
":",
"signal",
"(",
"\"session_prepare\"",
")",
".",
"send",
"(",
"session",
",",
"event",
"=",
"evt",
")"
] | Send session_prepare signal in session "before_commit".
The signal carries an additional ``event`` argument, which records the
full set of changes made in this session, so the signal receiver can
pick them up and record the event. | [
"Send",
"session_prepare",
"signal",
"in",
"session",
"before_commit",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/pub.py#L75-L100 | train |
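A sketch of subscribing to the prepare-phase signal; meepo publishes through blinker, and the receiver signature mirrors the send(session, event=evt) call above:

from blinker import signal

def on_session_prepare(session, event):
    # event maps "<table>_<write|update|delete>" -> set of pending ORM objects
    for evt_name, objs in event.items():
        print(session.meepo_unique_id, evt_name, len(objs))

signal("session_prepare").connect(on_session_prepare)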
eleme/meepo | meepo/apps/eventsourcing/pub.py | sqlalchemy_es_pub.session_commit | def session_commit(self, session):
"""Send session_commit signal in sqlalchemy ``before_commit``.
This marks the success of session so the session may enter commit
state.
"""
# this may happen when there's nothing to commit
if not hasattr(session, 'meepo_unique_id'):
self.logger.debug("skipped - session_commit")
return
# normal session pub
self.logger.debug("%s - session_commit" % session.meepo_unique_id)
self._session_pub(session)
signal("session_commit").send(session)
self._session_del(session) | python | def session_commit(self, session):
"""Send session_commit signal in sqlalchemy ``before_commit``.
        This marks the success of the session, so it may enter the commit
        state.
"""
# this may happen when there's nothing to commit
if not hasattr(session, 'meepo_unique_id'):
self.logger.debug("skipped - session_commit")
return
# normal session pub
self.logger.debug("%s - session_commit" % session.meepo_unique_id)
self._session_pub(session)
signal("session_commit").send(session)
self._session_del(session) | [
"def",
"session_commit",
"(",
"self",
",",
"session",
")",
":",
"# this may happen when there's nothing to commit",
"if",
"not",
"hasattr",
"(",
"session",
",",
"'meepo_unique_id'",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"skipped - session_commit\"",
")",
"return",
"# normal session pub",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s - session_commit\"",
"%",
"session",
".",
"meepo_unique_id",
")",
"self",
".",
"_session_pub",
"(",
"session",
")",
"signal",
"(",
"\"session_commit\"",
")",
".",
"send",
"(",
"session",
")",
"self",
".",
"_session_del",
"(",
"session",
")"
] | Send session_commit signal in sqlalchemy ``before_commit``.
This marks the success of the session, so it may enter the commit
state. | [
"Send",
"session_commit",
"signal",
"in",
"sqlalchemy",
"before_commit",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/pub.py#L102-L117 | train |
eleme/meepo | meepo/apps/eventsourcing/pub.py | sqlalchemy_es_pub.session_rollback | def session_rollback(self, session):
"""Send session_rollback signal in sqlalchemy ``after_rollback``.
        This marks the failure of the session, so it will not proceed to the
        commit phase.
"""
# this may happen when there's nothing to rollback
if not hasattr(session, 'meepo_unique_id'):
self.logger.debug("skipped - session_rollback")
return
# del session meepo id after rollback
self.logger.debug("%s - after_rollback" % session.meepo_unique_id)
signal("session_rollback").send(session)
self._session_del(session) | python | def session_rollback(self, session):
"""Send session_rollback signal in sqlalchemy ``after_rollback``.
        This marks the failure of the session, so it will not proceed to the
        commit phase.
"""
# this may happen when there's nothing to rollback
if not hasattr(session, 'meepo_unique_id'):
self.logger.debug("skipped - session_rollback")
return
# del session meepo id after rollback
self.logger.debug("%s - after_rollback" % session.meepo_unique_id)
signal("session_rollback").send(session)
self._session_del(session) | [
"def",
"session_rollback",
"(",
"self",
",",
"session",
")",
":",
"# this may happen when there's nothing to rollback",
"if",
"not",
"hasattr",
"(",
"session",
",",
"'meepo_unique_id'",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"skipped - session_rollback\"",
")",
"return",
"# del session meepo id after rollback",
"self",
".",
"logger",
".",
"debug",
"(",
"\"%s - after_rollback\"",
"%",
"session",
".",
"meepo_unique_id",
")",
"signal",
"(",
"\"session_rollback\"",
")",
".",
"send",
"(",
"session",
")",
"self",
".",
"_session_del",
"(",
"session",
")"
] | Send session_rollback signal in sqlalchemy ``after_rollback``.
This marks the failure of the session, so it will not proceed to the
commit phase. | [
"Send",
"session_rollback",
"signal",
"in",
"sqlalchemy",
"after_rollback",
"."
] | 8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a | https://github.com/eleme/meepo/blob/8212f0fe9b1d44be0c5de72d221a31c1d24bfe7a/meepo/apps/eventsourcing/pub.py#L119-L133 | train |
jam31118/vis | vis/plot.py | process_fig_and_ax_argument | def process_fig_and_ax_argument(fig, ax, default_figsize=None):
"""Process 'fig' and 'ax' arguments.
'fig' is of type: 'matplotlib.figure.Figure' (or its child object)
'ax' is of type: 'matplotlib.axes._base._AxesBase' (or its child object)
'fig' and 'ax' should be simultaneously None or of respective proper type.
"""
if default_figsize is not None:
assert type(default_figsize) in [tuple, list]
assert len(default_figsize) == 2
if (fig is None) and (ax is None):
fig, ax = plt.subplots(figsize=default_figsize)
else:
assert (is_figure(fig)) and (is_axes(ax))
return fig, ax | python | def process_fig_and_ax_argument(fig, ax, default_figsize=None):
"""Process 'fig' and 'ax' arguments.
'fig' is of type: 'matplotlib.figure.Figure' (or its child object)
'ax' is of type: 'matplotlib.axes._base._AxesBase' (or its child object)
'fig' and 'ax' should be simultaneously None or of respective proper type.
"""
if default_figsize is not None:
assert type(default_figsize) in [tuple, list]
assert len(default_figsize) == 2
if (fig is None) and (ax is None):
fig, ax = plt.subplots(figsize=default_figsize)
else:
assert (is_figure(fig)) and (is_axes(ax))
return fig, ax | [
"def",
"process_fig_and_ax_argument",
"(",
"fig",
",",
"ax",
",",
"default_figsize",
"=",
"None",
")",
":",
"if",
"default_figsize",
"is",
"not",
"None",
":",
"assert",
"type",
"(",
"default_figsize",
")",
"in",
"[",
"tuple",
",",
"list",
"]",
"assert",
"len",
"(",
"default_figsize",
")",
"==",
"2",
"if",
"(",
"fig",
"is",
"None",
")",
"and",
"(",
"ax",
"is",
"None",
")",
":",
"fig",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"figsize",
"=",
"default_figsize",
")",
"else",
":",
"assert",
"(",
"is_figure",
"(",
"fig",
")",
")",
"and",
"(",
"is_axes",
"(",
"ax",
")",
")",
"return",
"fig",
",",
"ax"
] | Process 'fig' and 'ax' arguments.
'fig' is of type: 'matplotlib.figure.Figure' (or its child object)
'ax' is of type: 'matplotlib.axes._base._AxesBase' (or its child object)
'fig' and 'ax' should be simultaneously None or of respective proper type. | [
"Process",
"fig",
"and",
"ax",
"arguments",
"."
] | 965ebec102c539b323d5756fef04153ac71e50d9 | https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/plot.py#L123-L139 | train |
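A brief usage sketch for the helper above, assuming it is importable from `vis.plot` as the entry's path suggests; the figsize values and plotted data are arbitrary:

```python
import matplotlib.pyplot as plt
from vis.plot import process_fig_and_ax_argument  # path per the entry above

# Case 1: no figure/axes given -- the helper creates a new pair.
fig, ax = process_fig_and_ax_argument(None, None, default_figsize=(6, 4))

# Case 2: an existing pair is passed through after type validation.
fig2, ax2 = plt.subplots()
fig2, ax2 = process_fig_and_ax_argument(fig2, ax2)

ax.plot([0, 1], [0, 1])
plt.close("all")  # tidy up in a script/test context
```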
jam31118/vis | vis/plot.py | get_square_axes_limits | def get_square_axes_limits(coords, margin=0.05):
"""Return N-dimensional square's limits
## Arguments
# 'coords': list of coordinates of points to be plotted
# 'margin': margin to be added from boundaries of the square.
- 'margin' can be negative if one wants to reduce the square size.
## Example
if 'coords' was given as [x,y,z],
then the resulting square's limits are given by:
(xlim, ylim, zlim)
where,
xlim == (x_mid - max_width, x_mid + max_width)
ylim == (y_mid - max_width, y_mid + max_width)
zlim == (z_mid - max_width, z_mid + max_width)
x_mid = 0.5 * (min(x) + max(x)) (and so on)
max_width = max([x_width, y_width, z_width])
where x_width = 0.5 * (max(x) - min(x)) (and so on)
"""
#coords = [x,y,z]
try: coords = [np.array(coord) for coord in coords]
except: raise Exception("Failed to convert elements of 'coords' into numpy.array")
lims = [(coord.min(), coord.max()) for coord in coords]
mids = [0.5 * (lim[0] + lim[1]) for lim in lims]
widths = [0.5 * (lim[1] - lim[0]) for lim in lims]
max_width = max(widths)
max_width += max_width * margin
ax_lims = tuple((mid - max_width, mid + max_width) for mid in mids)
#xlim, ylim, zlim = ax_lims
return ax_lims | python | def get_square_axes_limits(coords, margin=0.05):
"""Return N-dimensional square's limits
## Arguments
# 'coords': list of coordinates of points to be plotted
# 'margin': margin to be added from boundaries of the square.
- 'margin' can be negative if one wants to reduce the square size.
## Example
if 'coords' was given as [x,y,z],
then the resulting square's limits are given by:
(xlim, ylim, zlim)
where,
xlim == (x_mid - max_width, x_mid + max_width)
ylim == (y_mid - max_width, y_mid + max_width)
zlim == (z_mid - max_width, z_mid + max_width)
x_mid = 0.5 * (min(x) + max(x)) (and so on)
max_width = max([x_width, y_width, z_width])
where x_width = 0.5 * (max(x) - min(x)) (and so on)
"""
#coords = [x,y,z]
try: coords = [np.array(coord) for coord in coords]
except: raise Exception("Failed to convert elements of 'coords' into numpy.array")
lims = [(coord.min(), coord.max()) for coord in coords]
mids = [0.5 * (lim[0] + lim[1]) for lim in lims]
widths = [0.5 * (lim[1] - lim[0]) for lim in lims]
max_width = max(widths)
max_width += max_width * margin
ax_lims = tuple((mid - max_width, mid + max_width) for mid in mids)
#xlim, ylim, zlim = ax_lims
return ax_lims | [
"def",
"get_square_axes_limits",
"(",
"coords",
",",
"margin",
"=",
"0.05",
")",
":",
"#coords = [x,y,z]",
"try",
":",
"coords",
"=",
"[",
"np",
".",
"array",
"(",
"coord",
")",
"for",
"coord",
"in",
"coords",
"]",
"except",
":",
"raise",
"Exception",
"(",
"\"Failed to convert elements of 'coords' into numpy.array\"",
")",
"lims",
"=",
"[",
"(",
"coord",
".",
"min",
"(",
")",
",",
"coord",
".",
"max",
"(",
")",
")",
"for",
"coord",
"in",
"coords",
"]",
"mids",
"=",
"[",
"0.5",
"*",
"(",
"lim",
"[",
"0",
"]",
"+",
"lim",
"[",
"1",
"]",
")",
"for",
"lim",
"in",
"lims",
"]",
"widths",
"=",
"[",
"0.5",
"*",
"(",
"lim",
"[",
"1",
"]",
"-",
"lim",
"[",
"0",
"]",
")",
"for",
"lim",
"in",
"lims",
"]",
"max_width",
"=",
"max",
"(",
"widths",
")",
"max_width",
"+=",
"max_width",
"*",
"margin",
"ax_lims",
"=",
"tuple",
"(",
"(",
"mid",
"-",
"max_width",
",",
"mid",
"+",
"max_width",
")",
"for",
"mid",
"in",
"mids",
")",
"#xlim, ylim, zlim = ax_lims",
"return",
"ax_lims"
] | Return N-dimensional square's limits
## Arguments
# 'coords': list of coordinates of poins to be plotted
# 'margin': margin to be added from boundaries of the square.
- 'margin' can be negative if one wants to reduce the square size.
## Example
if 'coords' was given as [x,y,z],
then the resulting square's limits are given by:
(xlim, ylim, zlim)
where,
xlim == (x_mid - max_width, x_mid + max_width)
ylim == (y_mid - max_width, y_mid + max_width)
zlim == (z_mid - max_width, z_mid + max_width)
x_mid = 0.5 * (min(x) + max(x)) (and so on)
max_width = max([x_width, y_width, z_width])
where x_width = 0.5 * (max(x) - min(x)) (and so on) | [
"Return",
"N",
"-",
"dimensional",
"square",
"s",
"limits"
] | 965ebec102c539b323d5756fef04153ac71e50d9 | https://github.com/jam31118/vis/blob/965ebec102c539b323d5756fef04153ac71e50d9/vis/plot.py#L209-L246 | train |
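A short, hedged example of the limits helper in use; the random sample data is illustrative and the import path is assumed from the entry:

```python
import numpy as np
from vis.plot import get_square_axes_limits  # path per the entry above

rng = np.random.default_rng(0)
x = rng.uniform(-1.0, 2.0, 100)
y = rng.uniform(0.0, 5.0, 100)
z = rng.uniform(-3.0, 3.0, 100)

xlim, ylim, zlim = get_square_axes_limits([x, y, z], margin=0.05)

# All three limit pairs share the same width, so applying them to a 3D
# axes yields a cubic viewing box centered on the data.
widths = [hi - lo for lo, hi in (xlim, ylim, zlim)]
print(widths)
```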